| column | type |
|---|---|
| commit | stringlengths 40-40 |
| old_file | stringlengths 4-118 |
| new_file | stringlengths 4-118 |
| old_contents | stringlengths 0-2.94k |
| new_contents | stringlengths 1-4.43k |
| subject | stringlengths 15-444 |
| message | stringlengths 16-3.45k |
| lang | stringclasses (1 value) |
| license | stringclasses (13 values) |
| repos | stringlengths 5-43.2k |
| prompt | stringlengths 17-4.58k |
| response | stringlengths 1-4.43k |
| prompt_tagged | stringlengths 58-4.62k |
| response_tagged | stringlengths 1-4.43k |
| text | stringlengths 132-7.29k |
| text_tagged | stringlengths 173-7.33k |
886dae7cbf433f2b0d65016164db62883cae0925
|
tests/unit/spec_tests.py
|
tests/unit/spec_tests.py
|
# -*- coding: utf8 -*-
"""
Tests for pika.spec
"""
import unittest
from pika import spec
class BasicPropertiesTests(unittest.TestCase):
def test_equality(self):
a = spec.BasicProperties(content_type='text/plain')
self.assertEqual(a, a)
self.assertNotEqual(a, None)
b = spec.BasicProperties()
self.assertNotEqual(a, b)
b.content_type = 'text/plain'
self.assertEqual(a, b)
a.correlation_id = 'abc123'
self.assertNotEqual(a, b)
b.correlation_id = 'abc123'
self.assertEqual(a, b)
|
Add explicit BasicProperties equality test
|
Add explicit BasicProperties equality test
In Python equality of objects, if not defined otherwise, is based on the
objects hash.
By extending amqp_objects with an __eq__ method we can now test equality
of two BasicProperties objects and can expect True if their internal
__dict__'s are equal.
|
Python
|
bsd-3-clause
|
pika/pika
|
Add explicit BasicProperties equality test
In Python equality of objects, if not defined otherwise, is based on the
objects hash.
By extending amqp_objects with an __eq__ method we can now test equality
of two BasicProperties objects and can expect True if their internal
__dict__'s are equal.
|
# -*- coding: utf8 -*-
"""
Tests for pika.spec
"""
import unittest
from pika import spec
class BasicPropertiesTests(unittest.TestCase):
def test_equality(self):
a = spec.BasicProperties(content_type='text/plain')
self.assertEqual(a, a)
self.assertNotEqual(a, None)
b = spec.BasicProperties()
self.assertNotEqual(a, b)
b.content_type = 'text/plain'
self.assertEqual(a, b)
a.correlation_id = 'abc123'
self.assertNotEqual(a, b)
b.correlation_id = 'abc123'
self.assertEqual(a, b)
|
<commit_before><commit_msg>Add explicit BasicProperties equality test
In Python equality of objects, if not defined otherwise, is based on the
objects hash.
By extending amqp_objects with an __eq__ method we can now test equality
of two BasicProperties objects and can expect True if their internal
__dict__'s are equal.<commit_after>
|
# -*- coding: utf8 -*-
"""
Tests for pika.spec
"""
import unittest
from pika import spec
class BasicPropertiesTests(unittest.TestCase):
def test_equality(self):
a = spec.BasicProperties(content_type='text/plain')
self.assertEqual(a, a)
self.assertNotEqual(a, None)
b = spec.BasicProperties()
self.assertNotEqual(a, b)
b.content_type = 'text/plain'
self.assertEqual(a, b)
a.correlation_id = 'abc123'
self.assertNotEqual(a, b)
b.correlation_id = 'abc123'
self.assertEqual(a, b)
|
Add explicit BasicProperties equality test
In Python equality of objects, if not defined otherwise, is based on the
objects hash.
By extending amqp_objects with an __eq__ method we can now test equality
of two BasicProperties objects and can expect True if their internal
__dict__'s are equal.# -*- coding: utf8 -*-
"""
Tests for pika.spec
"""
import unittest
from pika import spec
class BasicPropertiesTests(unittest.TestCase):
def test_equality(self):
a = spec.BasicProperties(content_type='text/plain')
self.assertEqual(a, a)
self.assertNotEqual(a, None)
b = spec.BasicProperties()
self.assertNotEqual(a, b)
b.content_type = 'text/plain'
self.assertEqual(a, b)
a.correlation_id = 'abc123'
self.assertNotEqual(a, b)
b.correlation_id = 'abc123'
self.assertEqual(a, b)
|
<commit_before><commit_msg>Add explicit BasicProperties equality test
In Python equality of objects, if not defined otherwise, is based on the
objects hash.
By extending amqp_objects with an __eq__ method we can now test equality
of two BasicProperties objects and can expect True if their internal
__dict__'s are equal.<commit_after># -*- coding: utf8 -*-
"""
Tests for pika.spec
"""
import unittest
from pika import spec
class BasicPropertiesTests(unittest.TestCase):
def test_equality(self):
a = spec.BasicProperties(content_type='text/plain')
self.assertEqual(a, a)
self.assertNotEqual(a, None)
b = spec.BasicProperties()
self.assertNotEqual(a, b)
b.content_type = 'text/plain'
self.assertEqual(a, b)
a.correlation_id = 'abc123'
self.assertNotEqual(a, b)
b.correlation_id = 'abc123'
self.assertEqual(a, b)
|
|
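The commit message in the row above describes making equality depend on the objects' internal `__dict__`s: by default Python falls back to identity when comparing two instances, so two distinct `BasicProperties` objects holding the same values would not otherwise compare equal. A minimal sketch of that idea follows; the class and attribute names here are hypothetical and illustrative only, not pika's actual implementation.

```python
class AMQPObject(object):
    """Hypothetical base class sketching dict-based equality."""

    def __eq__(self, other):
        # Equal when the other object is of the same class and holds the
        # same attribute values (i.e. the internal __dict__'s match).
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return NotImplemented

    def __ne__(self, other):
        # Python 2 does not derive __ne__ from __eq__, so define it explicitly.
        result = self.__eq__(other)
        return result if result is NotImplemented else not result


class BasicProperties(AMQPObject):
    """Hypothetical stand-in for an AMQP properties object."""

    def __init__(self, content_type=None, correlation_id=None):
        self.content_type = content_type
        self.correlation_id = correlation_id


# Two separately constructed objects with the same values now compare equal,
# while differing values (or comparison against None) do not.
assert BasicProperties(content_type='text/plain') == BasicProperties(content_type='text/plain')
assert BasicProperties(content_type='text/plain') != BasicProperties()
assert BasicProperties() is not None
```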
27b74424e6a9193636bc533dbf758d40b5dc0d3e
|
lazyblacksmith/utils/crestutils.py
|
lazyblacksmith/utils/crestutils.py
|
# -*- encoding: utf-8 -*-
import pycrest
import config
def get_crest():
""" Return a CREST object initialized """
crest = pycrest.EVE()
crest()
return crest
def get_by_attr(objlist, attr, val):
''' Searches list of dicts for a dict with dict[attr] == val '''
matches = [getattr(obj, attr) == val for obj in objlist]
index = matches.index(True) # find first match, raise ValueError if not found
return objlist[index]
def get_all_items(page):
''' Fetch data from all pages '''
ret = page().items
while hasattr(page(), 'next'):
page = page().next()
ret.extend(page().items)
return ret
|
Add utils functions for the crest api
|
Add utils functions for the crest api
|
Python
|
bsd-3-clause
|
Kyria/LazyBlacksmith,Kyria/LazyBlacksmith,Kyria/LazyBlacksmith,Kyria/LazyBlacksmith
|
Add utils functions for the crest api
|
# -*- encoding: utf-8 -*-
import pycrest
import config
def get_crest():
""" Return a CREST object initialized """
crest = pycrest.EVE()
crest()
return crest
def get_by_attr(objlist, attr, val):
''' Searches list of dicts for a dict with dict[attr] == val '''
matches = [getattr(obj, attr) == val for obj in objlist]
index = matches.index(True) # find first match, raise ValueError if not found
return objlist[index]
def get_all_items(page):
''' Fetch data from all pages '''
ret = page().items
while hasattr(page(), 'next'):
page = page().next()
ret.extend(page().items)
return ret
|
<commit_before><commit_msg>Add utils functions for the crest api<commit_after>
|
# -*- encoding: utf-8 -*-
import pycrest
import config
def get_crest():
""" Return a CREST object initialized """
crest = pycrest.EVE()
crest()
return crest
def get_by_attr(objlist, attr, val):
''' Searches list of dicts for a dict with dict[attr] == val '''
matches = [getattr(obj, attr) == val for obj in objlist]
index = matches.index(True) # find first match, raise ValueError if not found
return objlist[index]
def get_all_items(page):
''' Fetch data from all pages '''
ret = page().items
while hasattr(page(), 'next'):
page = page().next()
ret.extend(page().items)
return ret
|
Add utils functions for the crest api# -*- encoding: utf-8 -*-
import pycrest
import config
def get_crest():
""" Return a CREST object initialized """
crest = pycrest.EVE()
crest()
return crest
def get_by_attr(objlist, attr, val):
''' Searches list of dicts for a dict with dict[attr] == val '''
matches = [getattr(obj, attr) == val for obj in objlist]
index = matches.index(True) # find first match, raise ValueError if not found
return objlist[index]
def get_all_items(page):
''' Fetch data from all pages '''
ret = page().items
while hasattr(page(), 'next'):
page = page().next()
ret.extend(page().items)
return ret
|
<commit_before><commit_msg>Add utils functions for the crest api<commit_after># -*- encoding: utf-8 -*-
import pycrest
import config
def get_crest():
""" Return a CREST object initialized """
crest = pycrest.EVE()
crest()
return crest
def get_by_attr(objlist, attr, val):
''' Searches list of dicts for a dict with dict[attr] == val '''
matches = [getattr(obj, attr) == val for obj in objlist]
index = matches.index(True) # find first match, raise ValueError if not found
return objlist[index]
def get_all_items(page):
''' Fetch data from all pages '''
ret = page().items
while hasattr(page(), 'next'):
page = page().next()
ret.extend(page().items)
return ret
|
|
b0cf9904023c5ee20c5f29b3e88899420405550b
|
examples/puttiff.py
|
examples/puttiff.py
|
# Copyright 2014 Open Connectome Project (http://openconnecto.me)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import numpy as np
import urllib, urllib2
import cStringIO
import sys
import zlib
import libtiff
def main():
parser = argparse.ArgumentParser(description='Post a file as a tiff')
parser.add_argument('baseurl', action="store" )
parser.add_argument('token', action="store" )
parser.add_argument('channel', action="store" )
parser.add_argument('filename', action="store" )
parser.add_argument('resolution', action="store", type=int, default=0 )
parser.add_argument('xoffset', action="store", type=int, default=0 )
parser.add_argument('yoffset', action="store", type=int, default=0)
parser.add_argument('zoffset', action="store", type=int, default=0)
result = parser.parse_args()
url = 'http://%s/ca/%s/%s/tiff/%s/%s/%s/%s/' % ( result.baseurl, result.token, result.channel, result.resolution, result.xoffset, result.yoffset, result.zoffset )
# open the file name as a tiff file
fh = open ( result.filename )
# Get cube in question
try:
f = urllib2.urlopen ( url, fh.read() )
except urllib2.URLError, e:
print "Failed %s. Exception %s." % (url,e)
sys.exit(-1)
if __name__ == "__main__":
main()
|
Migrate this version to new workstation.
|
Migrate this version to new workstation.
|
Python
|
apache-2.0
|
neurodata/ndstore,openconnectome/open-connectome,openconnectome/open-connectome,openconnectome/open-connectome,neurodata/ndstore,openconnectome/open-connectome,neurodata/ndstore,openconnectome/open-connectome,openconnectome/open-connectome,neurodata/ndstore
|
Migrate this version to new workstation.
|
# Copyright 2014 Open Connectome Project (http://openconnecto.me)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import numpy as np
import urllib, urllib2
import cStringIO
import sys
import zlib
import libtiff
def main():
parser = argparse.ArgumentParser(description='Post a file as a tiff')
parser.add_argument('baseurl', action="store" )
parser.add_argument('token', action="store" )
parser.add_argument('channel', action="store" )
parser.add_argument('filename', action="store" )
parser.add_argument('resolution', action="store", type=int, default=0 )
parser.add_argument('xoffset', action="store", type=int, default=0 )
parser.add_argument('yoffset', action="store", type=int, default=0)
parser.add_argument('zoffset', action="store", type=int, default=0)
result = parser.parse_args()
url = 'http://%s/ca/%s/%s/tiff/%s/%s/%s/%s/' % ( result.baseurl, result.token, result.channel, result.resolution, result.xoffset, result.yoffset, result.zoffset )
# open the file name as a tiff file
fh = open ( result.filename )
# Get cube in question
try:
f = urllib2.urlopen ( url, fh.read() )
except urllib2.URLError, e:
print "Failed %s. Exception %s." % (url,e)
sys.exit(-1)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Migrate this version to new workstation.<commit_after>
|
# Copyright 2014 Open Connectome Project (http://openconnecto.me)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import numpy as np
import urllib, urllib2
import cStringIO
import sys
import zlib
import libtiff
def main():
parser = argparse.ArgumentParser(description='Post a file as a tiff')
parser.add_argument('baseurl', action="store" )
parser.add_argument('token', action="store" )
parser.add_argument('channel', action="store" )
parser.add_argument('filename', action="store" )
parser.add_argument('resolution', action="store", type=int, default=0 )
parser.add_argument('xoffset', action="store", type=int, default=0 )
parser.add_argument('yoffset', action="store", type=int, default=0)
parser.add_argument('zoffset', action="store", type=int, default=0)
result = parser.parse_args()
url = 'http://%s/ca/%s/%s/tiff/%s/%s/%s/%s/' % ( result.baseurl, result.token, result.channel, result.resolution, result.xoffset, result.yoffset, result.zoffset )
# open the file name as a tiff file
fh = open ( result.filename )
# Get cube in question
try:
f = urllib2.urlopen ( url, fh.read() )
except urllib2.URLError, e:
print "Failed %s. Exception %s." % (url,e)
sys.exit(-1)
if __name__ == "__main__":
main()
|
Migrate this version to new workstation.# Copyright 2014 Open Connectome Project (http://openconnecto.me)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import numpy as np
import urllib, urllib2
import cStringIO
import sys
import zlib
import libtiff
def main():
parser = argparse.ArgumentParser(description='Post a file as a tiff')
parser.add_argument('baseurl', action="store" )
parser.add_argument('token', action="store" )
parser.add_argument('channel', action="store" )
parser.add_argument('filename', action="store" )
parser.add_argument('resolution', action="store", type=int, default=0 )
parser.add_argument('xoffset', action="store", type=int, default=0 )
parser.add_argument('yoffset', action="store", type=int, default=0)
parser.add_argument('zoffset', action="store", type=int, default=0)
result = parser.parse_args()
url = 'http://%s/ca/%s/%s/tiff/%s/%s/%s/%s/' % ( result.baseurl, result.token, result.channel, result.resolution, result.xoffset, result.yoffset, result.zoffset )
# open the file name as a tiff file
fh = open ( result.filename )
# Get cube in question
try:
f = urllib2.urlopen ( url, fh.read() )
except urllib2.URLError, e:
print "Failed %s. Exception %s." % (url,e)
sys.exit(-1)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Migrate this version to new workstation.<commit_after># Copyright 2014 Open Connectome Project (http://openconnecto.me)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import numpy as np
import urllib, urllib2
import cStringIO
import sys
import zlib
import libtiff
def main():
parser = argparse.ArgumentParser(description='Post a file as a tiff')
parser.add_argument('baseurl', action="store" )
parser.add_argument('token', action="store" )
parser.add_argument('channel', action="store" )
parser.add_argument('filename', action="store" )
parser.add_argument('resolution', action="store", type=int, default=0 )
parser.add_argument('xoffset', action="store", type=int, default=0 )
parser.add_argument('yoffset', action="store", type=int, default=0)
parser.add_argument('zoffset', action="store", type=int, default=0)
result = parser.parse_args()
url = 'http://%s/ca/%s/%s/tiff/%s/%s/%s/%s/' % ( result.baseurl, result.token, result.channel, result.resolution, result.xoffset, result.yoffset, result.zoffset )
# open the file name as a tiff file
fh = open ( result.filename )
# Get cube in question
try:
f = urllib2.urlopen ( url, fh.read() )
except urllib2.URLError, e:
print "Failed %s. Exception %s." % (url,e)
sys.exit(-1)
if __name__ == "__main__":
main()
|
|
01d35d13aaedea0ef87ae1d78ee1368e5e0f407c
|
corehq/apps/locations/management/commands/set_location_id.py
|
corehq/apps/locations/management/commands/set_location_id.py
|
from django.core.management.base import BaseCommand
from dimagi.utils.couch.database import iter_docs
from corehq.apps.users.models import CouchUser, CommCareUser
class Command(BaseCommand):
help = ''
def handle(self, *args, **options):
self.stdout.write("Population location_id field...\n")
relevant_ids = set([r['id'] for r in CouchUser.get_db().view(
'users/by_username',
reduce=False,
).all()])
to_save = []
for user_doc in iter_docs(CouchUser.get_db(), relevant_ids):
user = CommCareUser.get(user_doc['_id'])
if user._locations:
user_doc['location_id'] = user._locations[0]._id
to_save.append(user_doc)
if len(to_save) > 500:
CouchUser.get_db().bulk_save(to_save)
to_save = []
if to_save:
CouchUser.get_db().bulk_save(to_save)
|
Move migration into main branch
|
Move migration into main branch
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,puttarajubr/commcare-hq
|
Move migration into main branch
|
from django.core.management.base import BaseCommand
from dimagi.utils.couch.database import iter_docs
from corehq.apps.users.models import CouchUser, CommCareUser
class Command(BaseCommand):
help = ''
def handle(self, *args, **options):
self.stdout.write("Population location_id field...\n")
relevant_ids = set([r['id'] for r in CouchUser.get_db().view(
'users/by_username',
reduce=False,
).all()])
to_save = []
for user_doc in iter_docs(CouchUser.get_db(), relevant_ids):
user = CommCareUser.get(user_doc['_id'])
if user._locations:
user_doc['location_id'] = user._locations[0]._id
to_save.append(user_doc)
if len(to_save) > 500:
CouchUser.get_db().bulk_save(to_save)
to_save = []
if to_save:
CouchUser.get_db().bulk_save(to_save)
|
<commit_before><commit_msg>Move migration into main branch<commit_after>
|
from django.core.management.base import BaseCommand
from dimagi.utils.couch.database import iter_docs
from corehq.apps.users.models import CouchUser, CommCareUser
class Command(BaseCommand):
help = ''
def handle(self, *args, **options):
self.stdout.write("Population location_id field...\n")
relevant_ids = set([r['id'] for r in CouchUser.get_db().view(
'users/by_username',
reduce=False,
).all()])
to_save = []
for user_doc in iter_docs(CouchUser.get_db(), relevant_ids):
user = CommCareUser.get(user_doc['_id'])
if user._locations:
user_doc['location_id'] = user._locations[0]._id
to_save.append(user_doc)
if len(to_save) > 500:
CouchUser.get_db().bulk_save(to_save)
to_save = []
if to_save:
CouchUser.get_db().bulk_save(to_save)
|
Move migration into main branchfrom django.core.management.base import BaseCommand
from dimagi.utils.couch.database import iter_docs
from corehq.apps.users.models import CouchUser, CommCareUser
class Command(BaseCommand):
help = ''
def handle(self, *args, **options):
self.stdout.write("Population location_id field...\n")
relevant_ids = set([r['id'] for r in CouchUser.get_db().view(
'users/by_username',
reduce=False,
).all()])
to_save = []
for user_doc in iter_docs(CouchUser.get_db(), relevant_ids):
user = CommCareUser.get(user_doc['_id'])
if user._locations:
user_doc['location_id'] = user._locations[0]._id
to_save.append(user_doc)
if len(to_save) > 500:
CouchUser.get_db().bulk_save(to_save)
to_save = []
if to_save:
CouchUser.get_db().bulk_save(to_save)
|
<commit_before><commit_msg>Move migration into main branch<commit_after>from django.core.management.base import BaseCommand
from dimagi.utils.couch.database import iter_docs
from corehq.apps.users.models import CouchUser, CommCareUser
class Command(BaseCommand):
help = ''
def handle(self, *args, **options):
self.stdout.write("Population location_id field...\n")
relevant_ids = set([r['id'] for r in CouchUser.get_db().view(
'users/by_username',
reduce=False,
).all()])
to_save = []
for user_doc in iter_docs(CouchUser.get_db(), relevant_ids):
user = CommCareUser.get(user_doc['_id'])
if user._locations:
user_doc['location_id'] = user._locations[0]._id
to_save.append(user_doc)
if len(to_save) > 500:
CouchUser.get_db().bulk_save(to_save)
to_save = []
if to_save:
CouchUser.get_db().bulk_save(to_save)
|
|
1b28330beda151cc2660143926a9bf9178b6af2b
|
openfisca_france_data/tests/test_fake_survey_simulation.py
|
openfisca_france_data/tests/test_fake_survey_simulation.py
|
# -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <contact@openfisca.fr>
#
# Copyright (C) 2011, 2012, 2013, 2014, 2015 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from openfisca_france_data.input_data_builders.fake_openfisca_data_builder import get_fake_input_data_frame
from openfisca_france_data.surveys import SurveyScenario
def test_fake_survey_simulation():
year = 2006
input_data_frame = get_fake_input_data_frame(year)
survey_scenario = SurveyScenario().init_from_data_frame(
input_data_frame = input_data_frame,
year = year,
)
simulation = survey_scenario.new_simulation()
simulation.calculate('revdisp')
if __name__ == '__main__':
import logging
log = logging.getLogger(__name__)
import sys
logging.basicConfig(level = logging.INFO, stream = sys.stdout)
test_fake_survey_simulation()
df_by_entity = test_fake_survey_simulation()
df_i = df_by_entity['individus']
df_m = df_by_entity['menages']
print df_i
print df_m
|
Move fake survey simulation to a separate file
|
Move fake survey simulation to a separate file
|
Python
|
agpl-3.0
|
adrienpacifico/openfisca-france-data,openfisca/openfisca-france-data,LouisePaulDelvaux/openfisca-france-data,openfisca/openfisca-france-data,LouisePaulDelvaux/openfisca-france-data,benjello/openfisca-france-data,MalkIPP/openfisca-france-data,benjello/openfisca-france-data,openfisca/openfisca-france-data,adrienpacifico/openfisca-france-data,MalkIPP/openfisca-france-data
|
Move fake survey simulation to a separate file
|
# -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <contact@openfisca.fr>
#
# Copyright (C) 2011, 2012, 2013, 2014, 2015 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from openfisca_france_data.input_data_builders.fake_openfisca_data_builder import get_fake_input_data_frame
from openfisca_france_data.surveys import SurveyScenario
def test_fake_survey_simulation():
year = 2006
input_data_frame = get_fake_input_data_frame(year)
survey_scenario = SurveyScenario().init_from_data_frame(
input_data_frame = input_data_frame,
year = year,
)
simulation = survey_scenario.new_simulation()
simulation.calculate('revdisp')
if __name__ == '__main__':
import logging
log = logging.getLogger(__name__)
import sys
logging.basicConfig(level = logging.INFO, stream = sys.stdout)
test_fake_survey_simulation()
df_by_entity = test_fake_survey_simulation()
df_i = df_by_entity['individus']
df_m = df_by_entity['menages']
print df_i
print df_m
|
<commit_before><commit_msg>Move fake survey simulation to a separate file<commit_after>
|
# -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <contact@openfisca.fr>
#
# Copyright (C) 2011, 2012, 2013, 2014, 2015 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from openfisca_france_data.input_data_builders.fake_openfisca_data_builder import get_fake_input_data_frame
from openfisca_france_data.surveys import SurveyScenario
def test_fake_survey_simulation():
year = 2006
input_data_frame = get_fake_input_data_frame(year)
survey_scenario = SurveyScenario().init_from_data_frame(
input_data_frame = input_data_frame,
year = year,
)
simulation = survey_scenario.new_simulation()
simulation.calculate('revdisp')
if __name__ == '__main__':
import logging
log = logging.getLogger(__name__)
import sys
logging.basicConfig(level = logging.INFO, stream = sys.stdout)
test_fake_survey_simulation()
df_by_entity = test_fake_survey_simulation()
df_i = df_by_entity['individus']
df_m = df_by_entity['menages']
print df_i
print df_m
|
Move fake survey simulation to a separate file# -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <contact@openfisca.fr>
#
# Copyright (C) 2011, 2012, 2013, 2014, 2015 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from openfisca_france_data.input_data_builders.fake_openfisca_data_builder import get_fake_input_data_frame
from openfisca_france_data.surveys import SurveyScenario
def test_fake_survey_simulation():
year = 2006
input_data_frame = get_fake_input_data_frame(year)
survey_scenario = SurveyScenario().init_from_data_frame(
input_data_frame = input_data_frame,
year = year,
)
simulation = survey_scenario.new_simulation()
simulation.calculate('revdisp')
if __name__ == '__main__':
import logging
log = logging.getLogger(__name__)
import sys
logging.basicConfig(level = logging.INFO, stream = sys.stdout)
test_fake_survey_simulation()
df_by_entity = test_fake_survey_simulation()
df_i = df_by_entity['individus']
df_m = df_by_entity['menages']
print df_i
print df_m
|
<commit_before><commit_msg>Move fake survey simulation to a separate file<commit_after># -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <contact@openfisca.fr>
#
# Copyright (C) 2011, 2012, 2013, 2014, 2015 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from openfisca_france_data.input_data_builders.fake_openfisca_data_builder import get_fake_input_data_frame
from openfisca_france_data.surveys import SurveyScenario
def test_fake_survey_simulation():
year = 2006
input_data_frame = get_fake_input_data_frame(year)
survey_scenario = SurveyScenario().init_from_data_frame(
input_data_frame = input_data_frame,
year = year,
)
simulation = survey_scenario.new_simulation()
simulation.calculate('revdisp')
if __name__ == '__main__':
import logging
log = logging.getLogger(__name__)
import sys
logging.basicConfig(level = logging.INFO, stream = sys.stdout)
test_fake_survey_simulation()
df_by_entity = test_fake_survey_simulation()
df_i = df_by_entity['individus']
df_m = df_by_entity['menages']
print df_i
print df_m
|
|
420a1a76c7c9460768e6d41e7508f36d3215fbfb
|
kolibri/core/exams/migrations/0003_auto_20190426_1015.py
|
kolibri/core/exams/migrations/0003_auto_20190426_1015.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-04-26 17:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('exams', '0002_update_exam_data_model'),
]
operations = [
migrations.AlterField(
model_name='exam',
name='data_model_version',
field=models.SmallIntegerField(default=2),
),
]
|
Add migration for new Exam data_model_version default
|
Add migration for new Exam data_model_version default
|
Python
|
mit
|
learningequality/kolibri,indirectlylit/kolibri,lyw07/kolibri,learningequality/kolibri,mrpau/kolibri,lyw07/kolibri,lyw07/kolibri,learningequality/kolibri,indirectlylit/kolibri,mrpau/kolibri,indirectlylit/kolibri,mrpau/kolibri,learningequality/kolibri,mrpau/kolibri,lyw07/kolibri,indirectlylit/kolibri
|
Add migration for new Exam data_model_version default
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-04-26 17:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('exams', '0002_update_exam_data_model'),
]
operations = [
migrations.AlterField(
model_name='exam',
name='data_model_version',
field=models.SmallIntegerField(default=2),
),
]
|
<commit_before><commit_msg>Add migration for new Exam data_model_version default<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-04-26 17:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('exams', '0002_update_exam_data_model'),
]
operations = [
migrations.AlterField(
model_name='exam',
name='data_model_version',
field=models.SmallIntegerField(default=2),
),
]
|
Add migration for new Exam data_model_version default# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-04-26 17:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('exams', '0002_update_exam_data_model'),
]
operations = [
migrations.AlterField(
model_name='exam',
name='data_model_version',
field=models.SmallIntegerField(default=2),
),
]
|
<commit_before><commit_msg>Add migration for new Exam data_model_version default<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-04-26 17:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('exams', '0002_update_exam_data_model'),
]
operations = [
migrations.AlterField(
model_name='exam',
name='data_model_version',
field=models.SmallIntegerField(default=2),
),
]
|
|
f84284b14d21669f9a9aae5d482f1e626f42df03
|
src/clincoded/upgrade/pathogenicity.py
|
src/clincoded/upgrade/pathogenicity.py
|
from contentbase.upgrader import upgrade_step
@upgrade_step('pathogenicity', '1', '2')
def pathogenicity_1_2(value, system):
# https://github.com/ClinGen/clincoded/issues/1507
# Add affiliation property and update schema version
return
|
Add affiliation property and update schema version
|
Add affiliation property and update schema version
|
Python
|
mit
|
ClinGen/clincoded,ClinGen/clincoded,ClinGen/clincoded,ClinGen/clincoded,ClinGen/clincoded
|
Add affiliation property and update schema version
|
from contentbase.upgrader import upgrade_step
@upgrade_step('pathogenicity', '1', '2')
def pathogenicity_1_2(value, system):
# https://github.com/ClinGen/clincoded/issues/1507
# Add affiliation property and update schema version
return
|
<commit_before><commit_msg>Add affiliation property and update schema version<commit_after>
|
from contentbase.upgrader import upgrade_step
@upgrade_step('pathogenicity', '1', '2')
def pathogenicity_1_2(value, system):
# https://github.com/ClinGen/clincoded/issues/1507
# Add affiliation property and update schema version
return
|
Add affiliation property and update schema versionfrom contentbase.upgrader import upgrade_step
@upgrade_step('pathogenicity', '1', '2')
def pathogenicity_1_2(value, system):
# https://github.com/ClinGen/clincoded/issues/1507
# Add affiliation property and update schema version
return
|
<commit_before><commit_msg>Add affiliation property and update schema version<commit_after>from contentbase.upgrader import upgrade_step
@upgrade_step('pathogenicity', '1', '2')
def pathogenicity_1_2(value, system):
# https://github.com/ClinGen/clincoded/issues/1507
# Add affiliation property and update schema version
return
|
|
2355ebd04ed8b5c14c42966b71415ae638c50551
|
app/models/datastore_adapter.py
|
app/models/datastore_adapter.py
|
from flask_user import DBAdapter
class DataStoreAdapter(DBAdapter):
""" An Wrapper to be use by Flask User to interact with
the database in this case, the DataStore """
def __init__(self, db, objMOdel):
super().__init__(db, objMOdel)
def get_object(self, ObjectClass, pk):
""" Retrieve an single Entity specified by a pk or id. """
entity = ObjectClass().get(pk)
entity = ObjectClass(**entity)
return entity
def find_all_objects(self, ObjectClass, **kwargs):
""" Retrieve all Entity matching all the filters in kwargs. """
# TODO:
# The filters should be case sensitive
return ObjectClass().fetch()
def find_first_object(self, ObjectClass, **kwargs):
""" Retrieve the first Entity matching the filters in kwargs or None. """
# TODO:
# The filters should be case sensitive
entity = ObjectClass()
for key, value in kwargs.items():
entity.query.add_filter(key, "=", value)
entity = entity.fetch(limit=1)
# Just return the Entity is the list is not empty and the
# first element is not None or Falsy else return None
entity = ObjectClass(**entity[0]) if entity and entity[0] else None
return entity
def ifind_first_object(self, ObjectClass, **kwargs):
""" Retrieve the first Entity matching the filters in kwargs or None. """
# TODO:
# The filters should be case insensitive
entity = ObjectClass()
for key, value in kwargs.items():
entity.query.add_filter(key, "=", value)
entity = entity.fetch(limit=1)
# Just return the Entity is the list is not empty and the
# first element is not None or Falsy else return None
entity = ObjectClass(**entity[0]) if entity and entity[0] else None
return entity
def add_object(self, ObjectClass, **kwargs):
""" Create an Entity with the fields specified in kwargs. """
entity = ObjectClass(**kwargs)
entity.put()
return entity
def update_object(self, entity, **kwargs):
""" Update an Entity with the fields specified in kwargs. """
entity.update(entity.get_id(), kwargs)
return entity
def delete_object(self, entity):
""" Delete and Entity. """
entity.delete(entity.id)
return entity
def commit(self):
""" Should commit a create, update, all delete to the DataStore. """
pass
|
Create a datastore adapter to use by flask user
|
Create a datastore adapter to use by flask user
|
Python
|
mit
|
oldani/nanodegree-blog,oldani/nanodegree-blog,oldani/nanodegree-blog
|
Create a datastore adapter to use by flask user
|
from flask_user import DBAdapter
class DataStoreAdapter(DBAdapter):
""" An Wrapper to be use by Flask User to interact with
the database in this case, the DataStore """
def __init__(self, db, objMOdel):
super().__init__(db, objMOdel)
def get_object(self, ObjectClass, pk):
""" Retrieve an single Entity specified by a pk or id. """
entity = ObjectClass().get(pk)
entity = ObjectClass(**entity)
return entity
def find_all_objects(self, ObjectClass, **kwargs):
""" Retrieve all Entity matching all the filters in kwargs. """
# TODO:
# The filters should be case sensitive
return ObjectClass().fetch()
def find_first_object(self, ObjectClass, **kwargs):
""" Retrieve the first Entity matching the filters in kwargs or None. """
# TODO:
# The filters should be case sensitive
entity = ObjectClass()
for key, value in kwargs.items():
entity.query.add_filter(key, "=", value)
entity = entity.fetch(limit=1)
# Just return the Entity is the list is not empty and the
# first element is not None or Falsy else return None
entity = ObjectClass(**entity[0]) if entity and entity[0] else None
return entity
def ifind_first_object(self, ObjectClass, **kwargs):
""" Retrieve the first Entity matching the filters in kwargs or None. """
# TODO:
# The filters should be case insensitive
entity = ObjectClass()
for key, value in kwargs.items():
entity.query.add_filter(key, "=", value)
entity = entity.fetch(limit=1)
# Just return the Entity is the list is not empty and the
# first element is not None or Falsy else return None
entity = ObjectClass(**entity[0]) if entity and entity[0] else None
return entity
def add_object(self, ObjectClass, **kwargs):
""" Create an Entity with the fields specified in kwargs. """
entity = ObjectClass(**kwargs)
entity.put()
return entity
def update_object(self, entity, **kwargs):
""" Update an Entity with the fields specified in kwargs. """
entity.update(entity.get_id(), kwargs)
return entity
def delete_object(self, entity):
""" Delete and Entity. """
entity.delete(entity.id)
return entity
def commit(self):
""" Should commit a create, update, all delete to the DataStore. """
pass
|
<commit_before><commit_msg>Create a datastore adapter to use by flask user<commit_after>
|
from flask_user import DBAdapter
class DataStoreAdapter(DBAdapter):
""" An Wrapper to be use by Flask User to interact with
the database in this case, the DataStore """
def __init__(self, db, objMOdel):
super().__init__(db, objMOdel)
def get_object(self, ObjectClass, pk):
""" Retrieve an single Entity specified by a pk or id. """
entity = ObjectClass().get(pk)
entity = ObjectClass(**entity)
return entity
def find_all_objects(self, ObjectClass, **kwargs):
""" Retrieve all Entity matching all the filters in kwargs. """
# TODO:
# The filters should be case sensitive
return ObjectClass().fetch()
def find_first_object(self, ObjectClass, **kwargs):
""" Retrieve the first Entity matching the filters in kwargs or None. """
# TODO:
# The filters should be case sensitive
entity = ObjectClass()
for key, value in kwargs.items():
entity.query.add_filter(key, "=", value)
entity = entity.fetch(limit=1)
# Just return the Entity is the list is not empty and the
# first element is not None or Falsy else return None
entity = ObjectClass(**entity[0]) if entity and entity[0] else None
return entity
def ifind_first_object(self, ObjectClass, **kwargs):
""" Retrieve the first Entity matching the filters in kwargs or None. """
# TODO:
# The filters should be case insensitive
entity = ObjectClass()
for key, value in kwargs.items():
entity.query.add_filter(key, "=", value)
entity = entity.fetch(limit=1)
# Just return the Entity is the list is not empty and the
# first element is not None or Falsy else return None
entity = ObjectClass(**entity[0]) if entity and entity[0] else None
return entity
def add_object(self, ObjectClass, **kwargs):
""" Create an Entity with the fields specified in kwargs. """
entity = ObjectClass(**kwargs)
entity.put()
return entity
def update_object(self, entity, **kwargs):
""" Update an Entity with the fields specified in kwargs. """
entity.update(entity.get_id(), kwargs)
return entity
def delete_object(self, entity):
""" Delete and Entity. """
entity.delete(entity.id)
return entity
def commit(self):
""" Should commit a create, update, all delete to the DataStore. """
pass
|
Create a datastore adapter to use by flask userfrom flask_user import DBAdapter
class DataStoreAdapter(DBAdapter):
""" An Wrapper to be use by Flask User to interact with
the database in this case, the DataStore """
def __init__(self, db, objMOdel):
super().__init__(db, objMOdel)
def get_object(self, ObjectClass, pk):
""" Retrieve an single Entity specified by a pk or id. """
entity = ObjectClass().get(pk)
entity = ObjectClass(**entity)
return entity
def find_all_objects(self, ObjectClass, **kwargs):
""" Retrieve all Entity matching all the filters in kwargs. """
# TODO:
# The filters should be case sensitive
return ObjectClass().fetch()
def find_first_object(self, ObjectClass, **kwargs):
""" Retrieve the first Entity matching the filters in kwargs or None. """
# TODO:
# The filters should be case sensitive
entity = ObjectClass()
for key, value in kwargs.items():
entity.query.add_filter(key, "=", value)
entity = entity.fetch(limit=1)
# Just return the Entity is the list is not empty and the
# first element is not None or Falsy else return None
entity = ObjectClass(**entity[0]) if entity and entity[0] else None
return entity
def ifind_first_object(self, ObjectClass, **kwargs):
""" Retrieve the first Entity matching the filters in kwargs or None. """
# TODO:
# The filters should be case insensitive
entity = ObjectClass()
for key, value in kwargs.items():
entity.query.add_filter(key, "=", value)
entity = entity.fetch(limit=1)
# Just return the Entity is the list is not empty and the
# first element is not None or Falsy else return None
entity = ObjectClass(**entity[0]) if entity and entity[0] else None
return entity
def add_object(self, ObjectClass, **kwargs):
""" Create an Entity with the fields specified in kwargs. """
entity = ObjectClass(**kwargs)
entity.put()
return entity
def update_object(self, entity, **kwargs):
""" Update an Entity with the fields specified in kwargs. """
entity.update(entity.get_id(), kwargs)
return entity
def delete_object(self, entity):
""" Delete and Entity. """
entity.delete(entity.id)
return entity
def commit(self):
""" Should commit a create, update, all delete to the DataStore. """
pass
|
<commit_before><commit_msg>Create a datastore adapter to use by flask user<commit_after>from flask_user import DBAdapter
class DataStoreAdapter(DBAdapter):
""" An Wrapper to be use by Flask User to interact with
the database in this case, the DataStore """
def __init__(self, db, objMOdel):
super().__init__(db, objMOdel)
def get_object(self, ObjectClass, pk):
""" Retrieve an single Entity specified by a pk or id. """
entity = ObjectClass().get(pk)
entity = ObjectClass(**entity)
return entity
def find_all_objects(self, ObjectClass, **kwargs):
""" Retrieve all Entity matching all the filters in kwargs. """
# TODO:
# The filters should be case sensitive
return ObjectClass().fetch()
def find_first_object(self, ObjectClass, **kwargs):
""" Retrieve the first Entity matching the filters in kwargs or None. """
# TODO:
# The filters should be case sensitive
entity = ObjectClass()
for key, value in kwargs.items():
entity.query.add_filter(key, "=", value)
entity = entity.fetch(limit=1)
# Just return the Entity is the list is not empty and the
# first element is not None or Falsy else return None
entity = ObjectClass(**entity[0]) if entity and entity[0] else None
return entity
def ifind_first_object(self, ObjectClass, **kwargs):
""" Retrieve the first Entity matching the filters in kwargs or None. """
# TODO:
# The filters should be case insensitive
entity = ObjectClass()
for key, value in kwargs.items():
entity.query.add_filter(key, "=", value)
entity = entity.fetch(limit=1)
# Just return the Entity is the list is not empty and the
# first element is not None or Falsy else return None
entity = ObjectClass(**entity[0]) if entity and entity[0] else None
return entity
def add_object(self, ObjectClass, **kwargs):
""" Create an Entity with the fields specified in kwargs. """
entity = ObjectClass(**kwargs)
entity.put()
return entity
def update_object(self, entity, **kwargs):
""" Update an Entity with the fields specified in kwargs. """
entity.update(entity.get_id(), kwargs)
return entity
def delete_object(self, entity):
""" Delete and Entity. """
entity.delete(entity.id)
return entity
def commit(self):
""" Should commit a create, update, all delete to the DataStore. """
pass
|
|
6d46c1beb8f5f916e3fb2e08d086e5b4167383be
|
ftfy/test_unicode.py
|
ftfy/test_unicode.py
|
# -*- coding: utf-8 -*-
from ftfy import fix_bad_encoding, WINDOWS_1252_GREMLINS
import unicodedata
# Most single-character strings which have been misencoded should be restored.
def test_all_bmp_characters():
for index in xrange(0xa0, 0xfffd):
char = unichr(index)
# Exclude code points that are not assigned
if unicodedata.category(char) not in ('Co', 'Cn'):
garble = char.encode('utf-8').decode('latin-1')
assert fix_bad_encoding(garble) == char
phrases = [
u"\u201CI'm not such a fan of Charlotte Brontë\u2026\u201D",
u"\u201CI'm not such a fan of Charlotte Brontë\u2026\u201D",
u"\u2039ALLÍ ESTÁ\u203A",
u"\u2014ALLÍ ESTÁ\u2014",
u"AHÅ™, the new sofa from IKEA®",
#u"\u2014a radius of 10 Å\u2014",
]
# These phrases should not be erroneously "fixed"
def test_valid_phrases():
for phrase in phrases:
print phrase
yield check_phrase, phrase
# make it not just confirm based on the opening punctuation
yield check_phrase, phrase[1:]
def check_phrase(text):
assert fix_bad_encoding(text) == text, text
|
Move this test from metanl.
|
Move this test from metanl.
|
Python
|
mit
|
rspeer/python-ftfy
|
Move this test from metanl.
|
# -*- coding: utf-8 -*-
from ftfy import fix_bad_encoding, WINDOWS_1252_GREMLINS
import unicodedata
# Most single-character strings which have been misencoded should be restored.
def test_all_bmp_characters():
for index in xrange(0xa0, 0xfffd):
char = unichr(index)
# Exclude code points that are not assigned
if unicodedata.category(char) not in ('Co', 'Cn'):
garble = char.encode('utf-8').decode('latin-1')
assert fix_bad_encoding(garble) == char
phrases = [
u"\u201CI'm not such a fan of Charlotte Brontë\u2026\u201D",
u"\u201CI'm not such a fan of Charlotte Brontë\u2026\u201D",
u"\u2039ALLÍ ESTÁ\u203A",
u"\u2014ALLÍ ESTÁ\u2014",
u"AHÅ™, the new sofa from IKEA®",
#u"\u2014a radius of 10 Å\u2014",
]
# These phrases should not be erroneously "fixed"
def test_valid_phrases():
for phrase in phrases:
print phrase
yield check_phrase, phrase
# make it not just confirm based on the opening punctuation
yield check_phrase, phrase[1:]
def check_phrase(text):
assert fix_bad_encoding(text) == text, text
|
<commit_before><commit_msg>Move this test from metanl.<commit_after>
|
# -*- coding: utf-8 -*-
from ftfy import fix_bad_encoding, WINDOWS_1252_GREMLINS
import unicodedata
# Most single-character strings which have been misencoded should be restored.
def test_all_bmp_characters():
for index in xrange(0xa0, 0xfffd):
char = unichr(index)
# Exclude code points that are not assigned
if unicodedata.category(char) not in ('Co', 'Cn'):
garble = char.encode('utf-8').decode('latin-1')
assert fix_bad_encoding(garble) == char
phrases = [
u"\u201CI'm not such a fan of Charlotte Brontë\u2026\u201D",
u"\u201CI'm not such a fan of Charlotte Brontë\u2026\u201D",
u"\u2039ALLÍ ESTÁ\u203A",
u"\u2014ALLÍ ESTÁ\u2014",
u"AHÅ™, the new sofa from IKEA®",
#u"\u2014a radius of 10 Å\u2014",
]
# These phrases should not be erroneously "fixed"
def test_valid_phrases():
for phrase in phrases:
print phrase
yield check_phrase, phrase
# make it not just confirm based on the opening punctuation
yield check_phrase, phrase[1:]
def check_phrase(text):
assert fix_bad_encoding(text) == text, text
|
Move this test from metanl.# -*- coding: utf-8 -*-
from ftfy import fix_bad_encoding, WINDOWS_1252_GREMLINS
import unicodedata
# Most single-character strings which have been misencoded should be restored.
def test_all_bmp_characters():
for index in xrange(0xa0, 0xfffd):
char = unichr(index)
# Exclude code points that are not assigned
if unicodedata.category(char) not in ('Co', 'Cn'):
garble = char.encode('utf-8').decode('latin-1')
assert fix_bad_encoding(garble) == char
phrases = [
u"\u201CI'm not such a fan of Charlotte Brontë\u2026\u201D",
u"\u201CI'm not such a fan of Charlotte Brontë\u2026\u201D",
u"\u2039ALLÍ ESTÁ\u203A",
u"\u2014ALLÍ ESTÁ\u2014",
u"AHÅ™, the new sofa from IKEA®",
#u"\u2014a radius of 10 Å\u2014",
]
# These phrases should not be erroneously "fixed"
def test_valid_phrases():
for phrase in phrases:
print phrase
yield check_phrase, phrase
# make it not just confirm based on the opening punctuation
yield check_phrase, phrase[1:]
def check_phrase(text):
assert fix_bad_encoding(text) == text, text
|
<commit_before><commit_msg>Move this test from metanl.<commit_after># -*- coding: utf-8 -*-
from ftfy import fix_bad_encoding, WINDOWS_1252_GREMLINS
import unicodedata
# Most single-character strings which have been misencoded should be restored.
def test_all_bmp_characters():
for index in xrange(0xa0, 0xfffd):
char = unichr(index)
# Exclude code points that are not assigned
if unicodedata.category(char) not in ('Co', 'Cn'):
garble = char.encode('utf-8').decode('latin-1')
assert fix_bad_encoding(garble) == char
phrases = [
u"\u201CI'm not such a fan of Charlotte Brontë\u2026\u201D",
u"\u201CI'm not such a fan of Charlotte Brontë\u2026\u201D",
u"\u2039ALLÍ ESTÁ\u203A",
u"\u2014ALLÍ ESTÁ\u2014",
u"AHÅ™, the new sofa from IKEA®",
#u"\u2014a radius of 10 Å\u2014",
]
# These phrases should not be erroneously "fixed"
def test_valid_phrases():
for phrase in phrases:
print phrase
yield check_phrase, phrase
# make it not just confirm based on the opening punctuation
yield check_phrase, phrase[1:]
def check_phrase(text):
assert fix_bad_encoding(text) == text, text
|
|
d07dfe5392a0ea9bde4bff174c4dc4e4fdba33be
|
tests/rest/client/v2_alpha/__init__.py
|
tests/rest/client/v2_alpha/__init__.py
|
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
Create (empty) v2_alpha REST tests directory
|
Create (empty) v2_alpha REST tests directory
|
Python
|
apache-2.0
|
rzr/synapse,TribeMedia/synapse,matrix-org/synapse,illicitonion/synapse,howethomas/synapse,howethomas/synapse,TribeMedia/synapse,howethomas/synapse,illicitonion/synapse,illicitonion/synapse,matrix-org/synapse,iot-factory/synapse,rzr/synapse,matrix-org/synapse,rzr/synapse,TribeMedia/synapse,iot-factory/synapse,matrix-org/synapse,matrix-org/synapse,illicitonion/synapse,rzr/synapse,TribeMedia/synapse,howethomas/synapse,iot-factory/synapse,rzr/synapse,iot-factory/synapse,matrix-org/synapse,illicitonion/synapse,howethomas/synapse,iot-factory/synapse,TribeMedia/synapse
|
Create (empty) v2_alpha REST tests directory
|
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
<commit_before><commit_msg>Create (empty) v2_alpha REST tests directory<commit_after>
|
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
Create (empty) v2_alpha REST tests directory# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
<commit_before><commit_msg>Create (empty) v2_alpha REST tests directory<commit_after># -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
|
955856171a4d689d1db68eb46c17d6b3fe92c2fc
|
examples/halt_asset.py
|
examples/halt_asset.py
|
from grapheneapi.grapheneclient import GrapheneClient
import json
symbol = "TOKEN"
class Config():
wallet_host = "localhost"
wallet_port = 8092
wallet_user = ""
wallet_password = ""
## no edits below this line #####################
perm = {}
perm["charge_market_fee"] = 0x01
perm["white_list"] = 0x02
perm["override_authority"] = 0x04
perm["transfer_restricted"] = 0x08
perm["disable_force_settle"] = 0x10
perm["global_settle"] = 0x20
perm["disable_confidential"] = 0x40
perm["witness_fed_asset"] = 0x80
perm["committee_fed_asset"] = 0x100
graphene = GrapheneClient(Config)
nullaccount = graphene.rpc.get_account("null-account")
asset = graphene.rpc.get_asset(symbol)
flags = {
"white_list" : True,
"transfer_restricted" : True,
}
flags_int = 0
for p in flags :
if flags[p]:
flags_int += perm[p]
options = asset["options"]
options.update({
"flags" : flags_int,
"whitelist_authorities" : [nullaccount["id"]],
"blacklist_authorities" : [nullaccount["id"]],
"whitelist_markets" : [],
"blacklist_markets" : [],
})
tx = graphene.rpc.update_asset(symbol, None, options, True)
print(json.dumps(tx, indent=4))
|
Halt an asset, prevent people from transfers and trading
|
[example] Halt an asset, prevent people from transfers and trading
|
Python
|
mit
|
xeroc/python-graphenelib
|
[example] Halt an asset, prevent people from transfers and trading
|
from grapheneapi.grapheneclient import GrapheneClient
import json
symbol = "TOKEN"
class Config():
wallet_host = "localhost"
wallet_port = 8092
wallet_user = ""
wallet_password = ""
## no edits below this line #####################
perm = {}
perm["charge_market_fee"] = 0x01
perm["white_list"] = 0x02
perm["override_authority"] = 0x04
perm["transfer_restricted"] = 0x08
perm["disable_force_settle"] = 0x10
perm["global_settle"] = 0x20
perm["disable_confidential"] = 0x40
perm["witness_fed_asset"] = 0x80
perm["committee_fed_asset"] = 0x100
graphene = GrapheneClient(Config)
nullaccount = graphene.rpc.get_account("null-account")
asset = graphene.rpc.get_asset(symbol)
flags = {
"white_list" : True,
"transfer_restricted" : True,
}
flags_int = 0
for p in flags :
if flags[p]:
flags_int += perm[p]
options = asset["options"]
options.update({
"flags" : flags_int,
"whitelist_authorities" : [nullaccount["id"]],
"blacklist_authorities" : [nullaccount["id"]],
"whitelist_markets" : [],
"blacklist_markets" : [],
})
tx = graphene.rpc.update_asset(symbol, None, options, True)
print(json.dumps(tx, indent=4))
|
<commit_before><commit_msg>[example] Halt an asset, prevent people from transfers and trading<commit_after>
|
from grapheneapi.grapheneclient import GrapheneClient
import json
symbol = "TOKEN"
class Config():
wallet_host = "localhost"
wallet_port = 8092
wallet_user = ""
wallet_password = ""
## no edits below this line #####################
perm = {}
perm["charge_market_fee"] = 0x01
perm["white_list"] = 0x02
perm["override_authority"] = 0x04
perm["transfer_restricted"] = 0x08
perm["disable_force_settle"] = 0x10
perm["global_settle"] = 0x20
perm["disable_confidential"] = 0x40
perm["witness_fed_asset"] = 0x80
perm["committee_fed_asset"] = 0x100
graphene = GrapheneClient(Config)
nullaccount = graphene.rpc.get_account("null-account")
asset = graphene.rpc.get_asset(symbol)
flags = {
"white_list" : True,
"transfer_restricted" : True,
}
flags_int = 0
for p in flags :
if flags[p]:
flags_int += perm[p]
options = asset["options"]
options.update({
"flags" : flags_int,
"whitelist_authorities" : [nullaccount["id"]],
"blacklist_authorities" : [nullaccount["id"]],
"whitelist_markets" : [],
"blacklist_markets" : [],
})
tx = graphene.rpc.update_asset(symbol, None, options, True)
print(json.dumps(tx, indent=4))
|
[example] Halt an asset, prevent people from transfers and trading
from grapheneapi.grapheneclient import GrapheneClient
import json
symbol = "TOKEN"
class Config():
wallet_host = "localhost"
wallet_port = 8092
wallet_user = ""
wallet_password = ""
## no edits below this line #####################
perm = {}
perm["charge_market_fee"] = 0x01
perm["white_list"] = 0x02
perm["override_authority"] = 0x04
perm["transfer_restricted"] = 0x08
perm["disable_force_settle"] = 0x10
perm["global_settle"] = 0x20
perm["disable_confidential"] = 0x40
perm["witness_fed_asset"] = 0x80
perm["committee_fed_asset"] = 0x100
graphene = GrapheneClient(Config)
nullaccount = graphene.rpc.get_account("null-account")
asset = graphene.rpc.get_asset(symbol)
flags = {
"white_list" : True,
"transfer_restricted" : True,
}
flags_int = 0
for p in flags :
if flags[p]:
flags_int += perm[p]
options = asset["options"]
options.update({
"flags" : flags_int,
"whitelist_authorities" : [nullaccount["id"]],
"blacklist_authorities" : [nullaccount["id"]],
"whitelist_markets" : [],
"blacklist_markets" : [],
})
tx = graphene.rpc.update_asset(symbol, None, options, True)
print(json.dumps(tx, indent=4))
|
<commit_before><commit_msg>[example] Halt an asset, prevent people from transfers and trading<commit_after>from grapheneapi.grapheneclient import GrapheneClient
import json
symbol = "TOKEN"
class Config():
wallet_host = "localhost"
wallet_port = 8092
wallet_user = ""
wallet_password = ""
## no edits below this line #####################
perm = {}
perm["charge_market_fee"] = 0x01
perm["white_list"] = 0x02
perm["override_authority"] = 0x04
perm["transfer_restricted"] = 0x08
perm["disable_force_settle"] = 0x10
perm["global_settle"] = 0x20
perm["disable_confidential"] = 0x40
perm["witness_fed_asset"] = 0x80
perm["committee_fed_asset"] = 0x100
graphene = GrapheneClient(Config)
nullaccount = graphene.rpc.get_account("null-account")
asset = graphene.rpc.get_asset(symbol)
flags = {
"white_list" : True,
"transfer_restricted" : True,
}
flags_int = 0
for p in flags :
if flags[p]:
flags_int += perm[p]
options = asset["options"]
options.update({
"flags" : flags_int,
"whitelist_authorities" : [nullaccount["id"]],
"blacklist_authorities" : [nullaccount["id"]],
"whitelist_markets" : [],
"blacklist_markets" : [],
})
tx = graphene.rpc.update_asset(symbol, None, options, True)
print(json.dumps(tx, indent=4))
|
|
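Note on the halt-asset record above: the script builds its flags integer by summing entries from the perm table. Because every permission constant occupies a distinct bit, summing each selected flag once produces the same integer as OR-ing the flags together. A minimal, self-contained sketch of that equivalence (the two flag names are taken from the example; everything else is illustrative):

# Combining single-bit permission flags: adding distinct bits once
# yields the same integer as OR-ing them together.
perm = {
    "white_list": 0x02,
    "transfer_restricted": 0x08,
}
enabled = {"white_list": True, "transfer_restricted": True}

flags_by_sum = sum(perm[name] for name, on in enabled.items() if on)
flags_by_or = 0
for name, on in enabled.items():
    if on:
        flags_by_or |= perm[name]

assert flags_by_sum == flags_by_or == 0x0A
print(bin(flags_by_sum))  # 0b1010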
801c8c7463811af88f232e23d8496180d7b413ad
|
python/extract_duplicate_sets.py
|
python/extract_duplicate_sets.py
|
"""
Copyright 2016 Ronald J. Nowling
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
from collections import defaultdict
import json
import sys
def extract_duplicates(query_results):
# Every record entry has a field "tm_field_dupe_of" which points
# to the "entity_id" field of another article. We need to group
# the articles by their parent
duplicate_sets = defaultdict(set)
for doc in query_results["response"]["docs"]:
id = doc["entity_id"]
duplicate_sets[id].add(id)
for duplicate_of in doc["tm_field_dupe_of"]:
duplicate_sets[id].add(duplicate_of)
return duplicate_sets.values()
def merge_duplicates(duplicate_sets):
merged_sets = []
for duplicate_set in duplicate_sets:
was_merged = False
for other_duplicate_set in merged_sets:
if len(duplicate_set.intersection(other_duplicate_set)) > 0:
other_duplicate_set.update(duplicate_set)
was_merged = True
if not was_merged:
merged_sets.append(duplicate_set)
return merged_sets
def run(args):
input_fl = open(args["input"])
query_results = json.load(input_fl)
input_fl.close()
duplicate_sets = extract_duplicates(query_results)
print len(duplicate_sets), "duplicate sets"
while True:
merged_duplicate_sets = merge_duplicates(duplicate_sets)
if len(duplicate_sets) == len(merged_duplicate_sets):
break
print len(merged_duplicate_sets), "duplicate sets after merge"
duplicate_sets = merged_duplicate_sets
duplicate_lists = []
for duplicate_set in duplicate_sets:
duplicate_lists.append(list(duplicate_set))
output_fl = open(args["output"], "w")
json.dump(duplicate_lists, output_fl)
output_fl.close()
def parseargs():
parser = argparse.ArgumentParser(description="Extract sets of duplicate articles from SOLR query dump")
parser.add_argument("--input", type=str, required=True, help="SOLR query JSON dump")
parser.add_argument("--output", type=str, required=True, help="Duplicate set JSON output")
return vars(parser.parse_args())
if __name__ == "__main__":
args = parseargs()
run(args)
|
Add script for extracting duplicate sets from SOLR dump
|
Add script for extracting duplicate sets from SOLR dump
|
Python
|
apache-2.0
|
rnowling/article-deduplication,rnowling/article-deduplication
|
Add script for extracting duplicate sets from SOLR dump
|
"""
Copyright 2016 Ronald J. Nowling
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
from collections import defaultdict
import json
import sys
def extract_duplicates(query_results):
# Every record entry has a field "tm_field_dupe_of" which points
# to the "entity_id" field of another article. We need to group
# the articles by their parent
duplicate_sets = defaultdict(set)
for doc in query_results["response"]["docs"]:
id = doc["entity_id"]
duplicate_sets[id].add(id)
for duplicate_of in doc["tm_field_dupe_of"]:
duplicate_sets[id].add(duplicate_of)
return duplicate_sets.values()
def merge_duplicates(duplicate_sets):
merged_sets = []
for duplicate_set in duplicate_sets:
was_merged = False
for other_duplicate_set in merged_sets:
if len(duplicate_set.intersection(other_duplicate_set)) > 0:
other_duplicate_set.update(duplicate_set)
was_merged = True
if not was_merged:
merged_sets.append(duplicate_set)
return merged_sets
def run(args):
input_fl = open(args["input"])
query_results = json.load(input_fl)
input_fl.close()
duplicate_sets = extract_duplicates(query_results)
print len(duplicate_sets), "duplicate sets"
while True:
merged_duplicate_sets = merge_duplicates(duplicate_sets)
if len(duplicate_sets) == len(merged_duplicate_sets):
break
print len(merged_duplicate_sets), "duplicate sets after merge"
duplicate_sets = merged_duplicate_sets
duplicate_lists = []
for duplicate_set in duplicate_sets:
duplicate_lists.append(list(duplicate_set))
output_fl = open(args["output"], "w")
json.dump(duplicate_lists, output_fl)
output_fl.close()
def parseargs():
parser = argparse.ArgumentParser(description="Extract sets of duplicate articles from SOLR query dump")
parser.add_argument("--input", type=str, required=True, help="SOLR query JSON dump")
parser.add_argument("--output", type=str, required=True, help="Duplicate set JSON output")
return vars(parser.parse_args())
if __name__ == "__main__":
args = parseargs()
run(args)
|
<commit_before><commit_msg>Add script for extracting duplicate sets from SOLR dump<commit_after>
|
"""
Copyright 2016 Ronald J. Nowling
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
from collections import defaultdict
import json
import sys
def extract_duplicates(query_results):
# Every record entry has a field "tm_field_dupe_of" which points
# to the "entity_id" field of another article. We need to group
# the articles by their parent
duplicate_sets = defaultdict(set)
for doc in query_results["response"]["docs"]:
id = doc["entity_id"]
duplicate_sets[id].add(id)
for duplicate_of in doc["tm_field_dupe_of"]:
duplicate_sets[id].add(duplicate_of)
return duplicate_sets.values()
def merge_duplicates(duplicate_sets):
merged_sets = []
for duplicate_set in duplicate_sets:
was_merged = False
for other_duplicate_set in merged_sets:
if len(duplicate_set.intersection(other_duplicate_set)) > 0:
other_duplicate_set.update(duplicate_set)
was_merged = True
if not was_merged:
merged_sets.append(duplicate_set)
return merged_sets
def run(args):
input_fl = open(args["input"])
query_results = json.load(input_fl)
input_fl.close()
duplicate_sets = extract_duplicates(query_results)
print len(duplicate_sets), "duplicate sets"
while True:
merged_duplicate_sets = merge_duplicates(duplicate_sets)
if len(duplicate_sets) == len(merged_duplicate_sets):
break
print len(merged_duplicate_sets), "duplicate sets after merge"
duplicate_sets = merged_duplicate_sets
duplicate_lists = []
for duplicate_set in duplicate_sets:
duplicate_lists.append(list(duplicate_set))
output_fl = open(args["output"], "w")
json.dump(duplicate_lists, output_fl)
output_fl.close()
def parseargs():
parser = argparse.ArgumentParser(description="Extract sets of duplicate articles from SOLR query dump")
parser.add_argument("--input", type=str, required=True, help="SOLR query JSON dump")
parser.add_argument("--output", type=str, required=True, help="Duplicate set JSON output")
return vars(parser.parse_args())
if __name__ == "__main__":
args = parseargs()
run(args)
|
Add script for extracting duplicate sets from SOLR dump
"""
Copyright 2016 Ronald J. Nowling
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
from collections import defaultdict
import json
import sys
def extract_duplicates(query_results):
# Every record entry has a field "tm_field_dupe_of" which points
# to the "entity_id" field of another article. We need to group
# the articles by their parent
duplicate_sets = defaultdict(set)
for doc in query_results["response"]["docs"]:
id = doc["entity_id"]
duplicate_sets[id].add(id)
for duplicate_of in doc["tm_field_dupe_of"]:
duplicate_sets[id].add(duplicate_of)
return duplicate_sets.values()
def merge_duplicates(duplicate_sets):
merged_sets = []
for duplicate_set in duplicate_sets:
was_merged = False
for other_duplicate_set in merged_sets:
if len(duplicate_set.intersection(other_duplicate_set)) > 0:
other_duplicate_set.update(duplicate_set)
was_merged = True
if not was_merged:
merged_sets.append(duplicate_set)
return merged_sets
def run(args):
input_fl = open(args["input"])
query_results = json.load(input_fl)
input_fl.close()
duplicate_sets = extract_duplicates(query_results)
print len(duplicate_sets), "duplicate sets"
while True:
merged_duplicate_sets = merge_duplicates(duplicate_sets)
if len(duplicate_sets) == len(merged_duplicate_sets):
break
print len(merged_duplicate_sets), "duplicate sets after merge"
duplicate_sets = merged_duplicate_sets
duplicate_lists = []
for duplicate_set in duplicate_sets:
duplicate_lists.append(list(duplicate_set))
output_fl = open(args["output"], "w")
json.dump(duplicate_lists, output_fl)
output_fl.close()
def parseargs():
parser = argparse.ArgumentParser(description="Extract sets of duplicate articles from SOLR query dump")
parser.add_argument("--input", type=str, required=True, help="SOLR query JSON dump")
parser.add_argument("--output", type=str, required=True, help="Duplicate set JSON output")
return vars(parser.parse_args())
if __name__ == "__main__":
args = parseargs()
run(args)
|
<commit_before><commit_msg>Add script for extracting duplicate sets from SOLR dump<commit_after>"""
Copyright 2016 Ronald J. Nowling
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
from collections import defaultdict
import json
import sys
def extract_duplicates(query_results):
# Every record entry has a field "tm_field_dupe_of" which points
# to the "entity_id" field of another article. We need to group
# the articles by their parent
duplicate_sets = defaultdict(set)
for doc in query_results["response"]["docs"]:
id = doc["entity_id"]
duplicate_sets[id].add(id)
for duplicate_of in doc["tm_field_dupe_of"]:
duplicate_sets[id].add(duplicate_of)
return duplicate_sets.values()
def merge_duplicates(duplicate_sets):
merged_sets = []
for duplicate_set in duplicate_sets:
was_merged = False
for other_duplicate_set in merged_sets:
if len(duplicate_set.intersection(other_duplicate_set)) > 0:
other_duplicate_set.update(duplicate_set)
was_merged = True
if not was_merged:
merged_sets.append(duplicate_set)
return merged_sets
def run(args):
input_fl = open(args["input"])
query_results = json.load(input_fl)
input_fl.close()
duplicate_sets = extract_duplicates(query_results)
print len(duplicate_sets), "duplicate sets"
while True:
merged_duplicate_sets = merge_duplicates(duplicate_sets)
if len(duplicate_sets) == len(merged_duplicate_sets):
break
print len(merged_duplicate_sets), "duplicate sets after merge"
duplicate_sets = merged_duplicate_sets
duplicate_lists = []
for duplicate_set in duplicate_sets:
duplicate_lists.append(list(duplicate_set))
output_fl = open(args["output"], "w")
json.dump(duplicate_lists, output_fl)
output_fl.close()
def parseargs():
parser = argparse.ArgumentParser(description="Extract sets of duplicate articles from SOLR query dump")
parser.add_argument("--input", type=str, required=True, help="SOLR query JSON dump")
parser.add_argument("--output", type=str, required=True, help="Duplicate set JSON output")
return vars(parser.parse_args())
if __name__ == "__main__":
args = parseargs()
run(args)
|
|
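Note on the extract_duplicate_sets record above: a single pass of merge_duplicates can leave overlaps behind, because two sets already in the merged list can end up overlapping after a later set is absorbed into both of them. That is why run() repeats the merge until the number of sets stops changing. A minimal sketch of that fixed-point loop with illustrative IDs, written for Python 3 even though the original script uses Python 2 print statements:

# Repeatedly merge overlapping sets until no further merges happen.
def merge_once(sets):
    merged = []
    for s in sets:
        absorbed = False
        for m in merged:
            if s & m:            # shares at least one member
                m.update(s)
                absorbed = True
        if not absorbed:
            merged.append(set(s))
    return merged

sets = [{"a", "b"}, {"c", "d"}, {"b", "c"}]  # a-b and c-d are linked only through b-c
passes = 0
while True:
    merged = merge_once(sets)
    passes += 1
    if len(merged) == len(sets):
        break
    sets = merged

print(passes, sets)  # collapses to a single set, needing more than one pass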
7a3e7365569c18c971db9698cd7cdd1a82756463
|
table.py
|
table.py
|
import csv
class Table():
'''A Table is an object which represents a 2-dimensional CSV file. Both rows
and columns can be accessed via their key as in a dictionary. This means that
keys cannot appear as both a row and column label.'''
def __init__(self, filename):
self._internal_table = self.load_from_filename(filename)
def load_from_filename(self, filename):
'''Load a CSV file into a list of lists. The following CSV:
,a,b,c
d,1,2,3
e,4,5,6
f,7,8,9
would become the list:
[['', 'a', 'b', 'c'],
['d', '1', '2', '3'] ...]'''
with open(filename, 'r') as f:
reader = csv.reader(f)
return [row for row in reader]
def get_row(self, key):
'''Gets a list containing all elements of the row specified by key.
Returns a ValueError if the row doesn't exist.'''
for row in self._internal_table:
if row[0] == key:
return row[1:]
raise ValueError('Row not found')
def get_column(self, key):
'''Gets a list containing all elements of the column specified by key.
Returns a ValueError if the column doesn't exist.'''
for i, column in enumerate(self._internal_table[0]):
if column == key:
return [row[i] for row in self._internal_table[1:]]
raise ValueError('Column not found')
def __getitem__(self, key):
'''Returns the row or column linked to the given key, accessed using
subscript notation.'''
if not isinstance(key, str):
raise TypeError('Key must be a string')
try:
return self.get_row(key)
except ValueError:
try:
return self.get_column(key)
except ValueError:
raise ValueError('Key not found in table')
|
Create Table object for representing phonetic inventory
|
Create Table object for representing phonetic inventory
|
Python
|
mit
|
kdelwat/LangEvolve,kdelwat/LangEvolve,kdelwat/LangEvolve
|
Create Table object for representing phonetic inventory
|
import csv
class Table():
'''A Table is an object which represents a 2-dimensional CSV file. Both rows
and columns can be accessed via their key as in a dictionary. This means that
keys cannot appear as both a row and column label.'''
def __init__(self, filename):
self._internal_table = self.load_from_filename(filename)
def load_from_filename(self, filename):
'''Load a CSV file into a list of lists. The following CSV:
,a,b,c
d,1,2,3
e,4,5,6
f,7,8,9
would become the list:
[['', 'a', 'b', 'c'],
['d', '1', '2', '3'] ...]'''
with open(filename, 'r') as f:
reader = csv.reader(f)
return [row for row in reader]
def get_row(self, key):
'''Gets a list containing all elements of the row specified by key.
Returns a ValueError if the row doesn't exist.'''
for row in self._internal_table:
if row[0] == key:
return row[1:]
raise ValueError('Row not found')
def get_column(self, key):
'''Gets a list containing all elements of the column specified by key.
Returns a ValueError if the column doesn't exist.'''
for i, column in enumerate(self._internal_table[0]):
if column == key:
return [row[i] for row in self._internal_table[1:]]
raise ValueError('Column not found')
def __getitem__(self, key):
'''Returns the row or column linked to the given key, accessed using
subscript notation.'''
if not isinstance(key, str):
raise TypeError('Key must be a string')
try:
return self.get_row(key)
except ValueError:
try:
return self.get_column(key)
except ValueError:
raise ValueError('Key not found in table')
|
<commit_before><commit_msg>Create Table object for representing phonetic inventory<commit_after>
|
import csv
class Table():
'''A Table is an object which represents a 2-dimensional CSV file. Both rows
and columns can be accessed via their key as in a dictionary. This means that
keys cannot appear as both a row and column label.'''
def __init__(self, filename):
self._internal_table = self.load_from_filename(filename)
def load_from_filename(self, filename):
'''Load a CSV file into a list of lists. The following CSV:
,a,b,c
d,1,2,3
e,4,5,6
f,7,8,9
would become the list:
[['', 'a', 'b', 'c'],
['d', '1', '2', '3'] ...]'''
with open(filename, 'r') as f:
reader = csv.reader(f)
return [row for row in reader]
def get_row(self, key):
'''Gets a list containing all elements of the row specified by key.
Returns a ValueError if the row doesn't exist.'''
for row in self._internal_table:
if row[0] == key:
return row[1:]
raise ValueError('Row not found')
def get_column(self, key):
'''Gets a list containing all elements of the column specified by key.
Returns a ValueError if the column doesn't exist.'''
for i, column in enumerate(self._internal_table[0]):
if column == key:
return [row[i] for row in self._internal_table[1:]]
raise ValueError('Column not found')
def __getitem__(self, key):
'''Returns the row or column linked to the given key, accessed using
subscript notation.'''
if not isinstance(key, str):
raise TypeError('Key must be a string')
try:
return self.get_row(key)
except ValueError:
try:
return self.get_column(key)
except ValueError:
raise ValueError('Key not found in table')
|
Create Table object for representing phonetic inventory
import csv
class Table():
'''A Table is an object which represents a 2-dimensional CSV file. Both rows
and columns can be accessed via their key as in a dictionary. This means that
keys cannot appear as both a row and column label.'''
def __init__(self, filename):
self._internal_table = self.load_from_filename(filename)
def load_from_filename(self, filename):
'''Load a CSV file into a list of lists. The following CSV:
,a,b,c
d,1,2,3
e,4,5,6
f,7,8,9
would become the list:
[['', 'a', 'b', 'c'],
['d', '1', '2', '3'] ...]'''
with open(filename, 'r') as f:
reader = csv.reader(f)
return [row for row in reader]
def get_row(self, key):
'''Gets a list containing all elements of the row specified by key.
Returns a ValueError if the row doesn't exist.'''
for row in self._internal_table:
if row[0] == key:
return row[1:]
raise ValueError('Row not found')
def get_column(self, key):
'''Gets a list containing all elements of the column specified by key.
Returns a ValueError if the column doesn't exist.'''
for i, column in enumerate(self._internal_table[0]):
if column == key:
return [row[i] for row in self._internal_table[1:]]
raise ValueError('Column not found')
def __getitem__(self, key):
'''Returns the row or column linked to the given key, accessed using
subscript notation.'''
if not isinstance(key, str):
raise TypeError('Key must be a string')
try:
return self.get_row(key)
except ValueError:
try:
return self.get_column(key)
except ValueError:
raise ValueError('Key not found in table')
|
<commit_before><commit_msg>Create Table object for representing phonetic inventory<commit_after>import csv
class Table():
'''A Table is an object which represents a 2-dimensional CSV file. Both rows
and columns can be accessed via their key as in a dictionary. This means that
keys cannot appear as both a row and column label.'''
def __init__(self, filename):
self._internal_table = self.load_from_filename(filename)
def load_from_filename(self, filename):
'''Load a CSV file into a list of lists. The following CSV:
,a,b,c
d,1,2,3
e,4,5,6
f,7,8,9
would become the list:
[['', 'a', 'b', 'c'],
['d', '1', '2', '3'] ...]'''
with open(filename, 'r') as f:
reader = csv.reader(f)
return [row for row in reader]
def get_row(self, key):
'''Gets a list containing all elements of the row specified by key.
Returns a ValueError if the row doesn't exist.'''
for row in self._internal_table:
if row[0] == key:
return row[1:]
raise ValueError('Row not found')
def get_column(self, key):
'''Gets a list containing all elements of the column specified by key.
Returns a ValueError if the column doesn't exist.'''
for i, column in enumerate(self._internal_table[0]):
if column == key:
return [row[i] for row in self._internal_table[1:]]
raise ValueError('Column not found')
def __getitem__(self, key):
'''Returns the row or column linked to the given key, accessed using
subscript notation.'''
if not isinstance(key, str):
raise TypeError('Key must be a string')
try:
return self.get_row(key)
except ValueError:
try:
return self.get_column(key)
except ValueError:
raise ValueError('Key not found in table')
|
|
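For orientation on the Table record above: __getitem__ tries the key as a row label first and falls back to a column label. A minimal usage sketch against the CSV layout given in the class docstring, assuming the Table class from the record is defined or imported in the same module (the file name is illustrative):

# Write the docstring's example CSV, then look up one row and one column.
with open("inventory.csv", "w") as f:
    f.write(",a,b,c\n")
    f.write("d,1,2,3\n")
    f.write("e,4,5,6\n")
    f.write("f,7,8,9\n")

table = Table("inventory.csv")
print(table["d"])  # row lookup    -> ['1', '2', '3']
print(table["b"])  # column lookup -> ['2', '5', '8']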
cdaeb420e0cd817ebf570d5eda46362f4c61c691
|
tests/chainer_tests/functions_tests/utils_tests/test_forget.py
|
tests/chainer_tests/functions_tests/utils_tests/test_forget.py
|
import unittest
import numpy
import chainer
from chainer import functions
from chainer import gradient_check
from chainer import testing
class TestForget(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (3, 4)).astype(numpy.float32)
self.y = numpy.random.uniform(-1, 1, (3, 4)).astype(numpy.float32)
self.gz = numpy.random.uniform(-1, 1, (3, 4)).astype(numpy.float32)
def check_forward(self, x_data, y_data):
x = chainer.Variable(x_data)
y = chainer.Variable(y_data)
z = functions.forget(lambda x, y: (x + y + x,), x, y)
gradient_check.assert_allclose(x_data + y_data + x_data, z.data)
def test_forward_cpu(self):
self.check_forward(self.x, self.y)
def check_backward(self, x_data, y_data, gz_data):
x = chainer.Variable(x_data)
y = chainer.Variable(y_data)
z = functions.forget(lambda x, y: (x + y + x,), x, y)
z.grad = gz_data
z.backward()
gradient_check.assert_allclose(x.grad, gz_data * 2)
gradient_check.assert_allclose(y.grad, gz_data)
def test_backward_cpu(self):
self.check_backward(self.x, self.y, self.gz)
class TestForgetError(unittest.TestCase):
def test_not_callable(self):
with self.assertRaises(TypeError):
functions.forget(1)
def test_invalid_type(self):
with self.assertRaisesRegexp(RuntimeError, 'int'):
functions.forget(lambda: 1)
def test_invalid_tuple_type(self):
with self.assertRaisesRegexp(RuntimeError, '1-th.*int'):
functions.forget(lambda: (1,))
testing.run_module(__name__, __file__)
|
Add test for forget function
|
Add test for forget function
|
Python
|
mit
|
wkentaro/chainer,keisuke-umezawa/chainer,okuta/chainer,wkentaro/chainer,hvy/chainer,niboshi/chainer,hvy/chainer,niboshi/chainer,hvy/chainer,niboshi/chainer,kiyukuta/chainer,okuta/chainer,okuta/chainer,cupy/cupy,keisuke-umezawa/chainer,ronekko/chainer,ktnyt/chainer,jnishi/chainer,hvy/chainer,tkerola/chainer,keisuke-umezawa/chainer,chainer/chainer,jnishi/chainer,okuta/chainer,wkentaro/chainer,pfnet/chainer,cupy/cupy,keisuke-umezawa/chainer,rezoo/chainer,aonotas/chainer,chainer/chainer,ysekky/chainer,delta2323/chainer,anaruse/chainer,chainer/chainer,kashif/chainer,niboshi/chainer,cupy/cupy,cupy/cupy,wkentaro/chainer,ktnyt/chainer,ktnyt/chainer,jnishi/chainer,jnishi/chainer,chainer/chainer,ktnyt/chainer
|
Add test for forget function
|
import unittest
import numpy
import chainer
from chainer import functions
from chainer import gradient_check
from chainer import testing
class TestForget(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (3, 4)).astype(numpy.float32)
self.y = numpy.random.uniform(-1, 1, (3, 4)).astype(numpy.float32)
self.gz = numpy.random.uniform(-1, 1, (3, 4)).astype(numpy.float32)
def check_forward(self, x_data, y_data):
x = chainer.Variable(x_data)
y = chainer.Variable(y_data)
z = functions.forget(lambda x, y: (x + y + x,), x, y)
gradient_check.assert_allclose(x_data + y_data + x_data, z.data)
def test_forward_cpu(self):
self.check_forward(self.x, self.y)
def check_backward(self, x_data, y_data, gz_data):
x = chainer.Variable(x_data)
y = chainer.Variable(y_data)
z = functions.forget(lambda x, y: (x + y + x,), x, y)
z.grad = gz_data
z.backward()
gradient_check.assert_allclose(x.grad, gz_data * 2)
gradient_check.assert_allclose(y.grad, gz_data)
def test_backward_cpu(self):
self.check_backward(self.x, self.y, self.gz)
class TestForgetError(unittest.TestCase):
def test_not_callable(self):
with self.assertRaises(TypeError):
functions.forget(1)
def test_invalid_type(self):
with self.assertRaisesRegexp(RuntimeError, 'int'):
functions.forget(lambda: 1)
def test_invalid_tuple_type(self):
with self.assertRaisesRegexp(RuntimeError, '1-th.*int'):
functions.forget(lambda: (1,))
testing.run_module(__name__, __file__)
|
<commit_before><commit_msg>Add test for forget function<commit_after>
|
import unittest
import numpy
import chainer
from chainer import functions
from chainer import gradient_check
from chainer import testing
class TestForget(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (3, 4)).astype(numpy.float32)
self.y = numpy.random.uniform(-1, 1, (3, 4)).astype(numpy.float32)
self.gz = numpy.random.uniform(-1, 1, (3, 4)).astype(numpy.float32)
def check_forward(self, x_data, y_data):
x = chainer.Variable(x_data)
y = chainer.Variable(y_data)
z = functions.forget(lambda x, y: (x + y + x,), x, y)
gradient_check.assert_allclose(x_data + y_data + x_data, z.data)
def test_forward_cpu(self):
self.check_forward(self.x, self.y)
def check_backward(self, x_data, y_data, gz_data):
x = chainer.Variable(x_data)
y = chainer.Variable(y_data)
z = functions.forget(lambda x, y: (x + y + x,), x, y)
z.grad = gz_data
z.backward()
gradient_check.assert_allclose(x.grad, gz_data * 2)
gradient_check.assert_allclose(y.grad, gz_data)
def test_backward_cpu(self):
self.check_backward(self.x, self.y, self.gz)
class TestForgetError(unittest.TestCase):
def test_not_callable(self):
with self.assertRaises(TypeError):
functions.forget(1)
def test_invalid_type(self):
with self.assertRaisesRegexp(RuntimeError, 'int'):
functions.forget(lambda: 1)
def test_invalid_tuple_type(self):
with self.assertRaisesRegexp(RuntimeError, '1-th.*int'):
functions.forget(lambda: (1,))
testing.run_module(__name__, __file__)
|
Add test for forget function
import unittest
import numpy
import chainer
from chainer import functions
from chainer import gradient_check
from chainer import testing
class TestForget(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (3, 4)).astype(numpy.float32)
self.y = numpy.random.uniform(-1, 1, (3, 4)).astype(numpy.float32)
self.gz = numpy.random.uniform(-1, 1, (3, 4)).astype(numpy.float32)
def check_forward(self, x_data, y_data):
x = chainer.Variable(x_data)
y = chainer.Variable(y_data)
z = functions.forget(lambda x, y: (x + y + x,), x, y)
gradient_check.assert_allclose(x_data + y_data + x_data, z.data)
def test_forward_cpu(self):
self.check_forward(self.x, self.y)
def check_backward(self, x_data, y_data, gz_data):
x = chainer.Variable(x_data)
y = chainer.Variable(y_data)
z = functions.forget(lambda x, y: (x + y + x,), x, y)
z.grad = gz_data
z.backward()
gradient_check.assert_allclose(x.grad, gz_data * 2)
gradient_check.assert_allclose(y.grad, gz_data)
def test_backward_cpu(self):
self.check_backward(self.x, self.y, self.gz)
class TestForgetError(unittest.TestCase):
def test_not_callable(self):
with self.assertRaises(TypeError):
functions.forget(1)
def test_invalid_type(self):
with self.assertRaisesRegexp(RuntimeError, 'int'):
functions.forget(lambda: 1)
def test_invalid_tuple_type(self):
with self.assertRaisesRegexp(RuntimeError, '1-th.*int'):
functions.forget(lambda: (1,))
testing.run_module(__name__, __file__)
|
<commit_before><commit_msg>Add test for forget function<commit_after>import unittest
import numpy
import chainer
from chainer import functions
from chainer import gradient_check
from chainer import testing
class TestForget(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (3, 4)).astype(numpy.float32)
self.y = numpy.random.uniform(-1, 1, (3, 4)).astype(numpy.float32)
self.gz = numpy.random.uniform(-1, 1, (3, 4)).astype(numpy.float32)
def check_forward(self, x_data, y_data):
x = chainer.Variable(x_data)
y = chainer.Variable(y_data)
z = functions.forget(lambda x, y: (x + y + x,), x, y)
gradient_check.assert_allclose(x_data + y_data + x_data, z.data)
def test_forward_cpu(self):
self.check_forward(self.x, self.y)
def check_backward(self, x_data, y_data, gz_data):
x = chainer.Variable(x_data)
y = chainer.Variable(y_data)
z = functions.forget(lambda x, y: (x + y + x,), x, y)
z.grad = gz_data
z.backward()
gradient_check.assert_allclose(x.grad, gz_data * 2)
gradient_check.assert_allclose(y.grad, gz_data)
def test_backward_cpu(self):
self.check_backward(self.x, self.y, self.gz)
class TestForgetError(unittest.TestCase):
def test_not_callable(self):
with self.assertRaises(TypeError):
functions.forget(1)
def test_invalid_type(self):
with self.assertRaisesRegexp(RuntimeError, 'int'):
functions.forget(lambda: 1)
def test_invalid_tuple_type(self):
with self.assertRaisesRegexp(RuntimeError, '1-th.*int'):
functions.forget(lambda: (1,))
testing.run_module(__name__, __file__)
|
|
d01ad432080446d94028f3d1de76692354871a77
|
bin/change-version.py
|
bin/change-version.py
|
#!/usr/bin/env python
import glob
import shutil
import sys
def change(filename, oldversion, newversion):
tempfile = filename + '.tmp'
fi = open(filename, 'r')
fo = open(tempfile, 'w')
for line in fi:
nl = line.replace(oldversion, newversion)
fo.write(nl)
fo.close()
fi.close()
shutil.copymode(filename, tempfile)
shutil.move(tempfile, filename)
print "Processed " + filename
if len(sys.argv) < 3:
print """
Usage: bin/change-version.py <old-version> <new-version>
"""
sys.exit(-1)
oldversion = sys.argv[1]
newversion = sys.argv[2]
files = [
'bin/common.sh',
'commons/build.sbt',
'dist/bin/*',
'dist/conf/predictionio.conf',
'output/build.sbt',
'process/commons/hadoop/scalding/build.sbt',
'process/engines/itemrec/algorithms/hadoop/scalding/build.sbt',
'process/engines/itemrec/algorithms/hadoop/scalding/*/build.sbt',
'process/engines/itemrec/algorithms/scala/mahout/build.sbt',
'process/engines/itemrec/algorithms/scala/mahout/commons/build.sbt',
'process/engines/itemrec/evaluations/hadoop/scalding/build.sbt',
'process/engines/itemrec/evaluations/hadoop/scalding/metrics/map/build.sbt',
'process/engines/itemrec/evaluations/hadoop/scalding/trainingtestsplit/build.sbt',
'process/engines/itemrec/evaluations/scala/*/build.sbt',
'process/engines/itemsim/algorithms/hadoop/scalding/build.sbt',
'process/engines/itemsim/algorithms/hadoop/scalding/itemsimcf/build.sbt',
'servers/*/project/Build.scala',
'servers/scheduler/conf/application.conf',
'tools/*/build.sbt',
'tools/migration/*/*/build.sbt'
]
for f in files:
for rf in glob.glob(f):
change(rf, oldversion, newversion)
|
Add script to change versions of all components
|
Add script to change versions of all components
|
Python
|
apache-2.0
|
zafarella/PredictionIO,dszeto/incubator-predictionio,net-shell/PredictionIO,alex9311/PredictionIO,arudenko/PredictionIO,thiagoveras/PredictionIO,himanshudhami/PredictionIO,jasonchaffee/PredictionIO,akaash-nigam/PredictionIO,wangmiao1981/PredictionIO,jlegendary/PredictionIO,tuxdna/PredictionIO,thiagoveras/PredictionIO,michaelshing/PredictionIO,marevol/incubator-predictionio,jlegendary/PredictionIO,wenaz/PredictionIO,net-shell/PredictionIO,elkingtonmcb/PredictionIO,Ribeiro/PredictionIO,himanshudhami/PredictionIO,takezoe/incubator-predictionio,indranig/PredictionIO,skmezanul/PredictionIO,ch33hau/PredictionIO,ydanilenko/PredictionIO,doron123/PredictionIO,rsganesh83/PredictionIO,pferrel/PredictionIO,nvoron23/PredictionIO,beni55/PredictionIO,jingyidata/PredictionIO,marevol/incubator-predictionio,nvoron23/PredictionIO,biddyweb/PredictionIO,adamharish/PredictionIO,dszeto/incubator-predictionio,tuxdna/PredictionIO,sekaiamber/PredictionIO,prmdsharma/PredictionIO,thiagoveras/PredictionIO,codingang/PredictionIO,atyenoria/PredictionIO,biddyweb/PredictionIO,marevol/incubator-predictionio,elkingtonmcb/PredictionIO,skmezanul/PredictionIO,codingang/PredictionIO,BuildAPE/PredictionIO,doron123/PredictionIO,TheDataShed/PredictionIO,PredictionIO/PredictionIO,cristiancrc/PredictionIO,initChan/PredictionIO,adamharish/PredictionIO,ch33hau/PredictionIO,doron123/PredictionIO,sevenihust/PredictionIO,hsavit1/PredictionIO,indranig/PredictionIO,adamharish/PredictionIO,PredictionIO/PredictionIO,sekaiamber/PredictionIO,BojianLi/PredictionIO,shimamoto/incubator-predictionio,takezoe/incubator-predictionio,druzbikova/PredictionIO,hsavit1/PredictionIO,druzbikova/PredictionIO,TheDataShed/PredictionIO,initChan/PredictionIO,TheDataShed/PredictionIO,beni55/PredictionIO,ch33hau/PredictionIO,mars/incubator-predictionio,schon/PredictionIO,nvoron23/PredictionIO,ionux/PredictionIO,Emaasit/PredictionIO,prmdsharma/PredictionIO,zafarella/PredictionIO,Ribeiro/PredictionIO,b-cuts/PredictionIO,rsganesh83/PredictionIO,wangmiao1981/PredictionIO,mars/incubator-predictionio,akaash-nigam/PredictionIO,prmdsharma/PredictionIO,b-cuts/PredictionIO,jasonchaffee/PredictionIO,tastatur/PredictionIO,codingang/PredictionIO,doron123/PredictionIO,indranig/PredictionIO,jlegendary/PredictionIO,cristiancrc/PredictionIO,jlegendary/PredictionIO,elkingtonmcb/PredictionIO,BojianLi/PredictionIO,shimamoto/incubator-predictionio,atyenoria/PredictionIO,takezoe/incubator-predictionio,doron123/PredictionIO,akaash-nigam/PredictionIO,sevenihust/PredictionIO,TheDataShed/PredictionIO,EmergentOrder/PredictionIO,net-shell/PredictionIO,ionux/PredictionIO,sevenihust/PredictionIO,shimamoto/incubator-predictionio,nvoron23/PredictionIO,himanshudhami/PredictionIO,akaash-nigam/PredictionIO,wangmiao1981/PredictionIO,rbo7nik/PredictionIO,nvoron23/PredictionIO,EmergentOrder/PredictionIO,PredictionIO/PredictionIO,jingyidata/PredictionIO,jasonchaffee/PredictionIO,BojianLi/PredictionIO,djeraseit/PredictionIO,schon/PredictionIO,indranig/PredictionIO,clemp6r/PredictionIO,atyenoria/PredictionIO,beni55/PredictionIO,takeshineshiro/PredictionIO,rbo7nik/PredictionIO,akaash-nigam/PredictionIO,stephen-corgiat/PredictionIO,stephen-corgiat/PredictionIO,cristiancrc/PredictionIO,Ribeiro/PredictionIO,codingang/PredictionIO,alex9311/PredictionIO,wenaz/PredictionIO,takeshineshiro/PredictionIO,atyenoria/PredictionIO,takeshineshiro/PredictionIO,net-shell/PredictionIO,shimamoto/incubator-predictionio,ionux/PredictionIO,EmergentOrder/PredictionIO,initChan/PredictionIO,BojianLi/PredictionIO,indranig/Predictio
nIO,codingang/PredictionIO,tastatur/PredictionIO,ydanilenko/PredictionIO,rbo7nik/PredictionIO,Emaasit/PredictionIO,BuildAPE/PredictionIO,clemp6r/PredictionIO,BuildAPE/PredictionIO,stephen-corgiat/PredictionIO,takezoe/incubator-predictionio,tuxdna/PredictionIO,pferrel/PredictionIO,zafarella/PredictionIO,druzbikova/PredictionIO,michaelshing/PredictionIO,biddyweb/PredictionIO,djeraseit/PredictionIO,hsavit1/PredictionIO,skmezanul/PredictionIO,dszeto/incubator-predictionio,tastatur/PredictionIO,michaelshing/PredictionIO,Ribeiro/PredictionIO,rsganesh83/PredictionIO,Ribeiro/PredictionIO,arudenko/PredictionIO,mars/incubator-predictionio,arudenko/PredictionIO,jlegendary/PredictionIO,mars/incubator-predictionio,atyenoria/PredictionIO,alex9311/PredictionIO,TheDataShed/PredictionIO,prmdsharma/PredictionIO,ydanilenko/PredictionIO,djeraseit/PredictionIO,ydanilenko/PredictionIO,jingyidata/PredictionIO,Emaasit/PredictionIO,sekaiamber/PredictionIO,clemp6r/PredictionIO,wenaz/PredictionIO,BojianLi/PredictionIO,pferrel/PredictionIO,dszeto/incubator-predictionio,net-shell/PredictionIO,prmdsharma/PredictionIO,marevol/incubator-predictionio,b-cuts/PredictionIO,himanshudhami/PredictionIO,ydanilenko/PredictionIO,schon/PredictionIO
|
Add script to change versions of all components
|
#!/usr/bin/env python
import glob
import shutil
import sys
def change(filename, oldversion, newversion):
tempfile = filename + '.tmp'
fi = open(filename, 'r')
fo = open(tempfile, 'w')
for line in fi:
nl = line.replace(oldversion, newversion)
fo.write(nl)
fo.close()
fi.close()
shutil.copymode(filename, tempfile)
shutil.move(tempfile, filename)
print "Processed " + filename
if len(sys.argv) < 3:
print """
Usage: bin/change-version.py <old-version> <new-version>
"""
sys.exit(-1)
oldversion = sys.argv[1]
newversion = sys.argv[2]
files = [
'bin/common.sh',
'commons/build.sbt',
'dist/bin/*',
'dist/conf/predictionio.conf',
'output/build.sbt',
'process/commons/hadoop/scalding/build.sbt',
'process/engines/itemrec/algorithms/hadoop/scalding/build.sbt',
'process/engines/itemrec/algorithms/hadoop/scalding/*/build.sbt',
'process/engines/itemrec/algorithms/scala/mahout/build.sbt',
'process/engines/itemrec/algorithms/scala/mahout/commons/build.sbt',
'process/engines/itemrec/evaluations/hadoop/scalding/build.sbt',
'process/engines/itemrec/evaluations/hadoop/scalding/metrics/map/build.sbt',
'process/engines/itemrec/evaluations/hadoop/scalding/trainingtestsplit/build.sbt',
'process/engines/itemrec/evaluations/scala/*/build.sbt',
'process/engines/itemsim/algorithms/hadoop/scalding/build.sbt',
'process/engines/itemsim/algorithms/hadoop/scalding/itemsimcf/build.sbt',
'servers/*/project/Build.scala',
'servers/scheduler/conf/application.conf',
'tools/*/build.sbt',
'tools/migration/*/*/build.sbt'
]
for f in files:
for rf in glob.glob(f):
change(rf, oldversion, newversion)
|
<commit_before><commit_msg>Add script to change versions of all components<commit_after>
|
#!/usr/bin/env python
import glob
import shutil
import sys
def change(filename, oldversion, newversion):
tempfile = filename + '.tmp'
fi = open(filename, 'r')
fo = open(tempfile, 'w')
for line in fi:
nl = line.replace(oldversion, newversion)
fo.write(nl)
fo.close()
fi.close()
shutil.copymode(filename, tempfile)
shutil.move(tempfile, filename)
print "Processed " + filename
if len(sys.argv) < 3:
print """
Usage: bin/change-version.py <old-version> <new-version>
"""
sys.exit(-1)
oldversion = sys.argv[1]
newversion = sys.argv[2]
files = [
'bin/common.sh',
'commons/build.sbt',
'dist/bin/*',
'dist/conf/predictionio.conf',
'output/build.sbt',
'process/commons/hadoop/scalding/build.sbt',
'process/engines/itemrec/algorithms/hadoop/scalding/build.sbt',
'process/engines/itemrec/algorithms/hadoop/scalding/*/build.sbt',
'process/engines/itemrec/algorithms/scala/mahout/build.sbt',
'process/engines/itemrec/algorithms/scala/mahout/commons/build.sbt',
'process/engines/itemrec/evaluations/hadoop/scalding/build.sbt',
'process/engines/itemrec/evaluations/hadoop/scalding/metrics/map/build.sbt',
'process/engines/itemrec/evaluations/hadoop/scalding/trainingtestsplit/build.sbt',
'process/engines/itemrec/evaluations/scala/*/build.sbt',
'process/engines/itemsim/algorithms/hadoop/scalding/build.sbt',
'process/engines/itemsim/algorithms/hadoop/scalding/itemsimcf/build.sbt',
'servers/*/project/Build.scala',
'servers/scheduler/conf/application.conf',
'tools/*/build.sbt',
'tools/migration/*/*/build.sbt'
]
for f in files:
for rf in glob.glob(f):
change(rf, oldversion, newversion)
|
Add script to change versions of all components
#!/usr/bin/env python
import glob
import shutil
import sys
def change(filename, oldversion, newversion):
tempfile = filename + '.tmp'
fi = open(filename, 'r')
fo = open(tempfile, 'w')
for line in fi:
nl = line.replace(oldversion, newversion)
fo.write(nl)
fo.close()
fi.close()
shutil.copymode(filename, tempfile)
shutil.move(tempfile, filename)
print "Processed " + filename
if len(sys.argv) < 3:
print """
Usage: bin/change-version.py <old-version> <new-version>
"""
sys.exit(-1)
oldversion = sys.argv[1]
newversion = sys.argv[2]
files = [
'bin/common.sh',
'commons/build.sbt',
'dist/bin/*',
'dist/conf/predictionio.conf',
'output/build.sbt',
'process/commons/hadoop/scalding/build.sbt',
'process/engines/itemrec/algorithms/hadoop/scalding/build.sbt',
'process/engines/itemrec/algorithms/hadoop/scalding/*/build.sbt',
'process/engines/itemrec/algorithms/scala/mahout/build.sbt',
'process/engines/itemrec/algorithms/scala/mahout/commons/build.sbt',
'process/engines/itemrec/evaluations/hadoop/scalding/build.sbt',
'process/engines/itemrec/evaluations/hadoop/scalding/metrics/map/build.sbt',
'process/engines/itemrec/evaluations/hadoop/scalding/trainingtestsplit/build.sbt',
'process/engines/itemrec/evaluations/scala/*/build.sbt',
'process/engines/itemsim/algorithms/hadoop/scalding/build.sbt',
'process/engines/itemsim/algorithms/hadoop/scalding/itemsimcf/build.sbt',
'servers/*/project/Build.scala',
'servers/scheduler/conf/application.conf',
'tools/*/build.sbt',
'tools/migration/*/*/build.sbt'
]
for f in files:
for rf in glob.glob(f):
change(rf, oldversion, newversion)
|
<commit_before><commit_msg>Add script to change versions of all components<commit_after>#!/usr/bin/env python
import glob
import shutil
import sys
def change(filename, oldversion, newversion):
tempfile = filename + '.tmp'
fi = open(filename, 'r')
fo = open(tempfile, 'w')
for line in fi:
nl = line.replace(oldversion, newversion)
fo.write(nl)
fo.close()
fi.close()
shutil.copymode(filename, tempfile)
shutil.move(tempfile, filename)
print "Processed " + filename
if len(sys.argv) < 3:
print """
Usage: bin/change-version.py <old-version> <new-version>
"""
sys.exit(-1)
oldversion = sys.argv[1]
newversion = sys.argv[2]
files = [
'bin/common.sh',
'commons/build.sbt',
'dist/bin/*',
'dist/conf/predictionio.conf',
'output/build.sbt',
'process/commons/hadoop/scalding/build.sbt',
'process/engines/itemrec/algorithms/hadoop/scalding/build.sbt',
'process/engines/itemrec/algorithms/hadoop/scalding/*/build.sbt',
'process/engines/itemrec/algorithms/scala/mahout/build.sbt',
'process/engines/itemrec/algorithms/scala/mahout/commons/build.sbt',
'process/engines/itemrec/evaluations/hadoop/scalding/build.sbt',
'process/engines/itemrec/evaluations/hadoop/scalding/metrics/map/build.sbt',
'process/engines/itemrec/evaluations/hadoop/scalding/trainingtestsplit/build.sbt',
'process/engines/itemrec/evaluations/scala/*/build.sbt',
'process/engines/itemsim/algorithms/hadoop/scalding/build.sbt',
'process/engines/itemsim/algorithms/hadoop/scalding/itemsimcf/build.sbt',
'servers/*/project/Build.scala',
'servers/scheduler/conf/application.conf',
'tools/*/build.sbt',
'tools/migration/*/*/build.sbt'
]
for f in files:
for rf in glob.glob(f):
change(rf, oldversion, newversion)
|
|
f3b57ef7713086a86f51a8ed98d52367b02c8959
|
project_fish/whats_fresh/test_models.py
|
project_fish/whats_fresh/test_models.py
|
from django.test import TestCase
from django.conf import settings
from whats_fresh.models import *
from django.contrib.gis.db import models
import os
import time
import sys
import datetime
class ImageTestCase(TestCase):
def setUp(self):
# Set MEDIA ROOT to sample data for this test
TEST_ROOT = os.path.abspath(os.path.dirname(__file__))
self._old_MEDIA_ROOT = settings.MEDIA_ROOT
settings.MEDIA_ROOT = os.path.join(TEST_ROOT, 'testdata/media/')
self.image = Image(image='cat.jpg', caption='Meow!')
# Set creation time variable to test if image.creation records properly
self.creation_time = datetime.datetime.now()
self.image.save()
def tearDown(self):
# reset MEDIA_ROOT
settings.MEDIA_ROOT = self._old_MEDIA_ROOT
def test_image_uploaded(self):
"""
Tests that the image was successfully uploaded
"""
self.assertEquals(self.image.image.url, '/media/cat.jpg')
self.assertEquals(self.image.caption, 'Meow!')
self.assertTrue(
self.image.created.replace(tzinfo=None) - self.creation_time <
datetime.timedelta(milliseconds = 10))
def test_replace_image(self):
"""
Tests that the image was properly replaced with a new image.
Also sets modified time variable for later testing to make
sure the time variables were properly set and saved.
"""
# Sleep 25 milliseconds so that the modified time won't be within
# The defined range of 10 milliseconds
time.sleep(0.025)
self.image.image = 'dog.jpg'
self.mod_time = datetime.datetime.now()
self.image.save()
self.assertEquals(self.image.image.url, '/media/dog.jpg')
self.assertTrue(
self.image.modified.replace(tzinfo=None) - self.mod_time <
datetime.timedelta(milliseconds = 10))
class PreparationsTestCase(TestCase):
def test_fields_exist(self):
model = models.get_model('whats_fresh', 'Preparation')
self.assertEqual(
models.TextField,
type(model._meta.get_field_by_name('name')))
self.assertEqual(
models.TextField,
type(model._meta.get_field_by_name('description')))
self.assertEqual(
models.TextField,
type(model._meta.get_field_by_name('additional_info')))
def test_no_additional_fields(self):
fields = Preparation._meta.get_all_field_names()
expected_fields = ['name', 'description', 'additional_info', u'id']
self.assertTrue(sorted(fields) == sorted(expected_fields))
|
Make preparation model tests actually work
|
Make preparation model tests actually work
|
Python
|
apache-2.0
|
iCHAIT/whats-fresh-api,osu-cass/whats-fresh-api,iCHAIT/whats-fresh-api,osu-cass/whats-fresh-api,osu-cass/whats-fresh-api,osu-cass/whats-fresh-api,iCHAIT/whats-fresh-api,iCHAIT/whats-fresh-api
|
Make preparation model tests actually work
|
from django.test import TestCase
from django.conf import settings
from whats_fresh.models import *
from django.contrib.gis.db import models
import os
import time
import sys
import datetime
class ImageTestCase(TestCase):
def setUp(self):
# Set MEDIA ROOT to sample data for this test
TEST_ROOT = os.path.abspath(os.path.dirname(__file__))
self._old_MEDIA_ROOT = settings.MEDIA_ROOT
settings.MEDIA_ROOT = os.path.join(TEST_ROOT, 'testdata/media/')
self.image = Image(image='cat.jpg', caption='Meow!')
# Set creation time variable to test if image.creation records properly
self.creation_time = datetime.datetime.now()
self.image.save()
def tearDown(self):
# reset MEDIA_ROOT
settings.MEDIA_ROOT = self._old_MEDIA_ROOT
def test_image_uploaded(self):
"""
Tests that the image was successfully uploaded
"""
self.assertEquals(self.image.image.url, '/media/cat.jpg')
self.assertEquals(self.image.caption, 'Meow!')
self.assertTrue(
self.image.created.replace(tzinfo=None) - self.creation_time <
datetime.timedelta(milliseconds = 10))
def test_replace_image(self):
"""
Tests that the image was properly replaced with a new image.
Also sets modified time variable for later testing to make
sure the time variables were properly set and saved.
"""
# Sleep 25 milliseconds so that the modified time won't be within
# The defined range of 10 milliseconds
time.sleep(0.025)
self.image.image = 'dog.jpg'
self.mod_time = datetime.datetime.now()
self.image.save()
self.assertEquals(self.image.image.url, '/media/dog.jpg')
self.assertTrue(
self.image.modified.replace(tzinfo=None) - self.mod_time <
datetime.timedelta(milliseconds = 10))
class PreparationsTestCase(TestCase):
def test_fields_exist(self):
model = models.get_model('whats_fresh', 'Preparation')
self.assertEqual(
models.TextField,
type(model._meta.get_field_by_name('name')))
self.assertEqual(
models.TextField,
type(model._meta.get_field_by_name('description')))
self.assertEqual(
models.TextField,
type(model._meta.get_field_by_name('additional_info')))
def test_no_additional_fields(self):
fields = Preparation._meta.get_all_field_names()
expected_fields = ['name', 'description', 'additional_info', u'id']
self.assertTrue(sorted(fields) == sorted(expected_fields))
|
<commit_before><commit_msg>Make preparation model tests actually work<commit_after>
|
from django.test import TestCase
from django.conf import settings
from whats_fresh.models import *
from django.contrib.gis.db import models
import os
import time
import sys
import datetime
class ImageTestCase(TestCase):
def setUp(self):
# Set MEDIA ROOT to sample data for this test
TEST_ROOT = os.path.abspath(os.path.dirname(__file__))
self._old_MEDIA_ROOT = settings.MEDIA_ROOT
settings.MEDIA_ROOT = os.path.join(TEST_ROOT, 'testdata/media/')
self.image = Image(image='cat.jpg', caption='Meow!')
# Set creation time variable to test if image.creation records properly
self.creation_time = datetime.datetime.now()
self.image.save()
def tearDown(self):
# reset MEDIA_ROOT
settings.MEDIA_ROOT = self._old_MEDIA_ROOT
def test_image_uploaded(self):
"""
Tests that the image was successfully uploaded
"""
self.assertEquals(self.image.image.url, '/media/cat.jpg')
self.assertEquals(self.image.caption, 'Meow!')
self.assertTrue(
self.image.created.replace(tzinfo=None) - self.creation_time <
datetime.timedelta(milliseconds = 10))
def test_replace_image(self):
"""
Tests that the image was properly replaced with a new image.
Also sets modified time variable for later testing to make
sure the time variables were properly set and saved.
"""
# Sleep 25 milliseconds so that the modified time won't be within
# The defined range of 10 milliseconds
time.sleep(0.025)
self.image.image = 'dog.jpg'
self.mod_time = datetime.datetime.now()
self.image.save()
self.assertEquals(self.image.image.url, '/media/dog.jpg')
self.assertTrue(
self.image.modified.replace(tzinfo=None) - self.mod_time <
datetime.timedelta(milliseconds = 10))
class PreparationsTestCase(TestCase):
def test_fields_exist(self):
model = models.get_model('whats_fresh', 'Preparation')
self.assertEqual(
models.TextField,
type(model._meta.get_field_by_name('name')))
self.assertEqual(
models.TextField,
type(model._meta.get_field_by_name('description')))
self.assertEqual(
models.TextField,
type(model._meta.get_field_by_name('additional_info')))
def test_no_additional_fields(self):
fields = Preparation._meta.get_all_field_names()
expected_fields = ['name', 'description', 'additional_info', u'id']
self.assertTrue(sorted(fields) == sorted(expected_fields))
|
Make preparation model tests actually work
from django.test import TestCase
from django.conf import settings
from whats_fresh.models import *
from django.contrib.gis.db import models
import os
import time
import sys
import datetime
class ImageTestCase(TestCase):
def setUp(self):
# Set MEDIA ROOT to sample data for this test
TEST_ROOT = os.path.abspath(os.path.dirname(__file__))
self._old_MEDIA_ROOT = settings.MEDIA_ROOT
settings.MEDIA_ROOT = os.path.join(TEST_ROOT, 'testdata/media/')
self.image = Image(image='cat.jpg', caption='Meow!')
# Set creation time variable to test if image.creation records properly
self.creation_time = datetime.datetime.now()
self.image.save()
def tearDown(self):
# reset MEDIA_ROOT
settings.MEDIA_ROOT = self._old_MEDIA_ROOT
def test_image_uploaded(self):
"""
Tests that the image was successfully uploaded
"""
self.assertEquals(self.image.image.url, '/media/cat.jpg')
self.assertEquals(self.image.caption, 'Meow!')
self.assertTrue(
self.image.created.replace(tzinfo=None) - self.creation_time <
datetime.timedelta(milliseconds = 10))
def test_replace_image(self):
"""
Tests that the image was properly replaced with a new image.
Also sets modified time variable for later testing to make
sure the time variables were properly set and saved.
"""
# Sleep 25 milliseconds so that the modified time won't be within
# The defined range of 10 milliseconds
time.sleep(0.025)
self.image.image = 'dog.jpg'
self.mod_time = datetime.datetime.now()
self.image.save()
self.assertEquals(self.image.image.url, '/media/dog.jpg')
self.assertTrue(
self.image.modified.replace(tzinfo=None) - self.mod_time <
datetime.timedelta(milliseconds = 10))
class PreparationsTestCase(TestCase):
def test_fields_exist(self):
model = models.get_model('whats_fresh', 'Preparation')
self.assertEqual(
models.TextField,
type(model._meta.get_field_by_name('name')))
self.assertEqual(
models.TextField,
type(model._meta.get_field_by_name('description')))
self.assertEqual(
models.TextField,
type(model._meta.get_field_by_name('additional_info')))
def test_no_additional_fields(self):
fields = Preparation._meta.get_all_field_names()
expected_fields = ['name', 'description', 'additional_info', u'id']
self.assertTrue(sorted(fields) == sorted(expected_fields))
|
<commit_before><commit_msg>Make preparation model tests actually work<commit_after>from django.test import TestCase
from django.conf import settings
from whats_fresh.models import *
from django.contrib.gis.db import models
import os
import time
import sys
import datetime
class ImageTestCase(TestCase):
def setUp(self):
# Set MEDIA ROOT to sample data for this test
TEST_ROOT = os.path.abspath(os.path.dirname(__file__))
self._old_MEDIA_ROOT = settings.MEDIA_ROOT
settings.MEDIA_ROOT = os.path.join(TEST_ROOT, 'testdata/media/')
self.image = Image(image='cat.jpg', caption='Meow!')
# Set creation time variable to test if image.creation records properly
self.creation_time = datetime.datetime.now()
self.image.save()
def tearDown(self):
# reset MEDIA_ROOT
settings.MEDIA_ROOT = self._old_MEDIA_ROOT
def test_image_uploaded(self):
"""
Tests that the image was successfully uploaded
"""
self.assertEquals(self.image.image.url, '/media/cat.jpg')
self.assertEquals(self.image.caption, 'Meow!')
self.assertTrue(
self.image.created.replace(tzinfo=None) - self.creation_time <
datetime.timedelta(milliseconds = 10))
def test_replace_image(self):
"""
Tests that the image was properly replaced with a new image.
Also sets modified time variable for later testing to make
sure the time variables were properly set and saved.
"""
# Sleep 25 milliseconds so that the modified time won't be within
# The defined range of 10 milliseconds
time.sleep(0.025)
self.image.image = 'dog.jpg'
self.mod_time = datetime.datetime.now()
self.image.save()
self.assertEquals(self.image.image.url, '/media/dog.jpg')
self.assertTrue(
self.image.modified.replace(tzinfo=None) - self.mod_time <
datetime.timedelta(milliseconds = 10))
class PreparationsTestCase(TestCase):
def test_fields_exist(self):
model = models.get_model('whats_fresh', 'Preparation')
self.assertEqual(
models.TextField,
type(model._meta.get_field_by_name('name')))
self.assertEqual(
models.TextField,
type(model._meta.get_field_by_name('description')))
self.assertEqual(
models.TextField,
type(model._meta.get_field_by_name('additional_info')))
def test_no_additional_fields(self):
fields = Preparation._meta.get_all_field_names()
expected_fields = ['name', 'description', 'additional_info', u'id']
self.assertTrue(sorted(fields) == sorted(expected_fields))
|
|
7106568947142b472498def88d8cef59dd296934
|
ureport/contacts/migrations/0006_auto_20151007_1358.py
|
ureport/contacts/migrations/0006_auto_20151007_1358.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.core.cache import cache
def clear_contacts(apps, schema_editor):
Contact = apps.get_model('contacts', 'Contact')
# delete fetched contacts
Contact.objects.all().delete()
# clear redis cache and locks to allow the next task to fetch all the contacts
cache.delete_pattern('last:fetch_contacts:*')
cache.delete_pattern('fetch_contacts')
cache.delete_pattern('fetch_contacts*')
class Migration(migrations.Migration):
dependencies = [
('contacts', '0005_auto_20150921_0855'),
]
operations = [
migrations.RunPython(clear_contacts),
]
|
Add migrations to clear current contacts objects, and their redis keys
|
Add migrations to clear current contacts objects, and their redis keys
|
Python
|
agpl-3.0
|
xkmato/ureport,auduaboki/ureport,Ilhasoft/ureport,eHealthAfrica/ureport,Ilhasoft/ureport,xkmato/ureport,eHealthAfrica/ureport,rapidpro/ureport,rapidpro/ureport,auduaboki/ureport,Ilhasoft/ureport,eHealthAfrica/ureport,rapidpro/ureport,auduaboki/ureport,Ilhasoft/ureport,rapidpro/ureport,xkmato/ureport
|
Add migrations to clear current contacts objects, and their redis keys
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.core.cache import cache
def clear_contacts(apps, schema_editor):
Contact = apps.get_model('contacts', 'Contact')
# delete fetched contacts
Contact.objects.all().delete()
# clear redis cache and locks to allow the next task to fetch all the contacts
cache.delete_pattern('last:fetch_contacts:*')
cache.delete_pattern('fetch_contacts')
cache.delete_pattern('fetch_contacts*')
class Migration(migrations.Migration):
dependencies = [
('contacts', '0005_auto_20150921_0855'),
]
operations = [
migrations.RunPython(clear_contacts),
]
|
<commit_before><commit_msg>Add migrations to clear current contacts objects, and their redis keys<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.core.cache import cache
def clear_contacts(apps, schema_editor):
Contact = apps.get_model('contacts', 'Contact')
# delete fetched contacts
Contact.objects.all().delete()
# clear redis cache and locks to allow the next task to fetch all the contacts
cache.delete_pattern('last:fetch_contacts:*')
cache.delete_pattern('fetch_contacts')
cache.delete_pattern('fetch_contacts*')
class Migration(migrations.Migration):
dependencies = [
('contacts', '0005_auto_20150921_0855'),
]
operations = [
migrations.RunPython(clear_contacts),
]
|
Add migrations to clear current contacts objects, and their redis keys# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.core.cache import cache
def clear_contacts(apps, schema_editor):
Contact = apps.get_model('contacts', 'Contact')
# delete fetched contacts
Contact.objects.all().delete()
# clear redis cache and locks to allow the next task to fetch all the contacts
cache.delete_pattern('last:fetch_contacts:*')
cache.delete_pattern('fetch_contacts')
cache.delete_pattern('fetch_contacts*')
class Migration(migrations.Migration):
dependencies = [
('contacts', '0005_auto_20150921_0855'),
]
operations = [
migrations.RunPython(clear_contacts),
]
|
<commit_before><commit_msg>Add migrations to clear current contacts objects, and their redis keys<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.core.cache import cache
def clear_contacts(apps, schema_editor):
Contact = apps.get_model('contacts', 'Contact')
# delete fetched contacts
Contact.objects.all().delete()
# clear redis cache and locks to allow the next task to fetch all the contacts
cache.delete_pattern('last:fetch_contacts:*')
cache.delete_pattern('fetch_contacts')
cache.delete_pattern('fetch_contacts*')
class Migration(migrations.Migration):
dependencies = [
('contacts', '0005_auto_20150921_0855'),
]
operations = [
migrations.RunPython(clear_contacts),
]
|
|
a2565d27a8fcfd4c1bfa414a39d7f90b99f5ece3
|
top40.py
|
top40.py
|
import click
import requests
url = 'http://ben-major.co.uk/labs/top40/api/singles/'
response = requests.get(url)
print response.json()
|
Print basic json chart on command line
|
Print basic json chart on command line
|
Python
|
mit
|
kevgathuku/top40,andela-kndungu/top40
|
Print basic json chart on command line
|
import click
import requests
url = 'http://ben-major.co.uk/labs/top40/api/singles/'
response = requests.get(url)
print response.json()
|
<commit_before><commit_msg>Print basic json chart on command line<commit_after>
|
import click
import requests
url = 'http://ben-major.co.uk/labs/top40/api/singles/'
response = requests.get(url)
print response.json()
|
Print basic json chart on command lineimport click
import requests
url = 'http://ben-major.co.uk/labs/top40/api/singles/'
response = requests.get(url)
print response.json()
|
<commit_before><commit_msg>Print basic json chart on command line<commit_after>import click
import requests
url = 'http://ben-major.co.uk/labs/top40/api/singles/'
response = requests.get(url)
print response.json()
|
|
1ba3c03e6e68596dd969980af2c07d1d90ba83f6
|
examples/scf/16-h2_scan.py
|
examples/scf/16-h2_scan.py
|
#!/usr/bin/env python
'''
Scan H2 molecule dissociation curve.
'''
import numpy
from pyscf import scf
from pyscf import gto
ehf = []
dm = None
for b in numpy.arange(0.7, 4.01, 0.1):
mol = gto.M(atom=[["H", 0., 0., 0.],
["H", 0., 0., b ]], basis='ccpvdz', verbose=0)
mf = scf.RHF(mol)
ehf.append(mf.kernel(dm))
dm = mf.make_rdm1()
print('R E(HF)')
for i, b in enumerate(numpy.arange(0.7, 4.01, 0.1)):
print('%.2f %.8f' % (b, ehf[i]))
|
Add example of HF PES scanning
|
Add example of HF PES scanning
|
Python
|
apache-2.0
|
sunqm/pyscf,sunqm/pyscf,gkc1000/pyscf,gkc1000/pyscf,gkc1000/pyscf,gkc1000/pyscf,sunqm/pyscf,gkc1000/pyscf,sunqm/pyscf
|
Add example of HF PES scanning
|
#!/usr/bin/env python
'''
Scan H2 molecule dissociation curve.
'''
import numpy
from pyscf import scf
from pyscf import gto
ehf = []
dm = None
for b in numpy.arange(0.7, 4.01, 0.1):
mol = gto.M(atom=[["H", 0., 0., 0.],
["H", 0., 0., b ]], basis='ccpvdz', verbose=0)
mf = scf.RHF(mol)
ehf.append(mf.kernel(dm))
dm = mf.make_rdm1()
print('R E(HF)')
for i, b in enumerate(numpy.arange(0.7, 4.01, 0.1)):
print('%.2f %.8f' % (b, ehf[i]))
|
<commit_before><commit_msg>Add example of HF PES scanning<commit_after>
|
#!/usr/bin/env python
'''
Scan H2 molecule dissociation curve.
'''
import numpy
from pyscf import scf
from pyscf import gto
ehf = []
dm = None
for b in numpy.arange(0.7, 4.01, 0.1):
mol = gto.M(atom=[["H", 0., 0., 0.],
["H", 0., 0., b ]], basis='ccpvdz', verbose=0)
mf = scf.RHF(mol)
ehf.append(mf.kernel(dm))
dm = mf.make_rdm1()
print('R E(HF)')
for i, b in enumerate(numpy.arange(0.7, 4.01, 0.1)):
print('%.2f %.8f' % (b, ehf[i]))
|
Add example of HF PES scanning#!/usr/bin/env python
'''
Scan H2 molecule dissociation curve.
'''
import numpy
from pyscf import scf
from pyscf import gto
ehf = []
dm = None
for b in numpy.arange(0.7, 4.01, 0.1):
mol = gto.M(atom=[["H", 0., 0., 0.],
["H", 0., 0., b ]], basis='ccpvdz', verbose=0)
mf = scf.RHF(mol)
ehf.append(mf.kernel(dm))
dm = mf.make_rdm1()
print('R E(HF)')
for i, b in enumerate(numpy.arange(0.7, 4.01, 0.1)):
print('%.2f %.8f' % (b, ehf[i]))
|
<commit_before><commit_msg>Add example of HF PES scanning<commit_after>#!/usr/bin/env python
'''
Scan H2 molecule dissociation curve.
'''
import numpy
from pyscf import scf
from pyscf import gto
ehf = []
dm = None
for b in numpy.arange(0.7, 4.01, 0.1):
mol = gto.M(atom=[["H", 0., 0., 0.],
["H", 0., 0., b ]], basis='ccpvdz', verbose=0)
mf = scf.RHF(mol)
ehf.append(mf.kernel(dm))
dm = mf.make_rdm1()
print('R E(HF)')
for i, b in enumerate(numpy.arange(0.7, 4.01, 0.1)):
print('%.2f %.8f' % (b, ehf[i]))
|
|
442e141ba3695e32d19cb6a263c63a4d6c31de90
|
examples/todo_app/setup.py
|
examples/todo_app/setup.py
|
from distutils.core import setup
import sys
import os
import shutil
def tree(src):
return [(root, map(lambda f: os.path.join(root, f), filter(lambda f: os.path.splitext(f)[1] != ".map", files))) for (root, dirs, files) in os.walk(os.path.normpath(src))]
APP = ['index.py']
DATA_FILES = tree('assets')
OPTIONS_OSX = {'argv_emulation': False,
'strip': True,
'includes': ['WebKit', 'Foundation', 'webview']}
if os.path.exists('build'):
shutil.rmtree('build')
if os.path.exists('dist'):
shutil.rmtree('dist')
if sys.platform == 'darwin':
import py2app
setup(
app=APP,
data_files=DATA_FILES,
options={'py2app': OPTIONS_OSX},
setup_requires=['py2app'],
)
|
Add py2app script to todos_app
|
Add py2app script to todos_app
|
Python
|
bsd-3-clause
|
shivaprsdv/pywebview,shivaprsdv/pywebview,r0x0r/pywebview,shivaprsdv/pywebview,r0x0r/pywebview,shivaprsdv/pywebview,r0x0r/pywebview,r0x0r/pywebview,r0x0r/pywebview
|
Add py2app script to todos_app
|
from distutils.core import setup
import sys
import os
import shutil
def tree(src):
return [(root, map(lambda f: os.path.join(root, f), filter(lambda f: os.path.splitext(f)[1] != ".map", files))) for (root, dirs, files) in os.walk(os.path.normpath(src))]
APP = ['index.py']
DATA_FILES = tree('assets')
OPTIONS_OSX = {'argv_emulation': False,
'strip': True,
'includes': ['WebKit', 'Foundation', 'webview']}
if os.path.exists('build'):
shutil.rmtree('build')
if os.path.exists('dist'):
shutil.rmtree('dist')
if sys.platform == 'darwin':
import py2app
setup(
app=APP,
data_files=DATA_FILES,
options={'py2app': OPTIONS_OSX},
setup_requires=['py2app'],
)
|
<commit_before><commit_msg>Add py2app script to todos_app<commit_after>
|
from distutils.core import setup
import sys
import os
import shutil
def tree(src):
return [(root, map(lambda f: os.path.join(root, f), filter(lambda f: os.path.splitext(f)[1] != ".map", files))) for (root, dirs, files) in os.walk(os.path.normpath(src))]
APP = ['index.py']
DATA_FILES = tree('assets')
OPTIONS_OSX = {'argv_emulation': False,
'strip': True,
'includes': ['WebKit', 'Foundation', 'webview']}
if os.path.exists('build'):
shutil.rmtree('build')
if os.path.exists('dist'):
shutil.rmtree('dist')
if sys.platform == 'darwin':
import py2app
setup(
app=APP,
data_files=DATA_FILES,
options={'py2app': OPTIONS_OSX},
setup_requires=['py2app'],
)
|
Add py2app script to todos_appfrom distutils.core import setup
import sys
import os
import shutil
def tree(src):
return [(root, map(lambda f: os.path.join(root, f), filter(lambda f: os.path.splitext(f)[1] != ".map", files))) for (root, dirs, files) in os.walk(os.path.normpath(src))]
APP = ['index.py']
DATA_FILES = tree('assets')
OPTIONS_OSX = {'argv_emulation': False,
'strip': True,
'includes': ['WebKit', 'Foundation', 'webview']}
if os.path.exists('build'):
shutil.rmtree('build')
if os.path.exists('dist'):
shutil.rmtree('dist')
if sys.platform == 'darwin':
import py2app
setup(
app=APP,
data_files=DATA_FILES,
options={'py2app': OPTIONS_OSX},
setup_requires=['py2app'],
)
|
<commit_before><commit_msg>Add py2app script to todos_app<commit_after>from distutils.core import setup
import sys
import os
import shutil
def tree(src):
return [(root, map(lambda f: os.path.join(root, f), filter(lambda f: os.path.splitext(f)[1] != ".map", files))) for (root, dirs, files) in os.walk(os.path.normpath(src))]
APP = ['index.py']
DATA_FILES = tree('assets')
OPTIONS_OSX = {'argv_emulation': False,
'strip': True,
'includes': ['WebKit', 'Foundation', 'webview']}
if os.path.exists('build'):
shutil.rmtree('build')
if os.path.exists('dist'):
shutil.rmtree('dist')
if sys.platform == 'darwin':
import py2app
setup(
app=APP,
data_files=DATA_FILES,
options={'py2app': OPTIONS_OSX},
setup_requires=['py2app'],
)
|
|
d3bbdce883b2a3880970f1923367ca8498b34a8d
|
tempest/tests/services/compute/test_floating_ip_pools_client.py
|
tempest/tests/services/compute/test_floating_ip_pools_client.py
|
# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.services.compute.json import floating_ip_pools_client
from tempest.tests import fake_auth_provider
from tempest.tests.services.compute import base
class TestFloatingIPPoolsClient(base.BaseComputeServiceTest):
FAKE_FLOATING_IP_POOLS = {
"floating_ip_pools":
[
{"name": u'\u3042'},
{"name": u'\u3044'}
]
}
def setUp(self):
super(TestFloatingIPPoolsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = floating_ip_pools_client.FloatingIPPoolsClient(
fake_auth, 'compute', 'regionOne')
def test_list_floating_ip_pools_with_str_body(self):
self.check_service_client_function(
self.client.list_floating_ip_pools,
'tempest.common.service_client.ServiceClient.get',
self.FAKE_FLOATING_IP_POOLS)
def test_list_floating_ip_pools_with_bytes_body(self):
self.check_service_client_function(
self.client.list_floating_ip_pools,
'tempest.common.service_client.ServiceClient.get',
self.FAKE_FLOATING_IP_POOLS, to_utf=True)
|
Add unit test for floating_ip_pools_client
|
Add unit test for floating_ip_pools_client
This patch adds unit test for floating_ip_pools_client.
Change-Id: Iba1dc372867f70742b73b5f4502bb7e94e4dc312
|
Python
|
apache-2.0
|
bigswitch/tempest,izadorozhna/tempest,xbezdick/tempest,openstack/tempest,Tesora/tesora-tempest,LIS/lis-tempest,sebrandon1/tempest,vedujoshi/tempest,zsoltdudas/lis-tempest,cisco-openstack/tempest,masayukig/tempest,flyingfish007/tempest,tonyli71/tempest,masayukig/tempest,xbezdick/tempest,Tesora/tesora-tempest,openstack/tempest,izadorozhna/tempest,Juniper/tempest,vedujoshi/tempest,sebrandon1/tempest,zsoltdudas/lis-tempest,rakeshmi/tempest,LIS/lis-tempest,bigswitch/tempest,tonyli71/tempest,Juniper/tempest,cisco-openstack/tempest,rakeshmi/tempest,flyingfish007/tempest
|
Add unit test for floating_ip_pools_client
This patch adds unit test for floating_ip_pools_client.
Change-Id: Iba1dc372867f70742b73b5f4502bb7e94e4dc312
|
# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.services.compute.json import floating_ip_pools_client
from tempest.tests import fake_auth_provider
from tempest.tests.services.compute import base
class TestFloatingIPPoolsClient(base.BaseComputeServiceTest):
FAKE_FLOATING_IP_POOLS = {
"floating_ip_pools":
[
{"name": u'\u3042'},
{"name": u'\u3044'}
]
}
def setUp(self):
super(TestFloatingIPPoolsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = floating_ip_pools_client.FloatingIPPoolsClient(
fake_auth, 'compute', 'regionOne')
def test_list_floating_ip_pools_with_str_body(self):
self.check_service_client_function(
self.client.list_floating_ip_pools,
'tempest.common.service_client.ServiceClient.get',
self.FAKE_FLOATING_IP_POOLS)
def test_list_floating_ip_pools_with_bytes_body(self):
self.check_service_client_function(
self.client.list_floating_ip_pools,
'tempest.common.service_client.ServiceClient.get',
self.FAKE_FLOATING_IP_POOLS, to_utf=True)
|
<commit_before><commit_msg>Add unit test for floating_ip_pools_client
This patch adds unit test for floating_ip_pools_client.
Change-Id: Iba1dc372867f70742b73b5f4502bb7e94e4dc312<commit_after>
|
# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.services.compute.json import floating_ip_pools_client
from tempest.tests import fake_auth_provider
from tempest.tests.services.compute import base
class TestFloatingIPPoolsClient(base.BaseComputeServiceTest):
FAKE_FLOATING_IP_POOLS = {
"floating_ip_pools":
[
{"name": u'\u3042'},
{"name": u'\u3044'}
]
}
def setUp(self):
super(TestFloatingIPPoolsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = floating_ip_pools_client.FloatingIPPoolsClient(
fake_auth, 'compute', 'regionOne')
def test_list_floating_ip_pools_with_str_body(self):
self.check_service_client_function(
self.client.list_floating_ip_pools,
'tempest.common.service_client.ServiceClient.get',
self.FAKE_FLOATING_IP_POOLS)
def test_list_floating_ip_pools_with_bytes_body(self):
self.check_service_client_function(
self.client.list_floating_ip_pools,
'tempest.common.service_client.ServiceClient.get',
self.FAKE_FLOATING_IP_POOLS, to_utf=True)
|
Add unit test for floating_ip_pools_client
This patch adds unit test for floating_ip_pools_client.
Change-Id: Iba1dc372867f70742b73b5f4502bb7e94e4dc312# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.services.compute.json import floating_ip_pools_client
from tempest.tests import fake_auth_provider
from tempest.tests.services.compute import base
class TestFloatingIPPoolsClient(base.BaseComputeServiceTest):
FAKE_FLOATING_IP_POOLS = {
"floating_ip_pools":
[
{"name": u'\u3042'},
{"name": u'\u3044'}
]
}
def setUp(self):
super(TestFloatingIPPoolsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = floating_ip_pools_client.FloatingIPPoolsClient(
fake_auth, 'compute', 'regionOne')
def test_list_floating_ip_pools_with_str_body(self):
self.check_service_client_function(
self.client.list_floating_ip_pools,
'tempest.common.service_client.ServiceClient.get',
self.FAKE_FLOATING_IP_POOLS)
def test_list_floating_ip_pools_with_bytes_body(self):
self.check_service_client_function(
self.client.list_floating_ip_pools,
'tempest.common.service_client.ServiceClient.get',
self.FAKE_FLOATING_IP_POOLS, to_utf=True)
|
<commit_before><commit_msg>Add unit test for floating_ip_pools_client
This patch adds unit test for floating_ip_pools_client.
Change-Id: Iba1dc372867f70742b73b5f4502bb7e94e4dc312<commit_after># Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.services.compute.json import floating_ip_pools_client
from tempest.tests import fake_auth_provider
from tempest.tests.services.compute import base
class TestFloatingIPPoolsClient(base.BaseComputeServiceTest):
FAKE_FLOATING_IP_POOLS = {
"floating_ip_pools":
[
{"name": u'\u3042'},
{"name": u'\u3044'}
]
}
def setUp(self):
super(TestFloatingIPPoolsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = floating_ip_pools_client.FloatingIPPoolsClient(
fake_auth, 'compute', 'regionOne')
def test_list_floating_ip_pools_with_str_body(self):
self.check_service_client_function(
self.client.list_floating_ip_pools,
'tempest.common.service_client.ServiceClient.get',
self.FAKE_FLOATING_IP_POOLS)
def test_list_floating_ip_pools_with_bytes_body(self):
self.check_service_client_function(
self.client.list_floating_ip_pools,
'tempest.common.service_client.ServiceClient.get',
self.FAKE_FLOATING_IP_POOLS, to_utf=True)
|
|
dd7d3f77b691922c44c489844b1f278d58a790f4
|
xorry.py
|
xorry.py
|
#!/usr/bin/python
import optparse
import sys
def xorobfuscator(plaintext, keyid):
'''XOR Operation on a plaintext using keyid'''
encoded = ""
for i in range(0, len(plaintext), len(keyid)):
start = i
end = i + len(keyid)
for x, y in zip(plaintext[start:end], keyid):
encoded += chr(ord(x) ^ ord(y))
return encoded
def xorDeobfuscator(obfuscated, keyid):
    '''Reverse XOR Operation on an obfuscated string using keyid'''
decoded = ""
for i in range(0, len(obfuscated), len(keyid)):
start = i
end = i + len(keyid)
for x, y in zip(obfuscated[start:end], keyid):
decoded += chr(ord(x) ^ ord(y))
return decoded
def main():
parser = optparse.OptionParser(
'xorry.py -m < mode de/ob> -s <string> -k <key> ')
parser.add_option('-m', dest='mode',
type='string', help='Mode of Operation')
parser.add_option('-s', dest='plaintext', type='string',
help='String for manipulation')
parser.add_option('-k', dest='keyid', type='string',
help='Key for XOR Operation')
(options, args) = parser.parse_args()
mode = options.mode
plaintext = options.plaintext
keyid = options.keyid
if (mode is None) | (plaintext is None) | (keyid is None):
print(parser.usage)
sys.exit(0)
elif (mode == "de"):
hiddenBin = plaintext.decode("hex")
keyBin = keyid.decode("hex")
decoded = xorDeobfuscator(hiddenBin, keyBin).encode("hex")
print("Deobfuscated String : %s" % decoded)
print("Key Used : %s " % keyBin.encode("hex"))
elif (mode == "ob"):
ObufString = xorobfuscator(plaintext, keyid).encode("hex")
print("Obfuscated String : %s " % ObufString)
print("Key Used : %s " % keyid.encode("hex"))
if __name__ == '__main__':
main()
|
Refactor code - PEP8 Style.
|
Refactor code - PEP8 Style.
|
Python
|
cc0-1.0
|
JohnTroony/Scriptology,JohnTroony/Scriptology,JohnTroony/Scriptology,JohnTroony/Scriptology,JohnTroony/Scriptology,JohnTroony/Scriptology
|
Refactor code - PEP8 Style.
|
#!/usr/bin/python
import optparse
import sys
def xorobfuscator(plaintext, keyid):
'''XOR Operation on a plaintext using keyid'''
encoded = ""
for i in range(0, len(plaintext), len(keyid)):
start = i
end = i + len(keyid)
for x, y in zip(plaintext[start:end], keyid):
encoded += chr(ord(x) ^ ord(y))
return encoded
def xorDeobfuscator(obfuscated, keyid):
    '''Reverse XOR Operation on an obfuscated string using keyid'''
decoded = ""
for i in range(0, len(obfuscated), len(keyid)):
start = i
end = i + len(keyid)
for x, y in zip(obfuscated[start:end], keyid):
decoded += chr(ord(x) ^ ord(y))
return decoded
def main():
parser = optparse.OptionParser(
'xorry.py -m < mode de/ob> -s <string> -k <key> ')
parser.add_option('-m', dest='mode',
type='string', help='Mode of Operation')
parser.add_option('-s', dest='plaintext', type='string',
help='String for manipulation')
parser.add_option('-k', dest='keyid', type='string',
help='Key for XOR Operation')
(options, args) = parser.parse_args()
mode = options.mode
plaintext = options.plaintext
keyid = options.keyid
if (mode is None) | (plaintext is None) | (keyid is None):
print(parser.usage)
sys.exit(0)
elif (mode == "de"):
hiddenBin = plaintext.decode("hex")
keyBin = keyid.decode("hex")
decoded = xorDeobfuscator(hiddenBin, keyBin).encode("hex")
print("Deobfuscated String : %s" % decoded)
print("Key Used : %s " % keyBin.encode("hex"))
elif (mode == "ob"):
ObufString = xorobfuscator(plaintext, keyid).encode("hex")
print("Obfuscated String : %s " % ObufString)
print("Key Used : %s " % keyid.encode("hex"))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Refactor code - PEP8 Style.<commit_after>
|
#!/usr/bin/python
import optparse
import sys
def xorobfuscator(plaintext, keyid):
'''XOR Operation on a plaintext using keyid'''
encoded = ""
for i in range(0, len(plaintext), len(keyid)):
start = i
end = i + len(keyid)
for x, y in zip(plaintext[start:end], keyid):
encoded += chr(ord(x) ^ ord(y))
return encoded
def xorDeobfuscator(obfuscated, keyid):
    '''Reverse XOR Operation on an obfuscated string using keyid'''
decoded = ""
for i in range(0, len(obfuscated), len(keyid)):
start = i
end = i + len(keyid)
for x, y in zip(obfuscated[start:end], keyid):
decoded += chr(ord(x) ^ ord(y))
return decoded
def main():
parser = optparse.OptionParser(
'xorry.py -m < mode de/ob> -s <string> -k <key> ')
parser.add_option('-m', dest='mode',
type='string', help='Mode of Operation')
parser.add_option('-s', dest='plaintext', type='string',
help='String for manipulation')
parser.add_option('-k', dest='keyid', type='string',
help='Key for XOR Operation')
(options, args) = parser.parse_args()
mode = options.mode
plaintext = options.plaintext
keyid = options.keyid
if (mode is None) | (plaintext is None) | (keyid is None):
print(parser.usage)
sys.exit(0)
elif (mode == "de"):
hiddenBin = plaintext.decode("hex")
keyBin = keyid.decode("hex")
decoded = xorDeobfuscator(hiddenBin, keyBin).encode("hex")
print("Deobfuscated String : %s" % decoded)
print("Key Used : %s " % keyBin.encode("hex"))
elif (mode == "ob"):
ObufString = xorobfuscator(plaintext, keyid).encode("hex")
print("Obfuscated String : %s " % ObufString)
print("Key Used : %s " % keyid.encode("hex"))
if __name__ == '__main__':
main()
|
Refactor code - PEP8 Style.#!/usr/bin/python
import optparse
import sys
def xorobfuscator(plaintext, keyid):
'''XOR Operation on a plaintext using keyid'''
encoded = ""
for i in range(0, len(plaintext), len(keyid)):
start = i
end = i + len(keyid)
for x, y in zip(plaintext[start:end], keyid):
encoded += chr(ord(x) ^ ord(y))
return encoded
def xorDeobfuscator(obfuscated, keyid):
    '''Reverse XOR Operation on an obfuscated string using keyid'''
decoded = ""
for i in range(0, len(obfuscated), len(keyid)):
start = i
end = i + len(keyid)
for x, y in zip(obfuscated[start:end], keyid):
decoded += chr(ord(x) ^ ord(y))
return decoded
def main():
parser = optparse.OptionParser(
'xorry.py -m < mode de/ob> -s <string> -k <key> ')
parser.add_option('-m', dest='mode',
type='string', help='Mode of Operation')
parser.add_option('-s', dest='plaintext', type='string',
help='String for manipulation')
parser.add_option('-k', dest='keyid', type='string',
help='Key for XOR Operation')
(options, args) = parser.parse_args()
mode = options.mode
plaintext = options.plaintext
keyid = options.keyid
if (mode is None) | (plaintext is None) | (keyid is None):
print(parser.usage)
sys.exit(0)
elif (mode == "de"):
hiddenBin = plaintext.decode("hex")
keyBin = keyid.decode("hex")
decoded = xorDeobfuscator(hiddenBin, keyBin).encode("hex")
print("Deobfuscated String : %s" % decoded)
print("Key Used : %s " % keyBin.encode("hex"))
elif (mode == "ob"):
ObufString = xorobfuscator(plaintext, keyid).encode("hex")
print("Obfuscated String : %s " % ObufString)
print("Key Used : %s " % keyid.encode("hex"))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Refactor code - PEP8 Style.<commit_after>#!/usr/bin/python
import optparse
import sys
def xorobfuscator(plaintext, keyid):
'''XOR Operation on a plaintext using keyid'''
encoded = ""
for i in range(0, len(plaintext), len(keyid)):
start = i
end = i + len(keyid)
for x, y in zip(plaintext[start:end], keyid):
encoded += chr(ord(x) ^ ord(y))
return encoded
def xorDeobfuscator(obfuscated, keyid):
    '''Reverse XOR Operation on an obfuscated string using keyid'''
decoded = ""
for i in range(0, len(obfuscated), len(keyid)):
start = i
end = i + len(keyid)
for x, y in zip(obfuscated[start:end], keyid):
decoded += chr(ord(x) ^ ord(y))
return decoded
def main():
parser = optparse.OptionParser(
'xorry.py -m < mode de/ob> -s <string> -k <key> ')
parser.add_option('-m', dest='mode',
type='string', help='Mode of Operation')
parser.add_option('-s', dest='plaintext', type='string',
help='String for manipulation')
parser.add_option('-k', dest='keyid', type='string',
help='Key for XOR Operation')
(options, args) = parser.parse_args()
mode = options.mode
plaintext = options.plaintext
keyid = options.keyid
if (mode is None) | (plaintext is None) | (keyid is None):
print(parser.usage)
sys.exit(0)
elif (mode == "de"):
hiddenBin = plaintext.decode("hex")
keyBin = keyid.decode("hex")
decoded = xorDeobfuscator(hiddenBin, keyBin).encode("hex")
print("Deobfuscated String : %s" % decoded)
print("Key Used : %s " % keyBin.encode("hex"))
elif (mode == "ob"):
ObufString = xorobfuscator(plaintext, keyid).encode("hex")
print("Obfuscated String : %s " % ObufString)
print("Key Used : %s " % keyid.encode("hex"))
if __name__ == '__main__':
main()
|
|
f367df8baa9deba3c361620f87c98eb88c2e234c
|
alerts/ldap_password_spray.py
|
alerts/ldap_password_spray.py
|
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2014 Mozilla Corporation
from lib.alerttask import AlertTask
from mozdef_util.query_models import SearchQuery, TermMatch
class AlertLdapPasswordSpray(AlertTask):
def main(self):
# TODO: evaluate what the threshold should be to detect a password spray
search_query = SearchQuery(minutes=60)
search_query.add_must([
TermMatch('category', 'ldap'),
TermMatch('tags', 'ldap'),
TermMatch('details.response.error', 'LDAP_INVALID_CREDENTIALS')
])
self.filtersManual(search_query)
self.searchEventsAggregated('details.client', samplesLimit=10)
# TODO: evaluate what the threshold should be for this alert
self.walkAggregations(threshold=1)
# Set alert properties
def onAggregation(self, aggreg):
category = 'ldap'
tags = ['ldap']
severity = 'WARNING'
user_dn_list = set()
for event in aggreg['allevents']:
for request in event['_source']['details']['requests']:
user_dn_list.add(request['details'][0])
summary = 'Possible Password Spray Attack in Progress from {0} using the following distinguished names: {1}'.format(
aggreg['value'],
",".join(sorted(user_dn_list))
)
return self.createAlertDict(summary, category, tags, aggreg['events'], severity)
|
Add prototype LDAP password spray alert
|
Add prototype LDAP password spray alert
|
Python
|
mpl-2.0
|
mpurzynski/MozDef,mozilla/MozDef,mpurzynski/MozDef,mpurzynski/MozDef,mozilla/MozDef,jeffbryner/MozDef,jeffbryner/MozDef,jeffbryner/MozDef,mpurzynski/MozDef,mozilla/MozDef,jeffbryner/MozDef,mozilla/MozDef
|
Add prototype LDAP password spray alert
|
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2014 Mozilla Corporation
from lib.alerttask import AlertTask
from mozdef_util.query_models import SearchQuery, TermMatch
class AlertLdapPasswordSpray(AlertTask):
def main(self):
# TODO: evaluate what the threshold should be to detect a password spray
search_query = SearchQuery(minutes=60)
search_query.add_must([
TermMatch('category', 'ldap'),
TermMatch('tags', 'ldap'),
TermMatch('details.response.error', 'LDAP_INVALID_CREDENTIALS')
])
self.filtersManual(search_query)
self.searchEventsAggregated('details.client', samplesLimit=10)
# TODO: evaluate what the threshold should be for this alert
self.walkAggregations(threshold=1)
# Set alert properties
def onAggregation(self, aggreg):
category = 'ldap'
tags = ['ldap']
severity = 'WARNING'
user_dn_list = set()
for event in aggreg['allevents']:
for request in event['_source']['details']['requests']:
user_dn_list.add(request['details'][0])
summary = 'Possible Password Spray Attack in Progress from {0} using the following distinguished names: {1}'.format(
aggreg['value'],
",".join(sorted(user_dn_list))
)
return self.createAlertDict(summary, category, tags, aggreg['events'], severity)
|
<commit_before><commit_msg>Add prototype LDAP password spray alert<commit_after>
|
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2014 Mozilla Corporation
from lib.alerttask import AlertTask
from mozdef_util.query_models import SearchQuery, TermMatch
class AlertLdapPasswordSpray(AlertTask):
def main(self):
# TODO: evaluate what the threshold should be to detect a password spray
search_query = SearchQuery(minutes=60)
search_query.add_must([
TermMatch('category', 'ldap'),
TermMatch('tags', 'ldap'),
TermMatch('details.response.error', 'LDAP_INVALID_CREDENTIALS')
])
self.filtersManual(search_query)
self.searchEventsAggregated('details.client', samplesLimit=10)
# TODO: evaluate what the threshold should be for this alert
self.walkAggregations(threshold=1)
# Set alert properties
def onAggregation(self, aggreg):
category = 'ldap'
tags = ['ldap']
severity = 'WARNING'
user_dn_list = set()
for event in aggreg['allevents']:
for request in event['_source']['details']['requests']:
user_dn_list.add(request['details'][0])
summary = 'Possible Password Spray Attack in Progress from {0} using the following distinguished names: {1}'.format(
aggreg['value'],
",".join(sorted(user_dn_list))
)
return self.createAlertDict(summary, category, tags, aggreg['events'], severity)
|
Add prototype LDAP password spray alert#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2014 Mozilla Corporation
from lib.alerttask import AlertTask
from mozdef_util.query_models import SearchQuery, TermMatch
class AlertLdapPasswordSpray(AlertTask):
def main(self):
# TODO: evaluate what the threshold should be to detect a password spray
search_query = SearchQuery(minutes=60)
search_query.add_must([
TermMatch('category', 'ldap'),
TermMatch('tags', 'ldap'),
TermMatch('details.response.error', 'LDAP_INVALID_CREDENTIALS')
])
self.filtersManual(search_query)
self.searchEventsAggregated('details.client', samplesLimit=10)
# TODO: evaluate what the threshold should be for this alert
self.walkAggregations(threshold=1)
# Set alert properties
def onAggregation(self, aggreg):
category = 'ldap'
tags = ['ldap']
severity = 'WARNING'
user_dn_list = set()
for event in aggreg['allevents']:
for request in event['_source']['details']['requests']:
user_dn_list.add(request['details'][0])
summary = 'Possible Password Spray Attack in Progress from {0} using the following distinguished names: {1}'.format(
aggreg['value'],
",".join(sorted(user_dn_list))
)
return self.createAlertDict(summary, category, tags, aggreg['events'], severity)
|
<commit_before><commit_msg>Add prototype LDAP password spray alert<commit_after>#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2014 Mozilla Corporation
from lib.alerttask import AlertTask
from mozdef_util.query_models import SearchQuery, TermMatch
class AlertLdapPasswordSpray(AlertTask):
def main(self):
# TODO: evaluate what the threshold should be to detect a password spray
search_query = SearchQuery(minutes=60)
search_query.add_must([
TermMatch('category', 'ldap'),
TermMatch('tags', 'ldap'),
TermMatch('details.response.error', 'LDAP_INVALID_CREDENTIALS')
])
self.filtersManual(search_query)
self.searchEventsAggregated('details.client', samplesLimit=10)
# TODO: evaluate what the threshold should be for this alert
self.walkAggregations(threshold=1)
# Set alert properties
def onAggregation(self, aggreg):
category = 'ldap'
tags = ['ldap']
severity = 'WARNING'
user_dn_list = set()
for event in aggreg['allevents']:
for request in event['_source']['details']['requests']:
user_dn_list.add(request['details'][0])
summary = 'Possible Password Spray Attack in Progress from {0} using the following distinguished names: {1}'.format(
aggreg['value'],
",".join(sorted(user_dn_list))
)
return self.createAlertDict(summary, category, tags, aggreg['events'], severity)
|
|
58a3c9e6cbddba664c1c89c6039d9cc72fac526f
|
grader/grader/test/test_new.py
|
grader/grader/test/test_new.py
|
import os
import pytest
import yaml
def test_new_without_repo(parse_and_run):
"""Test vanilla assignment initialization
"""
path = parse_and_run(["init", "cpl"])
parse_and_run(["new", "assignment1"])
a_path = os.path.join(path, "assignments", "assignment1")
gs_path = os.path.join(a_path, "gradesheet")
# Assignment directory exists
assert os.path.exists(a_path)
# Assignment directory's contents exist
assert os.path.exists(gs_path)
assert os.path.exists(os.path.join(a_path, "submissions"))
assert os.path.exists(os.path.join(a_path, "results"))
# Gradesheet directory's contents exist
assert os.path.exists(os.path.join(gs_path, "assignment.yml"))
assert os.path.exists(os.path.join(gs_path, "Dockerfile"))
with open(os.path.join(gs_path, "assignment.yml")) as f:
config = yaml.load(f)
assert config['assignment-name'] == 'assignment1'
def test_new_with_repo(parse_and_run):
"""Test assignment initialization from existing repository
"""
path = parse_and_run(["init", "cpl"])
parse_and_run(["new", "assignment1",
"https://github.com/michaelwisely/python-gradesheet.git"])
a_path = os.path.join(path, "assignments", "assignment1")
gs_path = os.path.join(a_path, "gradesheet")
# Assignment directory exists
assert os.path.exists(a_path)
# Assignment directory's contents exist
assert os.path.exists(gs_path)
assert os.path.exists(os.path.join(a_path, "submissions"))
assert os.path.exists(os.path.join(a_path, "results"))
# Gradesheet directory's contents exist
assert os.path.exists(os.path.join(gs_path, "assignment.yml"))
assert os.path.exists(os.path.join(gs_path, "Dockerfile"))
with open(os.path.join(gs_path, "assignment.yml")) as f:
config = yaml.load(f)
# Now it's equal to the value from the repo
assert config['assignment-name'] == 'new-python-assignment'
def test_new_existing_assignment(parse_and_run):
"""Test overwriting an existing an assignment
"""
parse_and_run(["init", "cpl"])
parse_and_run(["new", "assignment1"])
with pytest.raises(SystemExit):
parse_and_run(["new", "assignment1"])
|
Add some tests for new assignments
|
Add some tests for new assignments
|
Python
|
mit
|
redkyn/grader,grade-it/grader,redkyn/grader
|
Add some tests for new assignments
|
import os
import pytest
import yaml
def test_new_without_repo(parse_and_run):
"""Test vanilla assignment initialization
"""
path = parse_and_run(["init", "cpl"])
parse_and_run(["new", "assignment1"])
a_path = os.path.join(path, "assignments", "assignment1")
gs_path = os.path.join(a_path, "gradesheet")
# Assignment directory exists
assert os.path.exists(a_path)
# Assignment directory's contents exist
assert os.path.exists(gs_path)
assert os.path.exists(os.path.join(a_path, "submissions"))
assert os.path.exists(os.path.join(a_path, "results"))
# Gradesheet directory's contents exist
assert os.path.exists(os.path.join(gs_path, "assignment.yml"))
assert os.path.exists(os.path.join(gs_path, "Dockerfile"))
with open(os.path.join(gs_path, "assignment.yml")) as f:
config = yaml.load(f)
assert config['assignment-name'] == 'assignment1'
def test_new_with_repo(parse_and_run):
"""Test assignment initialization from existing repository
"""
path = parse_and_run(["init", "cpl"])
parse_and_run(["new", "assignment1",
"https://github.com/michaelwisely/python-gradesheet.git"])
a_path = os.path.join(path, "assignments", "assignment1")
gs_path = os.path.join(a_path, "gradesheet")
# Assignment directory exists
assert os.path.exists(a_path)
# Assignment directory's contents exist
assert os.path.exists(gs_path)
assert os.path.exists(os.path.join(a_path, "submissions"))
assert os.path.exists(os.path.join(a_path, "results"))
# Gradesheet directory's contents exist
assert os.path.exists(os.path.join(gs_path, "assignment.yml"))
assert os.path.exists(os.path.join(gs_path, "Dockerfile"))
with open(os.path.join(gs_path, "assignment.yml")) as f:
config = yaml.load(f)
# Now it's equal to the value from the repo
assert config['assignment-name'] == 'new-python-assignment'
def test_new_existing_assignment(parse_and_run):
"""Test overwriting an existing an assignment
"""
parse_and_run(["init", "cpl"])
parse_and_run(["new", "assignment1"])
with pytest.raises(SystemExit):
parse_and_run(["new", "assignment1"])
|
<commit_before><commit_msg>Add some tests for new assignments<commit_after>
|
import os
import pytest
import yaml
def test_new_without_repo(parse_and_run):
"""Test vanilla assignment initialization
"""
path = parse_and_run(["init", "cpl"])
parse_and_run(["new", "assignment1"])
a_path = os.path.join(path, "assignments", "assignment1")
gs_path = os.path.join(a_path, "gradesheet")
# Assignment directory exists
assert os.path.exists(a_path)
# Assignment directory's contents exist
assert os.path.exists(gs_path)
assert os.path.exists(os.path.join(a_path, "submissions"))
assert os.path.exists(os.path.join(a_path, "results"))
# Gradesheet directory's contents exist
assert os.path.exists(os.path.join(gs_path, "assignment.yml"))
assert os.path.exists(os.path.join(gs_path, "Dockerfile"))
with open(os.path.join(gs_path, "assignment.yml")) as f:
config = yaml.load(f)
assert config['assignment-name'] == 'assignment1'
def test_new_with_repo(parse_and_run):
"""Test assignment initialization from existing repository
"""
path = parse_and_run(["init", "cpl"])
parse_and_run(["new", "assignment1",
"https://github.com/michaelwisely/python-gradesheet.git"])
a_path = os.path.join(path, "assignments", "assignment1")
gs_path = os.path.join(a_path, "gradesheet")
# Assignment directory exists
assert os.path.exists(a_path)
# Assignment directory's contents exist
assert os.path.exists(gs_path)
assert os.path.exists(os.path.join(a_path, "submissions"))
assert os.path.exists(os.path.join(a_path, "results"))
# Gradesheet directory's contents exist
assert os.path.exists(os.path.join(gs_path, "assignment.yml"))
assert os.path.exists(os.path.join(gs_path, "Dockerfile"))
with open(os.path.join(gs_path, "assignment.yml")) as f:
config = yaml.load(f)
# Now it's equal to the value from the repo
assert config['assignment-name'] == 'new-python-assignment'
def test_new_existing_assignment(parse_and_run):
"""Test overwriting an existing an assignment
"""
parse_and_run(["init", "cpl"])
parse_and_run(["new", "assignment1"])
with pytest.raises(SystemExit):
parse_and_run(["new", "assignment1"])
|
Add some tests for new assignmentsimport os
import pytest
import yaml
def test_new_without_repo(parse_and_run):
"""Test vanilla assignment initialization
"""
path = parse_and_run(["init", "cpl"])
parse_and_run(["new", "assignment1"])
a_path = os.path.join(path, "assignments", "assignment1")
gs_path = os.path.join(a_path, "gradesheet")
# Assignment directory exists
assert os.path.exists(a_path)
# Assignment directory's contents exist
assert os.path.exists(gs_path)
assert os.path.exists(os.path.join(a_path, "submissions"))
assert os.path.exists(os.path.join(a_path, "results"))
# Gradesheet directory's contents exist
assert os.path.exists(os.path.join(gs_path, "assignment.yml"))
assert os.path.exists(os.path.join(gs_path, "Dockerfile"))
with open(os.path.join(gs_path, "assignment.yml")) as f:
config = yaml.load(f)
assert config['assignment-name'] == 'assignment1'
def test_new_with_repo(parse_and_run):
"""Test assignment initialization from existing repository
"""
path = parse_and_run(["init", "cpl"])
parse_and_run(["new", "assignment1",
"https://github.com/michaelwisely/python-gradesheet.git"])
a_path = os.path.join(path, "assignments", "assignment1")
gs_path = os.path.join(a_path, "gradesheet")
# Assignment directory exists
assert os.path.exists(a_path)
# Assignment directory's contents exist
assert os.path.exists(gs_path)
assert os.path.exists(os.path.join(a_path, "submissions"))
assert os.path.exists(os.path.join(a_path, "results"))
# Gradesheet directory's contents exist
assert os.path.exists(os.path.join(gs_path, "assignment.yml"))
assert os.path.exists(os.path.join(gs_path, "Dockerfile"))
with open(os.path.join(gs_path, "assignment.yml")) as f:
config = yaml.load(f)
# Now it's equal to the value from the repo
assert config['assignment-name'] == 'new-python-assignment'
def test_new_existing_assignment(parse_and_run):
"""Test overwriting an existing an assignment
"""
parse_and_run(["init", "cpl"])
parse_and_run(["new", "assignment1"])
with pytest.raises(SystemExit):
parse_and_run(["new", "assignment1"])
|
<commit_before><commit_msg>Add some tests for new assignments<commit_after>import os
import pytest
import yaml
def test_new_without_repo(parse_and_run):
"""Test vanilla assignment initialization
"""
path = parse_and_run(["init", "cpl"])
parse_and_run(["new", "assignment1"])
a_path = os.path.join(path, "assignments", "assignment1")
gs_path = os.path.join(a_path, "gradesheet")
# Assignment directory exists
assert os.path.exists(a_path)
# Assignment directory's contents exist
assert os.path.exists(gs_path)
assert os.path.exists(os.path.join(a_path, "submissions"))
assert os.path.exists(os.path.join(a_path, "results"))
# Gradesheet directory's contents exist
assert os.path.exists(os.path.join(gs_path, "assignment.yml"))
assert os.path.exists(os.path.join(gs_path, "Dockerfile"))
with open(os.path.join(gs_path, "assignment.yml")) as f:
config = yaml.load(f)
assert config['assignment-name'] == 'assignment1'
def test_new_with_repo(parse_and_run):
"""Test assignment initialization from existing repository
"""
path = parse_and_run(["init", "cpl"])
parse_and_run(["new", "assignment1",
"https://github.com/michaelwisely/python-gradesheet.git"])
a_path = os.path.join(path, "assignments", "assignment1")
gs_path = os.path.join(a_path, "gradesheet")
# Assignment directory exists
assert os.path.exists(a_path)
# Assignment directory's contents exist
assert os.path.exists(gs_path)
assert os.path.exists(os.path.join(a_path, "submissions"))
assert os.path.exists(os.path.join(a_path, "results"))
# Gradesheet directory's contents exist
assert os.path.exists(os.path.join(gs_path, "assignment.yml"))
assert os.path.exists(os.path.join(gs_path, "Dockerfile"))
with open(os.path.join(gs_path, "assignment.yml")) as f:
config = yaml.load(f)
# Now it's equal to the value from the repo
assert config['assignment-name'] == 'new-python-assignment'
def test_new_existing_assignment(parse_and_run):
"""Test overwriting an existing an assignment
"""
parse_and_run(["init", "cpl"])
parse_and_run(["new", "assignment1"])
with pytest.raises(SystemExit):
parse_and_run(["new", "assignment1"])
|
|
509d4f1e6d22a373cfa20944ef388f7155443d4a
|
monroe/api.py
|
monroe/api.py
|
from flask import Flask
import monroe
api = Flask(__name__)
@api.route('/')
def aping():
return 'v0.1'
@api.route('/start')
def start_monroe():
monroe.main()
|
ADD Flask based RESTful API
|
ADD Flask based RESTful API
|
Python
|
apache-2.0
|
ecelis/monroe
|
ADD Flask based RESTful API
|
from flask import Flask
import monroe
api = Flask(__name__)
@api.route('/')
def aping():
return 'v0.1'
@api.route('/start')
def start_monroe():
monroe.main()
|
<commit_before><commit_msg>ADD Flask based RESTful API<commit_after>
|
from flask import Flask
import monroe
api = Flask(__name__)
@api.route('/')
def aping():
return 'v0.1'
@api.route('/start')
def start_monroe():
monroe.main()
|
ADD Flask based RESTful APIfrom flask import Flask
import monroe
api = Flask(__name__)
@api.route('/')
def aping():
return 'v0.1'
@api.route('/start')
def start_monroe():
monroe.main()
|
<commit_before><commit_msg>ADD Flask based RESTful API<commit_after>from flask import Flask
import monroe
api = Flask(__name__)
@api.route('/')
def aping():
return 'v0.1'
@api.route('/start')
def start_monroe():
monroe.main()
|
|
ce43d38cccd44464b1c777e99af540133033a3fc
|
pygraphc/clustering/MaxCliquesPercolationSA.py
|
pygraphc/clustering/MaxCliquesPercolationSA.py
|
from MaxCliquesPercolation import MaxCliquesPercolationWeighted
class MaxCliquesPercolationSA(MaxCliquesPercolationWeighted):
def __init__(self, graph, edges_weight, nodes_id, k, threshold):
super(MaxCliquesPercolationSA, self).__init__(graph, edges_weight, nodes_id, k, threshold)
def get_maxcliques_percolation_sa(self):
pass
|
Add file for maximal cliques percolation using sa
|
Add file for maximal cliques percolation using sa
|
Python
|
mit
|
studiawan/pygraphc
|
Add file for maximal cliques percolation using sa
|
from MaxCliquesPercolation import MaxCliquesPercolationWeighted
class MaxCliquesPercolationSA(MaxCliquesPercolationWeighted):
def __init__(self, graph, edges_weight, nodes_id, k, threshold):
super(MaxCliquesPercolationSA, self).__init__(graph, edges_weight, nodes_id, k, threshold)
def get_maxcliques_percolation_sa(self):
pass
|
<commit_before><commit_msg>Add file for maximal cliques percolation using sa<commit_after>
|
from MaxCliquesPercolation import MaxCliquesPercolationWeighted
class MaxCliquesPercolationSA(MaxCliquesPercolationWeighted):
def __init__(self, graph, edges_weight, nodes_id, k, threshold):
super(MaxCliquesPercolationSA, self).__init__(graph, edges_weight, nodes_id, k, threshold)
def get_maxcliques_percolation_sa(self):
pass
|
Add file for maximal cliques percolation using safrom MaxCliquesPercolation import MaxCliquesPercolationWeighted
class MaxCliquesPercolationSA(MaxCliquesPercolationWeighted):
def __init__(self, graph, edges_weight, nodes_id, k, threshold):
super(MaxCliquesPercolationSA, self).__init__(graph, edges_weight, nodes_id, k, threshold)
def get_maxcliques_percolation_sa(self):
pass
|
<commit_before><commit_msg>Add file for maximal cliques percolation using sa<commit_after>from MaxCliquesPercolation import MaxCliquesPercolationWeighted
class MaxCliquesPercolationSA(MaxCliquesPercolationWeighted):
def __init__(self, graph, edges_weight, nodes_id, k, threshold):
super(MaxCliquesPercolationSA, self).__init__(graph, edges_weight, nodes_id, k, threshold)
def get_maxcliques_percolation_sa(self):
pass
|
|
83f1dab96d5e9f82137dbe4142ed415a3e3e3f48
|
biobox_cli/biobox_file.py
|
biobox_cli/biobox_file.py
|
import os
import yaml
def generate(args):
output = {"version" : "0.9.0", "arguments" : args}
return yaml.safe_dump(output, default_flow_style = False)
def get_biobox_file_contents(dir_):
with open(os.path.join(dir_, 'biobox.yaml'), 'r') as f:
return yaml.load(f.read())
def fastq_arguments(args):
return files_values("fastq", args)
def fasta_arguments(args):
return files_values("fasta", args)
def reference_argument(ref):
return {"fasta_dir": [{"id" : 1, "type" : "reference", "value" : ref}]}
def files_values(identifier, args):
values = [entry(identifier + "_" + str(i), p_c, t) for (i, (p_c, t)) in enumerate(args)]
return {identifier : values}
def entry(id_, value, type_):
return {"id" : id_, "value" : value, "type" : type_}
def create_biobox_directory(content):
import tempfile as tmp
dir_ = tmp.mkdtemp()
with open(os.path.join(dir_, "biobox.yaml"), "w") as f:
f.write(content)
return dir_
|
import os
import yaml
def generate(args):
output = {"version" : "0.9.0", "arguments" : args}
return yaml.safe_dump(output, default_flow_style = False)
def get_biobox_file_contents(dir_):
with open(os.path.join(dir_, 'biobox.yaml'), 'r') as f:
return yaml.load(f.read())
def fastq_arguments(args):
return files_values("fastq", args)
def fasta_arguments(args):
return files_values("fasta", args)
def reference_argument(ref):
return {"fasta_dir": [{"id" : 1, "type" : "reference", "value" : ref}]}
def files_values(identifier, args):
values = [entry(identifier + "_" + str(i), p_c, t) for (i, (p_c, t)) in enumerate(args)]
return {identifier : values}
def entry(id_, value, type_):
return {"id" : id_, "value" : value, "type" : type_}
|
Remove no longer needed biobox_directory function
|
Remove no longer needed biobox_directory function
|
Python
|
mit
|
michaelbarton/command-line-interface,michaelbarton/command-line-interface,bioboxes/command-line-interface,bioboxes/command-line-interface
|
import os
import yaml
def generate(args):
output = {"version" : "0.9.0", "arguments" : args}
return yaml.safe_dump(output, default_flow_style = False)
def get_biobox_file_contents(dir_):
with open(os.path.join(dir_, 'biobox.yaml'), 'r') as f:
return yaml.load(f.read())
def fastq_arguments(args):
return files_values("fastq", args)
def fasta_arguments(args):
return files_values("fasta", args)
def reference_argument(ref):
return {"fasta_dir": [{"id" : 1, "type" : "reference", "value" : ref}]}
def files_values(identifier, args):
values = [entry(identifier + "_" + str(i), p_c, t) for (i, (p_c, t)) in enumerate(args)]
return {identifier : values}
def entry(id_, value, type_):
return {"id" : id_, "value" : value, "type" : type_}
def create_biobox_directory(content):
import tempfile as tmp
dir_ = tmp.mkdtemp()
with open(os.path.join(dir_, "biobox.yaml"), "w") as f:
f.write(content)
return dir_
Remove no longer needed biobox_directory function
|
import os
import yaml
def generate(args):
output = {"version" : "0.9.0", "arguments" : args}
return yaml.safe_dump(output, default_flow_style = False)
def get_biobox_file_contents(dir_):
with open(os.path.join(dir_, 'biobox.yaml'), 'r') as f:
return yaml.load(f.read())
def fastq_arguments(args):
return files_values("fastq", args)
def fasta_arguments(args):
return files_values("fasta", args)
def reference_argument(ref):
return {"fasta_dir": [{"id" : 1, "type" : "reference", "value" : ref}]}
def files_values(identifier, args):
values = [entry(identifier + "_" + str(i), p_c, t) for (i, (p_c, t)) in enumerate(args)]
return {identifier : values}
def entry(id_, value, type_):
return {"id" : id_, "value" : value, "type" : type_}
|
<commit_before>import os
import yaml
def generate(args):
output = {"version" : "0.9.0", "arguments" : args}
return yaml.safe_dump(output, default_flow_style = False)
def get_biobox_file_contents(dir_):
with open(os.path.join(dir_, 'biobox.yaml'), 'r') as f:
return yaml.load(f.read())
def fastq_arguments(args):
return files_values("fastq", args)
def fasta_arguments(args):
return files_values("fasta", args)
def reference_argument(ref):
return {"fasta_dir": [{"id" : 1, "type" : "reference", "value" : ref}]}
def files_values(identifier, args):
values = [entry(identifier + "_" + str(i), p_c, t) for (i, (p_c, t)) in enumerate(args)]
return {identifier : values}
def entry(id_, value, type_):
return {"id" : id_, "value" : value, "type" : type_}
def create_biobox_directory(content):
import tempfile as tmp
dir_ = tmp.mkdtemp()
with open(os.path.join(dir_, "biobox.yaml"), "w") as f:
f.write(content)
return dir_
<commit_msg>Remove no longer needed biobox_directory function<commit_after>
|
import os
import yaml
def generate(args):
output = {"version" : "0.9.0", "arguments" : args}
return yaml.safe_dump(output, default_flow_style = False)
def get_biobox_file_contents(dir_):
with open(os.path.join(dir_, 'biobox.yaml'), 'r') as f:
return yaml.load(f.read())
def fastq_arguments(args):
return files_values("fastq", args)
def fasta_arguments(args):
return files_values("fasta", args)
def reference_argument(ref):
return {"fasta_dir": [{"id" : 1, "type" : "reference", "value" : ref}]}
def files_values(identifier, args):
values = [entry(identifier + "_" + str(i), p_c, t) for (i, (p_c, t)) in enumerate(args)]
return {identifier : values}
def entry(id_, value, type_):
return {"id" : id_, "value" : value, "type" : type_}
|
import os
import yaml
def generate(args):
output = {"version" : "0.9.0", "arguments" : args}
return yaml.safe_dump(output, default_flow_style = False)
def get_biobox_file_contents(dir_):
with open(os.path.join(dir_, 'biobox.yaml'), 'r') as f:
return yaml.load(f.read())
def fastq_arguments(args):
return files_values("fastq", args)
def fasta_arguments(args):
return files_values("fasta", args)
def reference_argument(ref):
return {"fasta_dir": [{"id" : 1, "type" : "reference", "value" : ref}]}
def files_values(identifier, args):
values = [entry(identifier + "_" + str(i), p_c, t) for (i, (p_c, t)) in enumerate(args)]
return {identifier : values}
def entry(id_, value, type_):
return {"id" : id_, "value" : value, "type" : type_}
def create_biobox_directory(content):
import tempfile as tmp
dir_ = tmp.mkdtemp()
with open(os.path.join(dir_, "biobox.yaml"), "w") as f:
f.write(content)
return dir_
Remove no longer needed biobox_directory functionimport os
import yaml
def generate(args):
output = {"version" : "0.9.0", "arguments" : args}
return yaml.safe_dump(output, default_flow_style = False)
def get_biobox_file_contents(dir_):
with open(os.path.join(dir_, 'biobox.yaml'), 'r') as f:
return yaml.load(f.read())
def fastq_arguments(args):
return files_values("fastq", args)
def fasta_arguments(args):
return files_values("fasta", args)
def reference_argument(ref):
return {"fasta_dir": [{"id" : 1, "type" : "reference", "value" : ref}]}
def files_values(identifier, args):
values = [entry(identifier + "_" + str(i), p_c, t) for (i, (p_c, t)) in enumerate(args)]
return {identifier : values}
def entry(id_, value, type_):
return {"id" : id_, "value" : value, "type" : type_}
|
<commit_before>import os
import yaml
def generate(args):
output = {"version" : "0.9.0", "arguments" : args}
return yaml.safe_dump(output, default_flow_style = False)
def get_biobox_file_contents(dir_):
with open(os.path.join(dir_, 'biobox.yaml'), 'r') as f:
return yaml.load(f.read())
def fastq_arguments(args):
return files_values("fastq", args)
def fasta_arguments(args):
return files_values("fasta", args)
def reference_argument(ref):
return {"fasta_dir": [{"id" : 1, "type" : "reference", "value" : ref}]}
def files_values(identifier, args):
values = [entry(identifier + "_" + str(i), p_c, t) for (i, (p_c, t)) in enumerate(args)]
return {identifier : values}
def entry(id_, value, type_):
return {"id" : id_, "value" : value, "type" : type_}
def create_biobox_directory(content):
import tempfile as tmp
dir_ = tmp.mkdtemp()
with open(os.path.join(dir_, "biobox.yaml"), "w") as f:
f.write(content)
return dir_
<commit_msg>Remove no longer needed biobox_directory function<commit_after>import os
import yaml
def generate(args):
output = {"version" : "0.9.0", "arguments" : args}
return yaml.safe_dump(output, default_flow_style = False)
def get_biobox_file_contents(dir_):
with open(os.path.join(dir_, 'biobox.yaml'), 'r') as f:
return yaml.load(f.read())
def fastq_arguments(args):
return files_values("fastq", args)
def fasta_arguments(args):
return files_values("fasta", args)
def reference_argument(ref):
return {"fasta_dir": [{"id" : 1, "type" : "reference", "value" : ref}]}
def files_values(identifier, args):
values = [entry(identifier + "_" + str(i), p_c, t) for (i, (p_c, t)) in enumerate(args)]
return {identifier : values}
def entry(id_, value, type_):
return {"id" : id_, "value" : value, "type" : type_}
|
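A short usage sketch of the generate/fastq_arguments helpers kept by this commit; the file paths and type labels below are made-up placeholders, not values from the repository:

from biobox_cli import biobox_file

# Hypothetical paired-end read files; "paired" is an assumed type label.
fastq = biobox_file.fastq_arguments([("/bbx/input/reads_1.fq.gz", "paired"),
                                     ("/bbx/input/reads_2.fq.gz", "paired")])
print(biobox_file.generate([fastq]))
# Emits a YAML document with version 0.9.0 and a fastq list whose ids are fastq_0, fastq_1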
d5beeaa933b08f80ce2b0d8b56e022cd5b23397c
|
mysql_test.py
|
mysql_test.py
|
#!/usr/bin/env python
# blogware - a python blogging system
# Copyright (C) 2016-2017 izrik
#
# This file is a part of blogware.
#
# Blogware is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Blogware is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with blogware. If not, see <http://www.gnu.org/licenses/>.
import argparse
from os import environ
import MySQLdb
class Config(object):
PASSWORD = environ.get('PASSWORD')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('host', type=str, action='store')
parser.add_argument('user', type=str, action='store')
parser.add_argument('--password', type=str, action='store',
default=Config.PASSWORD)
parser.add_argument('db', type=str, action='store')
parser.add_argument('--ssl-cert', type=str, action='store')
parser.add_argument('--ssl-key', type=str, action='store')
args = parser.parse_args()
Config.PASSWORD = args.password
def run():
print('DB URI: {}'.format(Config.DB_URI))
conn = MySQLdb.connect(
host=args.host,
user=args.user,
passwd=Config.PASSWORD,
db=args.db,
)
if __name__ == "__main__":
run()
|
Include a simple mysql connection diagnostic tool.
|
Include a simple mysql connection diagnostic tool.
|
Python
|
agpl-3.0
|
izrik/wikiware,izrik/blogware,izrik/wikiware,izrik/blogware,izrik/blogware,izrik/wikiware
|
Include a simple mysql connection diagnostic tool.
|
#!/usr/bin/env python
# blogware - a python blogging system
# Copyright (C) 2016-2017 izrik
#
# This file is a part of blogware.
#
# Blogware is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Blogware is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with blogware. If not, see <http://www.gnu.org/licenses/>.
import argparse
from os import environ
import MySQLdb
class Config(object):
PASSWORD = environ.get('PASSWORD')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('host', type=str, action='store')
parser.add_argument('user', type=str, action='store')
parser.add_argument('--password', type=str, action='store',
default=Config.PASSWORD)
parser.add_argument('db', type=str, action='store')
parser.add_argument('--ssl-cert', type=str, action='store')
parser.add_argument('--ssl-key', type=str, action='store')
args = parser.parse_args()
Config.PASSWORD = args.password
def run():
print('DB URI: {}'.format(Config.DB_URI))
conn = MySQLdb.connect(
host=args.host,
user=args.user,
passwd=Config.PASSWORD,
db=args.db,
)
if __name__ == "__main__":
run()
|
<commit_before><commit_msg>Include a simple mysql connection diagnostic tool.<commit_after>
|
#!/usr/bin/env python
# blogware - a python blogging system
# Copyright (C) 2016-2017 izrik
#
# This file is a part of blogware.
#
# Blogware is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Blogware is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with blogware. If not, see <http://www.gnu.org/licenses/>.
import argparse
from os import environ
import MySQLdb
class Config(object):
PASSWORD = environ.get('PASSWORD')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('host', type=str, action='store')
parser.add_argument('user', type=str, action='store')
parser.add_argument('--password', type=str, action='store',
default=Config.PASSWORD)
parser.add_argument('db', type=str, action='store')
parser.add_argument('--ssl-cert', type=str, action='store')
parser.add_argument('--ssl-key', type=str, action='store')
args = parser.parse_args()
Config.PASSWORD = args.password
def run():
print('DB URI: {}'.format(Config.DB_URI))
conn = MySQLdb.connect(
host=args.host,
user=args.user,
passwd=Config.PASSWORD,
db=args.db,
)
if __name__ == "__main__":
run()
|
Include a simple mysql connection diagnostic tool.#!/usr/bin/env python
# blogware - a python blogging system
# Copyright (C) 2016-2017 izrik
#
# This file is a part of blogware.
#
# Blogware is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Blogware is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with blogware. If not, see <http://www.gnu.org/licenses/>.
import argparse
from os import environ
import MySQLdb
class Config(object):
PASSWORD = environ.get('PASSWORD')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('host', type=str, action='store')
parser.add_argument('user', type=str, action='store')
parser.add_argument('--password', type=str, action='store',
default=Config.PASSWORD)
parser.add_argument('db', type=str, action='store')
parser.add_argument('--ssl-cert', type=str, action='store')
parser.add_argument('--ssl-key', type=str, action='store')
args = parser.parse_args()
Config.PASSWORD = args.password
def run():
print('DB URI: {}'.format(Config.DB_URI))
conn = MySQLdb.connect(
host=args.host,
user=args.user,
passwd=Config.PASSWORD,
db=args.db,
)
if __name__ == "__main__":
run()
|
<commit_before><commit_msg>Include a simple mysql connection diagnostic tool.<commit_after>#!/usr/bin/env python
# blogware - a python blogging system
# Copyright (C) 2016-2017 izrik
#
# This file is a part of blogware.
#
# Blogware is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Blogware is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with blogware. If not, see <http://www.gnu.org/licenses/>.
import argparse
from os import environ
import MySQLdb
class Config(object):
PASSWORD = environ.get('PASSWORD')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('host', type=str, action='store')
parser.add_argument('user', type=str, action='store')
parser.add_argument('--password', type=str, action='store',
default=Config.PASSWORD)
parser.add_argument('db', type=str, action='store')
parser.add_argument('--ssl-cert', type=str, action='store')
parser.add_argument('--ssl-key', type=str, action='store')
args = parser.parse_args()
Config.PASSWORD = args.password
def run():
print('DB URI: {}'.format(Config.DB_URI))
conn = MySQLdb.connect(
host=args.host,
user=args.user,
passwd=Config.PASSWORD,
db=args.db,
)
if __name__ == "__main__":
run()
|
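Note that run() in this record prints Config.DB_URI, an attribute the script never defines, so it would raise AttributeError before attempting to connect; a minimal variant of the check using only what the script actually sets up might look like this (the output text is an assumption):

def run():
    # Hypothetical rewrite of run() that avoids the undefined Config.DB_URI.
    print('Connecting to {0}@{1}/{2}'.format(args.user, args.host, args.db))
    conn = MySQLdb.connect(
        host=args.host,
        user=args.user,
        passwd=Config.PASSWORD,
        db=args.db,
    )
    print('Connected; server version: {0}'.format(conn.get_server_info()))
    conn.close()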
|
8486d506ac619b2bb6caceb435c20047d7398603
|
stacked_generalization/example/joblibed_classification.py
|
stacked_generalization/example/joblibed_classification.py
|
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from sklearn import datasets
from sklearn.cross_validation import StratifiedKFold
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils.validation import check_random_state
from stacked_generalization.lib.joblibed import JoblibedClassifier
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# Joblibed model
rf = RandomForestClassifier(n_estimators=40,
criterion='gini',
random_state=1)
clf = JoblibedClassifier(rf, "rf")
train_idx, test_idx = list(StratifiedKFold(iris.target, 3))[0]
xs_train = iris.data[train_idx]
y_train = iris.target[train_idx]
xs_test = iris.data[test_idx]
y_test = iris.target[test_idx]
print("First fit and prediction (not cached).")
clf.fit(xs_train, y_train, train_idx)
score = clf.score(xs_test, y_test, test_idx)
print('Classfier score: {0}'.format(score))
print("Second fit and prediction (load cache).")
clf.fit(xs_train, y_train, train_idx)
score = clf.score(xs_test, y_test, test_idx)
print('Classfier score: {0}'.format(score))
|
Add example of joblibed classifier
|
Add example of joblibed classifier
|
Python
|
apache-2.0
|
fukatani/stacked_generalization
|
Add example of joblibed classifier
|
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from sklearn import datasets
from sklearn.cross_validation import StratifiedKFold
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils.validation import check_random_state
from stacked_generalization.lib.joblibed import JoblibedClassifier
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# Joblibed model
rf = RandomForestClassifier(n_estimators=40,
criterion='gini',
random_state=1)
clf = JoblibedClassifier(rf, "rf")
train_idx, test_idx = list(StratifiedKFold(iris.target, 3))[0]
xs_train = iris.data[train_idx]
y_train = iris.target[train_idx]
xs_test = iris.data[test_idx]
y_test = iris.target[test_idx]
print("First fit and prediction (not cached).")
clf.fit(xs_train, y_train, train_idx)
score = clf.score(xs_test, y_test, test_idx)
print('Classfier score: {0}'.format(score))
print("Second fit and prediction (load cache).")
clf.fit(xs_train, y_train, train_idx)
score = clf.score(xs_test, y_test, test_idx)
print('Classfier score: {0}'.format(score))
|
<commit_before><commit_msg>Add example of joblibed classifier<commit_after>
|
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from sklearn import datasets
from sklearn.cross_validation import StratifiedKFold
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils.validation import check_random_state
from stacked_generalization.lib.joblibed import JoblibedClassifier
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# Joblibed model
rf = RandomForestClassifier(n_estimators=40,
criterion='gini',
random_state=1)
clf = JoblibedClassifier(rf, "rf")
train_idx, test_idx = list(StratifiedKFold(iris.target, 3))[0]
xs_train = iris.data[train_idx]
y_train = iris.target[train_idx]
xs_test = iris.data[test_idx]
y_test = iris.target[test_idx]
print("First fit and prediction (not cached).")
clf.fit(xs_train, y_train, train_idx)
score = clf.score(xs_test, y_test, test_idx)
print('Classfier score: {0}'.format(score))
print("Second fit and prediction (load cache).")
clf.fit(xs_train, y_train, train_idx)
score = clf.score(xs_test, y_test, test_idx)
print('Classfier score: {0}'.format(score))
|
Add example of joblibed classifierimport os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from sklearn import datasets
from sklearn.cross_validation import StratifiedKFold
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils.validation import check_random_state
from stacked_generalization.lib.joblibed import JoblibedClassifier
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# Joblibed model
rf = RandomForestClassifier(n_estimators=40,
criterion='gini',
random_state=1)
clf = JoblibedClassifier(rf, "rf")
train_idx, test_idx = list(StratifiedKFold(iris.target, 3))[0]
xs_train = iris.data[train_idx]
y_train = iris.target[train_idx]
xs_test = iris.data[test_idx]
y_test = iris.target[test_idx]
print("First fit and prediction (not cached).")
clf.fit(xs_train, y_train, train_idx)
score = clf.score(xs_test, y_test, test_idx)
print('Classfier score: {0}'.format(score))
print("Second fit and prediction (load cache).")
clf.fit(xs_train, y_train, train_idx)
score = clf.score(xs_test, y_test, test_idx)
print('Classfier score: {0}'.format(score))
|
<commit_before><commit_msg>Add example of joblibed classifier<commit_after>import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from sklearn import datasets
from sklearn.cross_validation import StratifiedKFold
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils.validation import check_random_state
from stacked_generalization.lib.joblibed import JoblibedClassifier
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# Joblibed model
rf = RandomForestClassifier(n_estimators=40,
criterion='gini',
random_state=1)
clf = JoblibedClassifier(rf, "rf")
train_idx, test_idx = list(StratifiedKFold(iris.target, 3))[0]
xs_train = iris.data[train_idx]
y_train = iris.target[train_idx]
xs_test = iris.data[test_idx]
y_test = iris.target[test_idx]
print("First fit and prediction (not cached).")
clf.fit(xs_train, y_train, train_idx)
score = clf.score(xs_test, y_test, test_idx)
print('Classfier score: {0}'.format(score))
print("Second fit and prediction (load cache).")
clf.fit(xs_train, y_train, train_idx)
score = clf.score(xs_test, y_test, test_idx)
print('Classfier score: {0}'.format(score))
|
|
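The example above uses the pre-0.18 sklearn.cross_validation API (StratifiedKFold(iris.target, 3)); on current scikit-learn the equivalent single fold would be taken roughly like this sketch:

from sklearn.model_selection import StratifiedKFold

skf = StratifiedKFold(n_splits=3)
train_idx, test_idx = next(skf.split(iris.data, iris.target))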
bdbbddf86a6137c1edf96ab8505611a4fc2ab30e
|
sunbrella.py
|
sunbrella.py
|
import urllib2
import json
import yaml
from os.path import expanduser, isfile
import datetime
from dateutil import parser
from pytz import reference
CACHE_PATH = expanduser('~/.sunbrella_cache.json')
def get_cache(api_key, latitude, longitude):
if isfile(CACHE_PATH):
try:
cache = json.load(open(CACHE_PATH))
except:
return None, None
data = cache['data']
headers = cache['headers']
####
return data, headers
if latitude == data['latitude'] and longitude == data['longitude']:
now = datetime.datetime.now(tz=reference.LocalTimezone())
if now < parser.parse(headers['expires']):
return data, headers
return None, None
def get_weather(api_key, latitude, longitude, cache_expiry=None):
data, headers = get_cache(api_key, latitude, longitude)
if data is not None and headers is not None:
return data, headers
url = 'https://api.forecast.io/forecast/%s/%s,%s' % (
api_key, latitude, longitude)
response = urllib2.urlopen(url)
headers = dict(response.info())
data = json.loads(response.read())
json.dump({'data': data, 'headers': headers}, open(CACHE_PATH, 'w'))
return data, headers
if __name__ == '__main__':
if isfile('config.yaml'):
config = yaml.load(open('config.yaml'))
elif isfile('.sunbrella.yaml'):
config = yaml.load(open('.sunbrella.yaml'))
else:
raise Exception('No config file found')
data, headers = get_weather(**config)
|
Make working cached API call
|
Make working cached API call
Signed-off-by: Adam Obeng <64b2b6d12bfe4baae7dad3d018f8cbf6b0e7a044@binaryeagle.com>
|
Python
|
mit
|
adamobeng/sunbrella
|
Make working cached API call
Signed-off-by: Adam Obeng <64b2b6d12bfe4baae7dad3d018f8cbf6b0e7a044@binaryeagle.com>
|
import urllib2
import json
import yaml
from os.path import expanduser, isfile
import datetime
from dateutil import parser
from pytz import reference
CACHE_PATH = expanduser('~/.sunbrella_cache.json')
def get_cache(api_key, latitude, longitude):
if isfile(CACHE_PATH):
try:
cache = json.load(open(CACHE_PATH))
except:
return None, None
data = cache['data']
headers = cache['headers']
####
return data, headers
if latitude == data['latitude'] and longitude == data['longitude']:
now = datetime.datetime.now(tz=reference.LocalTimezone())
if now < parser.parse(headers['expires']):
return data, headers
return None, None
def get_weather(api_key, latitude, longitude, cache_expiry=None):
data, headers = get_cache(api_key, latitude, longitude)
if data is not None and headers is not None:
return data, headers
url = 'https://api.forecast.io/forecast/%s/%s,%s' % (
api_key, latitude, longitude)
response = urllib2.urlopen(url)
headers = dict(response.info())
data = json.loads(response.read())
json.dump({'data': data, 'headers': headers}, open(CACHE_PATH, 'w'))
return data, headers
if __name__ == '__main__':
if isfile('config.yaml'):
config = yaml.load(open('config.yaml'))
elif isfile('.sunbrella.yaml'):
config = yaml.load(open('.sunbrella.yaml'))
else:
raise Exception('No config file found')
data, headers = get_weather(**config)
|
<commit_before><commit_msg>Make working cached API call
Signed-off-by: Adam Obeng <64b2b6d12bfe4baae7dad3d018f8cbf6b0e7a044@binaryeagle.com><commit_after>
|
import urllib2
import json
import yaml
from os.path import expanduser, isfile
import datetime
from dateutil import parser
from pytz import reference
CACHE_PATH = expanduser('~/.sunbrella_cache.json')
def get_cache(api_key, latitude, longitude):
if isfile(CACHE_PATH):
try:
cache = json.load(open(CACHE_PATH))
except:
return None, None
data = cache['data']
headers = cache['headers']
####
return data, headers
if latitude == data['latitude'] and longitude == data['longitude']:
now = datetime.datetime.now(tz=reference.LocalTimezone())
if now < parser.parse(headers['expires']):
return data, headers
return None, None
def get_weather(api_key, latitude, longitude, cache_expiry=None):
data, headers = get_cache(api_key, latitude, longitude)
if data is not None and headers is not None:
return data, headers
url = 'https://api.forecast.io/forecast/%s/%s,%s' % (
api_key, latitude, longitude)
response = urllib2.urlopen(url)
headers = dict(response.info())
data = json.loads(response.read())
json.dump({'data': data, 'headers': headers}, open(CACHE_PATH, 'w'))
return data, headers
if __name__ == '__main__':
if isfile('config.yaml'):
config = yaml.load(open('config.yaml'))
elif isfile('.sunbrella.yaml'):
config = yaml.load(open('.sunbrella.yaml'))
else:
raise Exception('No config file found')
data, headers = get_weather(**config)
|
Make working cached API call
Signed-off-by: Adam Obeng <64b2b6d12bfe4baae7dad3d018f8cbf6b0e7a044@binaryeagle.com>import urllib2
import json
import yaml
from os.path import expanduser, isfile
import datetime
from dateutil import parser
from pytz import reference
CACHE_PATH = expanduser('~/.sunbrella_cache.json')
def get_cache(api_key, latitude, longitude):
if isfile(CACHE_PATH):
try:
cache = json.load(open(CACHE_PATH))
except:
return None, None
data = cache['data']
headers = cache['headers']
####
return data, headers
if latitude == data['latitude'] and longitude == data['longitude']:
now = datetime.datetime.now(tz=reference.LocalTimezone())
if now < parser.parse(headers['expires']):
return data, headers
return None, None
def get_weather(api_key, latitude, longitude, cache_expiry=None):
data, headers = get_cache(api_key, latitude, longitude)
if data is not None and headers is not None:
return data, headers
url = 'https://api.forecast.io/forecast/%s/%s,%s' % (
api_key, latitude, longitude)
response = urllib2.urlopen(url)
headers = dict(response.info())
data = json.loads(response.read())
json.dump({'data': data, 'headers': headers}, open(CACHE_PATH, 'w'))
return data, headers
if __name__ == '__main__':
if isfile('config.yaml'):
config = yaml.load(open('config.yaml'))
elif isfile('.sunbrella.yaml'):
config = yaml.load(open('.sunbrella.yaml'))
else:
raise Exception('No config file found')
data, headers = get_weather(**config)
|
<commit_before><commit_msg>Make working cached API call
Signed-off-by: Adam Obeng <64b2b6d12bfe4baae7dad3d018f8cbf6b0e7a044@binaryeagle.com><commit_after>import urllib2
import json
import yaml
from os.path import expanduser, isfile
import datetime
from dateutil import parser
from pytz import reference
CACHE_PATH = expanduser('~/.sunbrella_cache.json')
def get_cache(api_key, latitude, longitude):
if isfile(CACHE_PATH):
try:
cache = json.load(open(CACHE_PATH))
except:
return None, None
data = cache['data']
headers = cache['headers']
####
return data, headers
if latitude == data['latitude'] and longitude == data['longitude']:
now = datetime.datetime.now(tz=reference.LocalTimezone())
if now < parser.parse(headers['expires']):
return data, headers
return None, None
def get_weather(api_key, latitude, longitude, cache_expiry=None):
data, headers = get_cache(api_key, latitude, longitude)
if data is not None and headers is not None:
return data, headers
url = 'https://api.forecast.io/forecast/%s/%s,%s' % (
api_key, latitude, longitude)
response = urllib2.urlopen(url)
headers = dict(response.info())
data = json.loads(response.read())
json.dump({'data': data, 'headers': headers}, open(CACHE_PATH, 'w'))
return data, headers
if __name__ == '__main__':
if isfile('config.yaml'):
config = yaml.load(open('config.yaml'))
elif isfile('.sunbrella.yaml'):
config = yaml.load(open('.sunbrella.yaml'))
else:
raise Exception('No config file found')
data, headers = get_weather(**config)
|
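As written, get_cache returns right after loading the file, so the location and expiry checks under the #### marker are unreachable; a sketch of the presumably intended ordering (error handling kept minimal):

def get_cache(api_key, latitude, longitude):
    # Hypothetical reordering so the location match and expiry check actually run.
    if not isfile(CACHE_PATH):
        return None, None
    try:
        cache = json.load(open(CACHE_PATH))
    except (ValueError, IOError):
        return None, None
    data, headers = cache['data'], cache['headers']
    if latitude == data['latitude'] and longitude == data['longitude']:
        now = datetime.datetime.now(tz=reference.LocalTimezone())
        if now < parser.parse(headers['expires']):
            return data, headers
    return None, None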
|
82da46fd16b89839036195ce862167f98c880f1f
|
github2fedmsg/custom_openid.py
|
github2fedmsg/custom_openid.py
|
import velruse.api
import velruse.providers.openid as vr
from pyramid.security import NO_PERMISSION_REQUIRED
def add_openid_login(config, realm, identity_provider):
provider = SingleOpenIDConsumer(
'openid', 'openid',
realm=realm,
identity_provider=identity_provider,
storage=None,
)
login_path='/login/openid'
callback_path='/login/openid/callback'
config.add_route(provider.login_route, login_path)
config.add_view(provider, attr='login', route_name=provider.login_route,
permission=NO_PERMISSION_REQUIRED)
config.add_route(provider.callback_route, callback_path,
use_global_views=True,
factory=provider.callback)
velruse.api.register_provider(config, 'openid', provider)
class SingleOpenIDConsumer(vr.OpenIDConsumer):
def __init__(self,
name,
_type,
realm=None,
identity_provider=None,
storage=None,
context=vr.OpenIDAuthenticationComplete):
super(SingleOpenIDConsumer, self).__init__(
name, _type, realm, storage, context)
self.identity_provider = identity_provider
def _lookup_identifier(self, request, url):
return self.identity_provider
|
Add forgotten custom openid plugin to velruse.
|
Add forgotten custom openid plugin to velruse.
|
Python
|
agpl-3.0
|
pombredanne/github2fedmsg,fedora-infra/github2fedmsg,pombredanne/github2fedmsg,fedora-infra/github2fedmsg
|
Add forgotten custom openid plugin to velruse.
|
import velruse.api
import velruse.providers.openid as vr
from pyramid.security import NO_PERMISSION_REQUIRED
def add_openid_login(config, realm, identity_provider):
provider = SingleOpenIDConsumer(
'openid', 'openid',
realm=realm,
identity_provider=identity_provider,
storage=None,
)
login_path='/login/openid'
callback_path='/login/openid/callback'
config.add_route(provider.login_route, login_path)
config.add_view(provider, attr='login', route_name=provider.login_route,
permission=NO_PERMISSION_REQUIRED)
config.add_route(provider.callback_route, callback_path,
use_global_views=True,
factory=provider.callback)
velruse.api.register_provider(config, 'openid', provider)
class SingleOpenIDConsumer(vr.OpenIDConsumer):
def __init__(self,
name,
_type,
realm=None,
identity_provider=None,
storage=None,
context=vr.OpenIDAuthenticationComplete):
super(SingleOpenIDConsumer, self).__init__(
name, _type, realm, storage, context)
self.identity_provider = identity_provider
def _lookup_identifier(self, request, url):
return self.identity_provider
|
<commit_before><commit_msg>Add forgotten custom openid plugin to velruse.<commit_after>
|
import velruse.api
import velruse.providers.openid as vr
from pyramid.security import NO_PERMISSION_REQUIRED
def add_openid_login(config, realm, identity_provider):
provider = SingleOpenIDConsumer(
'openid', 'openid',
realm=realm,
identity_provider=identity_provider,
storage=None,
)
login_path='/login/openid'
callback_path='/login/openid/callback'
config.add_route(provider.login_route, login_path)
config.add_view(provider, attr='login', route_name=provider.login_route,
permission=NO_PERMISSION_REQUIRED)
config.add_route(provider.callback_route, callback_path,
use_global_views=True,
factory=provider.callback)
velruse.api.register_provider(config, 'openid', provider)
class SingleOpenIDConsumer(vr.OpenIDConsumer):
def __init__(self,
name,
_type,
realm=None,
identity_provider=None,
storage=None,
context=vr.OpenIDAuthenticationComplete):
super(SingleOpenIDConsumer, self).__init__(
name, _type, realm, storage, context)
self.identity_provider = identity_provider
def _lookup_identifier(self, request, url):
return self.identity_provider
|
Add forgotten custom openid plugin to velruse.import velruse.api
import velruse.providers.openid as vr
from pyramid.security import NO_PERMISSION_REQUIRED
def add_openid_login(config, realm, identity_provider):
provider = SingleOpenIDConsumer(
'openid', 'openid',
realm=realm,
identity_provider=identity_provider,
storage=None,
)
login_path='/login/openid'
callback_path='/login/openid/callback'
config.add_route(provider.login_route, login_path)
config.add_view(provider, attr='login', route_name=provider.login_route,
permission=NO_PERMISSION_REQUIRED)
config.add_route(provider.callback_route, callback_path,
use_global_views=True,
factory=provider.callback)
velruse.api.register_provider(config, 'openid', provider)
class SingleOpenIDConsumer(vr.OpenIDConsumer):
def __init__(self,
name,
_type,
realm=None,
identity_provider=None,
storage=None,
context=vr.OpenIDAuthenticationComplete):
super(SingleOpenIDConsumer, self).__init__(
name, _type, realm, storage, context)
self.identity_provider = identity_provider
def _lookup_identifier(self, request, url):
return self.identity_provider
|
<commit_before><commit_msg>Add forgotten custom openid plugin to velruse.<commit_after>import velruse.api
import velruse.providers.openid as vr
from pyramid.security import NO_PERMISSION_REQUIRED
def add_openid_login(config, realm, identity_provider):
provider = SingleOpenIDConsumer(
'openid', 'openid',
realm=realm,
identity_provider=identity_provider,
storage=None,
)
login_path='/login/openid'
callback_path='/login/openid/callback'
config.add_route(provider.login_route, login_path)
config.add_view(provider, attr='login', route_name=provider.login_route,
permission=NO_PERMISSION_REQUIRED)
config.add_route(provider.callback_route, callback_path,
use_global_views=True,
factory=provider.callback)
velruse.api.register_provider(config, 'openid', provider)
class SingleOpenIDConsumer(vr.OpenIDConsumer):
def __init__(self,
name,
_type,
realm=None,
identity_provider=None,
storage=None,
context=vr.OpenIDAuthenticationComplete):
super(SingleOpenIDConsumer, self).__init__(
name, _type, realm, storage, context)
self.identity_provider = identity_provider
def _lookup_identifier(self, request, url):
return self.identity_provider
|
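A sketch of how add_openid_login above might be wired up at Pyramid configuration time; the realm and provider URL are placeholders, not values taken from github2fedmsg:

from github2fedmsg.custom_openid import add_openid_login

def includeme(config):
    # Hypothetical application setup pinning a single OpenID provider.
    add_openid_login(
        config,
        realm='https://apps.example.org/',
        identity_provider='https://id.example.org/openid/',
    )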
|
d5b155f51eb4b204e5812e8339579e9d91066103
|
configstore/tests/test_docker_secret.py
|
configstore/tests/test_docker_secret.py
|
import os
from unittest import TestCase
try:
from unittest import mock
except ImportError:
import mock
from configstore.backends.docker_secret import DockerSecretBackend
from .test_data import DEFAULT_KEY, DEFAULT_VALUE, CUSTOM_PATH
class TestDockerSecretBackend(TestCase):
def test_get_secret(self):
mocked_open = mock.mock_open(read_data=DEFAULT_VALUE)
with mock.patch('configstore.backends.docker_secret.open',
mocked_open,
create=True):
d = DockerSecretBackend()
val = d.get_config(DEFAULT_KEY)
self.assertEqual(DEFAULT_VALUE, val)
def test_secrets_path(self):
mocked_open = mock.MagicMock()
with mock.patch('configstore.backends.docker_secret.open',
mocked_open,
create=True):
d = DockerSecretBackend(CUSTOM_PATH)
d.get_config(DEFAULT_KEY)
mocked_open.assert_called_once_with(
os.path.join(CUSTOM_PATH, DEFAULT_KEY)
)
|
Add unittests for docker_secret backend
|
Add unittests for docker_secret backend
|
Python
|
mit
|
caravancoop/configstore
|
Add unittests for docker_secret backend
|
import os
from unittest import TestCase
try:
from unittest import mock
except ImportError:
import mock
from configstore.backends.docker_secret import DockerSecretBackend
from .test_data import DEFAULT_KEY, DEFAULT_VALUE, CUSTOM_PATH
class TestDockerSecretBackend(TestCase):
def test_get_secret(self):
mocked_open = mock.mock_open(read_data=DEFAULT_VALUE)
with mock.patch('configstore.backends.docker_secret.open',
mocked_open,
create=True):
d = DockerSecretBackend()
val = d.get_config(DEFAULT_KEY)
self.assertEqual(DEFAULT_VALUE, val)
def test_secrets_path(self):
mocked_open = mock.MagicMock()
with mock.patch('configstore.backends.docker_secret.open',
mocked_open,
create=True):
d = DockerSecretBackend(CUSTOM_PATH)
d.get_config(DEFAULT_KEY)
mocked_open.assert_called_once_with(
os.path.join(CUSTOM_PATH, DEFAULT_KEY)
)
|
<commit_before><commit_msg>Add unittests for docker_secret backend<commit_after>
|
import os
from unittest import TestCase
try:
from unittest import mock
except ImportError:
import mock
from configstore.backends.docker_secret import DockerSecretBackend
from .test_data import DEFAULT_KEY, DEFAULT_VALUE, CUSTOM_PATH
class TestDockerSecretBackend(TestCase):
def test_get_secret(self):
mocked_open = mock.mock_open(read_data=DEFAULT_VALUE)
with mock.patch('configstore.backends.docker_secret.open',
mocked_open,
create=True):
d = DockerSecretBackend()
val = d.get_config(DEFAULT_KEY)
self.assertEqual(DEFAULT_VALUE, val)
def test_secrets_path(self):
mocked_open = mock.MagicMock()
with mock.patch('configstore.backends.docker_secret.open',
mocked_open,
create=True):
d = DockerSecretBackend(CUSTOM_PATH)
d.get_config(DEFAULT_KEY)
mocked_open.assert_called_once_with(
os.path.join(CUSTOM_PATH, DEFAULT_KEY)
)
|
Add unittests for docker_secret backendimport os
from unittest import TestCase
try:
from unittest import mock
except ImportError:
import mock
from configstore.backends.docker_secret import DockerSecretBackend
from .test_data import DEFAULT_KEY, DEFAULT_VALUE, CUSTOM_PATH
class TestDockerSecretBackend(TestCase):
def test_get_secret(self):
mocked_open = mock.mock_open(read_data=DEFAULT_VALUE)
with mock.patch('configstore.backends.docker_secret.open',
mocked_open,
create=True):
d = DockerSecretBackend()
val = d.get_config(DEFAULT_KEY)
self.assertEqual(DEFAULT_VALUE, val)
def test_secrets_path(self):
mocked_open = mock.MagicMock()
with mock.patch('configstore.backends.docker_secret.open',
mocked_open,
create=True):
d = DockerSecretBackend(CUSTOM_PATH)
d.get_config(DEFAULT_KEY)
mocked_open.assert_called_once_with(
os.path.join(CUSTOM_PATH, DEFAULT_KEY)
)
|
<commit_before><commit_msg>Add unittests for docker_secret backend<commit_after>import os
from unittest import TestCase
try:
from unittest import mock
except ImportError:
import mock
from configstore.backends.docker_secret import DockerSecretBackend
from .test_data import DEFAULT_KEY, DEFAULT_VALUE, CUSTOM_PATH
class TestDockerSecretBackend(TestCase):
def test_get_secret(self):
mocked_open = mock.mock_open(read_data=DEFAULT_VALUE)
with mock.patch('configstore.backends.docker_secret.open',
mocked_open,
create=True):
d = DockerSecretBackend()
val = d.get_config(DEFAULT_KEY)
self.assertEqual(DEFAULT_VALUE, val)
def test_secrets_path(self):
mocked_open = mock.MagicMock()
with mock.patch('configstore.backends.docker_secret.open',
mocked_open,
create=True):
d = DockerSecretBackend(CUSTOM_PATH)
d.get_config(DEFAULT_KEY)
mocked_open.assert_called_once_with(
os.path.join(CUSTOM_PATH, DEFAULT_KEY)
)
|
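The backend these tests exercise is not part of this record; judging only from the calls made above, a minimal DockerSecretBackend could look roughly like this (the default secrets path is an assumption):

import os

class DockerSecretBackend(object):
    # Hypothetical sketch inferred from the tests; not the real configstore code.
    def __init__(self, secrets_path='/run/secrets'):
        self.secrets_path = secrets_path

    def get_config(self, key):
        with open(os.path.join(self.secrets_path, key)) as f:
            return f.read()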
|
bcb30dbad2f86781ddd1ee9f38b16c4d68ac57da
|
maxwellbloch/ob_solve.py
|
maxwellbloch/ob_solve.py
|
# -*- coding: utf-8 -*-
import os
import sys
import json
import qutip as qu
from maxwellbloch import ob_atom
# Main
class OBSolve(object):
"""docstring for OBSolve"""
def __init__(self, ob_atom={}, t_min=0.0, t_max=1.0, t_steps=100,
method='mesolve', opts={}):
self.build_ob_atom(ob_atom)
self.build_tlist(t_min, t_max, t_steps)
self.method = method
self.build_opts(opts)
def __repr__(self):
return ("OBSolve(ob_atom={0}, " +
"t_min={1}, " +
"t_max={2}, " +
"t_steps={3}, " +
"method={4}, " +
"opts={5})").format(self.ob_atom,
self.t_min,
self.t_max,
self.t_steps,
self.method,
self.opts)
def build_ob_atom(self, ob_atom_dict):
self.ob_atom = ob_atom.OBAtom(**ob_atom_dict)
return self.ob_atom
def build_tlist(self, t_min, t_max, t_steps):
from numpy import linspace
self.t_min=t_min
self.t_max=t_max
self.t_steps=t_steps
self.tlist = linspace(t_min, t_max, t_steps+1)
return self.tlist
def build_opts(self, opts):
self.opts = qu.Options()
return self.opts
def solve(self, method=self.method, rho0=None, e_ops=[], opts=self.opts,
recalc=True, savefile=None, show_pbar=False):
if method == 'mesolve':
ob_obj.mesolve(self.tlist, rho0=rho0, e_ops=e_ops,
opts=qu.Options(), recalc=True, savefile=None,
show_pbar=False)
@classmethod
def from_json_str(cls, json_str):
json_dict = json.loads(json_str)
return cls(**json_dict)
@classmethod
def from_json(cls, file_path):
with open(file_path) as json_file:
json_dict = json.load(json_file)
return cls(**json_dict)
def main():
print(OBSolve())
if __name__ == '__main__':
status = main()
sys.exit(status)
|
Add OBSolve class and from_json methods
|
Add OBSolve class and from_json methods
|
Python
|
mit
|
tommyogden/maxwellbloch,tommyogden/maxwellbloch
|
Add OBSolve class and from_json methods
|
# -*- coding: utf-8 -*-
import os
import sys
import json
import qutip as qu
from maxwellbloch import ob_atom
# Main
class OBSolve(object):
"""docstring for OBSolve"""
def __init__(self, ob_atom={}, t_min=0.0, t_max=1.0, t_steps=100,
method='mesolve', opts={}):
self.build_ob_atom(ob_atom)
self.build_tlist(t_min, t_max, t_steps)
self.method = method
self.build_opts(opts)
def __repr__(self):
return ("OBSolve(ob_atom={0}, " +
"t_min={1}, " +
"t_max={2}, " +
"t_steps={3}, " +
"method={4}, " +
"opts={5})").format(self.ob_atom,
self.t_min,
self.t_max,
self.t_steps,
self.method,
self.opts)
def build_ob_atom(self, ob_atom_dict):
self.ob_atom = ob_atom.OBAtom(**ob_atom_dict)
return self.ob_atom
def build_tlist(self, t_min, t_max, t_steps):
from numpy import linspace
self.t_min=t_min
self.t_max=t_max
self.t_steps=t_steps
self.tlist = linspace(t_min, t_max, t_steps+1)
return self.tlist
def build_opts(self, opts):
self.opts = qu.Options()
return self.opts
def solve(self, method=self.method, rho0=None, e_ops=[], opts=self.opts,
recalc=True, savefile=None, show_pbar=False):
if method == 'mesolve':
ob_obj.mesolve(self.tlist, rho0=rho0, e_ops=e_ops,
opts=qu.Options(), recalc=True, savefile=None,
show_pbar=False)
@classmethod
def from_json_str(cls, json_str):
json_dict = json.loads(json_str)
return cls(**json_dict)
@classmethod
def from_json(cls, file_path):
with open(file_path) as json_file:
json_dict = json.load(json_file)
return cls(**json_dict)
def main():
print(OBSolve())
if __name__ == '__main__':
status = main()
sys.exit(status)
|
<commit_before><commit_msg>Add OBSolve class and from_json methods<commit_after>
|
# -*- coding: utf-8 -*-
import os
import sys
import json
import qutip as qu
from maxwellbloch import ob_atom
# Main
class OBSolve(object):
"""docstring for OBSolve"""
def __init__(self, ob_atom={}, t_min=0.0, t_max=1.0, t_steps=100,
method='mesolve', opts={}):
self.build_ob_atom(ob_atom)
self.build_tlist(t_min, t_max, t_steps)
self.method = method
self.build_opts(opts)
def __repr__(self):
return ("OBSolve(ob_atom={0}, " +
"t_min={1}, " +
"t_max={2}, " +
"t_steps={3}, " +
"method={4}, " +
"opts={5})").format(self.ob_atom,
self.t_min,
self.t_max,
self.t_steps,
self.method,
self.opts)
def build_ob_atom(self, ob_atom_dict):
self.ob_atom = ob_atom.OBAtom(**ob_atom_dict)
return self.ob_atom
def build_tlist(self, t_min, t_max, t_steps):
from numpy import linspace
self.t_min=t_min
self.t_max=t_max
self.t_steps=t_steps
self.tlist = linspace(t_min, t_max, t_steps+1)
return self.tlist
def build_opts(self, opts):
self.opts = qu.Options()
return self.opts
def solve(self, method=self.method, rho0=None, e_ops=[], opts=self.opts,
recalc=True, savefile=None, show_pbar=False):
if method == 'mesolve':
ob_obj.mesolve(self.tlist, rho0=rho0, e_ops=e_ops,
opts=qu.Options(), recalc=True, savefile=None,
show_pbar=False)
@classmethod
def from_json_str(cls, json_str):
json_dict = json.loads(json_str)
return cls(**json_dict)
@classmethod
def from_json(cls, file_path):
with open(file_path) as json_file:
json_dict = json.load(json_file)
return cls(**json_dict)
def main():
print(OBSolve())
if __name__ == '__main__':
status = main()
sys.exit(status)
|
Add OBSolve class and from_json methods# -*- coding: utf-8 -*-
import os
import sys
import json
import qutip as qu
from maxwellbloch import ob_atom
# Main
class OBSolve(object):
"""docstring for OBSolve"""
def __init__(self, ob_atom={}, t_min=0.0, t_max=1.0, t_steps=100,
method='mesolve', opts={}):
self.build_ob_atom(ob_atom)
self.build_tlist(t_min, t_max, t_steps)
self.method = method
self.build_opts(opts)
def __repr__(self):
return ("OBSolve(ob_atom={0}, " +
"t_min={1}, " +
"t_max={2}, " +
"t_steps={3}, " +
"method={4}, " +
"opts={5})").format(self.ob_atom,
self.t_min,
self.t_max,
self.t_steps,
self.method,
self.opts)
def build_ob_atom(self, ob_atom_dict):
self.ob_atom = ob_atom.OBAtom(**ob_atom_dict)
return self.ob_atom
def build_tlist(self, t_min, t_max, t_steps):
from numpy import linspace
self.t_min=t_min
self.t_max=t_max
self.t_steps=t_steps
self.tlist = linspace(t_min, t_max, t_steps+1)
return self.tlist
def build_opts(self, opts):
self.opts = qu.Options()
return self.opts
def solve(self, method=self.method, rho0=None, e_ops=[], opts=self.opts,
recalc=True, savefile=None, show_pbar=False):
if method == 'mesolve':
ob_obj.mesolve(self.tlist, rho0=rho0, e_ops=e_ops,
opts=qu.Options(), recalc=True, savefile=None,
show_pbar=False)
@classmethod
def from_json_str(cls, json_str):
json_dict = json.loads(json_str)
return cls(**json_dict)
@classmethod
def from_json(cls, file_path):
with open(file_path) as json_file:
json_dict = json.load(json_file)
return cls(**json_dict)
def main():
print(OBSolve())
if __name__ == '__main__':
status = main()
sys.exit(status)
|
<commit_before><commit_msg>Add OBSolve class and from_json methods<commit_after># -*- coding: utf-8 -*-
import os
import sys
import json
import qutip as qu
from maxwellbloch import ob_atom
# Main
class OBSolve(object):
"""docstring for OBSolve"""
def __init__(self, ob_atom={}, t_min=0.0, t_max=1.0, t_steps=100,
method='mesolve', opts={}):
self.build_ob_atom(ob_atom)
self.build_tlist(t_min, t_max, t_steps)
self.method = method
self.build_opts(opts)
def __repr__(self):
return ("OBSolve(ob_atom={0}, " +
"t_min={1}, " +
"t_max={2}, " +
"t_steps={3}, " +
"method={4}, " +
"opts={5})").format(self.ob_atom,
self.t_min,
self.t_max,
self.t_steps,
self.method,
self.opts)
def build_ob_atom(self, ob_atom_dict):
self.ob_atom = ob_atom.OBAtom(**ob_atom_dict)
return self.ob_atom
def build_tlist(self, t_min, t_max, t_steps):
from numpy import linspace
self.t_min=t_min
self.t_max=t_max
self.t_steps=t_steps
self.tlist = linspace(t_min, t_max, t_steps+1)
return self.tlist
def build_opts(self, opts):
self.opts = qu.Options()
return self.opts
def solve(self, method=self.method, rho0=None, e_ops=[], opts=self.opts,
recalc=True, savefile=None, show_pbar=False):
if method == 'mesolve':
ob_obj.mesolve(self.tlist, rho0=rho0, e_ops=e_ops,
opts=qu.Options(), recalc=True, savefile=None,
show_pbar=False)
@classmethod
def from_json_str(cls, json_str):
json_dict = json.loads(json_str)
return cls(**json_dict)
@classmethod
def from_json(cls, file_path):
with open(file_path) as json_file:
json_dict = json.load(json_file)
return cls(**json_dict)
def main():
print(OBSolve())
if __name__ == '__main__':
status = main()
sys.exit(status)
|
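Two details keep the module above from importing and running as recorded: solve() uses self.method and self.opts as default argument values (self is not in scope on a def line), and it calls an undefined ob_obj. A runnable variant of just that method, assuming ob_obj was meant to be self.ob_atom, might be:

    def solve(self, method=None, rho0=None, e_ops=[], opts=None,
              recalc=True, savefile=None, show_pbar=False):
        # Fall back to the values stored on the instance.
        method = self.method if method is None else method
        opts = self.opts if opts is None else opts
        if method == 'mesolve':
            return self.ob_atom.mesolve(self.tlist, rho0=rho0, e_ops=e_ops,
                                        opts=opts, recalc=recalc,
                                        savefile=savefile, show_pbar=show_pbar)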
|
1e3853c231facac6e590b66de8d9c2cc37e7f540
|
download_and_unzip_files.py
|
download_and_unzip_files.py
|
import os
import datetime
current_year = datetime.datetime.now().year
years_with_data = range(2011, current_year + 1)
remote_path = "https://ssl.netfile.com/pub2/excel/COAKBrowsable/"
for year in years_with_data:
print "Downloading " + str(year) + " data..."
filename_for_year = "efile_newest_COAK_" + str(year) + ".zip"
os.system("wget " + remote_path + filename_for_year)
os.system("unzip " + filename_for_year)
os.system("rm " + filename_for_year)
|
Add script to download and unzip files for all years
|
Add script to download and unzip files for all years
|
Python
|
bsd-3-clause
|
daguar/netfile-etl,daguar/netfile-etl
|
Add script to download and unzip files for all years
|
import os
import datetime
current_year = datetime.datetime.now().year
years_with_data = range(2011, current_year + 1)
remote_path = "https://ssl.netfile.com/pub2/excel/COAKBrowsable/"
for year in years_with_data:
print "Downloading " + str(year) + " data..."
filename_for_year = "efile_newest_COAK_" + str(year) + ".zip"
os.system("wget " + remote_path + filename_for_year)
os.system("unzip " + filename_for_year)
os.system("rm " + filename_for_year)
|
<commit_before><commit_msg>Add script to download and unzip files for all years<commit_after>
|
import os
import datetime
current_year = datetime.datetime.now().year
years_with_data = range(2011, current_year + 1)
remote_path = "https://ssl.netfile.com/pub2/excel/COAKBrowsable/"
for year in years_with_data:
print "Downloading " + str(year) + " data..."
filename_for_year = "efile_newest_COAK_" + str(year) + ".zip"
os.system("wget " + remote_path + filename_for_year)
os.system("unzip " + filename_for_year)
os.system("rm " + filename_for_year)
|
Add script to download and unzip files for all yearsimport os
import datetime
current_year = datetime.datetime.now().year
years_with_data = range(2011, current_year + 1)
remote_path = "https://ssl.netfile.com/pub2/excel/COAKBrowsable/"
for year in years_with_data:
print "Downloading " + str(year) + " data..."
filename_for_year = "efile_newest_COAK_" + str(year) + ".zip"
os.system("wget " + remote_path + filename_for_year)
os.system("unzip " + filename_for_year)
os.system("rm " + filename_for_year)
|
<commit_before><commit_msg>Add script to download and unzip files for all years<commit_after>import os
import datetime
current_year = datetime.datetime.now().year
years_with_data = range(2011, current_year + 1)
remote_path = "https://ssl.netfile.com/pub2/excel/COAKBrowsable/"
for year in years_with_data:
print "Downloading " + str(year) + " data..."
filename_for_year = "efile_newest_COAK_" + str(year) + ".zip"
os.system("wget " + remote_path + filename_for_year)
os.system("unzip " + filename_for_year)
os.system("rm " + filename_for_year)
|
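The script above shells out to wget, unzip and rm; where those tools are unavailable, a rough standard-library equivalent (Python 2, to match the print statements used above) could be:

import io
import urllib2
import zipfile

def fetch_and_extract(url):
    # Download the archive into memory and unpack it into the current directory.
    payload = urllib2.urlopen(url).read()
    zipfile.ZipFile(io.BytesIO(payload)).extractall()

for year in years_with_data:
    fetch_and_extract(remote_path + "efile_newest_COAK_" + str(year) + ".zip")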
|
97074e1bf452f12821058c035aca15c692092074
|
src/circus/module/set_metrics.py
|
src/circus/module/set_metrics.py
|
#!/usr/bin/env python
__cmdname__ = 'set_metrics'
__cmdopts__ = ''
import re
import sys
import log
import util
class Module(object):
def __init__(self, api, account):
self.api = api
self.account = account
def command(self, opts, pattern, *metrics_to_enable):
"""Set the active metrics for a check based on regular expression
This command will set the enabled metrics to exactly what matches the
pattern(s) given. Any other metrics will be disabled, regardless of
what their original setting was.
Arguments:
pattern -- Pattern for checks
metrics_to_enable -- One or more regexes for enabled metrics
"""
checks, groups = util.find_checks(self.api, pattern)
to_enable = {}
# Pick only one check per check bundle
bundles = {}
for c in checks:
if c['bundle_id'] in bundles:
continue
bundles[c['bundle_id']] = c
log.msg("Retrieving metrics for checks")
count = 0
for c in bundles.values():
count += 1
print "\r%s/%s" % (count, len(bundles)),
sys.stdout.flush()
metrics = self.api.list_metrics(check_id=c['check_id'])
to_enable[c['check_id']] = []
for metric in sorted(metrics):
for pattern in metrics_to_enable:
if re.match(pattern, metric['name']):
to_enable[c['check_id']].append(metric['name'])
log.msg("About to set enabled metrics for the following checks")
for c in bundles.values():
log.msg(" %s (%s)" % (c['name'],
', '.join(to_enable[c['check_id']])))
if util.confirm():
for c in bundles.values():
# Enable metrics here
log.msgnb("%s..." % c['name'])
# The set of metrics has changed, apply the edit
self.api.edit_check_bundle(
bundle_id=c['bundle_id'],
metric_name=to_enable[c['check_id']])
log.msgnf("Done")
|
Set metrics command (regex based metric enabling)
|
Set metrics command (regex based metric enabling)
|
Python
|
isc
|
omniti-labs/circus,omniti-labs/circus
|
Set metrics command (regex based metric enabling)
|
#!/usr/bin/env python
__cmdname__ = 'set_metrics'
__cmdopts__ = ''
import re
import sys
import log
import util
class Module(object):
def __init__(self, api, account):
self.api = api
self.account = account
def command(self, opts, pattern, *metrics_to_enable):
"""Set the active metrics for a check based on regular expression
This command will set the enabled metrics to exactly what matches the
pattern(s) given. Any other metrics will be disabled, regardless of
what their original setting was.
Arguments:
pattern -- Pattern for checks
metrics_to_enable -- One or more regexes for enabled metrics
"""
checks, groups = util.find_checks(self.api, pattern)
to_enable = {}
# Pick only one check per check bundle
bundles = {}
for c in checks:
if c['bundle_id'] in bundles:
continue
bundles[c['bundle_id']] = c
log.msg("Retrieving metrics for checks")
count = 0
for c in bundles.values():
count += 1
print "\r%s/%s" % (count, len(bundles)),
sys.stdout.flush()
metrics = self.api.list_metrics(check_id=c['check_id'])
to_enable[c['check_id']] = []
for metric in sorted(metrics):
for pattern in metrics_to_enable:
if re.match(pattern, metric['name']):
to_enable[c['check_id']].append(metric['name'])
log.msg("About to set enabled metrics for the following checks")
for c in bundles.values():
log.msg(" %s (%s)" % (c['name'],
', '.join(to_enable[c['check_id']])))
if util.confirm():
for c in bundles.values():
# Enable metrics here
log.msgnb("%s..." % c['name'])
# The set of metrics has changed, apply the edit
self.api.edit_check_bundle(
bundle_id=c['bundle_id'],
metric_name=to_enable[c['check_id']])
log.msgnf("Done")
|
<commit_before><commit_msg>Set metrics command (regex based metric enabling)<commit_after>
|
#!/usr/bin/env python
__cmdname__ = 'set_metrics'
__cmdopts__ = ''
import re
import sys
import log
import util
class Module(object):
def __init__(self, api, account):
self.api = api
self.account = account
def command(self, opts, pattern, *metrics_to_enable):
"""Set the active metrics for a check based on regular expression
This command will set the enabled metrics to exactly what matches the
pattern(s) given. Any other metrics will be disabled, regardless of
what their original setting was.
Arguments:
pattern -- Pattern for checks
metrics_to_enable -- One or more regexes for enabled metrics
"""
checks, groups = util.find_checks(self.api, pattern)
to_enable = {}
# Pick only one check per check bundle
bundles = {}
for c in checks:
if c['bundle_id'] in bundles:
continue
bundles[c['bundle_id']] = c
log.msg("Retrieving metrics for checks")
count = 0
for c in bundles.values():
count += 1
print "\r%s/%s" % (count, len(bundles)),
sys.stdout.flush()
metrics = self.api.list_metrics(check_id=c['check_id'])
to_enable[c['check_id']] = []
for metric in sorted(metrics):
for pattern in metrics_to_enable:
if re.match(pattern, metric['name']):
to_enable[c['check_id']].append(metric['name'])
log.msg("About to set enabled metrics for the following checks")
for c in bundles.values():
log.msg(" %s (%s)" % (c['name'],
', '.join(to_enable[c['check_id']])))
if util.confirm():
for c in bundles.values():
# Enable metrics here
log.msgnb("%s..." % c['name'])
# The set of metrics has changed, apply the edit
self.api.edit_check_bundle(
bundle_id=c['bundle_id'],
metric_name=to_enable[c['check_id']])
log.msgnf("Done")
|
Set metrics command (regex based metric enabling)#!/usr/bin/env python
__cmdname__ = 'set_metrics'
__cmdopts__ = ''
import re
import sys
import log
import util
class Module(object):
def __init__(self, api, account):
self.api = api
self.account = account
def command(self, opts, pattern, *metrics_to_enable):
"""Set the active metrics for a check based on regular expression
This command will set the enabled metrics to exactly what matches the
pattern(s) given. Any other metrics will be disabled, regardless of
what their original setting was.
Arguments:
pattern -- Pattern for checks
metrics_to_enable -- One or more regexes for enabled metrics
"""
checks, groups = util.find_checks(self.api, pattern)
to_enable = {}
# Pick only one check per check bundle
bundles = {}
for c in checks:
if c['bundle_id'] in bundles:
continue
bundles[c['bundle_id']] = c
log.msg("Retrieving metrics for checks")
count = 0
for c in bundles.values():
count += 1
print "\r%s/%s" % (count, len(bundles)),
sys.stdout.flush()
metrics = self.api.list_metrics(check_id=c['check_id'])
to_enable[c['check_id']] = []
for metric in sorted(metrics):
for pattern in metrics_to_enable:
if re.match(pattern, metric['name']):
to_enable[c['check_id']].append(metric['name'])
log.msg("About to set enabled metrics for the following checks")
for c in bundles.values():
log.msg(" %s (%s)" % (c['name'],
', '.join(to_enable[c['check_id']])))
if util.confirm():
for c in bundles.values():
# Enable metrics here
log.msgnb("%s..." % c['name'])
# The set of metrics has changed, apply the edit
self.api.edit_check_bundle(
bundle_id=c['bundle_id'],
metric_name=to_enable[c['check_id']])
log.msgnf("Done")
|
<commit_before><commit_msg>Set metrics command (regex based metric enabling)<commit_after>#!/usr/bin/env python
__cmdname__ = 'set_metrics'
__cmdopts__ = ''
import re
import sys
import log
import util
class Module(object):
def __init__(self, api, account):
self.api = api
self.account = account
def command(self, opts, pattern, *metrics_to_enable):
"""Set the active metrics for a check based on regular expressions
This command will set the enabled metrics to exactly what matches the
pattern(s) given. Any other metrics will be disabled, regardless of
what their original setting was.
Arguments:
pattern -- Pattern for checks
metrics_to_enable -- One or more regexes for enabled metrics
"""
checks, groups = util.find_checks(self.api, pattern)
to_enable = {}
# Pick only one check per check bundle
bundles = {}
for c in checks:
if c['bundle_id'] in bundles:
continue
bundles[c['bundle_id']] = c
log.msg("Retrieving metrics for checks")
count = 0
for c in bundles.values():
count += 1
print "\r%s/%s" % (count, len(bundles)),
sys.stdout.flush()
metrics = self.api.list_metrics(check_id=c['check_id'])
to_enable[c['check_id']] = []
for metric in sorted(metrics):
for pattern in metrics_to_enable:
if re.match(pattern, metric['name']):
to_enable[c['check_id']].append(metric['name'])
log.msg("About to set enabled metrics for the following checks")
for c in bundles.values():
log.msg(" %s (%s)" % (c['name'],
', '.join(to_enable[c['check_id']])))
if util.confirm():
for c in bundles.values():
# Enable metrics here
log.msgnb("%s..." % c['name'])
# The set of metrics has changed, apply the edit
self.api.edit_check_bundle(
bundle_id=c['bundle_id'],
metric_name=to_enable[c['check_id']])
log.msgnf("Done")
|
|
5535fa4d9c5b70347bf16931887e3c5141698e31
|
languages/python/linux-info.py
|
languages/python/linux-info.py
|
#!/usr/bin/env python
#
# Print information about the current computer.
#
import os
import re
from socket import gethostname
from platform import linux_distribution
def gethost():
'''Extract host name'''
try:
return gethostname()
except:
return "NA"
def processor():
'''Extract first processor name'''
try:
with open("/proc/cpuinfo", 'r') as the_file:
info = the_file.read().strip()
for line in info.split("\n"):
if "model name" in line:
return re.sub(".*model name.*:", "", line, 1).strip()
except:
return "N/A"
def manufacturer():
'''Extract manufacturer'''
try:
with open("/sys/class/dmi/id/sys_vendor", 'r') as the_file:
info = the_file.read().strip()
return info
except:
return "N/A"
def model():
'''Extract model'''
try:
with open("/sys/class/dmi/id/product_name", 'r') as the_file:
info = the_file.read().strip()
return info
except:
return "N/A"
def distribution():
'''Format distribution'''
try:
return ' '.join([str(x) for x in linux_distribution()]) + ' (' + os.uname()[2] + ')'
except:
return "N/A"
print " Host:", gethost()
print "Manufacturer:", manufacturer()
print " Model:", model()
print " Processor:", processor()
print "Distribution:", distribution()
|
Add python script to print Linux information.
|
Add python script to print Linux information.
|
Python
|
apache-2.0
|
sergev/vak-opensource,sergev/vak-opensource,sergev/vak-opensource,sergev/vak-opensource,sergev/vak-opensource,sergev/vak-opensource,sergev/vak-opensource,sergev/vak-opensource,sergev/vak-opensource,sergev/vak-opensource,sergev/vak-opensource,sergev/vak-opensource,sergev/vak-opensource,sergev/vak-opensource,sergev/vak-opensource,sergev/vak-opensource,sergev/vak-opensource,sergev/vak-opensource,sergev/vak-opensource
|
Add python script to print Linux information.
|
#!/usr/bin/env python
#
# Print information about the current computer.
#
import os
import re
from socket import gethostname
from platform import linux_distribution
def gethost():
'''Extract host name'''
try:
return gethostname()
except:
return "NA"
def processor():
'''Extract first processor name'''
try:
with open("/proc/cpuinfo", 'r') as the_file:
info = the_file.read().strip()
for line in info.split("\n"):
if "model name" in line:
return re.sub(".*model name.*:", "", line, 1).strip()
except:
return "N/A"
def manufacturer():
'''Extract manufacturer'''
try:
with open("/sys/class/dmi/id/sys_vendor", 'r') as the_file:
info = the_file.read().strip()
return info
except:
return "N/A"
def model():
'''Extract model'''
try:
with open("/sys/class/dmi/id/product_name", 'r') as the_file:
info = the_file.read().strip()
return info
except:
return "N/A"
def distribution():
'''Format distribution'''
try:
return ' '.join([str(x) for x in linux_distribution()]) + ' (' + os.uname()[2] + ')'
except:
return "N/A"
print " Host:", gethost()
print "Manufacturer:", manufacturer()
print " Model:", model()
print " Processor:", processor()
print "Distribution:", distribution()
|
<commit_before><commit_msg>Add python script to print Linux information.<commit_after>
|
#!/usr/bin/env python
#
# Print information about the current computer.
#
import os
import re
from socket import gethostname
from platform import linux_distribution
def gethost():
'''Extract host name'''
try:
return gethostname()
except:
return "NA"
def processor():
'''Extract first processor name'''
try:
with open("/proc/cpuinfo", 'r') as the_file:
info = the_file.read().strip()
for line in info.split("\n"):
if "model name" in line:
return re.sub(".*model name.*:", "", line, 1).strip()
except:
return "N/A"
def manufacturer():
'''Extract manufacturer'''
try:
with open("/sys/class/dmi/id/sys_vendor", 'r') as the_file:
info = the_file.read().strip()
return info
except:
return "N/A"
def model():
'''Extract model'''
try:
with open("/sys/class/dmi/id/product_name", 'r') as the_file:
info = the_file.read().strip()
return info
except:
return "N/A"
def distribution():
'''Format distribution'''
try:
return ' '.join([str(x) for x in linux_distribution()]) + ' (' + os.uname()[2] + ')'
except:
return "N/A"
print " Host:", gethost()
print "Manufacturer:", manufacturer()
print " Model:", model()
print " Processor:", processor()
print "Distribution:", distribution()
|
Add python script to print Linux information.#!/usr/bin/env python
#
# Print information about the current computer.
#
import os
import re
from socket import gethostname
from platform import linux_distribution
def gethost():
'''Extract host name'''
try:
return gethostname()
except:
return "NA"
def processor():
'''Extract first processor name'''
try:
with open("/proc/cpuinfo", 'r') as the_file:
info = the_file.read().strip()
for line in info.split("\n"):
if "model name" in line:
return re.sub(".*model name.*:", "", line, 1).strip()
except:
return "N/A"
def manufacturer():
'''Extract manufacturer'''
try:
with open("/sys/class/dmi/id/sys_vendor", 'r') as the_file:
info = the_file.read().strip()
return info
except:
return "N/A"
def model():
'''Extract model'''
try:
with open("/sys/class/dmi/id/product_name", 'r') as the_file:
info = the_file.read().strip()
return info
except:
return "N/A"
def distribution():
'''Format distribution'''
try:
return ' '.join([str(x) for x in linux_distribution()]) + ' (' + os.uname()[2] + ')'
except:
return "N/A"
print " Host:", gethost()
print "Manufacturer:", manufacturer()
print " Model:", model()
print " Processor:", processor()
print "Distribution:", distribution()
|
<commit_before><commit_msg>Add python script to print Linux information.<commit_after>#!/usr/bin/env python
#
# Print information about the current computer.
#
import os
import re
from socket import gethostname
from platform import linux_distribution
def gethost():
'''Extract host name'''
try:
return gethostname()
except:
return "NA"
def processor():
'''Extract first processor name'''
try:
with open("/proc/cpuinfo", 'r') as the_file:
info = the_file.read().strip()
for line in info.split("\n"):
if "model name" in line:
return re.sub(".*model name.*:", "", line, 1).strip()
except:
return "N/A"
def manufacturer():
'''Extract manufacturer'''
try:
with open("/sys/class/dmi/id/sys_vendor", 'r') as the_file:
info = the_file.read().strip()
return info
except:
return "N/A"
def model():
'''Extract model'''
try:
with open("/sys/class/dmi/id/product_name", 'r') as the_file:
info = the_file.read().strip()
return info
except:
return "N/A"
def distribution():
'''Format distribution'''
try:
return ' '.join([str(x) for x in linux_distribution()]) + ' (' + os.uname()[2] + ')'
except:
return "N/A"
print " Host:", gethost()
print "Manufacturer:", manufacturer()
print " Model:", model()
print " Processor:", processor()
print "Distribution:", distribution()
|
|
05dd74f3721cb1997f45e3931d50329473b62df3
|
web/users/migrations/0003_alter_user.py
|
web/users/migrations/0003_alter_user.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.core.validators
import django.contrib.auth.models
class Migration(migrations.Migration):
dependencies = [
('users', '0002_auto_20150814_0805'),
]
operations = [
migrations.AlterModelManagers(
name='user',
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.AlterField(
model_name='user',
name='email',
field=models.EmailField(max_length=254, verbose_name='email address', blank=True),
),
migrations.AlterField(
model_name='user',
name='groups',
field=models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Group', blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', verbose_name='groups'),
),
migrations.AlterField(
model_name='user',
name='last_login',
field=models.DateTimeField(null=True, verbose_name='last login', blank=True),
),
migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, max_length=30, validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.', 'invalid')], help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', unique=True, verbose_name='username'),
),
]
|
Add a migration for users.User model
|
Add a migration for users.User model
|
Python
|
agpl-3.0
|
matijapretnar/projekt-tomo,ul-fmf/projekt-tomo,ul-fmf/projekt-tomo,matijapretnar/projekt-tomo,ul-fmf/projekt-tomo,ul-fmf/projekt-tomo,matijapretnar/projekt-tomo,ul-fmf/projekt-tomo,matijapretnar/projekt-tomo,ul-fmf/projekt-tomo,matijapretnar/projekt-tomo
|
Add a migration for users.User model
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.core.validators
import django.contrib.auth.models
class Migration(migrations.Migration):
dependencies = [
('users', '0002_auto_20150814_0805'),
]
operations = [
migrations.AlterModelManagers(
name='user',
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.AlterField(
model_name='user',
name='email',
field=models.EmailField(max_length=254, verbose_name='email address', blank=True),
),
migrations.AlterField(
model_name='user',
name='groups',
field=models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Group', blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', verbose_name='groups'),
),
migrations.AlterField(
model_name='user',
name='last_login',
field=models.DateTimeField(null=True, verbose_name='last login', blank=True),
),
migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, max_length=30, validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.', 'invalid')], help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', unique=True, verbose_name='username'),
),
]
|
<commit_before><commit_msg>Add a migration for users.User model<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.core.validators
import django.contrib.auth.models
class Migration(migrations.Migration):
dependencies = [
('users', '0002_auto_20150814_0805'),
]
operations = [
migrations.AlterModelManagers(
name='user',
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.AlterField(
model_name='user',
name='email',
field=models.EmailField(max_length=254, verbose_name='email address', blank=True),
),
migrations.AlterField(
model_name='user',
name='groups',
field=models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Group', blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', verbose_name='groups'),
),
migrations.AlterField(
model_name='user',
name='last_login',
field=models.DateTimeField(null=True, verbose_name='last login', blank=True),
),
migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, max_length=30, validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.', 'invalid')], help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', unique=True, verbose_name='username'),
),
]
|
Add a migration for users.User model# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.core.validators
import django.contrib.auth.models
class Migration(migrations.Migration):
dependencies = [
('users', '0002_auto_20150814_0805'),
]
operations = [
migrations.AlterModelManagers(
name='user',
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.AlterField(
model_name='user',
name='email',
field=models.EmailField(max_length=254, verbose_name='email address', blank=True),
),
migrations.AlterField(
model_name='user',
name='groups',
field=models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Group', blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', verbose_name='groups'),
),
migrations.AlterField(
model_name='user',
name='last_login',
field=models.DateTimeField(null=True, verbose_name='last login', blank=True),
),
migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, max_length=30, validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.', 'invalid')], help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', unique=True, verbose_name='username'),
),
]
|
<commit_before><commit_msg>Add a migration for users.User model<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.core.validators
import django.contrib.auth.models
class Migration(migrations.Migration):
dependencies = [
('users', '0002_auto_20150814_0805'),
]
operations = [
migrations.AlterModelManagers(
name='user',
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.AlterField(
model_name='user',
name='email',
field=models.EmailField(max_length=254, verbose_name='email address', blank=True),
),
migrations.AlterField(
model_name='user',
name='groups',
field=models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Group', blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', verbose_name='groups'),
),
migrations.AlterField(
model_name='user',
name='last_login',
field=models.DateTimeField(null=True, verbose_name='last login', blank=True),
),
migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, max_length=30, validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.', 'invalid')], help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', unique=True, verbose_name='username'),
),
]
|
|
d216f2c2f6f6a8e94dbe0c79dafe03f1a4f9a886
|
tests/test_rest_track_history.py
|
tests/test_rest_track_history.py
|
"""
.. Copyright 2017 EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os
import tempfile
import json
import pytest
from context import app
@pytest.fixture
def client(request):
"""
Defines the client object to make requests against
"""
db_fd, app.APP.config['DATABASE'] = tempfile.mkstemp()
app.APP.config['TESTING'] = True
client = app.APP.test_client()
def teardown():
"""
Close the client once testing has completed
"""
os.close(db_fd)
os.unlink(app.APP.config['DATABASE'])
request.addfinalizer(teardown)
return client
def test_track_history_01(client):
"""
Test that specifying a user_id returns information
"""
users = ["adam", "ben", "chris", "denis", "eric", "test"]
for user in users:
rest_value = client.get('/mug/api/dmp/tracks?user_id=' + user)
results = json.loads(rest_value.data)
#_run_tests(results)
for result in results['files']:
print(result['_id'])
rest_value = client.get('/mug/api/dmp/trackHistory?user_id=' + user + '&file_id=' + result['_id'])
history_results = json.loads(rest_value.data)
print(history_results)
assert 'history_files' in history_results
|
Test for ensuring that the history functions are able to return
|
Test for ensuring that the history functions are able to return
|
Python
|
apache-2.0
|
Multiscale-Genomics/mg-rest-dm
|
Test for ensuring that the history functions are able to return
|
"""
.. Copyright 2017 EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os
import tempfile
import json
import pytest
from context import app
@pytest.fixture
def client(request):
"""
Defines the client object to make requests against
"""
db_fd, app.APP.config['DATABASE'] = tempfile.mkstemp()
app.APP.config['TESTING'] = True
client = app.APP.test_client()
def teardown():
"""
Close the client once testing has completed
"""
os.close(db_fd)
os.unlink(app.APP.config['DATABASE'])
request.addfinalizer(teardown)
return client
def test_track_history_01(client):
"""
Test that specifying a user_id returns information
"""
users = ["adam", "ben", "chris", "denis", "eric", "test"]
for user in users:
rest_value = client.get('/mug/api/dmp/tracks?user_id=' + user)
results = json.loads(rest_value.data)
#_run_tests(results)
for result in results['files']:
print(result['_id'])
rest_value = client.get('/mug/api/dmp/trackHistory?user_id=' + user + '&file_id=' + result['_id'])
history_results = json.loads(rest_value.data)
print(history_results)
assert 'history_files' in history_results
|
<commit_before><commit_msg>Test for ensuring that the history functions are able to return<commit_after>
|
"""
.. Copyright 2017 EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os
import tempfile
import json
import pytest
from context import app
@pytest.fixture
def client(request):
"""
Defines the client object to make requests against
"""
db_fd, app.APP.config['DATABASE'] = tempfile.mkstemp()
app.APP.config['TESTING'] = True
client = app.APP.test_client()
def teardown():
"""
Close the client once testing has completed
"""
os.close(db_fd)
os.unlink(app.APP.config['DATABASE'])
request.addfinalizer(teardown)
return client
def test_track_history_01(client):
"""
Test that specifying a user_id returns information
"""
users = ["adam", "ben", "chris", "denis", "eric", "test"]
for user in users:
rest_value = client.get('/mug/api/dmp/tracks?user_id=' + user)
results = json.loads(rest_value.data)
#_run_tests(results)
for result in results['files']:
print(result['_id'])
rest_value = client.get('/mug/api/dmp/trackHistory?user_id=' + user + '&file_id=' + result['_id'])
history_results = json.loads(rest_value.data)
print(history_results)
assert 'history_files' in history_results
|
Test for ensuring that the history functions are able to return"""
.. Copyright 2017 EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os
import tempfile
import json
import pytest
from context import app
@pytest.fixture
def client(request):
"""
Defines the client object to make requests against
"""
db_fd, app.APP.config['DATABASE'] = tempfile.mkstemp()
app.APP.config['TESTING'] = True
client = app.APP.test_client()
def teardown():
"""
Close the client once testing has completed
"""
os.close(db_fd)
os.unlink(app.APP.config['DATABASE'])
request.addfinalizer(teardown)
return client
def test_track_history_01(client):
"""
Test that specifying a user_id returns information
"""
users = ["adam", "ben", "chris", "denis", "eric", "test"]
for user in users:
rest_value = client.get('/mug/api/dmp/tracks?user_id=' + user)
results = json.loads(rest_value.data)
#_run_tests(results)
for result in results['files']:
print(result['_id'])
rest_value = client.get('/mug/api/dmp/trackHistory?user_id=' + user + '&file_id=' + result['_id'])
history_results = json.loads(rest_value.data)
print(history_results)
assert 'history_files' in history_results
|
<commit_before><commit_msg>Test for ensuring that the history functions are able to return<commit_after>"""
.. Copyright 2017 EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os
import tempfile
import json
import pytest
from context import app
@pytest.fixture
def client(request):
"""
Defines the client object to make requests against
"""
db_fd, app.APP.config['DATABASE'] = tempfile.mkstemp()
app.APP.config['TESTING'] = True
client = app.APP.test_client()
def teardown():
"""
Close the client once testing has completed
"""
os.close(db_fd)
os.unlink(app.APP.config['DATABASE'])
request.addfinalizer(teardown)
return client
def test_track_history_01(client):
"""
Test that specifying a user_id returns information
"""
users = ["adam", "ben", "chris", "denis", "eric", "test"]
for user in users:
rest_value = client.get('/mug/api/dmp/tracks?user_id=' + user)
results = json.loads(rest_value.data)
#_run_tests(results)
for result in results['files']:
print(result['_id'])
rest_value = client.get('/mug/api/dmp/trackHistory?user_id=' + user + '&file_id=' + result['_id'])
history_results = json.loads(rest_value.data)
print(history_results)
assert 'history_files' in history_results
|
|
e095cbdc5f6c74b54a518131692d5ab993c4fc9c
|
repo_to_submodule.py
|
repo_to_submodule.py
|
#!/usr/bin/python
import os
import sys
import xml.etree.ElementTree as ET
from termcolor import colored
def repo_to_submodule(superproject):
# FIXME remove this line
os.system("rm " + superproject + " -rf")
os.mkdir(superproject)
MANIFEST_XML = "manifest.xml"
# 1. Create a manifest from repo
if os.system("repo manifest -r -o " + superproject + "/" + MANIFEST_XML) != 0:
print "Failed to create a repo manifest. Are you sure we are in a repo project folder?"
sys.exit(-1)
# 2. Initialise the git superproject
os.chdir(superproject)
os.system("git init")
# 3. Add git submodules according to the hash in manifest.xml
tree = ET.parse(MANIFEST_XML)
root = tree.getroot()
# Read fetch
remote = root.find("remote")
fetch = remote.get("fetch")
# Iterate through all projects
for project in root.findall("project"):
os.system("git submodule add " + fetch + project.get("name") + " " + project.get("path"))
# Save current working directory
pwd = os.getcwd()
os.chdir(project.get("path"))
os.system("git reset --hard " + project.get("revision"))
# Get back to the root of superproject
os.chdir(pwd)
# Remove manifest.xml
os.remove(MANIFEST_XML)
print colored("Success!\nPlease go to " + superproject + " and commit your superproject", "green")
if __name__ == "__main__":
if(len(sys.argv) < 2):
print "Usage: repo_to_submodule.py GIT_SUPERPROJECT_NAME"
sys.exit(-1)
repo_to_submodule(sys.argv[1])
|
Create a git superproject and add submodules
|
Create a git superproject and add submodules
It does the following in order:
1. Generate a manifest.xml for repo
2. Create a git superproject
3. Add repo project as git submodules
|
Python
|
apache-2.0
|
clarkli86/repo_to_submodule
|
Create a git superproject and add submodules
It does the following in order:
1. Generate a manifest.xml for repo
2. Create a git superproject
3. Add repo project as git submodules
|
#!/usr/bin/python
import os
import sys
import xml.etree.ElementTree as ET
from termcolor import colored
def repo_to_submodule(superproject):
# FIXME remove this line
os.system("rm " + superproject + " -rf")
os.mkdir(superproject)
MANIFEST_XML = "manifest.xml"
# 1. Create a manifest from repo
if os.system("repo manifest -r -o " + superproject + "/" + MANIFEST_XML) != 0:
print "Failed to create a repo manifest. Are you sure we are in a repo project folder?"
sys.exit(-1)
# 2. Initialise the git superproject
os.chdir(superproject)
os.system("git init")
# 3. Add git submodules according to the hash in manifest.xml
tree = ET.parse(MANIFEST_XML)
root = tree.getroot()
# Read fetch
remote = root.find("remote")
fetch = remote.get("fetch")
# Iterate through all projects
for project in root.findall("project"):
os.system("git submodule add " + fetch + project.get("name") + " " + project.get("path"))
# Save current working directory
pwd = os.getcwd()
os.chdir(project.get("path"))
os.system("git reset --hard " + project.get("revision"))
# Get back to the root of superproject
os.chdir(pwd)
# Remove manifest.xml
os.remove(MANIFEST_XML)
print colored("Success!\nPlease go to " + superproject + " and commit your superproject", "green")
if __name__ == "__main__":
if(len(sys.argv) < 2):
print "Usage: repo_to_submodule.py GIT_SUPERPROJECT_NAME"
sys.exit(-1)
repo_to_submodule(sys.argv[1])
|
<commit_before><commit_msg>Create a git superproject and add submodules
It does the following in order:
1. Generate a manifest.xml for repo
2. Create a git superproject
3. Add repo project as git submodules<commit_after>
|
#!/usr/bin/python
import os
import sys
import xml.etree.ElementTree as ET
from termcolor import colored
def repo_to_submodule(superproject):
# FIXME remove this line
os.system("rm " + superproject + " -rf")
os.mkdir(superproject)
MANIFEST_XML = "manifest.xml"
# 1. Create a manifest from repo
if os.system("repo manifest -r -o " + superproject + "/" + MANIFEST_XML) != 0:
print "Failed to create a repo manifest. Are you sure we are in a repo project folder?"
sys.exit(-1)
# 2. Initialise the git superproject
os.chdir(superproject)
os.system("git init")
# 3. Add git submodules according to the hash in manifest.xml
tree = ET.parse(MANIFEST_XML)
root = tree.getroot()
# Read fetch
remote = root.find("remote")
fetch = remote.get("fetch")
# Iterate through all projects
for project in root.findall("project"):
os.system("git submodule add " + fetch + project.get("name") + " " + project.get("path"))
# Save current working directory
pwd = os.getcwd()
os.chdir(project.get("path"))
os.system("git reset --hard " + project.get("revision"))
# Get back to the root of superproject
os.chdir(pwd)
# Remove manifest.xml
os.remove(MANIFEST_XML)
print colored("Success!\nPlease go to " + superproject + " and commit your superproject", "green")
if __name__ == "__main__":
if(len(sys.argv) < 2):
print "Usage: repo_to_submodule.py GIT_SUPERPROJECT_NAME"
sys.exit(-1)
repo_to_submodule(sys.argv[1])
|
Create a git superproject and add submodules
It does the following in order:
1. Generate a manifest.xml for repo
2. Create a git superproject
3. Add repo project as git submodules#!/usr/bin/python
import os
import sys
import xml.etree.ElementTree as ET
from termcolor import colored
def repo_to_submodule(superproject):
# FIXME remove this line
os.system("rm " + superproject + " -rf")
os.mkdir(superproject)
MANIFEST_XML = "manifest.xml"
# 1. Create a manifest from repo
if os.system("repo manifest -r -o " + superproject + "/" + MANIFEST_XML) != 0:
print "Failed to create a repo manifest. Are you sure we are in a repo project folder?"
sys.exit(-1)
# 2. Initialise the git superproject
os.chdir(superproject)
os.system("git init")
# 3. Add git submodules according to the hash in manifest.xml
tree = ET.parse(MANIFEST_XML)
root = tree.getroot()
# Read fetch
remote = root.find("remote")
fetch = remote.get("fetch")
# Iterate through all projects
for project in root.findall("project"):
os.system("git submodule add " + fetch + project.get("name") + " " + project.get("path"))
# Save current working directory
pwd = os.getcwd()
os.chdir(project.get("path"))
os.system("git reset --hard " + project.get("revision"))
# Get back to the root of superproject
os.chdir(pwd)
# Remove manifest.xml
os.remove(MANIFEST_XML)
print colored("Success!\nPlease go to " + superproject + " and commit your superproject", "green")
if __name__ == "__main__":
if(len(sys.argv) < 2):
print "Usage: repo_to_submodule.py GIT_SUPERPROJECT_NAME"
sys.exit(-1)
repo_to_submodule(sys.argv[1])
|
<commit_before><commit_msg>Create a git superproject and add submodules
It does the following in order:
1. Generate a manifest.xml for repo
2. Create a git superproject
3. Add repo project as git submodules<commit_after>#!/usr/bin/python
import os
import sys
import xml.etree.ElementTree as ET
from termcolor import colored
def repo_to_submodule(superproject):
# FIXME remove this line
os.system("rm " + superproject + " -rf")
os.mkdir(superproject)
MANIFEST_XML = "manifest.xml"
# 1. Create a manifest from repo
if os.system("repo manifest -r -o " + superproject + "/" + MANIFEST_XML) != 0:
print "Failed to create a repo manifest. Are you sure we are in a repo project folder?"
sys.exit(-1)
# 2. Initialise the git superproject
os.chdir(superproject)
os.system("git init")
# 3. Add git submodules according to the hash in manifest.xml
tree = ET.parse(MANIFEST_XML)
root = tree.getroot()
# Read fetch
remote = root.find("remote")
fetch = remote.get("fetch")
# Iterate through all projects
for project in root.findall("project"):
os.system("git submodule add " + fetch + project.get("name") + " " + project.get("path"))
# Save current working directory
pwd = os.getcwd()
os.chdir(project.get("path"))
os.system("git reset --hard " + project.get("revision"))
# Get back to the root of superproject
os.chdir(pwd)
# Remove manifest.xml
os.remove(MANIFEST_XML)
print colored("Success!\nPlease go to " + superproject + " and commit your superproject", "green")
if __name__ == "__main__":
if(len(sys.argv) < 2):
print "Usage: repo_to_submodule.py GIT_SUPERPROJECT_NAME"
sys.exit(-1)
repo_to_submodule(sys.argv[1])
|
|
077699976adb8a6a25f5cb8c4ff3cbb1ae95440c
|
tools/database_dump_converter.py
|
tools/database_dump_converter.py
|
from sys import argv
from xml.dom.minidom import parse
from base64 import b64decode
def Escape(text):
text = text.replace('\\', '\\\\')
for sample, replace in {'\'': '\\\'', '"': '\\"', '\n': '\\n', '\r': '\\r', '\x00': '\\0', '\x1a': '\\Z'}.iteritems():
text = text.replace(sample, replace);
return text
if len(argv) != 2:
print 'Error: invalid arguments.'
print 'Using: %s filename' % __file__
exit(1)
try:
sql = 'DELETE FROM `diary_points`;\n'
sql += 'INSERT INTO `diary_points` (`date`, `text`, `state`, `check`, `order`) VALUES\n'
dom = parse(argv[1])
for day in dom.getElementsByTagName('day'):
date = day.attributes['date'].value
order = 3
for point in day.getElementsByTagName('point'):
text = Escape(b64decode(point.firstChild.nodeValue)) if len(point.childNodes) == 1 else ''
state = point.attributes['state'].value
check = 1 if point.hasAttribute('check') and point.attributes['check'].value == 'true' else 0
sql += "\t('{0}', '{1}', '{2}', {3}, {4}),\n".format(date, text, state, check, order)
order += 2
sql = sql[:-2] + ';'
with open(argv[1].replace('.xml', '') + '.sql', 'w') as out:
out.write(sql)
except IOError as exception:
print 'Error! %s.' % str(exception)
|
Add database dump convert tool.
|
Add database dump convert tool.
|
Python
|
mit
|
thewizardplusplus/wizard-diary,thewizardplusplus/wizard-diary,thewizardplusplus/wizard-diary,thewizardplusplus/wizard-diary,thewizardplusplus/wizard-diary
|
Add database dump convert tool.
|
from sys import argv
from xml.dom.minidom import parse
from base64 import b64decode
def Escape(text):
text = text.replace('\\', '\\\\')
for sample, replace in {'\'': '\\\'', '"': '\\"', '\n': '\\n', '\r': '\\r', '\x00': '\\0', '\x1a': '\\Z'}.iteritems():
text = text.replace(sample, replace);
return text
if len(argv) != 2:
print 'Error: invalid arguments.'
print 'Using: %s filename' % __file__
exit(1)
try:
sql = 'DELETE FROM `diary_points`;\n'
sql += 'INSERT INTO `diary_points` (`date`, `text`, `state`, `check`, `order`) VALUES\n'
dom = parse(argv[1])
for day in dom.getElementsByTagName('day'):
date = day.attributes['date'].value
order = 3
for point in day.getElementsByTagName('point'):
text = Escape(b64decode(point.firstChild.nodeValue)) if len(point.childNodes) == 1 else ''
state = point.attributes['state'].value
check = 1 if point.hasAttribute('check') and point.attributes['check'].value == 'true' else 0
sql += "\t('{0}', '{1}', '{2}', {3}, {4}),\n".format(date, text, state, check, order)
order += 2
sql = sql[:-2] + ';'
with open(argv[1].replace('.xml', '') + '.sql', 'w') as out:
out.write(sql)
except IOError as exception:
print 'Error! %s.' % str(exception)
|
<commit_before><commit_msg>Add database dump convert tool.<commit_after>
|
from sys import argv
from xml.dom.minidom import parse
from base64 import b64decode
def Escape(text):
text = text.replace('\\', '\\\\')
for sample, replace in {'\'': '\\\'', '"': '\\"', '\n': '\\n', '\r': '\\r', '\x00': '\\0', '\x1a': '\\Z'}.iteritems():
text = text.replace(sample, replace);
return text
if len(argv) != 2:
print 'Error: invalid arguments.'
print 'Using: %s filename' % __file__
exit(1)
try:
sql = 'DELETE FROM `diary_points`;\n'
sql += 'INSERT INTO `diary_points` (`date`, `text`, `state`, `check`, `order`) VALUES\n'
dom = parse(argv[1])
for day in dom.getElementsByTagName('day'):
date = day.attributes['date'].value
order = 3
for point in day.getElementsByTagName('point'):
text = Escape(b64decode(point.firstChild.nodeValue)) if len(point.childNodes) == 1 else ''
state = point.attributes['state'].value
check = 1 if point.hasAttribute('check') and point.attributes['check'].value == 'true' else 0
sql += "\t('{0}', '{1}', '{2}', {3}, {4}),\n".format(date, text, state, check, order)
order += 2
sql = sql[:-2] + ';'
with open(argv[1].replace('.xml', '') + '.sql', 'w') as out:
out.write(sql)
except IOError as exception:
print 'Error! %s.' % str(exception)
|
Add database dump convert tool.from sys import argv
from xml.dom.minidom import parse
from base64 import b64decode
def Escape(text):
text = text.replace('\\', '\\\\')
for sample, replace in {'\'': '\\\'', '"': '\\"', '\n': '\\n', '\r': '\\r', '\x00': '\\0', '\x1a': '\\Z'}.iteritems():
text = text.replace(sample, replace);
return text
if len(argv) != 2:
print 'Error: invalid arguments.'
print 'Using: %s filename' % __file__
exit(1)
try:
sql = 'DELETE FROM `diary_points`;\n'
sql += 'INSERT INTO `diary_points` (`date`, `text`, `state`, `check`, `order`) VALUES\n'
dom = parse(argv[1])
for day in dom.getElementsByTagName('day'):
date = day.attributes['date'].value
order = 3
for point in day.getElementsByTagName('point'):
text = Escape(b64decode(point.firstChild.nodeValue)) if len(point.childNodes) == 1 else ''
state = point.attributes['state'].value
check = 1 if point.hasAttribute('check') and point.attributes['check'].value == 'true' else 0
sql += "\t('{0}', '{1}', '{2}', {3}, {4}),\n".format(date, text, state, check, order)
order += 2
sql = sql[:-2] + ';'
with open(argv[1].replace('.xml', '') + '.sql', 'w') as out:
out.write(sql)
except IOError as exception:
print 'Error! %s.' % str(exception)
|
<commit_before><commit_msg>Add database dump convert tool.<commit_after>from sys import argv
from xml.dom.minidom import parse
from base64 import b64decode
def Escape(text):
text = text.replace('\\', '\\\\')
for sample, replace in {'\'': '\\\'', '"': '\\"', '\n': '\\n', '\r': '\\r', '\x00': '\\0', '\x1a': '\\Z'}.iteritems():
text = text.replace(sample, replace);
return text
if len(argv) != 2:
print 'Error: invalid arguments.'
print 'Using: %s filename' % __file__
exit(1)
try:
sql = 'DELETE FROM `diary_points`;\n'
sql += 'INSERT INTO `diary_points` (`date`, `text`, `state`, `check`, `order`) VALUES\n'
dom = parse(argv[1])
for day in dom.getElementsByTagName('day'):
date = day.attributes['date'].value
order = 3
for point in day.getElementsByTagName('point'):
text = Escape(b64decode(point.firstChild.nodeValue)) if len(point.childNodes) == 1 else ''
state = point.attributes['state'].value
check = 1 if point.hasAttribute('check') and point.attributes['check'].value == 'true' else 0
sql += "\t('{0}', '{1}', '{2}', {3}, {4}),\n".format(date, text, state, check, order)
order += 2
sql = sql[:-2] + ';'
with open(argv[1].replace('.xml', '') + '.sql', 'w') as out:
out.write(sql)
except IOError as exception:
print 'Error! %s.' % str(exception)
|
|
87818a138408752217d6a90041db5273116db228
|
python/opencv/opencv_2/image_precessing/hough_circle_transform.py
|
python/opencv/opencv_2/image_precessing/hough_circle_transform.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)
"""
OpenCV - Hough Circle Transform: find circles in an image.
Required: opencv library (Debian: aptitude install python-opencv)
See: https://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_imgproc/py_houghcircles/py_houghcircles.html#hough-circles
"""
from __future__ import print_function
import cv2 as cv
import numpy as np
def main():
device_number = 0
video_capture = cv.VideoCapture(device_number)
# Reduce the video resolution
video_capture.set(cv.cv.CV_CAP_PROP_FRAME_WIDTH, 320)
video_capture.set(cv.cv.CV_CAP_PROP_FRAME_HEIGHT, 240)
print("Press q to quit.")
while(True):
# Capture frame-by-frame.
# 'ret' is a boolean ('True' if frame is read correctly, 'False' otherwise).
# 'img_bgr' is a numpy array.
ret, img_bgr = video_capture.read()
# IMAGE PROCESSING ################################
# Convert BGR color space to grayscale
img_gray = cv.cvtColor(img_bgr, cv.COLOR_BGR2GRAY)
# Hough Circle Transform
circles = cv.HoughCircles(img_gray, cv.cv.CV_HOUGH_GRADIENT, 1, 20, param1=50, param2=30, minRadius=0, maxRadius=0)
# DRAW CIRCLES ####################################
if circles is not None:
circles = np.uint16(np.around(circles))
for i in circles[0,:]:
# draw the outer circle
cv.circle(img_bgr,(i[0],i[1]),i[2],(0,255,0),2)
# draw the center of the circle
cv.circle(img_bgr,(i[0],i[1]),2,(0,0,255),3)
# DISPLAY IMAGES ##################################
# Display the resulting frame (BGR)
cv.imshow('BGR (original)', img_bgr)
# KEYBOARD LISTENER ###############################
if cv.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv.destroyAllWindows()
if __name__ == '__main__':
main()
|
Add a snippet (Python OpenCV).
|
Add a snippet (Python OpenCV).
|
Python
|
mit
|
jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets
|
Add a snippet (Python OpenCV).
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)
"""
OpenCV - Hough Circle Transform: find circles in an image.
Required: opencv library (Debian: aptitude install python-opencv)
See: https://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_imgproc/py_houghcircles/py_houghcircles.html#hough-circles
"""
from __future__ import print_function
import cv2 as cv
import numpy as np
def main():
device_number = 0
video_capture = cv.VideoCapture(device_number)
# Reduce the video resolution
video_capture.set(cv.cv.CV_CAP_PROP_FRAME_WIDTH, 320)
video_capture.set(cv.cv.CV_CAP_PROP_FRAME_HEIGHT, 240)
print("Press q to quit.")
while(True):
# Capture frame-by-frame.
# 'ret' is a boolean ('True' if frame is read correctly, 'False' otherwise).
# 'img_bgr' is a numpy array.
ret, img_bgr = video_capture.read()
# IMAGE PROCESSING ################################
# Convert BGR color space to grayscale
img_gray = cv.cvtColor(img_bgr, cv.COLOR_BGR2GRAY)
# Hough Circle Transform
circles = cv.HoughCircles(img_gray, cv.cv.CV_HOUGH_GRADIENT, 1, 20, param1=50, param2=30, minRadius=0, maxRadius=0)
# DRAW CIRCLES ####################################
if circles is not None:
circles = np.uint16(np.around(circles))
for i in circles[0,:]:
# draw the outer circle
cv.circle(img_bgr,(i[0],i[1]),i[2],(0,255,0),2)
# draw the center of the circle
cv.circle(img_bgr,(i[0],i[1]),2,(0,0,255),3)
# DISPLAY IMAGES ##################################
# Display the resulting frame (BGR)
cv.imshow('BGR (original)', img_bgr)
# KEYBOARD LISTENER ###############################
if cv.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv.destroyAllWindows()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a snippet (Python OpenCV).<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)
"""
OpenCV - Hough Circle Transform: find circles in an image.
Required: opencv library (Debian: aptitude install python-opencv)
See: https://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_imgproc/py_houghcircles/py_houghcircles.html#hough-circles
"""
from __future__ import print_function
import cv2 as cv
import numpy as np
def main():
device_number = 0
video_capture = cv.VideoCapture(device_number)
# Reduce the video resolution
video_capture.set(cv.cv.CV_CAP_PROP_FRAME_WIDTH, 320)
video_capture.set(cv.cv.CV_CAP_PROP_FRAME_HEIGHT, 240)
print("Press q to quit.")
while(True):
# Capture frame-by-frame.
# 'ret' is a boolean ('True' if frame is read correctly, 'False' otherwise).
# 'img_bgr' is a numpy array.
ret, img_bgr = video_capture.read()
# IMAGE PROCESSING ################################
# Convert BGR color space to grayscale
img_gray = cv.cvtColor(img_bgr, cv.COLOR_BGR2GRAY)
# Hough Circle Transform
circles = cv.HoughCircles(img_gray, cv.cv.CV_HOUGH_GRADIENT, 1, 20, param1=50, param2=30, minRadius=0, maxRadius=0)
# DRAW CIRCLES ####################################
if circles is not None:
circles = np.uint16(np.around(circles))
for i in circles[0,:]:
# draw the outer circle
cv.circle(img_bgr,(i[0],i[1]),i[2],(0,255,0),2)
# draw the center of the circle
cv.circle(img_bgr,(i[0],i[1]),2,(0,0,255),3)
# DISPLAY IMAGES ##################################
# Display the resulting frame (BGR)
cv.imshow('BGR (original)', img_bgr)
# KEYBOARD LISTENER ###############################
if cv.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv.destroyAllWindows()
if __name__ == '__main__':
main()
|
Add a snippet (Python OpenCV).#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)
"""
OpenCV - Hough Circle Transform: find circles in an image.
Required: opencv library (Debian: aptitude install python-opencv)
See: https://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_imgproc/py_houghcircles/py_houghcircles.html#hough-circles
"""
from __future__ import print_function
import cv2 as cv
import numpy as np
def main():
device_number = 0
video_capture = cv.VideoCapture(device_number)
# Reduce the video resolution
video_capture.set(cv.cv.CV_CAP_PROP_FRAME_WIDTH, 320)
video_capture.set(cv.cv.CV_CAP_PROP_FRAME_HEIGHT, 240)
print("Press q to quit.")
while(True):
# Capture frame-by-frame.
# 'ret' is a boolean ('True' if frame is read correctly, 'False' otherwise).
# 'img_bgr' is a numpy array.
ret, img_bgr = video_capture.read()
# IMAGE PROCESSING ################################
# Convert BGR color space to grayscale
img_gray = cv.cvtColor(img_bgr, cv.COLOR_BGR2GRAY)
# Hough Circle Transform
circles = cv.HoughCircles(img_gray, cv.cv.CV_HOUGH_GRADIENT, 1, 20, param1=50, param2=30, minRadius=0, maxRadius=0)
# DRAW CIRCLES ####################################
if circles is not None:
circles = np.uint16(np.around(circles))
for i in circles[0,:]:
# draw the outer circle
cv.circle(img_bgr,(i[0],i[1]),i[2],(0,255,0),2)
# draw the center of the circle
cv.circle(img_bgr,(i[0],i[1]),2,(0,0,255),3)
# DISPLAY IMAGES ##################################
# Display the resulting frame (BGR)
cv.imshow('BGR (original)', img_bgr)
# KEYBOARD LISTENER ###############################
if cv.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv.destroyAllWindows()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a snippet (Python OpenCV).<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)
"""
OpenCV - Hough Circle Transform: find circles in an image.
Required: opencv library (Debian: aptitude install python-opencv)
See: https://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_imgproc/py_houghcircles/py_houghcircles.html#hough-circles
"""
from __future__ import print_function
import cv2 as cv
import numpy as np
def main():
device_number = 0
video_capture = cv.VideoCapture(device_number)
# Reduce the video resolution
video_capture.set(cv.cv.CV_CAP_PROP_FRAME_WIDTH, 320)
video_capture.set(cv.cv.CV_CAP_PROP_FRAME_HEIGHT, 240)
print("Press q to quit.")
while(True):
# Capture frame-by-frame.
# 'ret' is a boolean ('True' if frame is read correctly, 'False' otherwise).
# 'img_bgr' is a numpy array.
ret, img_bgr = video_capture.read()
# IMAGE PROCESSING ################################
# Convert BGR color space to grayscale
img_gray = cv.cvtColor(img_bgr, cv.COLOR_BGR2GRAY)
# Hough Circle Transform
circles = cv.HoughCircles(img_gray, cv.cv.CV_HOUGH_GRADIENT, 1, 20, param1=50, param2=30, minRadius=0, maxRadius=0)
# DRAW CIRCLES ####################################
if circles is not None:
circles = np.uint16(np.around(circles))
for i in circles[0,:]:
# draw the outer circle
cv.circle(img_bgr,(i[0],i[1]),i[2],(0,255,0),2)
# draw the center of the circle
cv.circle(img_bgr,(i[0],i[1]),2,(0,0,255),3)
# DISPLAY IMAGES ##################################
# Display the resulting frame (BGR)
cv.imshow('BGR (original)', img_bgr)
# KEYBOARD LISTENER ###############################
if cv.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv.destroyAllWindows()
if __name__ == '__main__':
main()
|
|
042844d918d34a16f18ea9ab2c09694e81c25623
|
bluebottle/categories/migrations/0008_authenticated-permissions.py
|
bluebottle/categories/migrations/0008_authenticated-permissions.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2019-02-22 08:58
from __future__ import unicode_literals
from django.db import migrations
from bluebottle.utils.utils import update_group_permissions
def add_group_permissions(apps, schema_editor):
group_perms = {
'Authenticated': {
'perms': ('api_read_category', )
}
}
update_group_permissions('categories', group_perms, apps)
class Migration(migrations.Migration):
dependencies = [
('categories', '0007_auto_20180907_1131'),
]
operations = [
migrations.RunPython(add_group_permissions)
]
|
Add read categories permission to authenticated users
|
Add read categories permission to authenticated users
|
Python
|
bsd-3-clause
|
onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle
|
Add read categories permission to authenticated users
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2019-02-22 08:58
from __future__ import unicode_literals
from django.db import migrations
from bluebottle.utils.utils import update_group_permissions
def add_group_permissions(apps, schema_editor):
group_perms = {
'Authenticated': {
'perms': ('api_read_category', )
}
}
update_group_permissions('categories', group_perms, apps)
class Migration(migrations.Migration):
dependencies = [
('categories', '0007_auto_20180907_1131'),
]
operations = [
migrations.RunPython(add_group_permissions)
]
|
<commit_before><commit_msg>Add read categories permission to authenticated users<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2019-02-22 08:58
from __future__ import unicode_literals
from django.db import migrations
from bluebottle.utils.utils import update_group_permissions
def add_group_permissions(apps, schema_editor):
group_perms = {
'Authenticated': {
'perms': ('api_read_category', )
}
}
update_group_permissions('categories', group_perms, apps)
class Migration(migrations.Migration):
dependencies = [
('categories', '0007_auto_20180907_1131'),
]
operations = [
migrations.RunPython(add_group_permissions)
]
|
Add read categories permission to authenticated users# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2019-02-22 08:58
from __future__ import unicode_literals
from django.db import migrations
from bluebottle.utils.utils import update_group_permissions
def add_group_permissions(apps, schema_editor):
group_perms = {
'Authenticated': {
'perms': ('api_read_category', )
}
}
update_group_permissions('categories', group_perms, apps)
class Migration(migrations.Migration):
dependencies = [
('categories', '0007_auto_20180907_1131'),
]
operations = [
migrations.RunPython(add_group_permissions)
]
|
<commit_before><commit_msg>Add read categories permission to authenticated users<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2019-02-22 08:58
from __future__ import unicode_literals
from django.db import migrations
from bluebottle.utils.utils import update_group_permissions
def add_group_permissions(apps, schema_editor):
group_perms = {
'Authenticated': {
'perms': ('api_read_category', )
}
}
update_group_permissions('categories', group_perms, apps)
class Migration(migrations.Migration):
dependencies = [
('categories', '0007_auto_20180907_1131'),
]
operations = [
migrations.RunPython(add_group_permissions)
]
|
|
388ca425168572cbcbaa3638d78e5f9933380fb7
|
tests/test_config.py
|
tests/test_config.py
|
import logging
import socket
import pytest
from uvicorn import protocols
from uvicorn.config import Config
from uvicorn.middleware.debug import DebugMiddleware
from uvicorn.middleware.wsgi import WSGIMiddleware
async def asgi_app():
pass
def wsgi_app():
pass
def test_debug_app():
config = Config(app=asgi_app, debug=True)
config.load()
assert config.debug is True
assert isinstance(config.loaded_app, DebugMiddleware)
def test_wsgi_app():
config = Config(app=wsgi_app, interface="wsgi")
config.load()
assert isinstance(config.loaded_app, WSGIMiddleware)
assert config.interface == "wsgi"
def test_proxy_headers():
config = Config(app=asgi_app, proxy_headers=True)
config.load()
assert config.proxy_headers is True
def test_app_unimportable():
config = Config(app="no.such:app")
with pytest.raises(ModuleNotFoundError):
config.load()
def test_concrete_http_class():
config = Config(app=asgi_app, http=protocols.http.h11_impl.H11Protocol)
config.load()
assert config.http_protocol_class is protocols.http.h11_impl.H11Protocol
def test_logger():
logger = logging.getLogger("just-for-tests")
config = Config(app=asgi_app, logger=logger)
config.load()
assert config.logger is logger
def test_socket_bind():
config = Config(app=asgi_app)
config.load()
assert isinstance(config.bind_socket(), socket.socket)
|
Add tests for most of Config class
|
Add tests for most of Config class
|
Python
|
bsd-3-clause
|
encode/uvicorn,encode/uvicorn
|
Add tests for most of Config class
|
import logging
import socket
import pytest
from uvicorn import protocols
from uvicorn.config import Config
from uvicorn.middleware.debug import DebugMiddleware
from uvicorn.middleware.wsgi import WSGIMiddleware
async def asgi_app():
pass
def wsgi_app():
pass
def test_debug_app():
config = Config(app=asgi_app, debug=True)
config.load()
assert config.debug is True
assert isinstance(config.loaded_app, DebugMiddleware)
def test_wsgi_app():
config = Config(app=wsgi_app, interface="wsgi")
config.load()
assert isinstance(config.loaded_app, WSGIMiddleware)
assert config.interface == "wsgi"
def test_proxy_headers():
config = Config(app=asgi_app, proxy_headers=True)
config.load()
assert config.proxy_headers is True
def test_app_unimportable():
config = Config(app="no.such:app")
with pytest.raises(ModuleNotFoundError):
config.load()
def test_concrete_http_class():
config = Config(app=asgi_app, http=protocols.http.h11_impl.H11Protocol)
config.load()
assert config.http_protocol_class is protocols.http.h11_impl.H11Protocol
def test_logger():
logger = logging.getLogger("just-for-tests")
config = Config(app=asgi_app, logger=logger)
config.load()
assert config.logger is logger
def test_socket_bind():
config = Config(app=asgi_app)
config.load()
assert isinstance(config.bind_socket(), socket.socket)
|
<commit_before><commit_msg>Add tests for most of Config class<commit_after>
|
import logging
import socket
import pytest
from uvicorn import protocols
from uvicorn.config import Config
from uvicorn.middleware.debug import DebugMiddleware
from uvicorn.middleware.wsgi import WSGIMiddleware
async def asgi_app():
pass
def wsgi_app():
pass
def test_debug_app():
config = Config(app=asgi_app, debug=True)
config.load()
assert config.debug is True
assert isinstance(config.loaded_app, DebugMiddleware)
def test_wsgi_app():
config = Config(app=wsgi_app, interface="wsgi")
config.load()
assert isinstance(config.loaded_app, WSGIMiddleware)
assert config.interface == "wsgi"
def test_proxy_headers():
config = Config(app=asgi_app, proxy_headers=True)
config.load()
assert config.proxy_headers is True
def test_app_unimportable():
config = Config(app="no.such:app")
with pytest.raises(ModuleNotFoundError):
config.load()
def test_concrete_http_class():
config = Config(app=asgi_app, http=protocols.http.h11_impl.H11Protocol)
config.load()
assert config.http_protocol_class is protocols.http.h11_impl.H11Protocol
def test_logger():
logger = logging.getLogger("just-for-tests")
config = Config(app=asgi_app, logger=logger)
config.load()
assert config.logger is logger
def test_socket_bind():
config = Config(app=asgi_app)
config.load()
assert isinstance(config.bind_socket(), socket.socket)
|
Add tests for most of Config classimport logging
import socket
import pytest
from uvicorn import protocols
from uvicorn.config import Config
from uvicorn.middleware.debug import DebugMiddleware
from uvicorn.middleware.wsgi import WSGIMiddleware
async def asgi_app():
pass
def wsgi_app():
pass
def test_debug_app():
config = Config(app=asgi_app, debug=True)
config.load()
assert config.debug is True
assert isinstance(config.loaded_app, DebugMiddleware)
def test_wsgi_app():
config = Config(app=wsgi_app, interface="wsgi")
config.load()
assert isinstance(config.loaded_app, WSGIMiddleware)
assert config.interface == "wsgi"
def test_proxy_headers():
config = Config(app=asgi_app, proxy_headers=True)
config.load()
assert config.proxy_headers is True
def test_app_unimportable():
config = Config(app="no.such:app")
with pytest.raises(ModuleNotFoundError):
config.load()
def test_concrete_http_class():
config = Config(app=asgi_app, http=protocols.http.h11_impl.H11Protocol)
config.load()
assert config.http_protocol_class is protocols.http.h11_impl.H11Protocol
def test_logger():
logger = logging.getLogger("just-for-tests")
config = Config(app=asgi_app, logger=logger)
config.load()
assert config.logger is logger
def test_socket_bind():
config = Config(app=asgi_app)
config.load()
assert isinstance(config.bind_socket(), socket.socket)
|
<commit_before><commit_msg>Add tests for most of Config class<commit_after>import logging
import socket
import pytest
from uvicorn import protocols
from uvicorn.config import Config
from uvicorn.middleware.debug import DebugMiddleware
from uvicorn.middleware.wsgi import WSGIMiddleware
async def asgi_app():
pass
def wsgi_app():
pass
def test_debug_app():
config = Config(app=asgi_app, debug=True)
config.load()
assert config.debug is True
assert isinstance(config.loaded_app, DebugMiddleware)
def test_wsgi_app():
config = Config(app=wsgi_app, interface="wsgi")
config.load()
assert isinstance(config.loaded_app, WSGIMiddleware)
assert config.interface == "wsgi"
def test_proxy_headers():
config = Config(app=asgi_app, proxy_headers=True)
config.load()
assert config.proxy_headers is True
def test_app_unimportable():
config = Config(app="no.such:app")
with pytest.raises(ModuleNotFoundError):
config.load()
def test_concrete_http_class():
config = Config(app=asgi_app, http=protocols.http.h11_impl.H11Protocol)
config.load()
assert config.http_protocol_class is protocols.http.h11_impl.H11Protocol
def test_logger():
logger = logging.getLogger("just-for-tests")
config = Config(app=asgi_app, logger=logger)
config.load()
assert config.logger is logger
def test_socket_bind():
config = Config(app=asgi_app)
config.load()
assert isinstance(config.bind_socket(), socket.socket)
|
|
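A brief aside on the uvicorn Config test record above: the pieces those tests exercise compose roughly as in the sketch below. It is illustrative only, uses nothing beyond the constructor arguments and methods that appear in the tests, and the placeholder app mirrors the record's asgi_app stub rather than a real ASGI application.
from uvicorn.config import Config
async def app():  # placeholder coroutine, mirroring the record's asgi_app stub
    pass
config = Config(app=app, proxy_headers=True)
config.load()                 # populates config.loaded_app and the protocol classes
sock = config.bind_socket()   # a bound socket.socket, as test_socket_bind asserts
sock.close()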
41f6dd8259a8e56a157930e809426f5e0e8113eb
|
tests/test_config.py
|
tests/test_config.py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import binascii
import shutil
import tempfile
from timebook.config import parse_config
def test_parse_config(capsys):
seed = binascii.hexlify(os.urandom(4))
fname = os.path.join(tempfile.gettempdir(), 'test-%s' % seed, 'test_timebook_config.ini')
parse_config(fname)
shutil.rmtree(os.path.dirname(fname))
|
Add config file parse test
|
Add config file parse test
Increases coverage to 73% on Python 2.x and 74% on Python 3.x.
|
Python
|
mit
|
imiric/timebook
|
Add config file parse test
Increases coverage to 73% on Python 2.x and 74% on Python 3.x.
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import binascii
import shutil
import tempfile
from timebook.config import parse_config
def test_parse_config(capsys):
seed = binascii.hexlify(os.urandom(4))
fname = os.path.join(tempfile.gettempdir(), 'test-%s' % seed, 'test_timebook_config.ini')
parse_config(fname)
shutil.rmtree(os.path.dirname(fname))
|
<commit_before><commit_msg>Add config file parse test
Increases coverage to 73% on Python 2.x and 74% on Python 3.x.<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import binascii
import shutil
import tempfile
from timebook.config import parse_config
def test_parse_config(capsys):
seed = binascii.hexlify(os.urandom(4))
fname = os.path.join(tempfile.gettempdir(), 'test-%s' % seed, 'test_timebook_config.ini')
parse_config(fname)
shutil.rmtree(os.path.dirname(fname))
|
Add config file parse test
Increases coverage to 73% on Python 2.x and 74% on Python 3.x.# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import binascii
import shutil
import tempfile
from timebook.config import parse_config
def test_parse_config(capsys):
seed = binascii.hexlify(os.urandom(4))
fname = os.path.join(tempfile.gettempdir(), 'test-%s' % seed, 'test_timebook_config.ini')
parse_config(fname)
shutil.rmtree(os.path.dirname(fname))
|
<commit_before><commit_msg>Add config file parse test
Increases coverage to 73% on Python 2.x and 74% on Python 3.x.<commit_after># -*- coding: utf-8 -*-
from __future__ import print_function
import os
import binascii
import shutil
import tempfile
from timebook.config import parse_config
def test_parse_config(capsys):
seed = binascii.hexlify(os.urandom(4))
fname = os.path.join(tempfile.gettempdir(), 'test-%s' % seed, 'test_timebook_config.ini')
parse_config(fname)
shutil.rmtree(os.path.dirname(fname))
|
|
de44f28e266a9e8a730030029d47d6a262e7cf3c
|
utils/h5tobigfile.py
|
utils/h5tobigfile.py
|
import h5py
import bigfile
import logging
import argparse
ap = argparse.ArgumentParser('h5tobigfile')
ap.add_argument("hdf5")
ap.add_argument("bigfile")
ap.add_argument("--verify", action='store_true', default=False)
ap.add_argument("--include", action="append")
ap.add_argument("--exclude", action="append")
def traverse(hobj, operation, prefix=""):
if hasattr(hobj, 'keys'):
for k in hobj.keys():
path = prefix + '/' + k
traverse(hobj[k], operation, path)
if hasattr(hobj, 'dtype'):
if hobj.dtype.names:
for field in hobj.dtype.names:
path = prefix + field + '/'
traverse(hobj[field], operation, path)
else:
operation(hobj, prefix)
def main(ns):
hin = h5py.File(ns.hdf5, mode='r')
bout = bigfile.BigFile(ns.bigfile, create=True)
def create(hobj, path):
print("Copying %s as %s" % (str(hobj), path))
if hobj.dtype.shape and len(hobj.dtype.shape) > 1:
raise TypeError("bigfile cannot handle tensor dtype : %s" % str(hobj.dtype))
bb = bout.create(path, hobj.dtype, size=hobj.size)
bb.attrs['ndarray.shape'] = hobj.shape
bb.write(0, hobj[:].ravel().copy())
traverse(hin, create)
bout.close()
if ns.verify:
bout = bigfile.BigFile(ns.bigfile, create=False)
def verify(hobj, path):
print("Verifying %s against %s" % (str(hobj), path))
bb = bout[path]
assert (hobj.dtype == bb.dtype)
assert (hobj.shape == bb.attrs['ndarray.shape']).all()
assert (hobj[:].flat == bb[:]).all()
traverse(hin, verify)
bout.close()
hin.close()
if __name__ == "__main__" :
ns = ap.parse_args()
main(ns)
|
Add a tool to convert hdf5 to bigfile.
|
Add a tool to convert hdf5 to bigfile.
This shall work for a lot of cases but not all. It's incomplete.
|
Python
|
bsd-2-clause
|
rainwoodman/bigfile,rainwoodman/bigfile,rainwoodman/bigfile
|
Add a tool to convert hdf5 to bigfile.
This shall work for a lot of cases but not all. It's incomplete.
|
import h5py
import bigfile
import logging
import argparse
ap = argparse.ArgumentParser('h5tobigfile')
ap.add_argument("hdf5")
ap.add_argument("bigfile")
ap.add_argument("--verify", action='store_true', default=False)
ap.add_argument("--include", action="append")
ap.add_argument("--exclude", action="append")
def traverse(hobj, operation, prefix=""):
if hasattr(hobj, 'keys'):
for k in hobj.keys():
path = prefix + '/' + k
traverse(hobj[k], operation, path)
if hasattr(hobj, 'dtype'):
if hobj.dtype.names:
for field in hobj.dtype.names:
path = prefix + field + '/'
traverse(hobj[field], operation, path)
else:
operation(hobj, prefix)
def main(ns):
hin = h5py.File(ns.hdf5, mode='r')
bout = bigfile.BigFile(ns.bigfile, create=True)
def create(hobj, path):
print("Copying %s as %s" % (str(hobj), path))
if hobj.dtype.shape and len(hobj.dtype.shape) > 1:
raise TypeError("bigfile cannot handle tensor dtype : %s" % str(hobj.dtype))
bb = bout.create(path, hobj.dtype, size=hobj.size)
bb.attrs['ndarray.shape'] = hobj.shape
bb.write(0, hobj[:].ravel().copy())
traverse(hin, create)
bout.close()
if ns.verify:
bout = bigfile.BigFile(ns.bigfile, create=False)
def verify(hobj, path):
print("Verifying %s against %s" % (str(hobj), path))
bb = bout[path]
assert (hobj.dtype == bb.dtype)
assert (hobj.shape == bb.attrs['ndarray.shape']).all()
assert (hobj[:].flat == bb[:]).all()
traverse(hin, verify)
bout.close()
hin.close()
if __name__ == "__main__" :
ns = ap.parse_args()
main(ns)
|
<commit_before><commit_msg>Add a tool to convert hdf5 to bigfile.
This shall work for a lot of cases but not all. It's incomplete.<commit_after>
|
import h5py
import bigfile
import logging
import argparse
ap = argparse.ArgumentParser('h5tobigfile')
ap.add_argument("hdf5")
ap.add_argument("bigfile")
ap.add_argument("--verify", action='store_true', default=False)
ap.add_argument("--include", action="append")
ap.add_argument("--exclude", action="append")
def traverse(hobj, operation, prefix=""):
if hasattr(hobj, 'keys'):
for k in hobj.keys():
path = prefix + '/' + k
traverse(hobj[k], operation, path)
if hasattr(hobj, 'dtype'):
if hobj.dtype.names:
for field in hobj.dtype.names:
path = prefix + field + '/'
traverse(hobj[field], operation, path)
else:
operation(hobj, prefix)
def main(ns):
hin = h5py.File(ns.hdf5, mode='r')
bout = bigfile.BigFile(ns.bigfile, create=True)
def create(hobj, path):
print("Copying %s as %s" % (str(hobj), path))
if hobj.dtype.shape and len(hobj.dtype.shape) > 1:
raise TypeError("bigfile cannot handle tensor dtype : %s" % str(hobj.dtype))
bb = bout.create(path, hobj.dtype, size=hobj.size)
bb.attrs['ndarray.shape'] = hobj.shape
bb.write(0, hobj[:].ravel().copy())
traverse(hin, create)
bout.close()
if ns.verify:
bout = bigfile.BigFile(ns.bigfile, create=False)
def verify(hobj, path):
print("Verifying %s against %s" % (str(hobj), path))
bb = bout[path]
assert (hobj.dtype == bb.dtype)
assert (hobj.shape == bb.attrs['ndarray.shape']).all()
assert (hobj[:].flat == bb[:]).all()
traverse(hin, verify)
bout.close()
hin.close()
if __name__ == "__main__" :
ns = ap.parse_args()
main(ns)
|
Add a tool to convert hdf5 to bigfile.
This shall work for a lot of cases but not all. It's incomplete.import h5py
import bigfile
import logging
import argparse
ap = argparse.ArgumentParser('h5tobigfile')
ap.add_argument("hdf5")
ap.add_argument("bigfile")
ap.add_argument("--verify", action='store_true', default=False)
ap.add_argument("--include", action="append")
ap.add_argument("--exclude", action="append")
def traverse(hobj, operation, prefix=""):
if hasattr(hobj, 'keys'):
for k in hobj.keys():
path = prefix + '/' + k
traverse(hobj[k], operation, path)
if hasattr(hobj, 'dtype'):
if hobj.dtype.names:
for field in hobj.dtype.names:
path = prefix + field + '/'
traverse(hobj[field], operation, path)
else:
operation(hobj, prefix)
def main(ns):
hin = h5py.File(ns.hdf5, mode='r')
bout = bigfile.BigFile(ns.bigfile, create=True)
def create(hobj, path):
print("Copying %s as %s" % (str(hobj), path))
if hobj.dtype.shape and len(hobj.dtype.shape) > 1:
raise TypeError("bigfile cannot handle tensor dtype : %s" % str(hobj.dtype))
bb = bout.create(path, hobj.dtype, size=hobj.size)
bb.attrs['ndarray.shape'] = hobj.shape
bb.write(0, hobj[:].ravel().copy())
traverse(hin, create)
bout.close()
if ns.verify:
bout = bigfile.BigFile(ns.bigfile, create=False)
def verify(hobj, path):
print("Verifying %s against %s" % (str(hobj), path))
bb = bout[path]
assert (hobj.dtype == bb.dtype)
assert (hobj.shape == bb.attrs['ndarray.shape']).all()
assert (hobj[:].flat == bb[:]).all()
traverse(hin, verify)
bout.close()
hin.close()
if __name__ == "__main__" :
ns = ap.parse_args()
main(ns)
|
<commit_before><commit_msg>Add a tool to convert hdf5 to bigfile.
This shall work for a lot of cases but not all. It's incomplete.<commit_after>import h5py
import bigfile
import logging
import argparse
ap = argparse.ArgumentParser('h5tobigfile')
ap.add_argument("hdf5")
ap.add_argument("bigfile")
ap.add_argument("--verify", action='store_true', default=False)
ap.add_argument("--include", action="append")
ap.add_argument("--exclude", action="append")
def traverse(hobj, operation, prefix=""):
if hasattr(hobj, 'keys'):
for k in hobj.keys():
path = prefix + '/' + k
traverse(hobj[k], operation, path)
if hasattr(hobj, 'dtype'):
if hobj.dtype.names:
for field in hobj.dtype.names:
path = prefix + field + '/'
traverse(hobj[field], operation, path)
else:
operation(hobj, prefix)
def main(ns):
hin = h5py.File(ns.hdf5, mode='r')
bout = bigfile.BigFile(ns.bigfile, create=True)
def create(hobj, path):
print("Copying %s as %s" % (str(hobj), path))
if hobj.dtype.shape and len(hobj.dtype.shape) > 1:
raise TypeError("bigfile cannot handle tensor dtype : %s" % str(hobj.dtype))
bb = bout.create(path, hobj.dtype, size=hobj.size)
bb.attrs['ndarray.shape'] = hobj.shape
bb.write(0, hobj[:].ravel().copy())
traverse(hin, create)
bout.close()
if ns.verify:
bout = bigfile.BigFile(ns.bigfile, create=False)
def verify(hobj, path):
print("Verifying %s against %s" % (str(hobj), path))
bb = bout[path]
assert (hobj.dtype == bb.dtype)
assert (hobj.shape == bb.attrs['ndarray.shape']).all()
assert (hobj[:].flat == bb[:]).all()
traverse(hin, verify)
bout.close()
hin.close()
if __name__ == "__main__" :
ns = ap.parse_args()
main(ns)
|
|
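To make the h5tobigfile record above concrete, a small illustrative driver follows; the file names and dataset name are invented, and it assumes h5py and numpy are installed alongside bigfile.
import h5py
import numpy as np
with h5py.File('example.h5', 'w') as f:  # hypothetical input file
    f.create_dataset('Position', data=np.arange(12, dtype='f8').reshape(4, 3))
# hypothetical invocation: python h5tobigfile.py example.h5 example.big --verify
# main() would copy /Position into the bigfile, record its shape in the
# 'ndarray.shape' attribute, and re-read it during the --verify pass.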
6b4489e9e0362b75433f8a75fcfd041bab4e8a08
|
problem_3/solution2.py
|
problem_3/solution2.py
|
roots = []; product = 1; x = 2; n = 600851475143; y = n;
while product != n:
while (y % x == 0):
roots.append(x)
y /= x
product *= roots[-1]
x += 1
print max(roots)
|
Add a second python implementation for problem 3
|
Add a second python implementation for problem 3
|
Python
|
mit
|
mdsrosa/project_euler,mdsrosa/project_euler,mdsrosa/project_euler,mdsrosa/project_euler,mdsrosa/project_euler,mdsrosa/project_euler,mdsrosa/project_euler,mdsrosa/project_euler
|
Add a second python implementation for problem 3
|
roots = []; product = 1; x = 2; n = 600851475143; y = n;
while product != n:
while (y % x == 0):
roots.append(x)
y /= x
product *= roots[-1]
x += 1
print max(roots)
|
<commit_before><commit_msg>Add a second python implementation for problem 3<commit_after>
|
roots = []; product = 1; x = 2; n = 600851475143; y = n;
while product != n:
while (y % x == 0):
roots.append(x)
y /= x
product *= roots[-1]
x += 1
print max(roots)
|
Add a second python implementation for problem 3roots = []; product = 1; x = 2; n = 600851475143; y = n;
while product != n:
while (y % x == 0):
roots.append(x)
y /= x
product *= roots[-1]
x += 1
print max(roots)
|
<commit_before><commit_msg>Add a second python implementation for problem 3<commit_after>roots = []; product = 1; x = 2; n = 600851475143; y = n;
while product != n:
while (y % x == 0):
roots.append(x)
y /= x
product *= roots[-1]
x += 1
print max(roots)
|
|
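A quick worked example of the trial-division idea in the problem 3 record above, first on the smaller number from the Project Euler statement; the helper name is ours, not part of the submitted solution.
def largest_prime_factor(n):
    x, y, roots = 2, n, []
    while y > 1:
        while y % x == 0:
            roots.append(x)
            y //= x
        x += 1
    return max(roots)
print(largest_prime_factor(13195))         # 29 (= max of 5, 7, 13, 29)
print(largest_prime_factor(600851475143))  # 6857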
a4f9f724e1a9b292abd9df46293d14759b864dc4
|
Send_Stack.py
|
Send_Stack.py
|
__author__ = 'ayost'
import os
import re
import sys
from time import sleep
import boto.cloudformation
import boto.cloudformation.stack
SCRIPT_LOCATION = os.path.abspath(os.path.dirname(sys.argv[0]))
def send(stacks):
regions = []
for r in boto.cloudformation.regions():
regions.append(r.name.upper())
region = ""
while len(region) < 1:
answer = input("CF region: ")
if answer.upper() in regions:
region = answer.lower()
else:
print("Invalid region")
names = []
for stack in stacks:
stackname = str(re.sub(r'.*[/\\]([^/\\\.]*?)\.stack', r'\1', stack)).strip()
cf_conn = boto.cloudformation.connect_to_region(region)
exists = False
try:
cf_stack = cf_conn.describe_stacks(stack_name_or_id=stackname)
exists = True
except:
print("Stack does not exist. Creating...")
exists = False
if exists:
print("Stack exists. updating...")
cf_conn.update_stack(stack_name=stackname, template_body=get_template_text(stackname), capabilities=['CAPABILITY_IAM'])
else:
cf_stack = cf_conn.create_stack(stack_name=stackname, capabilities=['CAPABILITY_IAM'], template_body=get_template_text(stackname))
names.append(stackname)
results(names, region)
def results(stacks, region):
done = []
failed = []
cf_success = ['CREATE_COMPLETE', 'DELETE_COMPLETE', 'UPDATE_COMPLETE']
cf_failed = ['CREATE_FAILED', 'ROLLBACK_FAILED', 'ROLLBACK_COMPLETE', 'DELETE_FAILED']
cf_conn = boto.cloudformation.connect_to_region(region)
while len(done) + len(failed) < len(stacks):
for stack in stacks:
if stack not in done and stack not in failed:
stack_obj = cf_conn.describe_stacks(stack_name_or_id=stack)[0].stack_status
if stack_obj in cf_success:
done.append(stack)
if stack_obj in cf_failed:
failed.append(stack)
sleep(1)
print("All Stacks Complete")
print("Succeeded: ")
print(done)
print("Failed: ")
print(failed)
def get_template_text(stackname):
template = os.path.join(SCRIPT_LOCATION, 'Stacks', stackname + '.stack')
t_file = open(template)
text = t_file.read()
t_file.close()
return text
if __name__ == '__main__':
send(sys.argv[1:])
|
Build stacks, wait for completion, show status of each
|
Build stacks, wait for completion, show status of each
|
Python
|
apache-2.0
|
careerbuilder/CloudSeed,careerbuilder/CloudSeed,careerbuilder/CloudSeed,careerbuilder/CloudSeed
|
Build stacks, wait for completion, show status of each
|
__author__ = 'ayost'
import os
import re
import sys
from time import sleep
import boto.cloudformation
import boto.cloudformation.stack
SCRIPT_LOCATION = os.path.abspath(os.path.dirname(sys.argv[0]))
def send(stacks):
regions = []
for r in boto.cloudformation.regions():
regions.append(r.name.upper())
region = ""
while len(region) < 1:
answer = input("CF region: ")
if answer.upper() in regions:
region = answer.lower()
else:
print("Invalid region")
names = []
for stack in stacks:
stackname = str(re.sub(r'.*[/\\]([^/\\\.]*?)\.stack', r'\1', stack)).strip()
cf_conn = boto.cloudformation.connect_to_region(region)
exists = False
try:
cf_stack = cf_conn.describe_stacks(stack_name_or_id=stackname)
exists = True
except:
print("Stack does not exist. Creating...")
exists = False
if exists:
print("Stack exists. updating...")
cf_conn.update_stack(stack_name=stackname, template_body=get_template_text(stackname), capabilities=['CAPABILITY_IAM'])
else:
cf_stack = cf_conn.create_stack(stack_name=stackname, capabilities=['CAPABILITY_IAM'], template_body=get_template_text(stackname))
names.append(stackname)
results(names, region)
def results(stacks, region):
done = []
failed = []
cf_success = ['CREATE_COMPLETE', 'DELETE_COMPLETE', 'UPDATE_COMPLETE']
cf_failed = ['CREATE_FAILED', 'ROLLBACK_FAILED', 'ROLLBACK_COMPLETE', 'DELETE_FAILED']
cf_conn = boto.cloudformation.connect_to_region(region)
while len(done) + len(failed) < len(stacks):
for stack in stacks:
if stack not in done and stack not in failed:
stack_obj = cf_conn.describe_stacks(stack_name_or_id=stack)[0].stack_status
if stack_obj in cf_success:
done.append(stack)
if stack_obj in cf_failed:
failed.append(stack)
sleep(1)
print("All Stacks Complete")
print("Succeeded: ")
print(done)
print("Failed: ")
print(failed)
def get_template_text(stackname):
template = os.path.join(SCRIPT_LOCATION, 'Stacks', stackname + '.stack')
t_file = open(template)
text = t_file.read()
t_file.close()
return text
if __name__ == '__main__':
send(sys.argv[1:])
|
<commit_before><commit_msg>Build stacks, wait for completion, show status of each<commit_after>
|
__author__ = 'ayost'
import os
import re
import sys
from time import sleep
import boto.cloudformation
import boto.cloudformation.stack
SCRIPT_LOCATION = os.path.abspath(os.path.dirname(sys.argv[0]))
def send(stacks):
regions = []
for r in boto.cloudformation.regions():
regions.append(r.name.upper())
region = ""
while len(region) < 1:
answer = input("CF region: ")
if answer.upper() in regions:
region = answer.lower()
else:
print("Invalid region")
names = []
for stack in stacks:
stackname = str(re.sub(r'.*[/\\]([^/\\\.]*?)\.stack', r'\1', stack)).strip()
cf_conn = boto.cloudformation.connect_to_region(region)
exists = False
try:
cf_stack = cf_conn.describe_stacks(stack_name_or_id=stackname)
exists = True
except:
print("Stack does not exist. Creating...")
exists = False
if exists:
print("Stack exists. updating...")
cf_conn.update_stack(stack_name=stackname, template_body=get_template_text(stackname), capabilities=['CAPABILITY_IAM'])
else:
cf_stack = cf_conn.create_stack(stack_name=stackname, capabilities=['CAPABILITY_IAM'], template_body=get_template_text(stackname))
names.append(stackname)
results(names, region)
def results(stacks, region):
done = []
failed = []
cf_success = ['CREATE_COMPLETE', 'DELETE_COMPLETE', 'UPDATE_COMPLETE']
cf_failed = ['CREATE_FAILED', 'ROLLBACK_FAILED', 'ROLLBACK_COMPLETE', 'DELETE_FAILED']
cf_conn = boto.cloudformation.connect_to_region(region)
while len(done) + len(failed) < len(stacks):
for stack in stacks:
if stack not in done and stack not in failed:
stack_obj = cf_conn.describe_stacks(stack_name_or_id=stack)[0].stack_status
if stack_obj in cf_success:
done.append(stack)
if stack_obj in cf_failed:
failed.append(stack)
sleep(1)
print("All Stacks Complete")
print("Succeeded: ")
print(done)
print("Failed: ")
print(failed)
def get_template_text(stackname):
template = os.path.join(SCRIPT_LOCATION, 'Stacks', stackname + '.stack')
t_file = open(template)
text = t_file.read()
t_file.close()
return text
if __name__ == '__main__':
send(sys.argv[1:])
|
Build stacks, wait for completion, show status of each__author__ = 'ayost'
import os
import re
import sys
from time import sleep
import boto.cloudformation
import boto.cloudformation.stack
SCRIPT_LOCATION = os.path.abspath(os.path.dirname(sys.argv[0]))
def send(stacks):
regions = []
for r in boto.cloudformation.regions():
regions.append(r.name.upper())
region = ""
while len(region) < 1:
answer = input("CF region: ")
if answer.upper() in regions:
region = answer.lower()
else:
print("Invalid region")
names = []
for stack in stacks:
stackname = str(re.sub(r'.*[/\\]([^/\\\.]*?)\.stack', r'\1', stack)).strip()
cf_conn = boto.cloudformation.connect_to_region(region)
exists = False
try:
cf_stack = cf_conn.describe_stacks(stack_name_or_id=stackname)
exists = True
except:
print("Stack does not exist. Creating...")
exists = False
if exists:
print("Stack exists. updating...")
cf_conn.update_stack(stack_name=stackname, template_body=get_template_text(stackname), capabilities=['CAPABILITY_IAM'])
else:
cf_stack = cf_conn.create_stack(stack_name=stackname, capabilities=['CAPABILITY_IAM'], template_body=get_template_text(stackname))
names.append(stackname)
results(names, region)
def results(stacks, region):
done = []
failed = []
cf_success = ['CREATE_COMPLETE', 'DELETE_COMPLETE', 'UPDATE_COMPLETE']
cf_failed = ['CREATE_FAILED', 'ROLLBACK_FAILED', 'ROLLBACK_COMPLETE', 'DELETE_FAILED']
cf_conn = boto.cloudformation.connect_to_region(region)
while len(done) + len(failed) < len(stacks):
for stack in stacks:
if stack not in done and stack not in failed:
stack_obj = cf_conn.describe_stacks(stack_name_or_id=stack)[0].stack_status
if stack_obj in cf_success:
done.append(stack)
if stack_obj in cf_failed:
failed.append(stack)
sleep(1)
print("All Stacks Complete")
print("Succeeded: ")
print(done)
print("Failed: ")
print(failed)
def get_template_text(stackname):
template = os.path.join(SCRIPT_LOCATION, 'Stacks', stackname + '.stack')
t_file = open(template)
text = t_file.read()
t_file.close()
return text
if __name__ == '__main__':
send(sys.argv[1:])
|
<commit_before><commit_msg>Build stacks, wait for completion, show status of each<commit_after>__author__ = 'ayost'
import os
import re
import sys
from time import sleep
import boto.cloudformation
import boto.cloudformation.stack
SCRIPT_LOCATION = os.path.abspath(os.path.dirname(sys.argv[0]))
def send(stacks):
regions = []
for r in boto.cloudformation.regions():
regions.append(r.name.upper())
region = ""
while len(region) < 1:
answer = input("CF region: ")
if answer.upper() in regions:
region = answer.lower()
else:
print("Invalid region")
names = []
for stack in stacks:
stackname = str(re.sub(r'.*[/\\]([^/\\\.]*?)\.stack', r'\1', stack)).strip()
cf_conn = boto.cloudformation.connect_to_region(region)
exists = False
try:
cf_stack = cf_conn.describe_stacks(stack_name_or_id=stackname)
exists = True
except:
print("Stack does not exist. Creating...")
exists = False
if exists:
print("Stack exists. updating...")
cf_conn.update_stack(stack_name=stackname, template_body=get_template_text(stackname), capabilities=['CAPABILITY_IAM'])
else:
cf_stack = cf_conn.create_stack(stack_name=stackname, capabilities=['CAPABILITY_IAM'], template_body=get_template_text(stackname))
names.append(stackname)
results(names, region)
def results(stacks, region):
done = []
failed = []
cf_success = ['CREATE_COMPLETE', 'DELETE_COMPLETE', 'UPDATE_COMPLETE']
cf_failed = ['CREATE_FAILED', 'ROLLBACK_FAILED', 'ROLLBACK_COMPLETE', 'DELETE_FAILED']
cf_conn = boto.cloudformation.connect_to_region(region)
while len(done) + len(failed) < len(stacks):
for stack in stacks:
if stack not in done and stack not in failed:
stack_obj = cf_conn.describe_stacks(stack_name_or_id=stack)[0].stack_status
if stack_obj in cf_success:
done.append(stack)
if stack_obj in cf_failed:
failed.append(stack)
sleep(1)
print("All Stacks Complete")
print("Succeeded: ")
print(done)
print("Failed: ")
print(failed)
def get_template_text(stackname):
template = os.path.join(SCRIPT_LOCATION, 'Stacks', stackname + '.stack')
t_file = open(template)
text = t_file.read()
t_file.close()
return text
if __name__ == '__main__':
send(sys.argv[1:])
|
|
258bf91456db1f9c5c2b90bfdaf32a4af13de7df
|
states/common/bootstrap/bootstrap.dir/modules/utils/salt_output.py
|
states/common/bootstrap/bootstrap.dir/modules/utils/salt_output.py
|
#!/usr/bin/env python
#
import sys
import yaml
import logging
###############################################################################
def load_yaml_data(file_path):
"""
Load YAML formatted data from file_path.
"""
with open(file_path, 'r') as yaml_file:
loaded_data = yaml.load(yaml_file)
return loaded_data
###############################################################################
def check_result(salt_output):
"""
Check result provided by Salt for local (see `salt-call`) execution.
"""
local_result = salt_output['local']
overall_result = True
for state_key in local_result.keys():
name_value = local_result[state_key]['name']
result_value = local_result[state_key]['result']
logging.info("check result for `name`: " + str(name_value))
if result_value is None:
logging.critical("unexpected `result` value: " + str(result_value))
overall_result = False
elif result_value == False:
logging.info("result: " + str(result_value))
overall_result = False
# Do not break the loop.
# Instead, keep on generating log output
elif result_value == True:
logging.info("result: " + str(result_value))
else:
logging.info("unexpected `result` value: " + str(result_value))
overall_result = False
return overall_result
###############################################################################
# MAIN
# Execute further only if this file is executed as a script (not imported
# as a module).
if __name__ == '__main__':
logging.getLogger().setLevel(0)
file_path = sys.argv[1]
salt_output = load_yaml_data(file_path)
overall_result = check_result(salt_output)
if overall_result:
sys.exit(0)
else:
sys.exit(1)
|
Add initial version of Salt output analyzer script
|
Add initial version of Salt output analyzer script
|
Python
|
apache-2.0
|
uvsmtid/common-salt-states,uvsmtid/common-salt-states,uvsmtid/common-salt-states,uvsmtid/common-salt-states
|
Add initial version of Salt output analyzer script
|
#!/usr/bin/env python
#
import sys
import yaml
import logging
###############################################################################
def load_yaml_data(file_path):
"""
Load YAML formatted data from file_path.
"""
with open(file_path, 'r') as yaml_file:
loaded_data = yaml.load(yaml_file)
return loaded_data
###############################################################################
def check_result(salt_output):
"""
Check result provided by Salt for local (see `salt-call`) execution.
"""
local_result = salt_output['local']
overall_result = True
for state_key in local_result.keys():
name_value = local_result[state_key]['name']
result_value = local_result[state_key]['result']
logging.info("check result for `name`: " + str(name_value))
if result_value is None:
logging.critical("unexpected `result` value: " + str(result_value))
overall_result = False
elif result_value == False:
logging.info("result: " + str(result_value))
overall_result = False
# Do not break the loop.
# Instead, keep on generating log output
elif result_value == True:
logging.info("result: " + str(result_value))
else:
logging.info("unexpected `result` value: " + str(result_value))
overall_result = False
return overall_result
###############################################################################
# MAIN
# Execute further only if this file is executed as a script (not imported
# as a module).
if __name__ == '__main__':
logging.getLogger().setLevel(0)
file_path = sys.argv[1]
salt_output = load_yaml_data(file_path)
overall_result = check_result(salt_output)
if overall_result:
sys.exit(0)
else:
sys.exit(1)
|
<commit_before><commit_msg>Add initial version of Salt output analyzer script<commit_after>
|
#!/usr/bin/env python
#
import sys
import yaml
import logging
###############################################################################
def load_yaml_data(file_path):
"""
Load YAML formatted data from file_path.
"""
with open(file_path, 'r') as yaml_file:
loaded_data = yaml.load(yaml_file)
return loaded_data
###############################################################################
def check_result(salt_output):
"""
Check result provided by Salt for local (see `salt-call`) execution.
"""
local_result = salt_output['local']
overall_result = True
for state_key in local_result.keys():
name_value = local_result[state_key]['name']
result_value = local_result[state_key]['result']
logging.info("check result for `name`: " + str(name_value))
if result_value is None:
logging.critical("unexpected `result` value: " + str(result_value))
overall_result = False
elif result_value == False:
logging.info("result: " + str(result_value))
overall_result = False
# Do not break the loop.
# Instead, keep on generating log output
elif result_value == True:
logging.info("result: " + str(result_value))
else:
logging.info("unexpected `result` value: " + str(result_value))
overall_result = False
return overall_result
###############################################################################
# MAIN
# Execute further only if this file is executed as a script (not imported
# as a module).
if __name__ == '__main__':
logging.getLogger().setLevel(0)
file_path = sys.argv[1]
salt_output = load_yaml_data(file_path)
overall_result = check_result(salt_output)
if overall_result:
sys.exit(0)
else:
sys.exit(1)
|
Add initial version of Salt output analyzer script#!/usr/bin/env python
#
import sys
import yaml
import logging
###############################################################################
def load_yaml_data(file_path):
"""
Load YAML formatted data from file_path.
"""
with open(file_path, 'r') as yaml_file:
loaded_data = yaml.load(yaml_file)
return loaded_data
###############################################################################
def check_result(salt_output):
"""
Check result provided by Salt for local (see `salt-call`) execution.
"""
local_result = salt_output['local']
overall_result = True
for state_key in local_result.keys():
name_value = local_result[state_key]['name']
result_value = local_result[state_key]['result']
logging.info("check result for `name`: " + str(name_value))
if result_value is None:
logging.critical("unexpected `result` value: " + str(result_value))
overall_result = False
elif result_value == False:
logging.info("result: " + str(result_value))
overall_result = False
# Do not break the loop.
# Instead, keep on generating log output
elif result_value == True:
logging.info("result: " + str(result_value))
else:
logging.info("unexpected `result` value: " + str(result_value))
overall_result = False
return overall_result
###############################################################################
# MAIN
# Execute further only if this file is executed as a script (not imported
# as a module).
if __name__ == '__main__':
logging.getLogger().setLevel(0)
file_path = sys.argv[1]
salt_output = load_yaml_data(file_path)
overall_result = check_result(salt_output)
if overall_result:
sys.exit(0)
else:
sys.exit(1)
|
<commit_before><commit_msg>Add initial version of Salt output analyzer script<commit_after>#!/usr/bin/env python
#
import sys
import yaml
import logging
###############################################################################
def load_yaml_data(file_path):
"""
Load YAML formatted data from file_path.
"""
with open(file_path, 'r') as yaml_file:
loaded_data = yaml.load(yaml_file)
return loaded_data
###############################################################################
def check_result(salt_output):
"""
Check result provided by Salt for local (see `salt-call`) execution.
"""
local_result = salt_output['local']
overall_result = True
for state_key in local_result.keys():
name_value = local_result[state_key]['name']
result_value = local_result[state_key]['result']
logging.info("check result for `name`: " + str(name_value))
if result_value is None:
logging.critical("unexpected `result` value: " + str(result_value))
overall_result = False
elif result_value == False:
logging.info("result: " + str(result_value))
overall_result = False
# Do not break the loop.
# Instead, keep on generating log output
elif result_value == True:
logging.info("result: " + str(result_value))
else:
logging.info("unexpected `result` value: " + str(result_value))
overall_result = False
return overall_result
###############################################################################
# MAIN
# Execute further only if this file is executed as a script (not imported
# as a module).
if __name__ == '__main__':
logging.getLogger().setLevel(0)
file_path = sys.argv[1]
salt_output = load_yaml_data(file_path)
overall_result = check_result(salt_output)
if overall_result:
sys.exit(0)
else:
sys.exit(1)
|
|
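For readers of the Salt output analyzer record above, this is the input shape check_result expects; the state key and name below are invented placeholders, and only the 'local'/'name'/'result' layout comes from the script itself.
example_output = {
    'local': {
        'file_|-example_id_|-/tmp/example_|-managed': {   # made-up state key
            'name': '/tmp/example',
            'result': True,
        },
    },
}
# check_result(example_output) returns True; a False or None 'result' value
# would flip the overall result to False, matching the script's branches.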
91f90d5edd89a10cb652af001d6c076fdcf557d8
|
chapter3/collatzSequence.py
|
chapter3/collatzSequence.py
|
# Collatz Sequence practice project from Automate Boring Stuff - Chapter 3
# https://automatetheboringstuff.com/chapter3/
def collatz(number):
if number%2==0:
print(number//2)
return number//2
else:
output = 3*number+1
print(output)
return output
print("Enter a number")
try:
inputNumber = int(input())
result = inputNumber
while result!=1:
result = collatz(result)
except ValueError:
print("Enter a valid number")
|
Add solution to chapter-3 collatzsequence
|
Add solution to chapter-3 collatzsequence
|
Python
|
mit
|
anirudhvarma12/learning-python
|
Add solution to chapter-3 collatzsequence
|
# Collatz Sequence practice project from Automate Boring Stuff - Chapter 3
# https://automatetheboringstuff.com/chapter3/
def collatz(number):
if number%2==0:
print(number//2)
return number//2
else:
output = 3*number+1
print(output)
return output
print("Enter a number")
try:
inputNumber = int(input())
result = inputNumber
while result!=1:
result = collatz(result)
except ValueError:
print("Enter a valid number")
|
<commit_before><commit_msg>Add solution to chapter-3 collatzsequence<commit_after>
|
# Collatz Sequence practice project from Automate Boring Stuff - Chapter 3
# https://automatetheboringstuff.com/chapter3/
def collatz(number):
if number%2==0:
print(number//2)
return number//2
else:
output = 3*number+1
print(output)
return output
print("Enter a number")
try:
inputNumber = int(input())
result = inputNumber
while result!=1:
result = collatz(result)
except ValueError:
print("Enter a valid number")
|
Add solution to chapter-3 collatzsequence# Collatz Sequence practice project from Automate Boring Stuff - Chapter 3
# https://automatetheboringstuff.com/chapter3/
def collatz(number):
if number%2==0:
print(number//2)
return number//2
else:
output = 3*number+1
print(output)
return output
print("Enter a number")
try:
inputNumber = int(input())
result = inputNumber
while result!=1:
result = collatz(result)
except ValueError:
print("Enter a valid number")
|
<commit_before><commit_msg>Add solution to chapter-3 collatzsequence<commit_after># Collatz Sequence practice project from Automate Boring Stuff - Chapter 3
# https://automatetheboringstuff.com/chapter3/
def collatz(number):
if number%2==0:
print(number//2)
return number//2
else:
output = 3*number+1
print(output)
return output
print("Enter a number")
try:
inputNumber = int(input())
result = inputNumber
while result!=1:
result = collatz(result)
except ValueError:
print("Enter a valid number")
|
|
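As a sanity check on the collatz record above, the values the helper would print for a start of 6 are listed below; this is just the standard Collatz iteration written independently, not extra project code.
seq, n = [], 6
while n != 1:
    n = n // 2 if n % 2 == 0 else 3 * n + 1
    seq.append(n)
print(seq)  # [3, 10, 5, 16, 8, 4, 2, 1]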
bb3fa58481d58ac7fe9a55e7fabd2b37ce9df7ac
|
tests/test_android.py
|
tests/test_android.py
|
# MIT licensed
# Copyright (c) 2013-2017 lilydjwg <lilydjwg@gmail.com>, et al.
import pytest
pytestmark = pytest.mark.asyncio
async def test_android_addon(get_version):
assert await get_version("android-google-play-apk-expansion", {"android_sdk": "extras;google;market_apk_expansion", "repo": "addon"}) == "1.r03"
async def test_android_package(get_version):
assert await get_version("android-sdk-cmake", {"android_sdk": "cmake;", "repo": "package"}) == "3.6.4111459"
|
Add tests for Android SDK packages
|
Add tests for Android SDK packages
|
Python
|
mit
|
lilydjwg/nvchecker
|
Add tests for Android SDK packages
|
# MIT licensed
# Copyright (c) 2013-2017 lilydjwg <lilydjwg@gmail.com>, et al.
import pytest
pytestmark = pytest.mark.asyncio
async def test_android_addon(get_version):
assert await get_version("android-google-play-apk-expansion", {"android_sdk": "extras;google;market_apk_expansion", "repo": "addon"}) == "1.r03"
async def test_android_package(get_version):
assert await get_version("android-sdk-cmake", {"android_sdk": "cmake;", "repo": "package"}) == "3.6.4111459"
|
<commit_before><commit_msg>Add tests for Android SDK packages<commit_after>
|
# MIT licensed
# Copyright (c) 2013-2017 lilydjwg <lilydjwg@gmail.com>, et al.
import pytest
pytestmark = pytest.mark.asyncio
async def test_android_addon(get_version):
assert await get_version("android-google-play-apk-expansion", {"android_sdk": "extras;google;market_apk_expansion", "repo": "addon"}) == "1.r03"
async def test_android_package(get_version):
assert await get_version("android-sdk-cmake", {"android_sdk": "cmake;", "repo": "package"}) == "3.6.4111459"
|
Add tests for Android SDK packages# MIT licensed
# Copyright (c) 2013-2017 lilydjwg <lilydjwg@gmail.com>, et al.
import pytest
pytestmark = pytest.mark.asyncio
async def test_android_addon(get_version):
assert await get_version("android-google-play-apk-expansion", {"android_sdk": "extras;google;market_apk_expansion", "repo": "addon"}) == "1.r03"
async def test_android_package(get_version):
assert await get_version("android-sdk-cmake", {"android_sdk": "cmake;", "repo": "package"}) == "3.6.4111459"
|
<commit_before><commit_msg>Add tests for Android SDK packages<commit_after># MIT licensed
# Copyright (c) 2013-2017 lilydjwg <lilydjwg@gmail.com>, et al.
import pytest
pytestmark = pytest.mark.asyncio
async def test_android_addon(get_version):
assert await get_version("android-google-play-apk-expansion", {"android_sdk": "extras;google;market_apk_expansion", "repo": "addon"}) == "1.r03"
async def test_android_package(get_version):
assert await get_version("android-sdk-cmake", {"android_sdk": "cmake;", "repo": "package"}) == "3.6.4111459"
|
|
e084f83aea9c89b5fff0289dedf7bd6cd7434dcf
|
firecares/firestation/management/commands/update-erf-areas.py
|
firecares/firestation/management/commands/update-erf-areas.py
|
import os
import argparse
from django.core.management.base import BaseCommand
from firecares.firestation.models import FireDepartment
from firecares.tasks.update import update_parcel_department_effectivefirefighting_rollup
class Command(BaseCommand):
help = """Updates the Effective Response Force (ERF) areas for the department"""
def add_arguments(self, parser):
parser.add_argument('--dept', nargs='*', help='Specify one or more department ids (omission will process all departments)')
def handle(self, *args, **options):
departments = options.get('dept')
if not departments:
departments = [value['id'] for value in FireDepartment.objects.values('id')]
for department in departments:
update_parcel_department_effectivefirefighting_rollup(department)
|
Add django command for updating ERF areas
|
Add django command for updating ERF areas
|
Python
|
mit
|
FireCARES/firecares,FireCARES/firecares,FireCARES/firecares,FireCARES/firecares,FireCARES/firecares
|
Add django command for updating ERF areas
|
import os
import argparse
from django.core.management.base import BaseCommand
from firecares.firestation.models import FireDepartment
from firecares.tasks.update import update_parcel_department_effectivefirefighting_rollup
class Command(BaseCommand):
help = """Updates the Effective Response Force (ERF) areas for the department"""
def add_arguments(self, parser):
parser.add_argument('--dept', nargs='*', help='Specify one or more department ids (omission will process all departments)')
def handle(self, *args, **options):
departments = options.get('dept')
if not departments:
departments = [value['id'] for value in FireDepartment.objects.values('id')]
for department in departments:
update_parcel_department_effectivefirefighting_rollup(department)
|
<commit_before><commit_msg>Add django command for updating ERF areas<commit_after>
|
import os
import argparse
from django.core.management.base import BaseCommand
from firecares.firestation.models import FireDepartment
from firecares.tasks.update import update_parcel_department_effectivefirefighting_rollup
class Command(BaseCommand):
help = """Updates the Effective Response Force (ERF) areas for the department"""
def add_arguments(self, parser):
parser.add_argument('--dept', nargs='*', help='Specify one or more department ids (omission will process all departments)')
def handle(self, *args, **options):
departments = options.get('dept')
if not departments:
departments = [value['id'] for value in FireDepartment.objects.values('id')]
for department in departments:
update_parcel_department_effectivefirefighting_rollup(department)
|
Add django command for updating ERF areasimport os
import argparse
from django.core.management.base import BaseCommand
from firecares.firestation.models import FireDepartment
from firecares.tasks.update import update_parcel_department_effectivefirefighting_rollup
class Command(BaseCommand):
help = """Updates the Effective Response Force (ERF) areas for the department"""
def add_arguments(self, parser):
parser.add_argument('--dept', nargs='*', help='Specify one or more department ids (omission will process all departments)')
def handle(self, *args, **options):
departments = options.get('dept')
if not departments:
departments = [value['id'] for value in FireDepartment.objects.values('id')]
for department in departments:
update_parcel_department_effectivefirefighting_rollup(department)
|
<commit_before><commit_msg>Add django command for updating ERF areas<commit_after>import os
import argparse
from django.core.management.base import BaseCommand
from firecares.firestation.models import FireDepartment
from firecares.tasks.update import update_parcel_department_effectivefirefighting_rollup
class Command(BaseCommand):
help = """Updates the Effective Response Force (ERF) areas for the department"""
def add_arguments(self, parser):
parser.add_argument('--dept', nargs='*', help='Specify one or more department ids (omission will process all departments)')
def handle(self, *args, **options):
departments = options.get('dept')
if not departments:
departments = [value['id'] for value in FireDepartment.objects.values('id')]
for department in departments:
update_parcel_department_effectivefirefighting_rollup(department)
|
|
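For completeness, one hedged way to drive the ERF management command from the record above; the command name follows the module's file name and the department ids are made up, so treat this as a sketch rather than documented usage.
# hypothetically, from a configured Django project:
#   python manage.py update-erf-areas --dept 1234 5678   # selected departments
#   python manage.py update-erf-areas                     # omit --dept for all
from django.core.management import call_command
call_command('update-erf-areas', dept=['1234', '5678'])   # programmatic equivalent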
1293b101683462225f7c5f150b3c98767d032639
|
app/models.py
|
app/models.py
|
# models.py
#
# Copyright(c) Exequiel Ceasar Navarrete <esnavarrete1@up.edu.ph>
# Licensed under MIT
# Version 0.0.0
from app import db
from slugify import slugify
class Crop(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80), unique=False)
crop_type = db.Column(db.String(80), unique=False)
slug = db.Column(db.String(80), unique=True)
def __init__(self, name, crop_type):
self.name = name
self.crop_type = crop_type
self.slug = slugify(name)
def __repr__(self):
return '<Crop %r>' % self.name
class Region(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80), unique=False)
slug = db.Column(db.String(80), unique=True)
region_normalized = db.Column(db.String(80), unique=False)
region_roman = db.Column(db.String(80), unique=False)
region_normalized_canonical = db.Column(db.String(160), unique=False)
region_roman_canonical = db.Column(db.String(160), unique=False)
def __init__(self, name, region_normalized, region_roman):
self.name = name
self.region_normalized = region_normalized
self.region_roman = region_roman
self.region_normalized_canonical = "%s (%s)" % (self.name, region_normalized)
self.region_roman_canonical = "%s (%s)" % (self.name, region_roman)
# assemble the slug from the canonical roman
self.slug = slugify(self.region_roman_canonical)
def __repr__(self):
return '<Region %r>' % self.region_roman_canonical
|
Create crop and region model
|
Create crop and region model
|
Python
|
mit
|
ecsnavarretemit/sarai-interactive-maps-backend,ecsnavarretemit/sarai-interactive-maps-backend
|
Create crop and region model
|
# models.py
#
# Copyright(c) Exequiel Ceasar Navarrete <esnavarrete1@up.edu.ph>
# Licensed under MIT
# Version 0.0.0
from app import db
from slugify import slugify
class Crop(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80), unique=False)
crop_type = db.Column(db.String(80), unique=False)
slug = db.Column(db.String(80), unique=True)
def __init__(self, name, crop_type):
self.name = name
self.crop_type = crop_type
self.slug = slugify(name)
def __repr__(self):
return '<Crop %r>' % self.name
class Region(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80), unique=False)
slug = db.Column(db.String(80), unique=True)
region_normalized = db.Column(db.String(80), unique=False)
region_roman = db.Column(db.String(80), unique=False)
region_normalized_canonical = db.Column(db.String(160), unique=False)
region_roman_canonical = db.Column(db.String(160), unique=False)
def __init__(self, name, region_normalized, region_roman):
self.name = name
self.region_normalized = region_normalized
self.region_roman = region_roman
self.region_normalized_canonical = "%s (%s)" % (self.name, region_normalized)
self.region_roman_canonical = "%s (%s)" % (self.name, region_roman)
# assemble the slug from the canonical roman
self.slug = slugify(self.region_roman_canonical)
def __repr__(self):
return '<Region %r>' % self.region_roman_canonical
|
<commit_before><commit_msg>Create crop and region model<commit_after>
|
# models.py
#
# Copyright(c) Exequiel Ceasar Navarrete <esnavarrete1@up.edu.ph>
# Licensed under MIT
# Version 0.0.0
from app import db
from slugify import slugify
class Crop(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80), unique=False)
crop_type = db.Column(db.String(80), unique=False)
slug = db.Column(db.String(80), unique=True)
def __init__(self, name, crop_type):
self.name = name
self.crop_type = crop_type
self.slug = slugify(name)
def __repr__(self):
return '<Crop %r>' % self.name
class Region(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80), unique=False)
slug = db.Column(db.String(80), unique=True)
region_normalized = db.Column(db.String(80), unique=False)
region_roman = db.Column(db.String(80), unique=False)
region_normalized_canonical = db.Column(db.String(160), unique=False)
region_roman_canonical = db.Column(db.String(160), unique=False)
def __init__(self, name, region_normalized, region_roman):
self.name = name
self.region_normalized = region_normalized
self.region_roman = region_roman
self.region_normalized_canonical = "%s (%s)" % (self.name, region_normalized)
self.region_roman_canonical = "%s (%s)" % (self.name, region_roman)
# assemble the slug from the canonical roman
self.slug = slugify(self.region_roman_canonical)
def __repr__(self):
return '<Region %r>' % self.region_roman_canonical
|
Create crop and region model# models.py
#
# Copyright(c) Exequiel Ceasar Navarrete <esnavarrete1@up.edu.ph>
# Licensed under MIT
# Version 0.0.0
from app import db
from slugify import slugify
class Crop(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80), unique=False)
crop_type = db.Column(db.String(80), unique=False)
slug = db.Column(db.String(80), unique=True)
def __init__(self, name, crop_type):
self.name = name
self.crop_type = crop_type
self.slug = slugify(name)
def __repr__(self):
return '<Crop %r>' % self.name
class Region(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80), unique=False)
slug = db.Column(db.String(80), unique=True)
region_normalized = db.Column(db.String(80), unique=False)
region_roman = db.Column(db.String(80), unique=False)
region_normalized_canonical = db.Column(db.String(160), unique=False)
region_roman_canonical = db.Column(db.String(160), unique=False)
def __init__(self, name, region_normalized, region_roman):
self.name = name
self.region_normalized = region_normalized
self.region_roman = region_roman
self.region_normalized_canonical = "%s (%s)" % (self.name, region_normalized)
self.region_roman_canonical = "%s (%s)" % (self.name, region_roman)
# assemble the slug from the canonical roman
self.slug = slugify(self.region_roman_canonical)
def __repr__(self):
return '<Region %r>' % self.region_roman_canonical
|
<commit_before><commit_msg>Create crop and region model<commit_after># models.py
#
# Copyright(c) Exequiel Ceasar Navarrete <esnavarrete1@up.edu.ph>
# Licensed under MIT
# Version 0.0.0
from app import db
from slugify import slugify
class Crop(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80), unique=False)
crop_type = db.Column(db.String(80), unique=False)
slug = db.Column(db.String(80), unique=True)
def __init__(self, name, crop_type):
self.name = name
self.crop_type = crop_type
self.slug = slugify(name)
def __repr__(self):
return '<Crop %r>' % self.name
class Region(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80), unique=False)
slug = db.Column(db.String(80), unique=True)
region_normalized = db.Column(db.String(80), unique=False)
region_roman = db.Column(db.String(80), unique=False)
region_normalized_canonical = db.Column(db.String(160), unique=False)
region_roman_canonical = db.Column(db.String(160), unique=False)
def __init__(self, name, region_normalized, region_roman):
self.name = name
self.region_normalized = region_normalized
self.region_roman = region_roman
self.region_normalized_canonical = "%s (%s)" % (self.name, region_normalized)
self.region_roman_canonical = "%s (%s)" % (self.name, region_roman)
# assemble the slug from the canonical roman
self.slug = slugify(self.region_roman_canonical)
def __repr__(self):
return '<Region %r>' % self.region_roman_canonical
|
|
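To illustrate the slug fields defined in the Crop/Region record above, a small usage sketch; the region values are examples and the exact slug strings assume python-slugify's default lowercasing and hyphenation.
r = Region('Ilocos Region', 'Region 1', 'Region I')
print(r.region_roman_canonical)  # Ilocos Region (Region I)
print(r.slug)                    # ilocos-region-region-i (assumed slugify output)
c = Crop('Rice', 'cereal')
print(c.slug)                    # rice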
e6ceac9c547c9fae78d6cac57d62a510fddc9096
|
scripts/traces/merge_traces.py
|
scripts/traces/merge_traces.py
|
#!/usr/bin/python
# Converts a bunch of processedTrace files from Second Life into a
# single trace file, where all coordinates have been converted
# properly. This assumes that the traces are laid out in a square
# grid, that each server is 256m x 256m.
if __name__ == "__main__":
trace_file_fmt = '4x4/processedTrace_%d'
servers_per_side = 4
server_width = 256
combined_file = 'sl.trace.4x4.dat'
objects = set()
max_rad = 0
min_rad = 10000
fout = file(combined_file, 'w')
for sy in range(servers_per_side):
for sx in range(servers_per_side):
idx = sy * servers_per_side + sx
trace_file = trace_file_fmt % (idx)
for line in file(trace_file):
(uuid, colon, info) = line.partition(':')
parts = info.split(',')
x = float(parts[0].strip()) + (sx * 256.0)
y = float(parts[1].strip()) + (sy * 256.0)
z = float(parts[2].strip())
t = int(parts[3].strip()) # milliseconds
rad = float(parts[4].strip())
objects.add(uuid)
max_rad = max(max_rad, rad)
min_rad = min(min_rad, rad)
print >>fout, uuid, x, y, z, t, rad
fout.close()
print "Unique objects:", len(objects)
print "Radii:", max_rad, "(max)", min_rad, "(min)"
|
Add a script to merge a bunch of quake style traces and write some summary statistics.
|
Add a script to merge a bunch of quake style traces and write some summary statistics.
|
Python
|
bsd-3-clause
|
sirikata/sirikata,sirikata/sirikata,sirikata/sirikata,sirikata/sirikata,sirikata/sirikata,sirikata/sirikata,sirikata/sirikata,sirikata/sirikata
|
Add a script to merge a bunch of quake style traces and write some summary statistics.
|
#!/usr/bin/python
# Converts a bunch of processedTrace files from Second Life into a
# single trace file, where all coordinates have been converted
# properly. This assumes that the traces are laid out in a square
# grid, that each server is 256m x 256m.
if __name__ == "__main__":
trace_file_fmt = '4x4/processedTrace_%d'
servers_per_side = 4
server_width = 256
combined_file = 'sl.trace.4x4.dat'
objects = set()
max_rad = 0
min_rad = 10000
fout = file(combined_file, 'w')
for sy in range(servers_per_side):
for sx in range(servers_per_side):
idx = sy * servers_per_side + sx
trace_file = trace_file_fmt % (idx)
for line in file(trace_file):
(uuid, colon, info) = line.partition(':')
parts = info.split(',')
x = float(parts[0].strip()) + (sx * 256.0)
y = float(parts[1].strip()) + (sy * 256.0)
z = float(parts[2].strip())
t = int(parts[3].strip()) # milliseconds
rad = float(parts[4].strip())
objects.add(uuid)
max_rad = max(max_rad, rad)
min_rad = min(min_rad, rad)
print >>fout, uuid, x, y, z, t, rad
fout.close()
print "Unique objects:", len(objects)
print "Radii:", max_rad, "(max)", min_rad, "(min)"
|
<commit_before><commit_msg>Add a script to merge a bunch of quake style traces and write some summary statistics.<commit_after>
|
#!/usr/bin/python
# Converts a bunch of processedTrace files from Second Life into a
# single trace file, where all coordinates have been converted
# properly. This assumes that the traces are laid out in a square
# grid, that each server is 256m x 256m.
if __name__ == "__main__":
trace_file_fmt = '4x4/processedTrace_%d'
servers_per_side = 4
server_width = 256
combined_file = 'sl.trace.4x4.dat'
objects = set()
max_rad = 0
min_rad = 10000
fout = file(combined_file, 'w')
for sy in range(servers_per_side):
for sx in range(servers_per_side):
idx = sy * servers_per_side + sx
trace_file = trace_file_fmt % (idx)
for line in file(trace_file):
(uuid, colon, info) = line.partition(':')
parts = info.split(',')
x = float(parts[0].strip()) + (sx * 256.0)
y = float(parts[1].strip()) + (sy * 256.0)
z = float(parts[2].strip())
t = int(parts[3].strip()) # milliseconds
rad = float(parts[4].strip())
objects.add(uuid)
max_rad = max(max_rad, rad)
min_rad = min(min_rad, rad)
print >>fout, uuid, x, y, z, t, rad
fout.close()
print "Unique objects:", len(objects)
print "Radii:", max_rad, "(max)", min_rad, "(min)"
|
Add a script to merge a bunch of quake style traces and write some summary statistics.#!/usr/bin/python
# Converts a bunch of processedTrace files from Second Life into a
# single trace file, where all coordinates have been converted
# properly. This assumes that the traces are laid out in a square
# grid, that each server is 256m x 256m.
if __name__ == "__main__":
trace_file_fmt = '4x4/processedTrace_%d'
servers_per_side = 4
server_width = 256
combined_file = 'sl.trace.4x4.dat'
objects = set()
max_rad = 0
min_rad = 10000
fout = file(combined_file, 'w')
for sy in range(servers_per_side):
for sx in range(servers_per_side):
idx = sy * servers_per_side + sx
trace_file = trace_file_fmt % (idx)
for line in file(trace_file):
(uuid, colon, info) = line.partition(':')
parts = info.split(',')
x = float(parts[0].strip()) + (sx * 256.0)
y = float(parts[1].strip()) + (sy * 256.0)
z = float(parts[2].strip())
t = int(parts[3].strip()) # milliseconds
rad = float(parts[4].strip())
objects.add(uuid)
max_rad = max(max_rad, rad)
min_rad = min(min_rad, rad)
print >>fout, uuid, x, y, z, t, rad
fout.close()
print "Unique objects:", len(objects)
print "Radii:", max_rad, "(max)", min_rad, "(min)"
|
<commit_before><commit_msg>Add a script to merge a bunch of quake style traces and write some summary statistics.<commit_after>#!/usr/bin/python
# Converts a bunch of processedTrace files from Second Life into a
# single trace file, where all coordinates have been converted
# properly. This assumes that the traces are laid out in a square
# grid, that each server is 256m x 256m.
if __name__ == "__main__":
trace_file_fmt = '4x4/processedTrace_%d'
servers_per_side = 4
server_width = 256
combined_file = 'sl.trace.4x4.dat'
objects = set()
max_rad = 0
min_rad = 10000
fout = file(combined_file, 'w')
for sy in range(servers_per_side):
for sx in range(servers_per_side):
idx = sy * servers_per_side + sx
trace_file = trace_file_fmt % (idx)
for line in file(trace_file):
(uuid, colon, info) = line.partition(':')
parts = info.split(',')
x = float(parts[0].strip()) + (sx * 256.0)
y = float(parts[1].strip()) + (sy * 256.0)
z = float(parts[2].strip())
t = int(parts[3].strip()) # milliseconds
rad = float(parts[4].strip())
objects.add(uuid)
max_rad = max(max_rad, rad)
min_rad = min(min_rad, rad)
print >>fout, uuid, x, y, z, t, rad
fout.close()
print "Unique objects:", len(objects)
print "Radii:", max_rad, "(max)", min_rad, "(min)"
|
|
dd30a2c655e1f0fca4be0de8de1775b7ab5c8b85
|
handle_quit_player.py
|
handle_quit_player.py
|
import pygame
import sys
from pygame.locals import *
from variables import *
from Player import *
from Card import *
from create_board import *
from create_game_options import *
from create_player_info import *
from handle_mouse_event import *
from update_game_dice import *
from handle_game import *
# handles all the action to remove player from the game
"""
Rules :
1) return all the cards allocated to that player
2) clear player details from the list
"""
def handle_quit_player(screen,Players,Cards,cur_player):
player = Players[cur_player]
# updating the card status and other related fields
for prop in player.property_owned:
Cards[prop].status = 0
Cards[prop].houses_built = 0
Cards[prop].hotel_built = 0
for player in player.property_mortgaged:
Cards[prop].status = 0
Cards[prop].houses_built = 0
Cards[prop].hotel_built = 0
# remove player from the list
Players.remove(player)
|
Handle withdrawal of a player
|
Handle withdrawal of a player
Added function to handle all the changes after a player quits or withdraws
from the game
|
Python
|
mit
|
idnaninitesh/monopoly_python
|
Handle withdrawal of a player
Added function to handle all the changes after a player quits or withdraws
from the game
|
import pygame
import sys
from pygame.locals import *
from variables import *
from Player import *
from Card import *
from create_board import *
from create_game_options import *
from create_player_info import *
from handle_mouse_event import *
from update_game_dice import *
from handle_game import *
# handles all the action to remove player from the game
"""
Rules :
1) return all the cards allocated to that player
2) clear player details from the list
"""
def handle_quit_player(screen,Players,Cards,cur_player):
player = Players[cur_player]
# updating the card status and other related fields
for prop in player.property_owned:
Cards[prop].status = 0
Cards[prop].houses_built = 0
Cards[prop].hotel_built = 0
for player in player.property_mortgaged:
Cards[prop].status = 0
Cards[prop].houses_built = 0
Cards[prop].hotel_built = 0
# remove player from the list
Players.remove(player)
|
<commit_before><commit_msg>Handle withdrawal of a player
Added function to handle all the changes after a player quits or withdraws
from the game<commit_after>
|
import pygame
import sys
from pygame.locals import *
from variables import *
from Player import *
from Card import *
from create_board import *
from create_game_options import *
from create_player_info import *
from handle_mouse_event import *
from update_game_dice import *
from handle_game import *
# handles all the action to remove player from the game
"""
Rules :
1) return all the cards allocated to that player
2) clear player details from the list
"""
def handle_quit_player(screen,Players,Cards,cur_player):
player = Players[cur_player]
# updating the card status and other related fields
for prop in player.property_owned:
Cards[prop].status = 0
Cards[prop].houses_built = 0
Cards[prop].hotel_built = 0
for player in player.property_mortgaged:
Cards[prop].status = 0
Cards[prop].houses_built = 0
Cards[prop].hotel_built = 0
# remove player from the list
Players.remove(player)
|
Handle withdrawal of a player
Added function to handle all the changes after a player quits or withdraws
from the gameimport pygame
import sys
from pygame.locals import *
from variables import *
from Player import *
from Card import *
from create_board import *
from create_game_options import *
from create_player_info import *
from handle_mouse_event import *
from update_game_dice import *
from handle_game import *
# handles all the action to remove player from the game
"""
Rules :
1) return all the cards allocated to that player
2) clear player details from the list
"""
def handle_quit_player(screen,Players,Cards,cur_player):
player = Players[cur_player]
# updating the card status and other related fields
for prop in player.property_owned:
Cards[prop].status = 0
Cards[prop].houses_built = 0
Cards[prop].hotel_built = 0
for player in player.property_mortgaged:
Cards[prop].status = 0
Cards[prop].houses_built = 0
Cards[prop].hotel_built = 0
# remove player from the list
Players.remove(player)
|
<commit_before><commit_msg>Handle withdrawal of a player
Added function to handle all the changes after a player quits or withdraws
from the game<commit_after>import pygame
import sys
from pygame.locals import *
from variables import *
from Player import *
from Card import *
from create_board import *
from create_game_options import *
from create_player_info import *
from handle_mouse_event import *
from update_game_dice import *
from handle_game import *
# handles all the action to remove player from the game
"""
Rules :
1) return all the cards allocated to that player
2) clear player details from the list
"""
def handle_quit_player(screen,Players,Cards,cur_player):
player = Players[cur_player]
# updating the card status and other related fields
for prop in player.property_owned:
Cards[prop].status = 0
Cards[prop].houses_built = 0
Cards[prop].hotel_built = 0
for player in player.property_mortgaged:
Cards[prop].status = 0
Cards[prop].houses_built = 0
Cards[prop].hotel_built = 0
# remove player from the list
Players.remove(player)
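One detail worth flagging in the snippet as stored here: the second loop reuses player as its loop variable while still indexing Cards[prop], so mortgaged properties are never actually reset, and the rebound name means the final Players.remove(player) no longer removes the quitting player. Assuming property_owned and property_mortgaged are plain iterables of card indices, a corrected sketch of just that clean-up step could be:

from itertools import chain

def release_properties(player, Cards):
    # hypothetical helper: return every property the player held to the bank
    for prop in chain(player.property_owned, player.property_mortgaged):
        Cards[prop].status = 0
        Cards[prop].houses_built = 0
        Cards[prop].hotel_built = 0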
|
|
84d8cc4d41ce6ff18f1f96f21749332e980a86f0
|
zipline/examples/buy_and_hold.py
|
zipline/examples/buy_and_hold.py
|
#!/usr/bin/env python
#
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
from zipline import TradingAlgorithm
from zipline.api import order, sid
from zipline.data.loader import load_bars_from_yahoo
# creating time interval
start = pd.Timestamp('2008-01-01', tz='UTC')
end = pd.Timestamp('2013-01-01', tz='UTC')
# loading the data
input_data = load_bars_from_yahoo(
stocks=['AAPL', 'MSFT'],
start=start,
end=end,
)
def initialize(context):
context.has_ordered = False
def handle_data(context, data):
if not context.has_ordered:
for stock in data:
order(sid(stock), 100)
context.has_ordered = True
algo = TradingAlgorithm(initialize=initialize, handle_data=handle_data)
results = algo.run(input_data)
|
Add simple buy and hold example.
|
DOC: Add simple buy and hold example.
|
Python
|
apache-2.0
|
nborggren/zipline,alphaBenj/zipline,quantopian/zipline,umuzungu/zipline,humdings/zipline,Scapogo/zipline,magne-max/zipline-ja,florentchandelier/zipline,enigmampc/catalyst,umuzungu/zipline,nborggren/zipline,bartosh/zipline,enigmampc/catalyst,bartosh/zipline,grundgruen/zipline,wilsonkichoi/zipline,grundgruen/zipline,alphaBenj/zipline,quantopian/zipline,humdings/zipline,magne-max/zipline-ja,Scapogo/zipline,wilsonkichoi/zipline,florentchandelier/zipline
|
DOC: Add simple buy and hold example.
|
#!/usr/bin/env python
#
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
from zipline import TradingAlgorithm
from zipline.api import order, sid
from zipline.data.loader import load_bars_from_yahoo
# creating time interval
start = pd.Timestamp('2008-01-01', tz='UTC')
end = pd.Timestamp('2013-01-01', tz='UTC')
# loading the data
input_data = load_bars_from_yahoo(
stocks=['AAPL', 'MSFT'],
start=start,
end=end,
)
def initialize(context):
context.has_ordered = False
def handle_data(context, data):
if not context.has_ordered:
for stock in data:
order(sid(stock), 100)
context.has_ordered = True
algo = TradingAlgorithm(initialize=initialize, handle_data=handle_data)
results = algo.run(input_data)
|
<commit_before><commit_msg>DOC: Add simple buy and hold example.<commit_after>
|
#!/usr/bin/env python
#
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
from zipline import TradingAlgorithm
from zipline.api import order, sid
from zipline.data.loader import load_bars_from_yahoo
# creating time interval
start = pd.Timestamp('2008-01-01', tz='UTC')
end = pd.Timestamp('2013-01-01', tz='UTC')
# loading the data
input_data = load_bars_from_yahoo(
stocks=['AAPL', 'MSFT'],
start=start,
end=end,
)
def initialize(context):
context.has_ordered = False
def handle_data(context, data):
if not context.has_ordered:
for stock in data:
order(sid(stock), 100)
context.has_ordered = True
algo = TradingAlgorithm(initialize=initialize, handle_data=handle_data)
results = algo.run(input_data)
|
DOC: Add simple buy and hold example.#!/usr/bin/env python
#
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
from zipline import TradingAlgorithm
from zipline.api import order, sid
from zipline.data.loader import load_bars_from_yahoo
# creating time interval
start = pd.Timestamp('2008-01-01', tz='UTC')
end = pd.Timestamp('2013-01-01', tz='UTC')
# loading the data
input_data = load_bars_from_yahoo(
stocks=['AAPL', 'MSFT'],
start=start,
end=end,
)
def initialize(context):
context.has_ordered = False
def handle_data(context, data):
if not context.has_ordered:
for stock in data:
order(sid(stock), 100)
context.has_ordered = True
algo = TradingAlgorithm(initialize=initialize, handle_data=handle_data)
results = algo.run(input_data)
|
<commit_before><commit_msg>DOC: Add simple buy and hold example.<commit_after>#!/usr/bin/env python
#
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
from zipline import TradingAlgorithm
from zipline.api import order, sid
from zipline.data.loader import load_bars_from_yahoo
# creating time interval
start = pd.Timestamp('2008-01-01', tz='UTC')
end = pd.Timestamp('2013-01-01', tz='UTC')
# loading the data
input_data = load_bars_from_yahoo(
stocks=['AAPL', 'MSFT'],
start=start,
end=end,
)
def initialize(context):
context.has_ordered = False
def handle_data(context, data):
if not context.has_ordered:
for stock in data:
order(sid(stock), 100)
context.has_ordered = True
algo = TradingAlgorithm(initialize=initialize, handle_data=handle_data)
results = algo.run(input_data)
|
|
8261098405f4279209388f91e6fbc0d48600b129
|
scripts/cscap/wx/extract_colin.py
|
scripts/cscap/wx/extract_colin.py
|
""" Extract some data for Colin
1951-2010 Annual GDDs by climate district Apr 1 - Oct 31
1951-2010 Frost-free days ...
"""
import psycopg2
import pandas as pd
pgconn = psycopg2.connect(database='coop', host='iemdb', user='nobody')
cursor = pgconn.cursor()
from pyiem.network import Table as NetworkTable
nt = NetworkTable(['IACLIMATE', 'MNCLIMATE', 'NDCLIMATE', 'OHCLIMATE',
'INCLIMATE', 'ILCLIMATE', 'MICLIMATE', 'WICLIMATE',
'SDCLIMATE', 'NECLIMATE', 'KSCLIMATE', 'MOCLIMATE'])
res = []
for sid in nt.sts.keys():
if sid[2] != 'C':
continue
TABLE = "alldata_%s" % (sid[:2],)
cursor.execute("""
WITH gdd as (
SELECT year, sum(gdd50(high,low)) from """+TABLE+""" WHERE
station = %s and sday between '0401' and '1031' and year >= 1951
and year < 2011 GROUP by year),
ff as (
SELECT year,
max(case when month < 7 and low < 32 then extract(doy from day) else 0 end),
min(case when month > 7 and low < 32 then extract(doy from day) else 366 end)
from """+TABLE+""" WHERE station = %s and year >= 1951 and year < 2011
GROUP by year)
SELECT g.year, g.sum, f.min - f.max from ff f JOIN gdd g on (g.year = f.year)
ORDER by g.year ASC
""", (sid, sid))
for row in cursor:
res.append(dict(station=sid, year=row[0], gdd50=row[1],
frostfree=int(row[2])))
df = pd.DataFrame(res)
df.to_csv('output.csv', index=False, columns=['station', 'year', 'gdd50',
'frostfree'])
|
Add extraction script as per request
|
Add extraction script as per request
|
Python
|
mit
|
akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem
|
Add extraction script as per request
|
""" Extract some data for Colin
1951-2010 Annual GDDs by climate district Apr 1 - Oct 31
1951-2010 Frost-free days ...
"""
import psycopg2
import pandas as pd
pgconn = psycopg2.connect(database='coop', host='iemdb', user='nobody')
cursor = pgconn.cursor()
from pyiem.network import Table as NetworkTable
nt = NetworkTable(['IACLIMATE', 'MNCLIMATE', 'NDCLIMATE', 'OHCLIMATE',
'INCLIMATE', 'ILCLIMATE', 'MICLIMATE', 'WICLIMATE',
'SDCLIMATE', 'NECLIMATE', 'KSCLIMATE', 'MOCLIMATE'])
res = []
for sid in nt.sts.keys():
if sid[2] != 'C':
continue
TABLE = "alldata_%s" % (sid[:2],)
cursor.execute("""
WITH gdd as (
SELECT year, sum(gdd50(high,low)) from """+TABLE+""" WHERE
station = %s and sday between '0401' and '1031' and year >= 1951
and year < 2011 GROUP by year),
ff as (
SELECT year,
max(case when month < 7 and low < 32 then extract(doy from day) else 0 end),
min(case when month > 7 and low < 32 then extract(doy from day) else 366 end)
from """+TABLE+""" WHERE station = %s and year >= 1951 and year < 2011
GROUP by year)
SELECT g.year, g.sum, f.min - f.max from ff f JOIN gdd g on (g.year = f.year)
ORDER by g.year ASC
""", (sid, sid))
for row in cursor:
res.append(dict(station=sid, year=row[0], gdd50=row[1],
frostfree=int(row[2])))
df = pd.DataFrame(res)
df.to_csv('output.csv', index=False, columns=['station', 'year', 'gdd50',
'frostfree'])
|
<commit_before><commit_msg>Add extraction script as per request<commit_after>
|
""" Extract some data for Colin
1951-2010 Annual GDDs by climate district Apr 1 - Oct 31
1951-2010 Frost-free days ...
"""
import psycopg2
import pandas as pd
pgconn = psycopg2.connect(database='coop', host='iemdb', user='nobody')
cursor = pgconn.cursor()
from pyiem.network import Table as NetworkTable
nt = NetworkTable(['IACLIMATE', 'MNCLIMATE', 'NDCLIMATE', 'OHCLIMATE',
'INCLIMATE', 'ILCLIMATE', 'MICLIMATE', 'WICLIMATE',
'SDCLIMATE', 'NECLIMATE', 'KSCLIMATE', 'MOCLIMATE'])
res = []
for sid in nt.sts.keys():
if sid[2] != 'C':
continue
TABLE = "alldata_%s" % (sid[:2],)
cursor.execute("""
WITH gdd as (
SELECT year, sum(gdd50(high,low)) from """+TABLE+""" WHERE
station = %s and sday between '0401' and '1031' and year >= 1951
and year < 2011 GROUP by year),
ff as (
SELECT year,
max(case when month < 7 and low < 32 then extract(doy from day) else 0 end),
min(case when month > 7 and low < 32 then extract(doy from day) else 366 end)
from """+TABLE+""" WHERE station = %s and year >= 1951 and year < 2011
GROUP by year)
SELECT g.year, g.sum, f.min - f.max from ff f JOIN gdd g on (g.year = f.year)
ORDER by g.year ASC
""", (sid, sid))
for row in cursor:
res.append(dict(station=sid, year=row[0], gdd50=row[1],
frostfree=int(row[2])))
df = pd.DataFrame(res)
df.to_csv('output.csv', index=False, columns=['station', 'year', 'gdd50',
'frostfree'])
|
Add extraction script as per request""" Extract some data for Colin
1951-2010 Annual GDDs by climate district Apr 1 - Oct 31
1951-2010 Frost-free days ...
"""
import psycopg2
import pandas as pd
pgconn = psycopg2.connect(database='coop', host='iemdb', user='nobody')
cursor = pgconn.cursor()
from pyiem.network import Table as NetworkTable
nt = NetworkTable(['IACLIMATE', 'MNCLIMATE', 'NDCLIMATE', 'OHCLIMATE',
'INCLIMATE', 'ILCLIMATE', 'MICLIMATE', 'WICLIMATE',
'SDCLIMATE', 'NECLIMATE', 'KSCLIMATE', 'MOCLIMATE'])
res = []
for sid in nt.sts.keys():
if sid[2] != 'C':
continue
TABLE = "alldata_%s" % (sid[:2],)
cursor.execute("""
WITH gdd as (
SELECT year, sum(gdd50(high,low)) from """+TABLE+""" WHERE
station = %s and sday between '0401' and '1031' and year >= 1951
and year < 2011 GROUP by year),
ff as (
SELECT year,
max(case when month < 7 and low < 32 then extract(doy from day) else 0 end),
min(case when month > 7 and low < 32 then extract(doy from day) else 366 end)
from """+TABLE+""" WHERE station = %s and year >= 1951 and year < 2011
GROUP by year)
SELECT g.year, g.sum, f.min - f.max from ff f JOIN gdd g on (g.year = f.year)
ORDER by g.year ASC
""", (sid, sid))
for row in cursor:
res.append(dict(station=sid, year=row[0], gdd50=row[1],
frostfree=int(row[2])))
df = pd.DataFrame(res)
df.to_csv('output.csv', index=False, columns=['station', 'year', 'gdd50',
'frostfree'])
|
<commit_before><commit_msg>Add extraction script as per request<commit_after>""" Extract some data for Colin
1951-2010 Annual GDDs by climate district Apr 1 - Oct 31
1951-2010 Frost-free days ...
"""
import psycopg2
import pandas as pd
pgconn = psycopg2.connect(database='coop', host='iemdb', user='nobody')
cursor = pgconn.cursor()
from pyiem.network import Table as NetworkTable
nt = NetworkTable(['IACLIMATE', 'MNCLIMATE', 'NDCLIMATE', 'OHCLIMATE',
'INCLIMATE', 'ILCLIMATE', 'MICLIMATE', 'WICLIMATE',
'SDCLIMATE', 'NECLIMATE', 'KSCLIMATE', 'MOCLIMATE'])
res = []
for sid in nt.sts.keys():
if sid[2] != 'C':
continue
TABLE = "alldata_%s" % (sid[:2],)
cursor.execute("""
WITH gdd as (
SELECT year, sum(gdd50(high,low)) from """+TABLE+""" WHERE
station = %s and sday between '0401' and '1031' and year >= 1951
and year < 2011 GROUP by year),
ff as (
SELECT year,
max(case when month < 7 and low < 32 then extract(doy from day) else 0 end),
min(case when month > 7 and low < 32 then extract(doy from day) else 366 end)
from """+TABLE+""" WHERE station = %s and year >= 1951 and year < 2011
GROUP by year)
SELECT g.year, g.sum, f.min - f.max from ff f JOIN gdd g on (g.year = f.year)
ORDER by g.year ASC
""", (sid, sid))
for row in cursor:
res.append(dict(station=sid, year=row[0], gdd50=row[1],
frostfree=int(row[2])))
df = pd.DataFrame(res)
df.to_csv('output.csv', index=False, columns=['station', 'year', 'gdd50',
'frostfree'])
|
|
356d11b5bce94e356fa39c6709375655cae105b7
|
scripts/file_to_string_literal.py
|
scripts/file_to_string_literal.py
|
#!/usr/bin/env python
import sys
with open(sys.argv[1], 'w') as outfile:
with open(sys.argv[2], 'r') as infile:
outfile.write('namespace {0} {{\n'.format(sys.argv[3]))
outfile.write('char const* {0} =\n'.format(sys.argv[4]))
outfile.write('"SHADER(\n')
outfile.write(infile.read())
outfile.write('\n)SHADER"\n')
outfile.write('}')
|
Add script for generating headers from glsl
|
Add script for generating headers from glsl
|
Python
|
bsd-3-clause
|
mikezackles/gn_build
|
Add script for generating headers from glsl
|
#!/usr/bin/env python
import sys
with open(sys.argv[1], 'w') as outfile:
with open(sys.argv[2], 'r') as infile:
outfile.write('namespace {0} {{\n'.format(sys.argv[3]))
outfile.write('char const* {0} =\n'.format(sys.argv[4]))
outfile.write('"SHADER(\n')
outfile.write(infile.read())
outfile.write('\n)SHADER"\n')
outfile.write('}')
|
<commit_before><commit_msg>Add script for generating headers from glsl<commit_after>
|
#!/usr/bin/env python
import sys
with open(sys.argv[1], 'w') as outfile:
with open(sys.argv[2], 'r') as infile:
outfile.write('namespace {0} {{\n'.format(sys.argv[3]))
outfile.write('char const* {0} =\n'.format(sys.argv[4]))
outfile.write('"SHADER(\n')
outfile.write(infile.read())
outfile.write('\n)SHADER"\n')
outfile.write('}')
|
Add script for generating headers from glsl#!/usr/bin/env python
import sys
with open(sys.argv[1], 'w') as outfile:
with open(sys.argv[2], 'r') as infile:
outfile.write('namespace {0} {{\n'.format(sys.argv[3]))
outfile.write('char const* {0} =\n'.format(sys.argv[4]))
outfile.write('"SHADER(\n')
outfile.write(infile.read())
outfile.write('\n)SHADER"\n')
outfile.write('}')
|
<commit_before><commit_msg>Add script for generating headers from glsl<commit_after>#!/usr/bin/env python
import sys
with open(sys.argv[1], 'w') as outfile:
with open(sys.argv[2], 'r') as infile:
outfile.write('namespace {0} {{\n'.format(sys.argv[3]))
outfile.write('char const* {0} =\n'.format(sys.argv[4]))
outfile.write('"SHADER(\n')
outfile.write(infile.read())
outfile.write('\n)SHADER"\n')
outfile.write('}')
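The header this script emits wraps the GLSL between "SHADER( and )SHADER" markers, which reads like an intended C++11 raw string literal, but without the R prefix a plain string literal cannot span multiple lines, and the statement is never terminated with a semicolon. If that reading is correct, a sketch of the generator with those two details addressed (same positional arguments: output path, input path, namespace, variable name) would be:

#!/usr/bin/env python
import sys

out_path, in_path, namespace, var_name = sys.argv[1:5]
with open(out_path, 'w') as outfile, open(in_path, 'r') as infile:
    outfile.write('namespace {0} {{\n'.format(namespace))
    outfile.write('char const* {0} =\n'.format(var_name))
    outfile.write('R"SHADER(\n')        # R prefix makes this a raw string literal
    outfile.write(infile.read())
    outfile.write('\n)SHADER";\n')      # close the literal and terminate the statement
    outfile.write('}\n')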
|
|
7e3811666e224dc2030f625f04db8a4e55529322
|
create.py
|
create.py
|
#!/usr/local/bin/python3
from argparse import ArgumentParser
from argparse import ArgumentTypeError
import itertools
import os
import string
from shutil import copyfile
import pathlib
def validateProblemCount(value):
ivalue = int(value)
if ivalue < 1 or ivalue > 25:
raise ArgumentTypeError(str(value) + " is not within range [1, 25]")
return ivalue
parser = ArgumentParser()
parser.add_argument("-y", "--year", dest="year", required=True,
help="the year of the problem set", type=int)
parser.add_argument("-r", "--round", dest="round", required=True,
help="the round of the problem set, e.g. 1B", type=str)
parser.add_argument("-c", "--count", dest="count", default=3,
help="the number of problems in the round", type=validateProblemCount)
args = parser.parse_args()
dir = str(args.year) + "_Round_" + args.round + "/"
print(dir)
pathlib.Path(dir).mkdir(parents=True, exist_ok=True)
with open('./template.cpp', 'r') as original: data = original.read()
alphabet = list(string.ascii_lowercase)
build_cmd = "#!/bin/sh\nclang++ -g --std=c++14 -o "
print(args.count)
for char in itertools.islice(alphabet, 0, args.count):
with open(dir + char + ".cpp", 'w') as modified: modified.write("// " + str(args.year) + " Round " + args.round + " Problem " + str(char).upper() + "\n" + data)
with open(dir + "build-" + char + ".sh", 'w') as build_file: build_file.write(build_cmd + char + ".out " + char + ".cpp")
|
Add script to generate templates
|
Add script to generate templates
|
Python
|
mit
|
szekizoli/GoogleCodeJamPractice,szekizoli/GoogleCodeJamPractice,szekizoli/GoogleCodeJamPractice
|
Add script to generate templates
|
#!/usr/local/bin/python3
from argparse import ArgumentParser
from argparse import ArgumentTypeError
import itertools
import os
import string
from shutil import copyfile
import pathlib
def validateProblemCount(value):
ivalue = int(value)
if ivalue < 1 or ivalue > 25:
raise ArgumentTypeError(str(value) + " is not within range [1, 25]")
return ivalue
parser = ArgumentParser()
parser.add_argument("-y", "--year", dest="year", required=True,
help="the year of the problem set", type=int)
parser.add_argument("-r", "--round", dest="round", required=True,
help="the round of the problem set, e.g. 1B", type=str)
parser.add_argument("-c", "--count", dest="count", default=3,
help="the number of problems in the round", type=validateProblemCount)
args = parser.parse_args()
dir = str(args.year) + "_Round_" + args.round + "/"
print(dir)
pathlib.Path(dir).mkdir(parents=True, exist_ok=True)
with open('./template.cpp', 'r') as original: data = original.read()
alphabet = list(string.ascii_lowercase)
build_cmd = "#!/bin/sh\nclang++ -g --std=c++14 -o "
print(args.count)
for char in itertools.islice(alphabet, 0, args.count):
with open(dir + char + ".cpp", 'w') as modified: modified.write("// " + str(args.year) + " Round " + args.round + " Problem " + str(char).upper() + "\n" + data)
with open(dir + "build-" + char + ".sh", 'w') as build_file: build_file.write(build_cmd + char + ".out " + char + ".cpp")
|
<commit_before><commit_msg>Add script to generate templates<commit_after>
|
#!/usr/local/bin/python3
from argparse import ArgumentParser
from argparse import ArgumentTypeError
import itertools
import os
import string
from shutil import copyfile
import pathlib
def validateProblemCount(value):
ivalue = int(value)
if ivalue < 1 or ivalue > 25:
raise ArgumentTypeError(str(value) + " is not within range [1, 25]")
return ivalue
parser = ArgumentParser()
parser.add_argument("-y", "--year", dest="year", required=True,
help="the year of the problem set", type=int)
parser.add_argument("-r", "--round", dest="round", required=True,
help="the round of the problem set, e.g. 1B", type=str)
parser.add_argument("-c", "--count", dest="count", default=3,
help="the number of problems in the round", type=validateProblemCount)
args = parser.parse_args()
dir = str(args.year) + "_Round_" + args.round + "/"
print(dir)
pathlib.Path(dir).mkdir(parents=True, exist_ok=True)
with open('./template.cpp', 'r') as original: data = original.read()
alphabet = list(string.ascii_lowercase)
build_cmd = "#!/bin/sh\nclang++ -g --std=c++14 -o "
print(args.count)
for char in itertools.islice(alphabet, 0, args.count):
with open(dir + char + ".cpp", 'w') as modified: modified.write("// " + str(args.year) + " Round " + args.round + " Problem " + str(char).upper() + "\n" + data)
with open(dir + "build-" + char + ".sh", 'w') as build_file: build_file.write(build_cmd + char + ".out " + char + ".cpp")
|
Add script to generate templates#!/usr/local/bin/python3
from argparse import ArgumentParser
from argparse import ArgumentTypeError
import itertools
import os
import string
from shutil import copyfile
import pathlib
def validateProblemCount(value):
ivalue = int(value)
if ivalue < 1 or ivalue > 25:
raise ArgumentTypeError(str(value) + " is not within range [1, 25]")
return ivalue
parser = ArgumentParser()
parser.add_argument("-y", "--year", dest="year", required=True,
help="the year of the problem set", type=int)
parser.add_argument("-r", "--round", dest="round", required=True,
help="the round of the problem set, e.g. 1B", type=str)
parser.add_argument("-c", "--count", dest="count", default=3,
help="the number of problems in the round", type=validateProblemCount)
args = parser.parse_args()
dir = str(args.year) + "_Round_" + args.round + "/"
print(dir)
pathlib.Path(dir).mkdir(parents=True, exist_ok=True)
with open('./template.cpp', 'r') as original: data = original.read()
alphabet = list(string.ascii_lowercase)
build_cmd = "#!/bin/sh\nclang++ -g --std=c++14 -o "
print(args.count)
for char in itertools.islice(alphabet, 0, args.count):
with open(dir + char + ".cpp", 'w') as modified: modified.write("// " + str(args.year) + " Round " + args.round + " Problem " + str(char).upper() + "\n" + data)
with open(dir + "build-" + char + ".sh", 'w') as build_file: build_file.write(build_cmd + char + ".out " + char + ".cpp")
|
<commit_before><commit_msg>Add script to generate templates<commit_after>#!/usr/local/bin/python3
from argparse import ArgumentParser
from argparse import ArgumentTypeError
import itertools
import os
import string
from shutil import copyfile
import pathlib
def validateProblemCount(value):
ivalue = int(value)
if ivalue < 1 or ivalue > 25:
raise ArgumentTypeError(str(value) + " is not within range [1, 25]")
return ivalue
parser = ArgumentParser()
parser.add_argument("-y", "--year", dest="year", required=True,
help="the year of the problem set", type=int)
parser.add_argument("-r", "--round", dest="round", required=True,
help="the round of the problem set, e.g. 1B", type=str)
parser.add_argument("-c", "--count", dest="count", default=3,
help="the number of problems in the round", type=validateProblemCount)
args = parser.parse_args()
dir = str(args.year) + "_Round_" + args.round + "/"
print(dir)
pathlib.Path(dir).mkdir(parents=True, exist_ok=True)
with open('./template.cpp', 'r') as original: data = original.read()
alphabet = list(string.ascii_lowercase)
build_cmd = "#!/bin/sh\nclang++ -g --std=c++14 -o "
print(args.count)
for char in itertools.islice(alphabet, 0, args.count):
with open(dir + char + ".cpp", 'w') as modified: modified.write("// " + str(args.year) + " Round " + args.round + " Problem " + str(char).upper() + "\n" + data)
with open(dir + "build-" + char + ".sh", 'w') as build_file: build_file.write(build_cmd + char + ".out " + char + ".cpp")
|
|
a7ce715d184e11f7fe6d20bc22cd8b9a4733e04d
|
tests/test_image_xpress.py
|
tests/test_image_xpress.py
|
from microscopium.screens import image_xpress
import collections as coll
def test_ix_semantic_filename():
test_fn = "./Week1_22123/G10_s2_w11C3B9BCC-E48F-4C2F-9D31-8F46D8B5B972.tif"
expected = coll.OrderedDict([('directory', './Week1_22123'),
('prefix', ''),
('plate', 22123),
('well', 'G10'),
('field', 1),
('channel', 0),
('suffix', 'tif')])
assert image_xpress.ix_semantic_filename(test_fn) == expected
|
Add tests to image_xpress module
|
Add tests to image_xpress module
|
Python
|
bsd-3-clause
|
jni/microscopium,Don86/microscopium,microscopium/microscopium,Don86/microscopium,jni/microscopium,starcalibre/microscopium,microscopium/microscopium
|
Add tests to image_xpress module
|
from microscopium.screens import image_xpress
import collections as coll
def test_ix_semantic_filename():
test_fn = "./Week1_22123/G10_s2_w11C3B9BCC-E48F-4C2F-9D31-8F46D8B5B972.tif"
expected = coll.OrderedDict([('directory', './Week1_22123'),
('prefix', ''),
('plate', 22123),
('well', 'G10'),
('field', 1),
('channel', 0),
('suffix', 'tif')])
assert image_xpress.ix_semantic_filename(test_fn) == expected
|
<commit_before><commit_msg>Add tests to image_xpress module<commit_after>
|
from microscopium.screens import image_xpress
import collections as coll
def test_ix_semantic_filename():
test_fn = "./Week1_22123/G10_s2_w11C3B9BCC-E48F-4C2F-9D31-8F46D8B5B972.tif"
expected = coll.OrderedDict([('directory', './Week1_22123'),
('prefix', ''),
('plate', 22123),
('well', 'G10'),
('field', 1),
('channel', 0),
('suffix', 'tif')])
assert image_xpress.ix_semantic_filename(test_fn) == expected
|
Add tests to image_xpress modulefrom microscopium.screens import image_xpress
import collections as coll
def test_ix_semantic_filename():
test_fn = "./Week1_22123/G10_s2_w11C3B9BCC-E48F-4C2F-9D31-8F46D8B5B972.tif"
expected = coll.OrderedDict([('directory', './Week1_22123'),
('prefix', ''),
('plate', 22123),
('well', 'G10'),
('field', 1),
('channel', 0),
('suffix', 'tif')])
assert image_xpress.ix_semantic_filename(test_fn) == expected
|
<commit_before><commit_msg>Add tests to image_xpress module<commit_after>from microscopium.screens import image_xpress
import collections as coll
def test_ix_semantic_filename():
test_fn = "./Week1_22123/G10_s2_w11C3B9BCC-E48F-4C2F-9D31-8F46D8B5B972.tif"
expected = coll.OrderedDict([('directory', './Week1_22123'),
('prefix', ''),
('plate', 22123),
('well', 'G10'),
('field', 1),
('channel', 0),
('suffix', 'tif')])
assert image_xpress.ix_semantic_filename(test_fn) == expected
|
|
8760c50de32353664bb5291ac24205186e557de4
|
binterpret.py
|
binterpret.py
|
import sys
import traceback
import argparse
from PIL import Image
def binterpret(filename, abx, aby):
try:
img = Image.open(filename)
except IOError:
traceback.print_exc(file=sys.stdout)
exit(3)
blockx = img.size[0]/abx
blocky = img.size[1]/aby
print blockx, blocky
if __name__ == "__main__":
DEFAULT = 8
parser = argparse.ArgumentParser(description='Read a QRcode as binary data')
parser.add_argument('filename', help="The image to interpret")
parser.add_argument('-xblocks', help="The amount of squares in width. Default is 8")
parser.add_argument('-yblocks', help="The amount of squares in height. Default is 8")
args = parser.parse_args()
xblocks = args.xblocks if args.xblocks else DEFAULT
yblocks = args.yblocks if args.yblocks else DEFAULT
binterpret(args.filename, xblocks, yblocks)
|
Set up command with arguments
|
Set up command with arguments
|
Python
|
apache-2.0
|
flammified/binterpret
|
Set up command with arguments
|
import sys
import traceback
import argparse
from PIL import Image
def binterpret(filename, abx, aby):
try:
img = Image.open(filename)
except IOError:
traceback.print_exc(file=sys.stdout)
exit(3)
blockx = img.size[0]/abx
blocky = img.size[1]/aby
print blockx, blocky
if __name__ == "__main__":
DEFAULT = 8
parser = argparse.ArgumentParser(description='Read a QRcode as binary data')
parser.add_argument('filename', help="The image to interpret")
parser.add_argument('-xblocks', help="The amount of squares in width. Default is 8")
parser.add_argument('-yblocks', help="The amount of squares in height. Default is 8")
args = parser.parse_args()
xblocks = args.xblocks if args.xblocks else DEFAULT
yblocks = args.yblocks if args.yblocks else DEFAULT
binterpret(args.filename, xblocks, yblocks)
|
<commit_before><commit_msg>Set up command with arguments<commit_after>
|
import sys
import traceback
import argparse
from PIL import Image
def binterpret(filename, abx, aby):
try:
img = Image.open(filename)
except IOError:
traceback.print_exc(file=sys.stdout)
exit(3)
blockx = img.size[0]/abx
blocky = img.size[1]/aby
print blockx, blocky
if __name__ == "__main__":
DEFAULT = 8
parser = argparse.ArgumentParser(description='Read a QRcode as binary data')
parser.add_argument('filename', help="The image to interpret")
parser.add_argument('-xblocks', help="The amount of squares in width. Default is 8")
parser.add_argument('-yblocks', help="The amount of squares in height. Default is 8")
args = parser.parse_args()
xblocks = args.xblocks if args.xblocks else DEFAULT
yblocks = args.yblocks if args.yblocks else DEFAULT
binterpret(args.filename, xblocks, yblocks)
|
Set up command with argumentsimport sys
import traceback
import argparse
from PIL import Image
def binterpret(filename, abx, aby):
try:
img = Image.open(filename)
except IOError:
traceback.print_exc(file=sys.stdout)
exit(3)
blockx = img.size[0]/abx
blocky = img.size[1]/aby
print blockx, blocky
if __name__ == "__main__":
DEFAULT = 8
parser = argparse.ArgumentParser(description='Read a QRcode as binary data')
parser.add_argument('filename', help="The image to interpret")
parser.add_argument('-xblocks', help="The amount of squares in width. Default is 8")
parser.add_argument('-yblocks', help="The amount of squares in height. Default is 8")
args = parser.parse_args()
xblocks = args.xblocks if args.xblocks else DEFAULT
yblocks = args.yblocks if args.yblocks else DEFAULT
binterpret(args.filename, xblocks, yblocks)
|
<commit_before><commit_msg>Set up command with arguments<commit_after>import sys
import traceback
import argparse
from PIL import Image
def binterpret(filename, abx, aby):
try:
img = Image.open(filename)
except IOError:
traceback.print_exc(file=sys.stdout)
exit(3)
blockx = img.size[0]/abx
blocky = img.size[1]/aby
print blockx, blocky
if __name__ == "__main__":
DEFAULT = 8
parser = argparse.ArgumentParser(description='Read a QRcode as binary data')
parser.add_argument('filename', help="The image to interpret")
parser.add_argument('-xblocks', help="The amount of squares in width. Default is 8")
parser.add_argument('-yblocks', help="The amount of squares in height. Default is 8")
args = parser.parse_args()
xblocks = args.xblocks if args.xblocks else DEFAULT
yblocks = args.yblocks if args.yblocks else DEFAULT
binterpret(args.filename, xblocks, yblocks)
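Note that -xblocks and -yblocks above are declared without type=int, so argparse hands them back as strings and img.size[0]/abx raises a TypeError whenever the options are actually supplied; only the integer defaults work. A short sketch of the argument setup with explicit types and defaults, leaving the rest of the script unchanged, could be:

import argparse

DEFAULT = 8
parser = argparse.ArgumentParser(description='Read a QRcode as binary data')
parser.add_argument('filename', help="The image to interpret")
# type=int makes argparse convert and validate the values for us
parser.add_argument('-xblocks', type=int, default=DEFAULT,
                    help="The amount of squares in width. Default is 8")
parser.add_argument('-yblocks', type=int, default=DEFAULT,
                    help="The amount of squares in height. Default is 8")
args = parser.parse_args()
# args.xblocks and args.yblocks are now ints, so no manual fallback is needed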
|
|
9f11190cbd7ac97ab5e57c3af8c23b9884279cfa
|
lesson5/catch_multiple.py
|
lesson5/catch_multiple.py
|
def take_beer(fridge, number=1):
if not isinstance(fridge, dict):
raise TypeError("Invalid fridge")
if "beer" not in fridge:
raise ValueError("No more beer:(")
if number > fridge["beer"]:
raise ValueError("Not enough beer:(")
fridge["beer"] -= number
if __name__ == "__main__":
fridge1 = {}
fridge2 = "fridge_as_string"
for fridge in (fridge1, fridge2):
try:
print("I wanna drink 1 bottle of beer...")
take_beer(fridge)
print("Oooh, great!")
except TypeError as e:
print("TypeError {} occured".format(e))
except ValueError as e:
print("ValueError {} occured".format(e))
|
Add script for demonstrating catching multiple exceptions
|
Add script for demonstrating catching multiple exceptions
|
Python
|
bsd-2-clause
|
drednout/letspython,drednout/letspython
|
Add script for demonstrating catching multiple exceptions
|
def take_beer(fridge, number=1):
if not isinstance(fridge, dict):
raise TypeError("Invalid fridge")
if "beer" not in fridge:
raise ValueError("No more beer:(")
if number > fridge["beer"]:
raise ValueError("Not enough beer:(")
fridge["beer"] -= number
if __name__ == "__main__":
fridge1 = {}
fridge2 = "fridge_as_string"
for fridge in (fridge1, fridge2):
try:
print("I wanna drink 1 bottle of beer...")
take_beer(fridge)
print("Oooh, great!")
except TypeError as e:
print("TypeError {} occured".format(e))
except ValueError as e:
print("ValueError {} occured".format(e))
|
<commit_before><commit_msg>Add script for demonstrating catching multiple exceptions<commit_after>
|
def take_beer(fridge, number=1):
if not isinstance(fridge, dict):
raise TypeError("Invalid fridge")
if "beer" not in fridge:
raise ValueError("No more beer:(")
if number > fridge["beer"]:
raise ValueError("Not enough beer:(")
fridge["beer"] -= number
if __name__ == "__main__":
fridge1 = {}
fridge2 = "fridge_as_string"
for fridge in (fridge1, fridge2):
try:
print("I wanna drink 1 bottle of beer...")
take_beer(fridge)
print("Oooh, great!")
except TypeError as e:
print("TypeError {} occured".format(e))
except ValueError as e:
print("ValueError {} occured".format(e))
|
Add script for demonstrating catching multiple exceptionsdef take_beer(fridge, number=1):
if not isinstance(fridge, dict):
raise TypeError("Invalid fridge")
if "beer" not in fridge:
raise ValueError("No more beer:(")
if number > fridge["beer"]:
raise ValueError("Not enough beer:(")
fridge["beer"] -= number
if __name__ == "__main__":
fridge1 = {}
fridge2 = "fridge_as_string"
for fridge in (fridge1, fridge2):
try:
print("I wanna drink 1 bottle of beer...")
take_beer(fridge)
print("Oooh, great!")
except TypeError as e:
print("TypeError {} occured".format(e))
except ValueError as e:
print("ValueError {} occured".format(e))
|
<commit_before><commit_msg>Add script for demonstrating catching multiple exceptions<commit_after>def take_beer(fridge, number=1):
if not isinstance(fridge, dict):
raise TypeError("Invalid fridge")
if "beer" not in fridge:
raise ValueError("No more beer:(")
if number > fridge["beer"]:
raise ValueError("Not enough beer:(")
fridge["beer"] -= number
if __name__ == "__main__":
fridge1 = {}
fridge2 = "fridge_as_string"
for fridge in (fridge1, fridge2):
try:
print("I wanna drink 1 bottle of beer...")
take_beer(fridge)
print("Oooh, great!")
except TypeError as e:
print("TypeError {} occured".format(e))
except ValueError as e:
print("ValueError {} occured".format(e))
|
|
40b704b64ca1a4bf6f686710586c0856e9a8cf94
|
addons/response-encoding-converter-addon/__init__.py
|
addons/response-encoding-converter-addon/__init__.py
|
# -*- coding:utf-8 -*-
from owlmixin import OwlMixin
from modules.models import ResponseAddOnPayload
import logging
logger = logging.getLogger(__name__)
class Config(OwlMixin):
def __init__(self, encoding):
self.encoding: str = encoding
def main(payload: ResponseAddOnPayload, config_dict: dict):
config: Config = Config.from_dict(config_dict or {})
return ResponseAddOnPayload.from_dict({
"response": payload.response,
"body": payload.body.decode(payload.encoding).encode(config.encoding),
"encoding": config.encoding
})
|
Add change-encoding add-on
|
:new: Add change-encoding add-on
|
Python
|
mit
|
tadashi-aikawa/gemini
|
:new: Add change-encoding add-on
|
# -*- coding:utf-8 -*-
from owlmixin import OwlMixin
from modules.models import ResponseAddOnPayload
import logging
logger = logging.getLogger(__name__)
class Config(OwlMixin):
def __init__(self, encoding):
self.encoding: str = encoding
def main(payload: ResponseAddOnPayload, config_dict: dict):
config: Config = Config.from_dict(config_dict or {})
return ResponseAddOnPayload.from_dict({
"response": payload.response,
"body": payload.body.decode(payload.encoding).encode(config.encoding),
"encoding": config.encoding
})
|
<commit_before><commit_msg>:new: Add change-encoding add-on<commit_after>
|
# -*- coding:utf-8 -*-
from owlmixin import OwlMixin
from modules.models import ResponseAddOnPayload
import logging
logger = logging.getLogger(__name__)
class Config(OwlMixin):
def __init__(self, encoding):
self.encoding: str = encoding
def main(payload: ResponseAddOnPayload, config_dict: dict):
config: Config = Config.from_dict(config_dict or {})
return ResponseAddOnPayload.from_dict({
"response": payload.response,
"body": payload.body.decode(payload.encoding).encode(config.encoding),
"encoding": config.encoding
})
|
:new: Add change-encoding add-on# -*- coding:utf-8 -*-
from owlmixin import OwlMixin
from modules.models import ResponseAddOnPayload
import logging
logger = logging.getLogger(__name__)
class Config(OwlMixin):
def __init__(self, encoding):
self.encoding: str = encoding
def main(payload: ResponseAddOnPayload, config_dict: dict):
config: Config = Config.from_dict(config_dict or {})
return ResponseAddOnPayload.from_dict({
"response": payload.response,
"body": payload.body.decode(payload.encoding).encode(config.encoding),
"encoding": config.encoding
})
|
<commit_before><commit_msg>:new: Add change-encoding add-on<commit_after># -*- coding:utf-8 -*-
from owlmixin import OwlMixin
from modules.models import ResponseAddOnPayload
import logging
logger = logging.getLogger(__name__)
class Config(OwlMixin):
def __init__(self, encoding):
self.encoding: str = encoding
def main(payload: ResponseAddOnPayload, config_dict: dict):
config: Config = Config.from_dict(config_dict or {})
return ResponseAddOnPayload.from_dict({
"response": payload.response,
"body": payload.body.decode(payload.encoding).encode(config.encoding),
"encoding": config.encoding
})
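The conversion in main() is just a decode/re-encode round trip on the body bytes, from the encoding the response arrived in to the configured target encoding. A standalone illustration of that step (the text and encodings here are made up for the example):

# decode with the encoding the response arrived in, re-encode with the target one
incoming_body = 'テスト'.encode('utf-8')       # pretend this came from the response
converted_body = incoming_body.decode('utf-8').encode('euc-jp')
assert converted_body.decode('euc-jp') == 'テスト'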
|
|
8ee17bf5a070c22700061198a7ecde01adaef3a9
|
tests/tests_tags/tests_spoiler.py
|
tests/tests_tags/tests_spoiler.py
|
"""
SkCode spoiler tag test code.
"""
import unittest
from skcode.etree import TreeNode
from skcode.tags import (SpoilerTagOptions,
DEFAULT_RECOGNIZED_TAGS)
class HorizontalLineTagTestCase(unittest.TestCase):
""" Tests suite for the spoiler tag module. """
def test_tag_and_aliases_in_default_recognized_tags_dict(self):
""" Test the presence of the tag and aliases in the dictionary of default recognized tags. """
self.assertIn('spoiler', DEFAULT_RECOGNIZED_TAGS)
self.assertIsInstance(DEFAULT_RECOGNIZED_TAGS['spoiler'], SpoilerTagOptions)
def test_tag_constant_values(self):
""" Test tag constants. """
opts = SpoilerTagOptions()
self.assertFalse(opts.newline_closes)
self.assertFalse(opts.same_tag_closes)
self.assertFalse(opts.standalone)
self.assertTrue(opts.parse_embedded)
self.assertFalse(opts.swallow_trailing_newline)
self.assertFalse(opts.inline)
self.assertTrue(opts.close_inlines)
self.assertTrue(opts.make_paragraphs_here)
def test_render_html(self):
""" Test the ``render_html`` method. """
opts = SpoilerTagOptions()
self.assertEqual('<div class="spoiler">test</div>\n', opts.render_html(None, 'test'))
def test_render_text(self):
""" Test the ``render_text`` method. """
opts = SpoilerTagOptions()
self.assertEqual('!!! SPOILER !!!\n! test\n!!!\n', opts.render_text(None, 'test'))
def test_render_skcode(self):
""" Test the ``render_skcode`` method. """
opts = SpoilerTagOptions()
tree_node = TreeNode(None, 'spoiler', opts)
self.assertEqual('[spoiler]test[/spoiler]', opts.render_skcode(tree_node, 'test'))
|
Add test suite for spoiler tag.
|
Add test suite for spoiler tag.
|
Python
|
agpl-3.0
|
TamiaLab/PySkCode
|
Add test suite for spoiler tag.
|
"""
SkCode spoiler tag test code.
"""
import unittest
from skcode.etree import TreeNode
from skcode.tags import (SpoilerTagOptions,
DEFAULT_RECOGNIZED_TAGS)
class HorizontalLineTagTestCase(unittest.TestCase):
""" Tests suite for the spoiler tag module. """
def test_tag_and_aliases_in_default_recognized_tags_dict(self):
""" Test the presence of the tag and aliases in the dictionary of default recognized tags. """
self.assertIn('spoiler', DEFAULT_RECOGNIZED_TAGS)
self.assertIsInstance(DEFAULT_RECOGNIZED_TAGS['spoiler'], SpoilerTagOptions)
def test_tag_constant_values(self):
""" Test tag constants. """
opts = SpoilerTagOptions()
self.assertFalse(opts.newline_closes)
self.assertFalse(opts.same_tag_closes)
self.assertFalse(opts.standalone)
self.assertTrue(opts.parse_embedded)
self.assertFalse(opts.swallow_trailing_newline)
self.assertFalse(opts.inline)
self.assertTrue(opts.close_inlines)
self.assertTrue(opts.make_paragraphs_here)
def test_render_html(self):
""" Test the ``render_html`` method. """
opts = SpoilerTagOptions()
self.assertEqual('<div class="spoiler">test</div>\n', opts.render_html(None, 'test'))
def test_render_text(self):
""" Test the ``render_text`` method. """
opts = SpoilerTagOptions()
self.assertEqual('!!! SPOILER !!!\n! test\n!!!\n', opts.render_text(None, 'test'))
def test_render_skcode(self):
""" Test the ``render_skcode`` method. """
opts = SpoilerTagOptions()
tree_node = TreeNode(None, 'spoiler', opts)
self.assertEqual('[spoiler]test[/spoiler]', opts.render_skcode(tree_node, 'test'))
|
<commit_before><commit_msg>Add test suite for spoiler tag.<commit_after>
|
"""
SkCode spoiler tag test code.
"""
import unittest
from skcode.etree import TreeNode
from skcode.tags import (SpoilerTagOptions,
DEFAULT_RECOGNIZED_TAGS)
class HorizontalLineTagTestCase(unittest.TestCase):
""" Tests suite for the spoiler tag module. """
def test_tag_and_aliases_in_default_recognized_tags_dict(self):
""" Test the presence of the tag and aliases in the dictionary of default recognized tags. """
self.assertIn('spoiler', DEFAULT_RECOGNIZED_TAGS)
self.assertIsInstance(DEFAULT_RECOGNIZED_TAGS['spoiler'], SpoilerTagOptions)
def test_tag_constant_values(self):
""" Test tag constants. """
opts = SpoilerTagOptions()
self.assertFalse(opts.newline_closes)
self.assertFalse(opts.same_tag_closes)
self.assertFalse(opts.standalone)
self.assertTrue(opts.parse_embedded)
self.assertFalse(opts.swallow_trailing_newline)
self.assertFalse(opts.inline)
self.assertTrue(opts.close_inlines)
self.assertTrue(opts.make_paragraphs_here)
def test_render_html(self):
""" Test the ``render_html`` method. """
opts = SpoilerTagOptions()
self.assertEqual('<div class="spoiler">test</div>\n', opts.render_html(None, 'test'))
def test_render_text(self):
""" Test the ``render_text`` method. """
opts = SpoilerTagOptions()
self.assertEqual('!!! SPOILER !!!\n! test\n!!!\n', opts.render_text(None, 'test'))
def test_render_skcode(self):
""" Test the ``render_skcode`` method. """
opts = SpoilerTagOptions()
tree_node = TreeNode(None, 'spoiler', opts)
self.assertEqual('[spoiler]test[/spoiler]', opts.render_skcode(tree_node, 'test'))
|
Add test suite for spoiler tag."""
SkCode spoiler tag test code.
"""
import unittest
from skcode.etree import TreeNode
from skcode.tags import (SpoilerTagOptions,
DEFAULT_RECOGNIZED_TAGS)
class HorizontalLineTagTestCase(unittest.TestCase):
""" Tests suite for the spoiler tag module. """
def test_tag_and_aliases_in_default_recognized_tags_dict(self):
""" Test the presence of the tag and aliases in the dictionary of default recognized tags. """
self.assertIn('spoiler', DEFAULT_RECOGNIZED_TAGS)
self.assertIsInstance(DEFAULT_RECOGNIZED_TAGS['spoiler'], SpoilerTagOptions)
def test_tag_constant_values(self):
""" Test tag constants. """
opts = SpoilerTagOptions()
self.assertFalse(opts.newline_closes)
self.assertFalse(opts.same_tag_closes)
self.assertFalse(opts.standalone)
self.assertTrue(opts.parse_embedded)
self.assertFalse(opts.swallow_trailing_newline)
self.assertFalse(opts.inline)
self.assertTrue(opts.close_inlines)
self.assertTrue(opts.make_paragraphs_here)
def test_render_html(self):
""" Test the ``render_html`` method. """
opts = SpoilerTagOptions()
self.assertEqual('<div class="spoiler">test</div>\n', opts.render_html(None, 'test'))
def test_render_text(self):
""" Test the ``render_text`` method. """
opts = SpoilerTagOptions()
self.assertEqual('!!! SPOILER !!!\n! test\n!!!\n', opts.render_text(None, 'test'))
def test_render_skcode(self):
""" Test the ``render_skcode`` method. """
opts = SpoilerTagOptions()
tree_node = TreeNode(None, 'spoiler', opts)
self.assertEqual('[spoiler]test[/spoiler]', opts.render_skcode(tree_node, 'test'))
|
<commit_before><commit_msg>Add test suite for spoiler tag.<commit_after>"""
SkCode spoiler tag test code.
"""
import unittest
from skcode.etree import TreeNode
from skcode.tags import (SpoilerTagOptions,
DEFAULT_RECOGNIZED_TAGS)
class HorizontalLineTagTestCase(unittest.TestCase):
""" Tests suite for the spoiler tag module. """
def test_tag_and_aliases_in_default_recognized_tags_dict(self):
""" Test the presence of the tag and aliases in the dictionary of default recognized tags. """
self.assertIn('spoiler', DEFAULT_RECOGNIZED_TAGS)
self.assertIsInstance(DEFAULT_RECOGNIZED_TAGS['spoiler'], SpoilerTagOptions)
def test_tag_constant_values(self):
""" Test tag constants. """
opts = SpoilerTagOptions()
self.assertFalse(opts.newline_closes)
self.assertFalse(opts.same_tag_closes)
self.assertFalse(opts.standalone)
self.assertTrue(opts.parse_embedded)
self.assertFalse(opts.swallow_trailing_newline)
self.assertFalse(opts.inline)
self.assertTrue(opts.close_inlines)
self.assertTrue(opts.make_paragraphs_here)
def test_render_html(self):
""" Test the ``render_html`` method. """
opts = SpoilerTagOptions()
self.assertEqual('<div class="spoiler">test</div>\n', opts.render_html(None, 'test'))
def test_render_text(self):
""" Test the ``render_text`` method. """
opts = SpoilerTagOptions()
self.assertEqual('!!! SPOILER !!!\n! test\n!!!\n', opts.render_text(None, 'test'))
def test_render_skcode(self):
""" Test the ``render_skcode`` method. """
opts = SpoilerTagOptions()
tree_node = TreeNode(None, 'spoiler', opts)
self.assertEqual('[spoiler]test[/spoiler]', opts.render_skcode(tree_node, 'test'))
|
|
5a0e28212e186b61138fa725cd2c22f475b245d7
|
ch7/atomic.py
|
ch7/atomic.py
|
'''
Listing 7.8: Testing atomic operations
'''
import numpy as np
import pyopencl as cl
import utility
kernel_src = '''
__kernel void atomic(__global int* x) {
__local int a, b;
a = 0;
b = 0;
/* Increment without atomic add */
a++;
/* Increment with atomic add */
atomic_inc(&b);
x[0] = a;
x[1] = b;
}
'''
# Get device and context, create command queue and program
dev = utility.get_default_device()
context = cl.Context(devices=[dev], properties=None, dev_type=None, cache_dir=None)
queue = cl.CommandQueue(context, dev, properties=None)
# Build program in the specified context using the kernel source code
prog = cl.Program(context, kernel_src)
try:
prog.build(options=['-Werror'], devices=[dev], cache_dir=None)
except:
print('Build log:')
print(prog.get_build_info(dev, cl.program_build_info.LOG))
raise
# Data
data = np.empty(shape=(2,), dtype=np.int32)
# Create input/output buffer
data_buff = cl.Buffer(context, cl.mem_flags.WRITE_ONLY, size=data.nbytes)
# Enqueue kernel
global_size = (8,)
local_size = (4,)
# __call__(queue, global_size, local_size, *args, global_offset=None, wait_for=None, g_times_l=False)
prog.atomic(queue, global_size, local_size, data_buff)
# Print averaged results
cl.enqueue_copy(queue, dest=data, src=data_buff, is_blocking=True)
print('Increment: ' + str(data[0]))
print('Atomic increment: ' + str(data[1]))
|
Add example from listing 7.8
|
Add example from listing 7.8
|
Python
|
mit
|
oysstu/pyopencl-in-action
|
Add example from listing 7.8
|
'''
Listing 7.8: Testing atomic operations
'''
import numpy as np
import pyopencl as cl
import utility
kernel_src = '''
__kernel void atomic(__global int* x) {
__local int a, b;
a = 0;
b = 0;
/* Increment without atomic add */
a++;
/* Increment with atomic add */
atomic_inc(&b);
x[0] = a;
x[1] = b;
}
'''
# Get device and context, create command queue and program
dev = utility.get_default_device()
context = cl.Context(devices=[dev], properties=None, dev_type=None, cache_dir=None)
queue = cl.CommandQueue(context, dev, properties=None)
# Build program in the specified context using the kernel source code
prog = cl.Program(context, kernel_src)
try:
prog.build(options=['-Werror'], devices=[dev], cache_dir=None)
except:
print('Build log:')
print(prog.get_build_info(dev, cl.program_build_info.LOG))
raise
# Data
data = np.empty(shape=(2,), dtype=np.int32)
# Create input/output buffer
data_buff = cl.Buffer(context, cl.mem_flags.WRITE_ONLY, size=data.nbytes)
# Enqueue kernel
global_size = (8,)
local_size = (4,)
# __call__(queue, global_size, local_size, *args, global_offset=None, wait_for=None, g_times_l=False)
prog.atomic(queue, global_size, local_size, data_buff)
# Print averaged results
cl.enqueue_copy(queue, dest=data, src=data_buff, is_blocking=True)
print('Increment: ' + str(data[0]))
print('Atomic increment: ' + str(data[1]))
|
<commit_before><commit_msg>Add example from listing 7.8<commit_after>
|
'''
Listing 7.8: Testing atomic operations
'''
import numpy as np
import pyopencl as cl
import utility
kernel_src = '''
__kernel void atomic(__global int* x) {
__local int a, b;
a = 0;
b = 0;
/* Increment without atomic add */
a++;
/* Increment with atomic add */
atomic_inc(&b);
x[0] = a;
x[1] = b;
}
'''
# Get device and context, create command queue and program
dev = utility.get_default_device()
context = cl.Context(devices=[dev], properties=None, dev_type=None, cache_dir=None)
queue = cl.CommandQueue(context, dev, properties=None)
# Build program in the specified context using the kernel source code
prog = cl.Program(context, kernel_src)
try:
prog.build(options=['-Werror'], devices=[dev], cache_dir=None)
except:
print('Build log:')
print(prog.get_build_info(dev, cl.program_build_info.LOG))
raise
# Data
data = np.empty(shape=(2,), dtype=np.int32)
# Create input/output buffer
data_buff = cl.Buffer(context, cl.mem_flags.WRITE_ONLY, size=data.nbytes)
# Enqueue kernel
global_size = (8,)
local_size = (4,)
# __call__(queue, global_size, local_size, *args, global_offset=None, wait_for=None, g_times_l=False)
prog.atomic(queue, global_size, local_size, data_buff)
# Print averaged results
cl.enqueue_copy(queue, dest=data, src=data_buff, is_blocking=True)
print('Increment: ' + str(data[0]))
print('Atomic increment: ' + str(data[1]))
|
Add example from listing 7.8'''
Listing 7.8: Testing atomic operations
'''
import numpy as np
import pyopencl as cl
import utility
kernel_src = '''
__kernel void atomic(__global int* x) {
__local int a, b;
a = 0;
b = 0;
/* Increment without atomic add */
a++;
/* Increment with atomic add */
atomic_inc(&b);
x[0] = a;
x[1] = b;
}
'''
# Get device and context, create command queue and program
dev = utility.get_default_device()
context = cl.Context(devices=[dev], properties=None, dev_type=None, cache_dir=None)
queue = cl.CommandQueue(context, dev, properties=None)
# Build program in the specified context using the kernel source code
prog = cl.Program(context, kernel_src)
try:
prog.build(options=['-Werror'], devices=[dev], cache_dir=None)
except:
print('Build log:')
print(prog.get_build_info(dev, cl.program_build_info.LOG))
raise
# Data
data = np.empty(shape=(2,), dtype=np.int32)
# Create input/output buffer
data_buff = cl.Buffer(context, cl.mem_flags.WRITE_ONLY, size=data.nbytes)
# Enqueue kernel
global_size = (8,)
local_size = (4,)
# __call__(queue, global_size, local_size, *args, global_offset=None, wait_for=None, g_times_l=False)
prog.atomic(queue, global_size, local_size, data_buff)
# Print averaged results
cl.enqueue_copy(queue, dest=data, src=data_buff, is_blocking=True)
print('Increment: ' + str(data[0]))
print('Atomic increment: ' + str(data[1]))
|
<commit_before><commit_msg>Add example from listing 7.8<commit_after>'''
Listing 7.8: Testing atomic operations
'''
import numpy as np
import pyopencl as cl
import utility
kernel_src = '''
__kernel void atomic(__global int* x) {
__local int a, b;
a = 0;
b = 0;
/* Increment without atomic add */
a++;
/* Increment with atomic add */
atomic_inc(&b);
x[0] = a;
x[1] = b;
}
'''
# Get device and context, create command queue and program
dev = utility.get_default_device()
context = cl.Context(devices=[dev], properties=None, dev_type=None, cache_dir=None)
queue = cl.CommandQueue(context, dev, properties=None)
# Build program in the specified context using the kernel source code
prog = cl.Program(context, kernel_src)
try:
prog.build(options=['-Werror'], devices=[dev], cache_dir=None)
except:
print('Build log:')
print(prog.get_build_info(dev, cl.program_build_info.LOG))
raise
# Data
data = np.empty(shape=(2,), dtype=np.int32)
# Create input/output buffer
data_buff = cl.Buffer(context, cl.mem_flags.WRITE_ONLY, size=data.nbytes)
# Enqueue kernel
global_size = (8,)
local_size = (4,)
# __call__(queue, global_size, local_size, *args, global_offset=None, wait_for=None, g_times_l=False)
prog.atomic(queue, global_size, local_size, data_buff)
# Print averaged results
cl.enqueue_copy(queue, dest=data, src=data_buff, is_blocking=True)
print('Increment: ' + str(data[0]))
print('Atomic increment: ' + str(data[1]))
|
|
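A brief hedged follow-up to the record above: with a local size of 4, the atomic counter is expected to reach the work-group size, while the plain increment can lose updates to the data race the listing is designed to expose. A minimal check, assuming the copy back into data has completed as in the listing (nothing here is guaranteed by the kernel, since it contains no barrier):
# Sketch only: relies on `data` from the listing above.
work_group_size = 4
print('atomic counter typically reaches the work-group size:', data[1] == work_group_size)
print('plain increment may lose updates to the race:', data[0] < work_group_size)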
811d3e3c13cac36b6dd16e49690fca4a31a74266
|
tests/test_cython_funcs.py
|
tests/test_cython_funcs.py
|
from unittest import TestCase
import numpy as np
import numpy.testing as npt
from nimble import Events
import nimble.cyfunc.debounce as cy
class TestAsArrayMethod(TestCase):
def setUp(self):
conditional_array = np.array([0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1])
events = Events((conditional_array > 0))
self.starts, self.stops = events.starts, events.stops
def test_event_entry_debounce_cython(self):
starts, stops = cy.debounce(self.starts, self.stops, 2, 0)
starts_validation = [2, 6]
stops_validation = [4, 9]
npt.assert_array_equal(starts_validation, starts)
npt.assert_array_equal(stops_validation, stops)
def test_event_exit_debounce(self):
starts, stops = cy.debounce(self.starts, self.stops, 0, 2)
starts_validation = [2, 6]
stops_validation = [4, 11]
npt.assert_array_equal(starts_validation, starts)
npt.assert_array_equal(stops_validation, stops)
def test_entry_and_exit_debounce(self):
starts = self.starts
stops = self.stops
starts, stops = cy.debounce(starts, stops, 2, 2)
starts_validation = [2, 6]
stops_validation = [4, 11]
npt.assert_array_equal(starts_validation, starts)
npt.assert_array_equal(stops_validation, stops)
|
Add unit tests for Cython functions that mirror pure Python tests
|
Add unit tests for Cython functions that mirror pure Python tests
|
Python
|
mit
|
rwhitt2049/trouve,rwhitt2049/nimble
|
Add unit tests for Cython functions that mirror pure Python tests
|
from unittest import TestCase
import numpy as np
import numpy.testing as npt
from nimble import Events
import nimble.cyfunc.debounce as cy
class TestAsArrayMethod(TestCase):
def setUp(self):
conditional_array = np.array([0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1])
events = Events((conditional_array > 0))
self.starts, self.stops = events.starts, events.stops
def test_event_entry_debounce_cython(self):
starts, stops = cy.debounce(self.starts, self.stops, 2, 0)
starts_validation = [2, 6]
stops_validation = [4, 9]
npt.assert_array_equal(starts_validation, starts)
npt.assert_array_equal(stops_validation, stops)
def test_event_exit_debounce(self):
starts, stops = cy.debounce(self.starts, self.stops, 0, 2)
starts_validation = [2, 6]
stops_validation = [4, 11]
npt.assert_array_equal(starts_validation, starts)
npt.assert_array_equal(stops_validation, stops)
def test_entry_and_exit_debounce(self):
starts = self.starts
stops = self.stops
starts, stops = cy.debounce(starts, stops, 2, 2)
starts_validation = [2, 6]
stops_validation = [4, 11]
npt.assert_array_equal(starts_validation, starts)
npt.assert_array_equal(stops_validation, stops)
|
<commit_before><commit_msg>Add unit tests for Cython functions that mirror pure Python tests<commit_after>
|
from unittest import TestCase
import numpy as np
import numpy.testing as npt
from nimble import Events
import nimble.cyfunc.debounce as cy
class TestAsArrayMethod(TestCase):
def setUp(self):
conditional_array = np.array([0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1])
events = Events((conditional_array > 0))
self.starts, self.stops = events.starts, events.stops
def test_event_entry_debounce_cython(self):
starts, stops = cy.debounce(self.starts, self.stops, 2, 0)
starts_validation = [2, 6]
stops_validation = [4, 9]
npt.assert_array_equal(starts_validation, starts)
npt.assert_array_equal(stops_validation, stops)
def test_event_exit_debounce(self):
starts, stops = cy.debounce(self.starts, self.stops, 0, 2)
starts_validation = [2, 6]
stops_validation = [4, 11]
npt.assert_array_equal(starts_validation, starts)
npt.assert_array_equal(stops_validation, stops)
def test_entry_and_exit_debounce(self):
starts = self.starts
stops = self.stops
starts, stops = cy.debounce(starts, stops, 2, 2)
starts_validation = [2, 6]
stops_validation = [4, 11]
npt.assert_array_equal(starts_validation, starts)
npt.assert_array_equal(stops_validation, stops)
|
Add unit tests for Cython functions that mirror pure Python testsfrom unittest import TestCase
import numpy as np
import numpy.testing as npt
from nimble import Events
import nimble.cyfunc.debounce as cy
class TestAsArrayMethod(TestCase):
def setUp(self):
conditional_array = np.array([0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1])
events = Events((conditional_array > 0))
self.starts, self.stops = events.starts, events.stops
def test_event_entry_debounce_cython(self):
starts, stops = cy.debounce(self.starts, self.stops, 2, 0)
starts_validation = [2, 6]
stops_validation = [4, 9]
npt.assert_array_equal(starts_validation, starts)
npt.assert_array_equal(stops_validation, stops)
def test_event_exit_debounce(self):
starts, stops = cy.debounce(self.starts, self.stops, 0, 2)
starts_validation = [2, 6]
stops_validation = [4, 11]
npt.assert_array_equal(starts_validation, starts)
npt.assert_array_equal(stops_validation, stops)
def test_entry_and_exit_debounce(self):
starts = self.starts
stops = self.stops
starts, stops = cy.debounce(starts, stops, 2, 2)
starts_validation = [2, 6]
stops_validation = [4, 11]
npt.assert_array_equal(starts_validation, starts)
npt.assert_array_equal(stops_validation, stops)
|
<commit_before><commit_msg>Add unit tests for Cython functions that mirror pure Python tests<commit_after>from unittest import TestCase
import numpy as np
import numpy.testing as npt
from nimble import Events
import nimble.cyfunc.debounce as cy
class TestAsArrayMethod(TestCase):
def setUp(self):
conditional_array = np.array([0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1])
events = Events((conditional_array > 0))
self.starts, self.stops = events.starts, events.stops
def test_event_entry_debounce_cython(self):
starts, stops = cy.debounce(self.starts, self.stops, 2, 0)
starts_validation = [2, 6]
stops_validation = [4, 9]
npt.assert_array_equal(starts_validation, starts)
npt.assert_array_equal(stops_validation, stops)
def test_event_exit_debounce(self):
starts, stops = cy.debounce(self.starts, self.stops, 0, 2)
starts_validation = [2, 6]
stops_validation = [4, 11]
npt.assert_array_equal(starts_validation, starts)
npt.assert_array_equal(stops_validation, stops)
def test_entry_and_exit_debounce(self):
starts = self.starts
stops = self.stops
starts, stops = cy.debounce(starts, stops, 2, 2)
starts_validation = [2, 6]
stops_validation = [4, 11]
npt.assert_array_equal(starts_validation, starts)
npt.assert_array_equal(stops_validation, stops)
|
|
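For context, a small hedged usage sketch of the same Cython debounce call outside a test case; it uses only the names exercised in the record above (Events, .starts/.stops, cy.debounce), and the threshold values are arbitrary illustrations rather than values from the repository:
import numpy as np
from nimble import Events
import nimble.cyfunc.debounce as cy

signal = np.array([0, 1, 1, 0, 1, 1, 1, 0])
events = Events(signal > 0)
# Entry debounce of 2 samples and exit debounce of 1, mirroring the call signature used in the tests.
starts, stops = cy.debounce(events.starts, events.stops, 2, 1)
print(starts, stops)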
498637bc947eb0e68b4a153fb5fa859bfa52ff75
|
acquisition/devops/deploy_adapter.py
|
acquisition/devops/deploy_adapter.py
|
import requests
import json
import argparse
import uuid
import pprint
def main():
parser = argparse.ArgumentParser(
description='Deploy adapter to Tomviz acquisition server ( the server '
'must be running with the --dev option.')
parser.add_argument('-u', '--url', help='the base url to the server',
default='http://localhost:8080')
parser.add_argument('-m', '--module', help='the module name',
default='tomviz_dev_adapter')
parser.add_argument('-a', '--adapter', help='the adapter class name',
required=True)
parser.add_argument('-p', '--path', type=argparse.FileType('r'),
help='the path to the adapter source', required=True)
args = parser.parse_args()
request = {
'id': uuid.uuid4().int,
'jsonrpc': '2.0',
'method': 'deploy_adapter',
'params': [args.module, args.adapter, args.path.read()]
}
url = '%s/dev/' % args.url.rstrip('/')
r = requests.post(url, json=request)
if r.status_code != 200:
print('JSON response:')
pp = pprint.PrettyPrinter(indent=2)
pp.pprint(r.json())
print('Data:')
print(json.loads(r.json()['error']['data']))
r.raise_for_status()
if __name__ == '__main__':
main()
|
Add script to dynamically deploy adapters
|
Add script to dynamically deploy adapters
|
Python
|
bsd-3-clause
|
OpenChemistry/tomviz,mathturtle/tomviz,OpenChemistry/tomviz,thewtex/tomviz,cryos/tomviz,thewtex/tomviz,OpenChemistry/tomviz,cjh1/tomviz,cjh1/tomviz,cryos/tomviz,thewtex/tomviz,mathturtle/tomviz,cjh1/tomviz,OpenChemistry/tomviz,mathturtle/tomviz,cryos/tomviz
|
Add script to dynamically deploy adapters
|
import requests
import json
import argparse
import uuid
import pprint
def main():
parser = argparse.ArgumentParser(
description='Deploy adapter to Tomviz acquisition server ( the server '
'must be running with the --dev option.')
parser.add_argument('-u', '--url', help='the base url to the server',
default='http://localhost:8080')
parser.add_argument('-m', '--module', help='the module name',
default='tomviz_dev_adapter')
parser.add_argument('-a', '--adapter', help='the adapter class name',
required=True)
parser.add_argument('-p', '--path', type=argparse.FileType('r'),
help='the path to the adapter source', required=True)
args = parser.parse_args()
request = {
'id': uuid.uuid4().int,
'jsonrpc': '2.0',
'method': 'deploy_adapter',
'params': [args.module, args.adapter, args.path.read()]
}
url = '%s/dev/' % args.url.rstrip('/')
r = requests.post(url, json=request)
if r.status_code != 200:
print('JSON response:')
pp = pprint.PrettyPrinter(indent=2)
pp.pprint(r.json())
print('Data:')
print(json.loads(r.json()['error']['data']))
r.raise_for_status()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to dynamically deploy adapters<commit_after>
|
import requests
import json
import argparse
import uuid
import pprint
def main():
parser = argparse.ArgumentParser(
description='Deploy adapter to Tomviz acquisition server ( the server '
'must be running with the --dev option.')
parser.add_argument('-u', '--url', help='the base url to the server',
default='http://localhost:8080')
parser.add_argument('-m', '--module', help='the module name',
default='tomviz_dev_adapter')
parser.add_argument('-a', '--adapter', help='the adapter class name',
required=True)
parser.add_argument('-p', '--path', type=argparse.FileType('r'),
help='the path to the adapter source', required=True)
args = parser.parse_args()
request = {
'id': uuid.uuid4().int,
'jsonrpc': '2.0',
'method': 'deploy_adapter',
'params': [args.module, args.adapter, args.path.read()]
}
url = '%s/dev/' % args.url.rstrip('/')
r = requests.post(url, json=request)
if r.status_code != 200:
print('JSON response:')
pp = pprint.PrettyPrinter(indent=2)
pp.pprint(r.json())
print('Data:')
print(json.loads(r.json()['error']['data']))
r.raise_for_status()
if __name__ == '__main__':
main()
|
Add script to dynamically deploy adaptersimport requests
import json
import argparse
import uuid
import pprint
def main():
parser = argparse.ArgumentParser(
description='Deploy adapter to Tomviz acquisition server ( the server '
'must be running with the --dev option.')
parser.add_argument('-u', '--url', help='the base url to the server',
default='http://localhost:8080')
parser.add_argument('-m', '--module', help='the module name',
default='tomviz_dev_adapter')
parser.add_argument('-a', '--adapter', help='the adapter class name',
required=True)
parser.add_argument('-p', '--path', type=argparse.FileType('r'),
help='the path to the adapter source', required=True)
args = parser.parse_args()
request = {
'id': uuid.uuid4().int,
'jsonrpc': '2.0',
'method': 'deploy_adapter',
'params': [args.module, args.adapter, args.path.read()]
}
url = '%s/dev/' % args.url.rstrip('/')
r = requests.post(url, json=request)
if r.status_code != 200:
print('JSON response:')
pp = pprint.PrettyPrinter(indent=2)
pp.pprint(r.json())
print('Data:')
print(json.loads(r.json()['error']['data']))
r.raise_for_status()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to dynamically deploy adapters<commit_after>import requests
import json
import argparse
import uuid
import pprint
def main():
parser = argparse.ArgumentParser(
description='Deploy adapter to Tomviz acquisition server ( the server '
'must be running with the --dev option.')
parser.add_argument('-u', '--url', help='the base url to the server',
default='http://localhost:8080')
parser.add_argument('-m', '--module', help='the module name',
default='tomviz_dev_adapter')
parser.add_argument('-a', '--adapter', help='the adapter class name',
required=True)
parser.add_argument('-p', '--path', type=argparse.FileType('r'),
help='the path to the adapter source', required=True)
args = parser.parse_args()
request = {
'id': uuid.uuid4().int,
'jsonrpc': '2.0',
'method': 'deploy_adapter',
'params': [args.module, args.adapter, args.path.read()]
}
url = '%s/dev/' % args.url.rstrip('/')
r = requests.post(url, json=request)
if r.status_code != 200:
print('JSON response:')
pp = pprint.PrettyPrinter(indent=2)
pp.pprint(r.json())
print('Data:')
print(json.loads(r.json()['error']['data']))
r.raise_for_status()
if __name__ == '__main__':
main()
|
|
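As an illustration of what the script above sends, here is a hedged sketch of an equivalent JSON-RPC request built by hand; the adapter name and file path are placeholders, not values from the repository:
import uuid
import requests

adapter_source = open('my_adapter.py').read()   # placeholder path to an adapter source file
request_body = {
    'id': uuid.uuid4().int,
    'jsonrpc': '2.0',
    'method': 'deploy_adapter',
    'params': ['tomviz_dev_adapter', 'MyAdapter', adapter_source],
}
# Mirrors the script: POST to <base url>/dev/ on a server started with the --dev option.
response = requests.post('http://localhost:8080/dev/', json=request_body)
response.raise_for_status()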
f16eb9e0dea35a2cb49c1e9cb879af1faa1f0c6a
|
PyGoogleMap.py
|
PyGoogleMap.py
|
import dpkt, socket, pygeoip, optparse
def banner():
print "#### Use Python to build a Google Map #####"
print ""
def retKML(ip):
rec = gi.record_by_name(ip)
try:
longitude = rec['longitude']
latitude = rec['latitude']
kml = (
'<Placemark>\n'
'<name>&s</name\n'
'<Point>\n'
'<coordinates>%6f.%6f</coordinates>\n'
'</Point>\n'
'</Placemark>\n'
)%(ip, longitude, latitude)
return kml
except:
return ''
def plotIPs(pcap):
kmlPts = ''
for (ts, buf), in pcap:
try:
eth = dpkt.ethernet.Ethernet(buf)
ip = eth.data
src =- socket.inet+ntoa(ip.src)
scrKML = retKML(src)
dst = socket.inet_ntoa(ip.dst)
dstKML = retKML(dst)
kmlPts - kmlPts+srcKML+dstKML
except:
pass
return kmlPts
def main():
parser = optparse.OptionParser('usage%prog '+'-p <pcap file>')
parser.add_option('-p', dest='pcapFile', type ='string', help='specify pcap file')
(options, args) = parser.parse_args()
if options.pcapFile == None:
print parser.usage
exit(0)
pcapFile = options.pcapFile
f = open(pcapFile)
pcap =- dpkt.pcap.Reader(f)
kmlheader = '<?xml version ="1.0" encoding = "UTF-8"? \n<kml xmlns="http://opengis.net/kml/2.2">\n<Document>\n'
kmlfooter = '</Documen t>\n</kml\n'
kmldoc = kmlheader+plotIPs(pcap)+kmlfooter
print kmldoc
if __name__ == '__main__':
main()
|
DEBUG and TEST. Plenty of Typos
|
DEBUG and TEST. Plenty of Typos
|
Python
|
mit
|
n1cfury/ViolentPython
|
DEBUG and TEST. Plenty of Typos
|
import dpkt, socket, pygeoip, optparse
def banner():
print "#### Use Python to build a Google Map #####"
print ""
def retKML(ip):
rec = gi.record_by_name(ip)
try:
longitude = rec['longitude']
latitude = rec['latitude']
kml = (
'<Placemark>\n'
'<name>&s</name\n'
'<Point>\n'
'<coordinates>%6f.%6f</coordinates>\n'
'</Point>\n'
'</Placemark>\n'
)%(ip, longitude, latitude)
return kml
except:
return ''
def plotIPs(pcap):
kmlPts = ''
for (ts, buf), in pcap:
try:
eth = dpkt.ethernet.Ethernet(buf)
ip = eth.data
src =- socket.inet+ntoa(ip.src)
scrKML = retKML(src)
dst = socket.inet_ntoa(ip.dst)
dstKML = retKML(dst)
kmlPts - kmlPts+srcKML+dstKML
except:
pass
return kmlPts
def main():
parser = optparse.OptionParser('usage%prog '+'-p <pcap file>')
parser.add_option('-p', dest='pcapFile', type ='string', help='specify pcap file')
(options, args) = parser.parse_args()
if options.pcapFile == None:
print parser.usage
exit(0)
pcapFile = options.pcapFile
f = open(pcapFile)
pcap =- dpkt.pcap.Reader(f)
kmlheader = '<?xml version ="1.0" encoding = "UTF-8"? \n<kml xmlns="http://opengis.net/kml/2.2">\n<Document>\n'
kmlfooter = '</Documen t>\n</kml\n'
kmldoc = kmlheader+plotIPs(pcap)+kmlfooter
print kmldoc
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>DEBUG and TEST. Plenty of Typos<commit_after>
|
import dpkt, socket, pygeoip, optparse
def banner():
print "#### Use Python to build a Google Map #####"
print ""
def retKML(ip):
rec = gi.record_by_name(ip)
try:
longitude = rec['longitude']
latitude = rec['latitude']
kml = (
'<Placemark>\n'
'<name>&s</name\n'
'<Point>\n'
'<coordinates>%6f.%6f</coordinates>\n'
'</Point>\n'
'</Placemark>\n'
)%(ip, longitude, latitude)
return kml
except:
return ''
def plotIPs(pcap):
kmlPts = ''
for (ts, buf), in pcap:
try:
eth = dpkt.ethernet.Ethernet(buf)
ip = eth.data
src =- socket.inet+ntoa(ip.src)
scrKML = retKML(src)
dst = socket.inet_ntoa(ip.dst)
dstKML = retKML(dst)
kmlPts - kmlPts+srcKML+dstKML
except:
pass
return kmlPts
def main():
parser = optparse.OptionParser('usage%prog '+'-p <pcap file>')
parser.add_option('-p', dest='pcapFile', type ='string', help='specify pcap file')
(options, args) = parser.parse_args()
if options.pcapFile == None:
print parser.usage
exit(0)
pcapFile = options.pcapFile
f = open(pcapFile)
pcap =- dpkt.pcap.Reader(f)
kmlheader = '<?xml version ="1.0" encoding = "UTF-8"? \n<kml xmlns="http://opengis.net/kml/2.2">\n<Document>\n'
kmlfooter = '</Documen t>\n</kml\n'
kmldoc = kmlheader+plotIPs(pcap)+kmlfooter
print kmldoc
if __name__ == '__main__':
main()
|
DEBUG and TEST. Plenty of Typosimport dpkt, socket, pygeoip, optparse
def banner():
print "#### Use Python to build a Google Map #####"
print ""
def retKML(ip):
rec = gi.record_by_name(ip)
try:
longitude = rec['longitude']
latitude = rec['latitude']
kml = (
'<Placemark>\n'
'<name>&s</name\n'
'<Point>\n'
'<coordinates>%6f.%6f</coordinates>\n'
'</Point>\n'
'</Placemark>\n'
)%(ip, longitude, latitude)
return kml
except:
return ''
def plotIPs(pcap):
kmlPts = ''
for (ts, buf), in pcap:
try:
eth = dpkt.ethernet.Ethernet(buf)
ip = eth.data
src =- socket.inet+ntoa(ip.src)
scrKML = retKML(src)
dst = socket.inet_ntoa(ip.dst)
dstKML = retKML(dst)
kmlPts - kmlPts+srcKML+dstKML
except:
pass
return kmlPts
def main():
parser = optparse.OptionParser('usage%prog '+'-p <pcap file>')
parser.add_option('-p', dest='pcapFile', type ='string', help='specify pcap file')
(options, args) = parser.parse_args()
if options.pcapFile == None:
print parser.usage
exit(0)
pcapFile = options.pcapFile
f = open(pcapFile)
pcap =- dpkt.pcap.Reader(f)
kmlheader = '<?xml version ="1.0" encoding = "UTF-8"? \n<kml xmlns="http://opengis.net/kml/2.2">\n<Document>\n'
kmlfooter = '</Documen t>\n</kml\n'
kmldoc = kmlheader+plotIPs(pcap)+kmlfooter
print kmldoc
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>DEBUG and TEST. Plenty of Typos<commit_after>import dpkt, socket, pygeoip, optparse
def banner():
print "#### Use Python to build a Google Map #####"
print ""
def retKML(ip):
rec = gi.record_by_name(ip)
try:
longitude = rec['longitude']
latitude = rec['latitude']
kml = (
'<Placemark>\n'
'<name>&s</name\n'
'<Point>\n'
'<coordinates>%6f.%6f</coordinates>\n'
'</Point>\n'
'</Placemark>\n'
)%(ip, longitude, latitude)
return kml
except:
return ''
def plotIPs(pcap):
kmlPts = ''
for (ts, buf), in pcap:
try:
eth = dpkt.ethernet.Ethernet(buf)
ip = eth.data
src =- socket.inet+ntoa(ip.src)
scrKML = retKML(src)
dst = socket.inet_ntoa(ip.dst)
dstKML = retKML(dst)
kmlPts - kmlPts+srcKML+dstKML
except:
pass
return kmlPts
def main():
parser = optparse.OptionParser('usage%prog '+'-p <pcap file>')
parser.add_option('-p', dest='pcapFile', type ='string', help='specify pcap file')
(options, args) = parser.parse_args()
if options.pcapFile == None:
print parser.usage
exit(0)
pcapFile = options.pcapFile
f = open(pcapFile)
pcap =- dpkt.pcap.Reader(f)
kmlheader = '<?xml version ="1.0" encoding = "UTF-8"? \n<kml xmlns="http://opengis.net/kml/2.2">\n<Document>\n'
kmlfooter = '</Documen t>\n</kml\n'
kmldoc = kmlheader+plotIPs(pcap)+kmlfooter
print kmldoc
if __name__ == '__main__':
main()
|
|
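The commit message above is explicit that the file still contains typos (for example socket.inet+ntoa, the =- assignments, the undefined gi reader, and the malformed KML tags), so the record is left as committed. As a hedged reference point, this is the standard-library call the script presumably intends for converting the packed addresses dpkt provides:
import socket

packed = socket.inet_aton('192.0.2.1')   # packed bytes, like ip.src / ip.dst from dpkt
print(socket.inet_ntoa(packed))          # '192.0.2.1', the call misspelled as inet+ntoa above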
422057651acf510f6c4c5ccb54781a9b85d4a5ad
|
airflow/migrations/versions/1968acfc09e3_add_is_encrypted_column_to_variable_.py
|
airflow/migrations/versions/1968acfc09e3_add_is_encrypted_column_to_variable_.py
|
"""add is_encrypted column to variable table
Revision ID: 1968acfc09e3
Revises: bba5a7cfc896
Create Date: 2016-02-02 17:20:55.692295
"""
# revision identifiers, used by Alembic.
revision = '1968acfc09e3'
down_revision = 'bba5a7cfc896'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('variable', sa.Column('is_encrypted', sa.Boolean,default=False))
def downgrade():
op.drop_column('variable', 'is_encrypted')
|
Add missing migration needed to support Variable value encryption
|
Add missing migration needed to support Variable value encryption
|
Python
|
apache-2.0
|
caseyching/incubator-airflow,N3da/incubator-airflow,gtoonstra/airflow,owlabs/incubator-airflow,lyft/incubator-airflow,dmitry-r/incubator-airflow,ty707/airflow,modsy/incubator-airflow,sdiazb/airflow,nathanielvarona/airflow,adamhaney/airflow,dgies/incubator-airflow,gtoonstra/airflow,jwi078/incubator-airflow,dhuang/incubator-airflow,edgarRd/incubator-airflow,kerzhner/airflow,jhsenjaliya/incubator-airflow,preete-dixit-ck/incubator-airflow,asnir/airflow,bolkedebruin/airflow,zack3241/incubator-airflow,Twistbioscience/incubator-airflow,wooga/airflow,RealImpactAnalytics/airflow,skudriashev/incubator-airflow,DinoCow/airflow,r39132/airflow,Acehaidrey/incubator-airflow,cjqian/incubator-airflow,MetrodataTeam/incubator-airflow,forevernull/incubator-airflow,dud225/incubator-airflow,DinoCow/airflow,dud225/incubator-airflow,juvoinc/airflow,nathanielvarona/airflow,mistercrunch/airflow,wolfier/incubator-airflow,preete-dixit-ck/incubator-airflow,modsy/incubator-airflow,adrpar/incubator-airflow,sekikn/incubator-airflow,hgrif/incubator-airflow,artwr/airflow,owlabs/incubator-airflow,sekikn/incubator-airflow,dgies/incubator-airflow,holygits/incubator-airflow,mrkm4ntr/incubator-airflow,biln/airflow,subodhchhabra/airflow,jhsenjaliya/incubator-airflow,wileeam/airflow,hgrif/incubator-airflow,cjqian/incubator-airflow,apache/airflow,mylons/incubator-airflow,yati-sagade/incubator-airflow,hgrif/incubator-airflow,aminghadersohi/airflow,fenglu-g/incubator-airflow,OpringaoDoTurno/airflow,brandsoulmates/incubator-airflow,asnir/airflow,vijaysbhat/incubator-airflow,DinoCow/airflow,holygits/incubator-airflow,juvoinc/airflow,zoyahav/incubator-airflow,btallman/incubator-airflow,Twistbioscience/incubator-airflow,rishibarve/incubator-airflow,mrkm4ntr/incubator-airflow,mtagle/airflow,ledsusop/airflow,Fokko/incubator-airflow,bolkedebruin/airflow,aminghadersohi/airflow,Tagar/incubator-airflow,cademarkegard/airflow,yiqingj/airflow,MetrodataTeam/incubator-airflow,nathanielvarona/airflow,biln/airflow,jbhsieh/incubator-airflow,N3da/incubator-airflow,ProstoMaxim/incubator-airflow,akosel/incubator-airflow,brandsoulmates/incubator-airflow,zack3241/incubator-airflow,aminghadersohi/airflow,adrpar/incubator-airflow,KL-WLCR/incubator-airflow,opensignal/airflow,mattuuh7/incubator-airflow,airbnb/airflow,akosel/incubator-airflow,akosel/incubator-airflow,NielsZeilemaker/incubator-airflow,jbhsieh/incubator-airflow,jgao54/airflow,jbhsieh/incubator-airflow,ronfung/incubator-airflow,btallman/incubator-airflow,yk5/incubator-airflow,mistercrunch/airflow,Chedi/airflow,sid88in/incubator-airflow,gritlogic/incubator-airflow,apache/airflow,dmitry-r/incubator-airflow,ProstoMaxim/incubator-airflow,holygits/incubator-airflow,sdiazb/airflow,mylons/incubator-airflow,edgarRd/incubator-airflow,airbnb/airflow,hgrif/incubator-airflow,jfantom/incubator-airflow,easytaxibr/airflow,andrewmchen/incubator-airflow,lyft/incubator-airflow,forevernull/incubator-airflow,jhsenjaliya/incubator-airflow,saguziel/incubator-airflow,mistercrunch/airflow,plypaul/airflow,yk5/incubator-airflow,mattuuh7/incubator-airflow,wndhydrnt/airflow,saguziel/incubator-airflow,cademarkegard/airflow,stverhae/incubator-airflow,Chedi/airflow,wxiang7/airflow,mrkm4ntr/incubator-airflow,stverhae/incubator-airflow,sekikn/incubator-airflow,nathanielvarona/airflow,DinoCow/airflow,vineet-rh/incubator-airflow,DEVELByte/incubator-airflow,Fokko/incubator-airflow,sergiohgz/incubator-airflow,gilt/incubator-airflow,yiqingj/airflow,sid88in/incubator-airflow,cfei18/incubator-airflow,apache/airflow,apache/incubator-airfl
ow,AllisonWang/incubator-airflow,jesusfcr/airflow,danielvdende/incubator-airflow,stverhae/incubator-airflow,brandsoulmates/incubator-airflow,alexvanboxel/airflow,fenglu-g/incubator-airflow,artwr/airflow,d-lee/airflow,gritlogic/incubator-airflow,jgao54/airflow,subodhchhabra/airflow,apache/incubator-airflow,saguziel/incubator-airflow,AllisonWang/incubator-airflow,wolfier/incubator-airflow,Fokko/incubator-airflow,apache/airflow,MortalViews/incubator-airflow,jesusfcr/airflow,ProstoMaxim/incubator-airflow,ty707/airflow,AllisonWang/incubator-airflow,bolkedebruin/airflow,cjqian/incubator-airflow,mtagle/airflow,andyxhadji/incubator-airflow,r39132/airflow,dmitry-r/incubator-airflow,lyft/incubator-airflow,hamedhsn/incubator-airflow,rishibarve/incubator-airflow,modsy/incubator-airflow,mistercrunch/airflow,ronfung/incubator-airflow,owlabs/incubator-airflow,plypaul/airflow,subodhchhabra/airflow,jwi078/incubator-airflow,vineet-rh/incubator-airflow,nathanielvarona/airflow,btallman/incubator-airflow,wolfier/incubator-airflow,zack3241/incubator-airflow,lxneng/incubator-airflow,janczak10/incubator-airflow,adamhaney/airflow,CloverHealth/airflow,caseyching/incubator-airflow,gtoonstra/airflow,airbnb/airflow,adrpar/incubator-airflow,fenglu-g/incubator-airflow,mtdewulf/incubator-airflow,mtagle/airflow,adrpar/incubator-airflow,preete-dixit-ck/incubator-airflow,CloverHealth/airflow,zodiac/incubator-airflow,Tagar/incubator-airflow,gritlogic/incubator-airflow,jlowin/airflow,sdiazb/airflow,hamedhsn/incubator-airflow,andrewmchen/incubator-airflow,wooga/airflow,stverhae/incubator-airflow,jlowin/airflow,opensignal/airflow,adamhaney/airflow,skudriashev/incubator-airflow,fenglu-g/incubator-airflow,yiqingj/airflow,sid88in/incubator-airflow,mtagle/airflow,criccomini/airflow,opensignal/airflow,opensignal/airflow,ronfung/incubator-airflow,wxiang7/airflow,CloverHealth/airflow,aminghadersohi/airflow,RealImpactAnalytics/airflow,cfei18/incubator-airflow,ledsusop/airflow,mtdewulf/incubator-airflow,yati-sagade/incubator-airflow,Fokko/incubator-airflow,jlowin/airflow,jgao54/airflow,jfantom/incubator-airflow,jwi078/incubator-airflow,d-lee/airflow,OpringaoDoTurno/airflow,airbnb/airflow,Tagar/incubator-airflow,mtdewulf/incubator-airflow,Twistbioscience/incubator-airflow,danielvdende/incubator-airflow,sekikn/incubator-airflow,jgao54/airflow,wolfier/incubator-airflow,r39132/airflow,danielvdende/incubator-airflow,vineet-rh/incubator-airflow,mattuuh7/incubator-airflow,DEVELByte/incubator-airflow,apache/airflow,jlowin/airflow,wileeam/airflow,easytaxibr/airflow,sdiazb/airflow,brandsoulmates/incubator-airflow,MortalViews/incubator-airflow,sergiohgz/incubator-airflow,jesusfcr/airflow,malmiron/incubator-airflow,mylons/incubator-airflow,adamhaney/airflow,vijaysbhat/incubator-airflow,MetrodataTeam/incubator-airflow,DEVELByte/incubator-airflow,Chedi/airflow,sid88in/incubator-airflow,lxneng/incubator-airflow,spektom/incubator-airflow,lxneng/incubator-airflow,artwr/airflow,zoyahav/incubator-airflow,kerzhner/airflow,danielvdende/incubator-airflow,dud225/incubator-airflow,jwi078/incubator-airflow,vijaysbhat/incubator-airflow,malmiron/incubator-airflow,ledsusop/airflow,caseyching/incubator-airflow,dgies/incubator-airflow,wooga/airflow,apache/incubator-airflow,sergiohgz/incubator-airflow,bolkedebruin/airflow,spektom/incubator-airflow,mtdewulf/incubator-airflow,malmiron/incubator-airflow,biln/airflow,artwr/airflow,janczak10/incubator-airflow,forevernull/incubator-airflow,mrares/incubator-airflow,spektom/incubator-airflow,yiqingj/airflow,cfei18/incubator-a
irflow,dhuang/incubator-airflow,N3da/incubator-airflow,d-lee/airflow,wileeam/airflow,ronfung/incubator-airflow,janczak10/incubator-airflow,cademarkegard/airflow,gilt/incubator-airflow,dud225/incubator-airflow,mrares/incubator-airflow,gritlogic/incubator-airflow,juvoinc/airflow,NielsZeilemaker/incubator-airflow,wndhydrnt/airflow,yati-sagade/incubator-airflow,jfantom/incubator-airflow,sergiohgz/incubator-airflow,RealImpactAnalytics/airflow,biln/airflow,mrkm4ntr/incubator-airflow,mrares/incubator-airflow,edgarRd/incubator-airflow,hamedhsn/incubator-airflow,modsy/incubator-airflow,forevernull/incubator-airflow,ty707/airflow,asnir/airflow,zack3241/incubator-airflow,Acehaidrey/incubator-airflow,cjqian/incubator-airflow,rishibarve/incubator-airflow,alexvanboxel/airflow,d-lee/airflow,mrares/incubator-airflow,janczak10/incubator-airflow,preete-dixit-ck/incubator-airflow,KL-WLCR/incubator-airflow,DEVELByte/incubator-airflow,skudriashev/incubator-airflow,lxneng/incubator-airflow,AllisonWang/incubator-airflow,jbhsieh/incubator-airflow,andyxhadji/incubator-airflow,yati-sagade/incubator-airflow,skudriashev/incubator-airflow,ProstoMaxim/incubator-airflow,jiwang576/incubator-airflow,Chedi/airflow,gilt/incubator-airflow,Tagar/incubator-airflow,MortalViews/incubator-airflow,alexvanboxel/airflow,danielvdende/incubator-airflow,spektom/incubator-airflow,zodiac/incubator-airflow,dmitry-r/incubator-airflow,apache/incubator-airflow,vineet-rh/incubator-airflow,dhuang/incubator-airflow,hamedhsn/incubator-airflow,Twistbioscience/incubator-airflow,criccomini/airflow,jhsenjaliya/incubator-airflow,plypaul/airflow,jfantom/incubator-airflow,jesusfcr/airflow,wooga/airflow,edgarRd/incubator-airflow,gilt/incubator-airflow,holygits/incubator-airflow,criccomini/airflow,apache/airflow,saguziel/incubator-airflow,plypaul/airflow,ledsusop/airflow,mylons/incubator-airflow,caseyching/incubator-airflow,NielsZeilemaker/incubator-airflow,OpringaoDoTurno/airflow,MetrodataTeam/incubator-airflow,rishibarve/incubator-airflow,cfei18/incubator-airflow,mattuuh7/incubator-airflow,btallman/incubator-airflow,zoyahav/incubator-airflow,yk5/incubator-airflow,MortalViews/incubator-airflow,KL-WLCR/incubator-airflow,alexvanboxel/airflow,zoyahav/incubator-airflow,kerzhner/airflow,bolkedebruin/airflow,Acehaidrey/incubator-airflow,nathanielvarona/airflow,owlabs/incubator-airflow,wileeam/airflow,gtoonstra/airflow,jiwang576/incubator-airflow,andrewmchen/incubator-airflow,KL-WLCR/incubator-airflow,jiwang576/incubator-airflow,andyxhadji/incubator-airflow,malmiron/incubator-airflow,wndhydrnt/airflow,cfei18/incubator-airflow,easytaxibr/airflow,zodiac/incubator-airflow,easytaxibr/airflow,CloverHealth/airflow,dgies/incubator-airflow,wxiang7/airflow,zodiac/incubator-airflow,OpringaoDoTurno/airflow,Acehaidrey/incubator-airflow,criccomini/airflow,akosel/incubator-airflow,kerzhner/airflow,dhuang/incubator-airflow,N3da/incubator-airflow,subodhchhabra/airflow,andrewmchen/incubator-airflow,Acehaidrey/incubator-airflow,NielsZeilemaker/incubator-airflow,ty707/airflow,yk5/incubator-airflow,vijaysbhat/incubator-airflow,juvoinc/airflow,cfei18/incubator-airflow,Acehaidrey/incubator-airflow,asnir/airflow,jiwang576/incubator-airflow,danielvdende/incubator-airflow,andyxhadji/incubator-airflow,RealImpactAnalytics/airflow,lyft/incubator-airflow,wndhydrnt/airflow,cademarkegard/airflow,wxiang7/airflow,r39132/airflow
|
Add missing migration needed to support Variable value encryption
|
"""add is_encrypted column to variable table
Revision ID: 1968acfc09e3
Revises: bba5a7cfc896
Create Date: 2016-02-02 17:20:55.692295
"""
# revision identifiers, used by Alembic.
revision = '1968acfc09e3'
down_revision = 'bba5a7cfc896'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('variable', sa.Column('is_encrypted', sa.Boolean,default=False))
def downgrade():
op.drop_column('variable', 'is_encrypted')
|
<commit_before><commit_msg>Add missing migration needed to support Variable value encryption<commit_after>
|
"""add is_encrypted column to variable table
Revision ID: 1968acfc09e3
Revises: bba5a7cfc896
Create Date: 2016-02-02 17:20:55.692295
"""
# revision identifiers, used by Alembic.
revision = '1968acfc09e3'
down_revision = 'bba5a7cfc896'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('variable', sa.Column('is_encrypted', sa.Boolean,default=False))
def downgrade():
op.drop_column('variable', 'is_encrypted')
|
Add missing migration needed to support Variable value encryption"""add is_encrypted column to variable table
Revision ID: 1968acfc09e3
Revises: bba5a7cfc896
Create Date: 2016-02-02 17:20:55.692295
"""
# revision identifiers, used by Alembic.
revision = '1968acfc09e3'
down_revision = 'bba5a7cfc896'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('variable', sa.Column('is_encrypted', sa.Boolean,default=False))
def downgrade():
op.drop_column('variable', 'is_encrypted')
|
<commit_before><commit_msg>Add missing migration needed to support Variable value encryption<commit_after>"""add is_encrypted column to variable table
Revision ID: 1968acfc09e3
Revises: bba5a7cfc896
Create Date: 2016-02-02 17:20:55.692295
"""
# revision identifiers, used by Alembic.
revision = '1968acfc09e3'
down_revision = 'bba5a7cfc896'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('variable', sa.Column('is_encrypted', sa.Boolean,default=False))
def downgrade():
op.drop_column('variable', 'is_encrypted')
|
|
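For orientation, a hedged sketch of the model-side column such a migration pairs with; only the table name, column name, and default are taken from the migration itself, and the rest of Airflow's Variable model is deliberately not reproduced here:
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class Variable(Base):
    __tablename__ = 'variable'
    id = sa.Column(sa.Integer, primary_key=True)          # assumed primary key, not part of this migration
    is_encrypted = sa.Column(sa.Boolean, default=False)   # the column added by the migration above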
594c9d8320b1eaac87b10fe9c93fbb1ce652799e
|
pydbus/tests/gnome_music.py
|
pydbus/tests/gnome_music.py
|
from pydbus import SessionBus
from gi.repository import GObject, GLib
import subprocess
from time import sleep
loop = GObject.MainLoop()
subprocess.Popen("gnome-music")
sleep(5)
print("Waiting for GNOME Music to start...")
b = SessionBus()
m = b.get("org.mpris.MediaPlayer2.GnomeMusic", "/org/mpris/MediaPlayer2")
m.PropertiesChanged.connect(print)
m.ActivatePlaylist(m.GetPlaylists(0, 5, "Alphabetical", 0)[0][0])
m.Play()
sleep(1)
assert(m.PlaybackStatus == "Playing")
m.Pause()
assert(m.PlaybackStatus == "Paused")
m.Play()
assert(m.PlaybackStatus == "Playing")
t = m.Metadata["xesam:title"]
m.Next()
assert(m.Metadata["xesam:title"] != t)
m.Previous()
assert(m.Metadata["xesam:title"] == t)
oldLoopStatus = m.LoopStatus
m.LoopStatus = "None"
assert(m.LoopStatus == "None")
m.LoopStatus = "Track"
assert(m.LoopStatus == "Track")
m.LoopStatus = oldLoopStatus
GLib.timeout_add_seconds(2, lambda: loop.quit())
loop.run()
|
Add a trivial GNOME Music test.
|
Add a trivial GNOME Music test.
|
Python
|
lgpl-2.1
|
LEW21/pydbus,LEW21/pydbus
|
Add a trivial GNOME Music test.
|
from pydbus import SessionBus
from gi.repository import GObject, GLib
import subprocess
from time import sleep
loop = GObject.MainLoop()
subprocess.Popen("gnome-music")
sleep(5)
print("Waiting for GNOME Music to start...")
b = SessionBus()
m = b.get("org.mpris.MediaPlayer2.GnomeMusic", "/org/mpris/MediaPlayer2")
m.PropertiesChanged.connect(print)
m.ActivatePlaylist(m.GetPlaylists(0, 5, "Alphabetical", 0)[0][0])
m.Play()
sleep(1)
assert(m.PlaybackStatus == "Playing")
m.Pause()
assert(m.PlaybackStatus == "Paused")
m.Play()
assert(m.PlaybackStatus == "Playing")
t = m.Metadata["xesam:title"]
m.Next()
assert(m.Metadata["xesam:title"] != t)
m.Previous()
assert(m.Metadata["xesam:title"] == t)
oldLoopStatus = m.LoopStatus
m.LoopStatus = "None"
assert(m.LoopStatus == "None")
m.LoopStatus = "Track"
assert(m.LoopStatus == "Track")
m.LoopStatus = oldLoopStatus
GLib.timeout_add_seconds(2, lambda: loop.quit())
loop.run()
|
<commit_before><commit_msg>Add a trivial GNOME Music test.<commit_after>
|
from pydbus import SessionBus
from gi.repository import GObject, GLib
import subprocess
from time import sleep
loop = GObject.MainLoop()
subprocess.Popen("gnome-music")
sleep(5)
print("Waiting for GNOME Music to start...")
b = SessionBus()
m = b.get("org.mpris.MediaPlayer2.GnomeMusic", "/org/mpris/MediaPlayer2")
m.PropertiesChanged.connect(print)
m.ActivatePlaylist(m.GetPlaylists(0, 5, "Alphabetical", 0)[0][0])
m.Play()
sleep(1)
assert(m.PlaybackStatus == "Playing")
m.Pause()
assert(m.PlaybackStatus == "Paused")
m.Play()
assert(m.PlaybackStatus == "Playing")
t = m.Metadata["xesam:title"]
m.Next()
assert(m.Metadata["xesam:title"] != t)
m.Previous()
assert(m.Metadata["xesam:title"] == t)
oldLoopStatus = m.LoopStatus
m.LoopStatus = "None"
assert(m.LoopStatus == "None")
m.LoopStatus = "Track"
assert(m.LoopStatus == "Track")
m.LoopStatus = oldLoopStatus
GLib.timeout_add_seconds(2, lambda: loop.quit())
loop.run()
|
Add a trivial GNOME Music test.from pydbus import SessionBus
from gi.repository import GObject, GLib
import subprocess
from time import sleep
loop = GObject.MainLoop()
subprocess.Popen("gnome-music")
sleep(5)
print("Waiting for GNOME Music to start...")
b = SessionBus()
m = b.get("org.mpris.MediaPlayer2.GnomeMusic", "/org/mpris/MediaPlayer2")
m.PropertiesChanged.connect(print)
m.ActivatePlaylist(m.GetPlaylists(0, 5, "Alphabetical", 0)[0][0])
m.Play()
sleep(1)
assert(m.PlaybackStatus == "Playing")
m.Pause()
assert(m.PlaybackStatus == "Paused")
m.Play()
assert(m.PlaybackStatus == "Playing")
t = m.Metadata["xesam:title"]
m.Next()
assert(m.Metadata["xesam:title"] != t)
m.Previous()
assert(m.Metadata["xesam:title"] == t)
oldLoopStatus = m.LoopStatus
m.LoopStatus = "None"
assert(m.LoopStatus == "None")
m.LoopStatus = "Track"
assert(m.LoopStatus == "Track")
m.LoopStatus = oldLoopStatus
GLib.timeout_add_seconds(2, lambda: loop.quit())
loop.run()
|
<commit_before><commit_msg>Add a trivial GNOME Music test.<commit_after>from pydbus import SessionBus
from gi.repository import GObject, GLib
import subprocess
from time import sleep
loop = GObject.MainLoop()
subprocess.Popen("gnome-music")
sleep(5)
print("Waiting for GNOME Music to start...")
b = SessionBus()
m = b.get("org.mpris.MediaPlayer2.GnomeMusic", "/org/mpris/MediaPlayer2")
m.PropertiesChanged.connect(print)
m.ActivatePlaylist(m.GetPlaylists(0, 5, "Alphabetical", 0)[0][0])
m.Play()
sleep(1)
assert(m.PlaybackStatus == "Playing")
m.Pause()
assert(m.PlaybackStatus == "Paused")
m.Play()
assert(m.PlaybackStatus == "Playing")
t = m.Metadata["xesam:title"]
m.Next()
assert(m.Metadata["xesam:title"] != t)
m.Previous()
assert(m.Metadata["xesam:title"] == t)
oldLoopStatus = m.LoopStatus
m.LoopStatus = "None"
assert(m.LoopStatus == "None")
m.LoopStatus = "Track"
assert(m.LoopStatus == "Track")
m.LoopStatus = oldLoopStatus
GLib.timeout_add_seconds(2, lambda: loop.quit())
loop.run()
|
|
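The test above doubles as a reference for the pydbus calls involved; stripped to the essentials, and assuming GNOME Music is already running on the session bus, the pattern is:
from pydbus import SessionBus

bus = SessionBus()
player = bus.get("org.mpris.MediaPlayer2.GnomeMusic", "/org/mpris/MediaPlayer2")
print(player.PlaybackStatus)   # a property read over D-Bus, as exercised in the test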
a64f3cf9c10d79576cc70d385f9124b7beda040e
|
nubank.py
|
nubank.py
|
import json
import requests
class NuException(BaseException):
pass
class Nubank:
headers = {
'Content-Type': 'application/json',
'X-Correlation-Id': 'WEB-APP.pewW9',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/58.0.3029.110 Safari/537.36',
'Origin': 'https://conta.nubank.com.br',
'Referer': 'https://conta.nubank.com.br/',
}
TOKEN_URL = 'https://prod-auth.nubank.com.br/api/token'
feed_url = None
def __init__(self, cpf, password):
self.authenticate(cpf, password)
def authenticate(self, cpf, password):
body = {
"grant_type": "password",
"login": cpf,
"password": password,
"client_id": "other.conta",
"client_secret": "yQPeLzoHuJzlMMSAjC-LgNUJdUecx8XO"
}
request = requests.post(Nubank.TOKEN_URL, json=body, headers=self.headers)
if request.status_code != 200:
raise NuException('Authentication failed. Check your credentials!')
data = json.loads(request.content)
self.headers['Authorization'] = 'Bearer {}'.format(data['access_token'])
self.feed_url = data['_links']['purchases']['href']
def get_account_statements(self):
request = requests.get(self.feed_url, headers=self.headers)
return json.loads(request.content)
|
Add Nubank class for handling the requests
|
Add Nubank class for handling the requests
|
Python
|
mit
|
andreroggeri/pynubank
|
Add Nubank class for handling the requests
|
import json
import requests
class NuException(BaseException):
pass
class Nubank:
headers = {
'Content-Type': 'application/json',
'X-Correlation-Id': 'WEB-APP.pewW9',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/58.0.3029.110 Safari/537.36',
'Origin': 'https://conta.nubank.com.br',
'Referer': 'https://conta.nubank.com.br/',
}
TOKEN_URL = 'https://prod-auth.nubank.com.br/api/token'
feed_url = None
def __init__(self, cpf, password):
self.authenticate(cpf, password)
def authenticate(self, cpf, password):
body = {
"grant_type": "password",
"login": cpf,
"password": password,
"client_id": "other.conta",
"client_secret": "yQPeLzoHuJzlMMSAjC-LgNUJdUecx8XO"
}
request = requests.post(Nubank.TOKEN_URL, json=body, headers=self.headers)
if request.status_code != 200:
raise NuException('Authentication failed. Check your credentials!')
data = json.loads(request.content)
self.headers['Authorization'] = 'Bearer {}'.format(data['access_token'])
self.feed_url = data['_links']['purchases']['href']
def get_account_statements(self):
request = requests.get(self.feed_url, headers=self.headers)
return json.loads(request.content)
|
<commit_before><commit_msg>Add Nubank class for handling the requests<commit_after>
|
import json
import requests
class NuException(BaseException):
pass
class Nubank:
headers = {
'Content-Type': 'application/json',
'X-Correlation-Id': 'WEB-APP.pewW9',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/58.0.3029.110 Safari/537.36',
'Origin': 'https://conta.nubank.com.br',
'Referer': 'https://conta.nubank.com.br/',
}
TOKEN_URL = 'https://prod-auth.nubank.com.br/api/token'
feed_url = None
def __init__(self, cpf, password):
self.authenticate(cpf, password)
def authenticate(self, cpf, password):
body = {
"grant_type": "password",
"login": cpf,
"password": password,
"client_id": "other.conta",
"client_secret": "yQPeLzoHuJzlMMSAjC-LgNUJdUecx8XO"
}
request = requests.post(Nubank.TOKEN_URL, json=body, headers=self.headers)
if request.status_code != 200:
raise NuException('Authentication failed. Check your credentials!')
data = json.loads(request.content)
self.headers['Authorization'] = 'Bearer {}'.format(data['access_token'])
self.feed_url = data['_links']['purchases']['href']
def get_account_statements(self):
request = requests.get(self.feed_url, headers=self.headers)
return json.loads(request.content)
|
Add Nubank class for handling the requestsimport json
import requests
class NuException(BaseException):
pass
class Nubank:
headers = {
'Content-Type': 'application/json',
'X-Correlation-Id': 'WEB-APP.pewW9',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/58.0.3029.110 Safari/537.36',
'Origin': 'https://conta.nubank.com.br',
'Referer': 'https://conta.nubank.com.br/',
}
TOKEN_URL = 'https://prod-auth.nubank.com.br/api/token'
feed_url = None
def __init__(self, cpf, password):
self.authenticate(cpf, password)
def authenticate(self, cpf, password):
body = {
"grant_type": "password",
"login": cpf,
"password": password,
"client_id": "other.conta",
"client_secret": "yQPeLzoHuJzlMMSAjC-LgNUJdUecx8XO"
}
request = requests.post(Nubank.TOKEN_URL, json=body, headers=self.headers)
if request.status_code != 200:
raise NuException('Authentication failed. Check your credentials!')
data = json.loads(request.content)
self.headers['Authorization'] = 'Bearer {}'.format(data['access_token'])
self.feed_url = data['_links']['purchases']['href']
def get_account_statements(self):
request = requests.get(self.feed_url, headers=self.headers)
return json.loads(request.content)
|
<commit_before><commit_msg>Add Nubank class for handling the requests<commit_after>import json
import requests
class NuException(BaseException):
pass
class Nubank:
headers = {
'Content-Type': 'application/json',
'X-Correlation-Id': 'WEB-APP.pewW9',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/58.0.3029.110 Safari/537.36',
'Origin': 'https://conta.nubank.com.br',
'Referer': 'https://conta.nubank.com.br/',
}
TOKEN_URL = 'https://prod-auth.nubank.com.br/api/token'
feed_url = None
def __init__(self, cpf, password):
self.authenticate(cpf, password)
def authenticate(self, cpf, password):
body = {
"grant_type": "password",
"login": cpf,
"password": password,
"client_id": "other.conta",
"client_secret": "yQPeLzoHuJzlMMSAjC-LgNUJdUecx8XO"
}
request = requests.post(Nubank.TOKEN_URL, json=body, headers=self.headers)
if request.status_code != 200:
raise NuException('Authentication failed. Check your credentials!')
data = json.loads(request.content)
self.headers['Authorization'] = 'Bearer {}'.format(data['access_token'])
self.feed_url = data['_links']['purchases']['href']
def get_account_statements(self):
request = requests.get(self.feed_url, headers=self.headers)
return json.loads(request.content)
|
|
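A hedged usage sketch for the class above, assuming it is in scope; the CPF and password are placeholders, and a failed login raises the NuException defined in the record instead of returning data:
nu = Nubank('00000000000', 'example-password')   # placeholder credentials
statements = nu.get_account_statements()          # parsed JSON from the purchases feed URL
print(statements)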
fa23f66fcae37731672f4cf646ad52964b4b206a
|
table/migrations/0001_initial.py
|
table/migrations/0001_initial.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Receipt',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
('title', models.CharField(max_length=255)),
('price', models.DecimalField(max_digits=10, decimal_places=2)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
),
]
|
Add migration for Receipt model
|
Add migration for Receipt model
|
Python
|
mit
|
trimailov/finance,trimailov/finance,trimailov/finance
|
Add migration for Receipt model
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Receipt',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
('title', models.CharField(max_length=255)),
('price', models.DecimalField(max_digits=10, decimal_places=2)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
),
]
|
<commit_before><commit_msg>Add migration for Receipt model<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Receipt',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
('title', models.CharField(max_length=255)),
('price', models.DecimalField(max_digits=10, decimal_places=2)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
),
]
|
Add migration for Receipt model# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Receipt',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
('title', models.CharField(max_length=255)),
('price', models.DecimalField(max_digits=10, decimal_places=2)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
),
]
|
<commit_before><commit_msg>Add migration for Receipt model<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Receipt',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
('title', models.CharField(max_length=255)),
('price', models.DecimalField(max_digits=10, decimal_places=2)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
),
]
|
|
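For reference, a hedged reconstruction of the model that would produce this initial migration; field names and types are read directly from the CreateModel block, and on_delete is noted only because Django 2.0+ requires it even though the original migration predates that:
from django.conf import settings
from django.db import models

class Receipt(models.Model):
    title = models.CharField(max_length=255)
    price = models.DecimalField(max_digits=10, decimal_places=2)
    # on_delete is mandatory on Django >= 2.0; the migration above was generated before that requirement.
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)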
89f75bb894a6dee2d53708b8f4f5f3302d54a010
|
neblinaCore.py
|
neblinaCore.py
|
#!/usr/bin/env python
###################################################################################
#
# Copyright (c) 2010-2016 Motsai
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
###################################################################################
import threading
from neblina import *
###################################################################################
class NeblinaCore(object):
def __init__(self, interface=Interface.UART):
self.interface = interface
|
Create NeblinaCore to handle sending and receiving packet.
|
Create NeblinaCore to handle sending and receiving packet.
|
Python
|
mit
|
Motsai/neblina-python,Motsai/neblina-python
|
Create NeblinaCore to handle sending and receiving packet.
|
#!/usr/bin/env python
###################################################################################
#
# Copyright (c) 2010-2016 Motsai
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
###################################################################################
import threading
from neblina import *
###################################################################################
class NeblinaCore(object):
def __init__(self, interface=Interface.UART):
self.interface = interface
|
<commit_before><commit_msg>Create NeblinaCore to handle sending and receiving packet.<commit_after>
|
#!/usr/bin/env python
###################################################################################
#
# Copyright (c) 2010-2016 Motsai
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
###################################################################################
import threading
from neblina import *
###################################################################################
class NeblinaCore(object):
def __init__(self, interface=Interface.UART):
self.interface = interface
|
Create NeblinaCore to handle sending and receiving packet.#!/usr/bin/env python
###################################################################################
#
# Copyright (c) 2010-2016 Motsai
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
###################################################################################
import threading
from neblina import *
###################################################################################
class NeblinaCore(object):
def __init__(self, interface=Interface.UART):
self.interface = interface
|
<commit_before><commit_msg>Create NeblinaCore to handle sending and receiving packet.<commit_after>#!/usr/bin/env python
###################################################################################
#
# Copyright (c) 2010-2016 Motsai
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
###################################################################################
import threading
from neblina import *
###################################################################################
class NeblinaCore(object):
def __init__(self, interface=Interface.UART):
self.interface = interface
|
|
1fd87ad0cab5d45602192c83681340d5da27a6db
|
examples/custom_context.py
|
examples/custom_context.py
|
import random
import discord
from discord.ext import commands
class MyContext(commands.Context):
async def tick(self, value):
# reacts to the message with an emoji
# depending on whether value is True or False
# if its True, it'll add a green check mark
# otherwise, it'll add a red cross mark
emoji = '\N{WHITE HEAVY CHECK MARK}' if value else '\N{CROSS MARK}'
try:
# this will react to the command author's message
await self.message.add_reaction(emoji)
except discord.HTTPException:
# sometimes errors occur during this, for example
# maybe you dont have permission to do that
# we dont mind, so we can just ignore them
pass
class MyBot(commands.Bot):
async def get_context(self, message, *, cls=MyContext):
# when you override this method, you pass your new Context
# subclass to the super() method, which tells the bot to
# use the new MyContext class
return await super().get_context(message, cls=cls)
bot = MyBot(command_prefix='!')
@bot.command()
async def guess(ctx, number: int):
""" Guess a random number from 1 to 6. """
# explained in a previous example, this gives you
# a random number from 1-6
value = random.randint(1, 6)
# with your new helper function, you can add a
# green check mark if the guess was correct,
# or a red cross mark if it wasnt
await ctx.tick(number == value)
# important: you shouldnt hard code your token
# these are very important, and leaking them can
# let people do very malicious things with your
# bot. try to use a file or something to keep
# them private, and dont commit it to GitHub
token = "your token here"
bot.run(token)
|
Add example on subclassing commands.Context
|
Add example on subclassing commands.Context
|
Python
|
mit
|
Rapptz/discord.py,Harmon758/discord.py,khazhyk/discord.py,rapptz/discord.py,Harmon758/discord.py
|
Add example on subclassing commands.Context
|
import random
import discord
from discord.ext import commands
class MyContext(commands.Context):
async def tick(self, value):
# reacts to the message with an emoji
# depending on whether value is True or False
# if its True, it'll add a green check mark
# otherwise, it'll add a red cross mark
emoji = '\N{WHITE HEAVY CHECK MARK}' if value else '\N{CROSS MARK}'
try:
# this will react to the command author's message
await self.message.add_reaction(emoji)
except discord.HTTPException:
# sometimes errors occur during this, for example
# maybe you dont have permission to do that
# we dont mind, so we can just ignore them
pass
class MyBot(commands.Bot):
async def get_context(self, message, *, cls=MyContext):
# when you override this method, you pass your new Context
# subclass to the super() method, which tells the bot to
# use the new MyContext class
return await super().get_context(message, cls=cls)
bot = MyBot(command_prefix='!')
@bot.command()
async def guess(ctx, number: int):
""" Guess a random number from 1 to 6. """
# explained in a previous example, this gives you
# a random number from 1-6
value = random.randint(1, 6)
# with your new helper function, you can add a
# green check mark if the guess was correct,
# or a red cross mark if it wasnt
await ctx.tick(number == value)
# important: you shouldnt hard code your token
# these are very important, and leaking them can
# let people do very malicious things with your
# bot. try to use a file or something to keep
# them private, and dont commit it to GitHub
token = "your token here"
bot.run(token)
|
<commit_before><commit_msg>Add example on subclassing commands.Context<commit_after>
|
import random
import discord
from discord.ext import commands
class MyContext(commands.Context):
async def tick(self, value):
# reacts to the message with an emoji
# depending on whether value is True or False
# if its True, it'll add a green check mark
# otherwise, it'll add a red cross mark
emoji = '\N{WHITE HEAVY CHECK MARK}' if value else '\N{CROSS MARK}'
try:
# this will react to the command author's message
await self.message.add_reaction(emoji)
except discord.HTTPException:
# sometimes errors occur during this, for example
# maybe you dont have permission to do that
# we dont mind, so we can just ignore them
pass
class MyBot(commands.Bot):
async def get_context(self, message, *, cls=MyContext):
# when you override this method, you pass your new Context
# subclass to the super() method, which tells the bot to
# use the new MyContext class
return await super().get_context(message, cls=cls)
bot = MyBot(command_prefix='!')
@bot.command()
async def guess(ctx, number: int):
""" Guess a random number from 1 to 6. """
# explained in a previous example, this gives you
# a random number from 1-6
value = random.randint(1, 6)
# with your new helper function, you can add a
# green check mark if the guess was correct,
# or a red cross mark if it wasnt
await ctx.tick(number == value)
# important: you shouldnt hard code your token
# these are very important, and leaking them can
# let people do very malicious things with your
# bot. try to use a file or something to keep
# them private, and dont commit it to GitHub
token = "your token here"
bot.run(token)
|
Add example on subclassing commands.Contextimport random
import discord
from discord.ext import commands
class MyContext(commands.Context):
async def tick(self, value):
# reacts to the message with an emoji
# depending on whether value is True or False
# if its True, it'll add a green check mark
# otherwise, it'll add a red cross mark
emoji = '\N{WHITE HEAVY CHECK MARK}' if value else '\N{CROSS MARK}'
try:
# this will react to the command author's message
await self.message.add_reaction(emoji)
except discord.HTTPException:
# sometimes errors occur during this, for example
# maybe you dont have permission to do that
# we dont mind, so we can just ignore them
pass
class MyBot(commands.Bot):
async def get_context(self, message, *, cls=MyContext):
# when you override this method, you pass your new Context
# subclass to the super() method, which tells the bot to
# use the new MyContext class
return await super().get_context(message, cls=cls)
bot = MyBot(command_prefix='!')
@bot.command()
async def guess(ctx, number: int):
""" Guess a random number from 1 to 6. """
# explained in a previous example, this gives you
# a random number from 1-6
value = random.randint(1, 6)
# with your new helper function, you can add a
# green check mark if the guess was correct,
# or a red cross mark if it wasnt
await ctx.tick(number == value)
# important: you shouldnt hard code your token
# these are very important, and leaking them can
# let people do very malicious things with your
# bot. try to use a file or something to keep
# them private, and dont commit it to GitHub
token = "your token here"
bot.run(token)
|
<commit_before><commit_msg>Add example on subclassing commands.Context<commit_after>import random
import discord
from discord.ext import commands
class MyContext(commands.Context):
async def tick(self, value):
# reacts to the message with an emoji
# depending on whether value is True or False
# if its True, it'll add a green check mark
# otherwise, it'll add a red cross mark
emoji = '\N{WHITE HEAVY CHECK MARK}' if value else '\N{CROSS MARK}'
try:
# this will react to the command author's message
await self.message.add_reaction(emoji)
except discord.HTTPException:
# sometimes errors occur during this, for example
# maybe you dont have permission to do that
# we dont mind, so we can just ignore them
pass
class MyBot(commands.Bot):
async def get_context(self, message, *, cls=MyContext):
# when you override this method, you pass your new Context
# subclass to the super() method, which tells the bot to
# use the new MyContext class
return await super().get_context(message, cls=cls)
bot = MyBot(command_prefix='!')
@bot.command()
async def guess(ctx, number: int):
""" Guess a random number from 1 to 6. """
# explained in a previous example, this gives you
# a random number from 1-6
value = random.randint(1, 6)
# with your new helper function, you can add a
# green check mark if the guess was correct,
# or a red cross mark if it wasnt
await ctx.tick(number == value)
# important: you shouldnt hard code your token
# these are very important, and leaking them can
# let people do very malicious things with your
# bot. try to use a file or something to keep
# them private, and dont commit it to GitHub
token = "your token here"
bot.run(token)
|
|
ae9532fe2cf76a8d3f219093f373c05dc2eafc1d
|
benchexec/tools/abc.py
|
benchexec/tools/abc.py
|
# This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 Dirk Beyer <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0
import re
import logging
import benchexec.result as result
import benchexec.tools.template
class Tool(benchexec.tools.template.BaseTool2):
"""
Tool info for ABC: A System for Sequential Synthesis and Verification
URL: https://people.eecs.berkeley.edu/~alanmi/abc/
"""
def executable(self, tool_locator):
return tool_locator.find_executable("abc")
def name(self):
return "ABC"
def cmdline(self, executable, options, task, rlimits):
return [executable] + options + [task.single_input_file]
def get_value_from_output(self, output, identifier):
# search for the identifier in the output and return the integral value after it
# warn if there are repeated matches (multiple statistics from sequential analysis?)
match = None
regex = re.compile(re.escape(identifier) + r"\s*(\d+)")
for line in output:
result = regex.search(line)
if result:
if match is None:
match = result.group(1)
else:
logging.warning(
"skipping repeated matches for identifier '{0}': '{1}'".format(
identifier, line
)
)
return match
|
Add a tool-info module for ABC
|
Add a tool-info module for ABC
|
Python
|
apache-2.0
|
sosy-lab/benchexec,dbeyer/benchexec,dbeyer/benchexec,ultimate-pa/benchexec,ultimate-pa/benchexec,ultimate-pa/benchexec,ultimate-pa/benchexec,sosy-lab/benchexec,sosy-lab/benchexec,sosy-lab/benchexec,ultimate-pa/benchexec,dbeyer/benchexec,ultimate-pa/benchexec,dbeyer/benchexec,sosy-lab/benchexec,sosy-lab/benchexec
|
Add a tool-info module for ABC
|
# This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 Dirk Beyer <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0
import re
import logging
import benchexec.result as result
import benchexec.tools.template
class Tool(benchexec.tools.template.BaseTool2):
"""
Tool info for ABC: A System for Sequential Synthesis and Verification
URL: https://people.eecs.berkeley.edu/~alanmi/abc/
"""
def executable(self, tool_locator):
return tool_locator.find_executable("abc")
def name(self):
return "ABC"
def cmdline(self, executable, options, task, rlimits):
return [executable] + options + [task.single_input_file]
def get_value_from_output(self, output, identifier):
# search for the identifier in the output and return the integral value after it
# warn if there are repeated matches (multiple statistics from sequential analysis?)
match = None
regex = re.compile(re.escape(identifier) + r"\s*(\d+)")
for line in output:
result = regex.search(line)
if result:
if match is None:
match = result.group(1)
else:
logging.warning(
"skipping repeated matches for identifier '{0}': '{1}'".format(
identifier, line
)
)
return match
|
<commit_before><commit_msg>Add a tool-info module for ABC<commit_after>
|
# This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 Dirk Beyer <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0
import re
import logging
import benchexec.result as result
import benchexec.tools.template
class Tool(benchexec.tools.template.BaseTool2):
"""
Tool info for ABC: A System for Sequential Synthesis and Verification
URL: https://people.eecs.berkeley.edu/~alanmi/abc/
"""
def executable(self, tool_locator):
return tool_locator.find_executable("abc")
def name(self):
return "ABC"
def cmdline(self, executable, options, task, rlimits):
return [executable] + options + [task.single_input_file]
def get_value_from_output(self, output, identifier):
# search for the identifier in the output and return the integral value after it
# warn if there are repeated matches (multiple statistics from sequential analysis?)
match = None
regex = re.compile(re.escape(identifier) + r"\s*(\d+)")
for line in output:
result = regex.search(line)
if result:
if match is None:
match = result.group(1)
else:
logging.warning(
"skipping repeated matches for identifier '{0}': '{1}'".format(
identifier, line
)
)
return match
|
Add a tool-info module for ABC# This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 Dirk Beyer <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0
import re
import logging
import benchexec.result as result
import benchexec.tools.template
class Tool(benchexec.tools.template.BaseTool2):
"""
Tool info for ABC: A System for Sequential Synthesis and Verification
URL: https://people.eecs.berkeley.edu/~alanmi/abc/
"""
def executable(self, tool_locator):
return tool_locator.find_executable("abc")
def name(self):
return "ABC"
def cmdline(self, executable, options, task, rlimits):
return [executable] + options + [task.single_input_file]
def get_value_from_output(self, output, identifier):
# search for the identifier in the output and return the integral value after it
# warn if there are repeated matches (multiple statistics from sequential analysis?)
match = None
regex = re.compile(re.escape(identifier) + r"\s*(\d+)")
for line in output:
result = regex.search(line)
if result:
if match is None:
match = result.group(1)
else:
logging.warning(
"skipping repeated matches for identifier '{0}': '{1}'".format(
identifier, line
)
)
return match
|
<commit_before><commit_msg>Add a tool-info module for ABC<commit_after># This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 Dirk Beyer <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0
import re
import logging
import benchexec.result as result
import benchexec.tools.template
class Tool(benchexec.tools.template.BaseTool2):
"""
Tool info for ABC: A System for Sequential Synthesis and Verification
URL: https://people.eecs.berkeley.edu/~alanmi/abc/
"""
def executable(self, tool_locator):
return tool_locator.find_executable("abc")
def name(self):
return "ABC"
def cmdline(self, executable, options, task, rlimits):
return [executable] + options + [task.single_input_file]
def get_value_from_output(self, output, identifier):
# search for the identifier in the output and return the integral value after it
# warn if there are repeated matches (multiple statistics from sequential analysis?)
match = None
regex = re.compile(re.escape(identifier) + r"\s*(\d+)")
for line in output:
result = regex.search(line)
if result:
if match is None:
match = result.group(1)
else:
logging.warning(
"skipping repeated matches for identifier '{0}': '{1}'".format(
identifier, line
)
)
return match
|
|
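For reference, the get_value_from_output method in the record above pulls out the first integer that follows a statistic label in the solver output. A minimal standalone sketch of that regex-based extraction, using an invented sample line purely for illustration:
import re
def extract_stat(lines, identifier):
    # Return the first integer that follows `identifier`, or None if it never appears.
    pattern = re.compile(re.escape(identifier) + r"\s*(\d+)")
    for line in lines:
        found = pattern.search(line)
        if found:
            return int(found.group(1))
    return None
# Hypothetical output lines, only to show the matching behaviour.
sample = ["Nodes visited: 128", "Time elapsed: 42"]
assert extract_stat(sample, "Nodes visited:") == 128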
3663a732727d2a2198b2fe1c77a3d73abb531d8e
|
pyfibot/util/twitter_application_auth.py
|
pyfibot/util/twitter_application_auth.py
|
import requests
import base64
import sys
if len(sys.argv) < 3:
print "Usage: twitter_application_auth.py <consumer key> <consumer secret>"
sys.exit(1)
consumer_key = sys.argv[1]
consumer_secret = sys.argv[2]
token = consumer_key+":"+consumer_secret
encoded_token = base64.b64encode(token)
payload = {'grant_type': 'client_credentials'}
headers = {'Authorization': 'Basic '+encoded_token}
auth_url = "https://api.twitter.com/oauth2/token"
r = requests.post(auth_url, payload, headers=headers)
bearer_token = r.json()['access_token']
print "Bearer token:"
print bearer_token
|
Add utility to generate bearer keys for twitter
|
Add utility to generate bearer keys for twitter
|
Python
|
bsd-3-clause
|
huqa/pyfibot,aapa/pyfibot,huqa/pyfibot,EArmour/pyfibot,lepinkainen/pyfibot,EArmour/pyfibot,rnyberg/pyfibot,lepinkainen/pyfibot,rnyberg/pyfibot,aapa/pyfibot
|
Add utility to generate bearer keys for twitter
|
import requests
import base64
import sys
if len(sys.argv) < 3:
print "Usage: twitter_application_auth.py <consumer key> <consumer secret>"
sys.exit(1)
consumer_key = sys.argv[1]
consumer_secret = sys.argv[2]
token = consumer_key+":"+consumer_secret
encoded_token = base64.b64encode(token)
payload = {'grant_type': 'client_credentials'}
headers = {'Authorization': 'Basic '+encoded_token}
auth_url = "https://api.twitter.com/oauth2/token"
r = requests.post(auth_url, payload, headers=headers)
bearer_token = r.json()['access_token']
print "Bearer token:"
print bearer_token
|
<commit_before><commit_msg>Add utility to generate bearer keys for twitter<commit_after>
|
import requests
import base64
import sys
if len(sys.argv) < 3:
print "Usage: twitter_application_auth.py <consumer key> <consumer secret>"
sys.exit(1)
consumer_key = sys.argv[1]
consumer_secret = sys.argv[2]
token = consumer_key+":"+consumer_secret
encoded_token = base64.b64encode(token)
payload = {'grant_type': 'client_credentials'}
headers = {'Authorization': 'Basic '+encoded_token}
auth_url = "https://api.twitter.com/oauth2/token"
r = requests.post(auth_url, payload, headers=headers)
bearer_token = r.json()['access_token']
print "Bearer token:"
print bearer_token
|
Add utility to generate bearer keys for twitterimport requests
import base64
import sys
if len(sys.argv) < 3:
print "Usage: twitter_application_auth.py <consumer key> <consumer secret>"
sys.exit(1)
consumer_key = sys.argv[1]
consumer_secret = sys.argv[2]
token = consumer_key+":"+consumer_secret
encoded_token = base64.b64encode(token)
payload = {'grant_type': 'client_credentials'}
headers = {'Authorization': 'Basic '+encoded_token}
auth_url = "https://api.twitter.com/oauth2/token"
r = requests.post(auth_url, payload, headers=headers)
bearer_token = r.json()['access_token']
print "Bearer token:"
print bearer_token
|
<commit_before><commit_msg>Add utility to generate bearer keys for twitter<commit_after>import requests
import base64
import sys
if len(sys.argv) < 3:
print "Usage: twitter_application_auth.py <consumer key> <consumer secret>"
sys.exit(1)
consumer_key = sys.argv[1]
consumer_secret = sys.argv[2]
token = consumer_key+":"+consumer_secret
encoded_token = base64.b64encode(token)
payload = {'grant_type': 'client_credentials'}
headers = {'Authorization': 'Basic '+encoded_token}
auth_url = "https://api.twitter.com/oauth2/token"
r = requests.post(auth_url, payload, headers=headers)
bearer_token = r.json()['access_token']
print "Bearer token:"
print bearer_token
|
|
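Once the script above has printed a bearer token, that token is normally sent in an Authorization header for application-only requests. A rough usage sketch; the endpoint and parameters here are illustrative examples, not part of the utility itself:
import requests
bearer_token = "token printed by twitter_application_auth.py"
headers = {"Authorization": "Bearer " + bearer_token}
# Application-only request using the bearer token; swap in whichever endpoint you need.
response = requests.get(
    "https://api.twitter.com/1.1/users/show.json",
    params={"screen_name": "twitterapi"},
    headers=headers,
)
print(response.status_code)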
75aec1cbc64c667b0528cfc66ed087d7ea84db20
|
bin/process_coca_ngrams.py
|
bin/process_coca_ngrams.py
|
from data import data, process_common
UNIGRAM_REFERENCE = 'data/g1m_1gram.txt'
SRCS_PATTERN = 'data/corpus/coca/w%s_.txt'
OUTS_PATTERN = 'data/coca_%sgram.txt'
REFERENCE = {}
def load_reference() -> None:
with data.open_project_path(UNIGRAM_REFERENCE, mode='r') as src:
for line in src:
word, score = line.split()
score = int(score)
REFERENCE[word] = score
def sort_key(x) -> tuple:
count, words = x
return count, REFERENCE[words[0]]
def aggregate(i: int) -> None:
results = []
print('reading', SRCS_PATTERN % i)
with data.open_project_path(
SRCS_PATTERN % i, mode='r', errors='ignore') as src:
for line in src:
count, *words = line.split()
count = int(count)
if not all(word in REFERENCE for word in words):
continue
if any(process_common.score(word, 100, 0) < 100 for word in words):
continue
results.append((count, words))
total = len(results)
print('writing', OUTS_PATTERN % i)
with data.open_project_path(OUTS_PATTERN % i, mode='w') as out:
for i, (count, words) in enumerate(
sorted(results, key=sort_key, reverse=True)):
pos = total - i
score = (pos + count) // 2 # Average of position & score.
out.write('%s\t%s\n' % (' '.join(words), score))
def main() -> None:
load_reference()
for i in range(2, 5+1):
aggregate(i)
main()
|
Add script for processing COCA ngrams.
|
Add script for processing COCA ngrams.
|
Python
|
mit
|
PhilHarnish/forge,PhilHarnish/forge,PhilHarnish/forge,PhilHarnish/forge,PhilHarnish/forge,PhilHarnish/forge
|
Add script for processing COCA ngrams.
|
from data import data, process_common
UNIGRAM_REFERENCE = 'data/g1m_1gram.txt'
SRCS_PATTERN = 'data/corpus/coca/w%s_.txt'
OUTS_PATTERN = 'data/coca_%sgram.txt'
REFERENCE = {}
def load_reference() -> None:
with data.open_project_path(UNIGRAM_REFERENCE, mode='r') as src:
for line in src:
word, score = line.split()
score = int(score)
REFERENCE[word] = score
def sort_key(x) -> tuple:
count, words = x
return count, REFERENCE[words[0]]
def aggregate(i: int) -> None:
results = []
print('reading', SRCS_PATTERN % i)
with data.open_project_path(
SRCS_PATTERN % i, mode='r', errors='ignore') as src:
for line in src:
count, *words = line.split()
count = int(count)
if not all(word in REFERENCE for word in words):
continue
if any(process_common.score(word, 100, 0) < 100 for word in words):
continue
results.append((count, words))
total = len(results)
print('writing', OUTS_PATTERN % i)
with data.open_project_path(OUTS_PATTERN % i, mode='w') as out:
for i, (count, words) in enumerate(
sorted(results, key=sort_key, reverse=True)):
pos = total - i
score = (pos + count) // 2 # Average of position & score.
out.write('%s\t%s\n' % (' '.join(words), score))
def main() -> None:
load_reference()
for i in range(2, 5+1):
aggregate(i)
main()
|
<commit_before><commit_msg>Add script for processing COCA ngrams.<commit_after>
|
from data import data, process_common
UNIGRAM_REFERENCE = 'data/g1m_1gram.txt'
SRCS_PATTERN = 'data/corpus/coca/w%s_.txt'
OUTS_PATTERN = 'data/coca_%sgram.txt'
REFERENCE = {}
def load_reference() -> None:
with data.open_project_path(UNIGRAM_REFERENCE, mode='r') as src:
for line in src:
word, score = line.split()
score = int(score)
REFERENCE[word] = score
def sort_key(x) -> tuple:
count, words = x
return count, REFERENCE[words[0]]
def aggregate(i: int) -> None:
results = []
print('reading', SRCS_PATTERN % i)
with data.open_project_path(
SRCS_PATTERN % i, mode='r', errors='ignore') as src:
for line in src:
count, *words = line.split()
count = int(count)
if not all(word in REFERENCE for word in words):
continue
if any(process_common.score(word, 100, 0) < 100 for word in words):
continue
results.append((count, words))
total = len(results)
print('writing', OUTS_PATTERN % i)
with data.open_project_path(OUTS_PATTERN % i, mode='w') as out:
for i, (count, words) in enumerate(
sorted(results, key=sort_key, reverse=True)):
pos = total - i
score = (pos + count) // 2 # Average of position & score.
out.write('%s\t%s\n' % (' '.join(words), score))
def main() -> None:
load_reference()
for i in range(2, 5+1):
aggregate(i)
main()
|
Add script for processing COCA ngrams.from data import data, process_common
UNIGRAM_REFERENCE = 'data/g1m_1gram.txt'
SRCS_PATTERN = 'data/corpus/coca/w%s_.txt'
OUTS_PATTERN = 'data/coca_%sgram.txt'
REFERENCE = {}
def load_reference() -> None:
with data.open_project_path(UNIGRAM_REFERENCE, mode='r') as src:
for line in src:
word, score = line.split()
score = int(score)
REFERENCE[word] = score
def sort_key(x) -> tuple:
count, words = x
return count, REFERENCE[words[0]]
def aggregate(i: int) -> None:
results = []
print('reading', SRCS_PATTERN % i)
with data.open_project_path(
SRCS_PATTERN % i, mode='r', errors='ignore') as src:
for line in src:
count, *words = line.split()
count = int(count)
if not all(word in REFERENCE for word in words):
continue
if any(process_common.score(word, 100, 0) < 100 for word in words):
continue
results.append((count, words))
total = len(results)
print('writing', OUTS_PATTERN % i)
with data.open_project_path(OUTS_PATTERN % i, mode='w') as out:
for i, (count, words) in enumerate(
sorted(results, key=sort_key, reverse=True)):
pos = total - i
score = (pos + count) // 2 # Average of position & score.
out.write('%s\t%s\n' % (' '.join(words), score))
def main() -> None:
load_reference()
for i in range(2, 5+1):
aggregate(i)
main()
|
<commit_before><commit_msg>Add script for processing COCA ngrams.<commit_after>from data import data, process_common
UNIGRAM_REFERENCE = 'data/g1m_1gram.txt'
SRCS_PATTERN = 'data/corpus/coca/w%s_.txt'
OUTS_PATTERN = 'data/coca_%sgram.txt'
REFERENCE = {}
def load_reference() -> None:
with data.open_project_path(UNIGRAM_REFERENCE, mode='r') as src:
for line in src:
word, score = line.split()
score = int(score)
REFERENCE[word] = score
def sort_key(x) -> tuple:
count, words = x
return count, REFERENCE[words[0]]
def aggregate(i: int) -> None:
results = []
print('reading', SRCS_PATTERN % i)
with data.open_project_path(
SRCS_PATTERN % i, mode='r', errors='ignore') as src:
for line in src:
count, *words = line.split()
count = int(count)
if not all(word in REFERENCE for word in words):
continue
if any(process_common.score(word, 100, 0) < 100 for word in words):
continue
results.append((count, words))
total = len(results)
print('writing', OUTS_PATTERN % i)
with data.open_project_path(OUTS_PATTERN % i, mode='w') as out:
for i, (count, words) in enumerate(
sorted(results, key=sort_key, reverse=True)):
pos = total - i
score = (pos + count) // 2 # Average of position & score.
out.write('%s\t%s\n' % (' '.join(words), score))
def main() -> None:
load_reference()
for i in range(2, 5+1):
aggregate(i)
main()
|
|
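The score that aggregate() writes out in the record above mixes each n-gram's corpus count with its position in the sorted output. A small worked example of that arithmetic, with made-up numbers:
total = 1000         # n-grams kept after filtering
i = 4                # 0-based index in the sorted (descending) output
count = 250          # raw corpus count of this n-gram
pos = total - i      # earlier entries get a larger position value
score = (pos + count) // 2   # average of position and count, as in the script
print(score)         # (996 + 250) // 2 == 623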
f7be5699265e123866f2e43e3e2920b39495dd80
|
tests/test_entities.py
|
tests/test_entities.py
|
from __future__ import unicode_literals
from ftfy import fix_text, fix_text_segment
from nose.tools import eq_
def test_entities():
example = '&\n<html>\n&'
eq_(fix_text(example), '&\n<html>\n&')
eq_(fix_text_segment(example), '&\n<html>\n&')
eq_(fix_text(example, fix_entities=True), '&\n<html>\n&')
eq_(fix_text_segment(example, fix_entities=True), '&\n<html>\n&')
eq_(fix_text(example, fix_entities=False), '&\n<html>\n&')
eq_(fix_text_segment(example, fix_entities=False), '&\n<html>\n&')
eq_(fix_text_segment('<>', fix_entities=False), '<>')
eq_(fix_text_segment('<>', fix_entities=True), '<>')
eq_(fix_text_segment('<>'), '<>')
|
Add tests for the fix_entities parameter
|
Add tests for the fix_entities parameter
|
Python
|
mit
|
LuminosoInsight/python-ftfy
|
Add tests for the fix_entities parameter
|
from __future__ import unicode_literals
from ftfy import fix_text, fix_text_segment
from nose.tools import eq_
def test_entities():
example = '&\n<html>\n&'
eq_(fix_text(example), '&\n<html>\n&')
eq_(fix_text_segment(example), '&\n<html>\n&')
eq_(fix_text(example, fix_entities=True), '&\n<html>\n&')
eq_(fix_text_segment(example, fix_entities=True), '&\n<html>\n&')
eq_(fix_text(example, fix_entities=False), '&\n<html>\n&')
eq_(fix_text_segment(example, fix_entities=False), '&\n<html>\n&')
eq_(fix_text_segment('<>', fix_entities=False), '<>')
eq_(fix_text_segment('<>', fix_entities=True), '<>')
eq_(fix_text_segment('<>'), '<>')
|
<commit_before><commit_msg>Add tests for the fix_entities parameter<commit_after>
|
from __future__ import unicode_literals
from ftfy import fix_text, fix_text_segment
from nose.tools import eq_
def test_entities():
example = '&\n<html>\n&'
eq_(fix_text(example), '&\n<html>\n&')
eq_(fix_text_segment(example), '&\n<html>\n&')
eq_(fix_text(example, fix_entities=True), '&\n<html>\n&')
eq_(fix_text_segment(example, fix_entities=True), '&\n<html>\n&')
eq_(fix_text(example, fix_entities=False), '&\n<html>\n&')
eq_(fix_text_segment(example, fix_entities=False), '&\n<html>\n&')
eq_(fix_text_segment('<>', fix_entities=False), '<>')
eq_(fix_text_segment('<>', fix_entities=True), '<>')
eq_(fix_text_segment('<>'), '<>')
|
Add tests for the fix_entities parameterfrom __future__ import unicode_literals
from ftfy import fix_text, fix_text_segment
from nose.tools import eq_
def test_entities():
example = '&\n<html>\n&'
eq_(fix_text(example), '&\n<html>\n&')
eq_(fix_text_segment(example), '&\n<html>\n&')
eq_(fix_text(example, fix_entities=True), '&\n<html>\n&')
eq_(fix_text_segment(example, fix_entities=True), '&\n<html>\n&')
eq_(fix_text(example, fix_entities=False), '&\n<html>\n&')
eq_(fix_text_segment(example, fix_entities=False), '&\n<html>\n&')
eq_(fix_text_segment('<>', fix_entities=False), '<>')
eq_(fix_text_segment('<>', fix_entities=True), '<>')
eq_(fix_text_segment('<>'), '<>')
|
<commit_before><commit_msg>Add tests for the fix_entities parameter<commit_after>from __future__ import unicode_literals
from ftfy import fix_text, fix_text_segment
from nose.tools import eq_
def test_entities():
example = '&\n<html>\n&'
eq_(fix_text(example), '&\n<html>\n&')
eq_(fix_text_segment(example), '&\n<html>\n&')
eq_(fix_text(example, fix_entities=True), '&\n<html>\n&')
eq_(fix_text_segment(example, fix_entities=True), '&\n<html>\n&')
eq_(fix_text(example, fix_entities=False), '&\n<html>\n&')
eq_(fix_text_segment(example, fix_entities=False), '&\n<html>\n&')
eq_(fix_text_segment('<>', fix_entities=False), '<>')
eq_(fix_text_segment('<>', fix_entities=True), '<>')
eq_(fix_text_segment('<>'), '<>')
|
|
c9ce5f7eafcfc3d15c9ff3d7c72b44d0192fd452
|
tests/test_handlers.py
|
tests/test_handlers.py
|
from __future__ import print_function
import unittest
import teres
import teres.handlers
import logging
import os.path
import shutil
import StringIO
import tempfile
class LoggingHandlerSetUp(unittest.TestCase):
def setUp(self):
self.reporter = teres.Reporter.get_reporter()
self.logger = logging.getLogger("test.logger")
self.loghan_path = "/tmp/logging_handler_test.log"
self.loghan = logging.FileHandler(self.loghan_path,
mode='w')
self.loghan.setLevel(logging.DEBUG)
self.logger.addHandler(self.loghan)
self.handler = teres.handlers.LoggingHandler("logginghandler.test",
self.logger,
dest="/tmp/")
self.reporter.add_handler(self.handler)
def tearDown(self):
teres.Reporter.drop_reporter()
# shutil.rmtree(self.handler.logdir)
class LoggingHandlerTest(LoggingHandlerSetUp):
def test_log_ordinary_file_simple(self):
test = "test_log_ordinary_file"
text = "This is my log file."
src_file = "/tmp/test log file"
fd = open(src_file, "w")
fd.write(text)
fd.close()
self.reporter.send_file(src_file)
# Check the result.
self.assertTrue(os.path.isdir(self.handler.logdir))
tgt = open("{}/{}".format(self.handler.logdir, "test_log_file"))
content = tgt.read()
tgt.close()
self.assertEqual(content, text)
os.remove(src_file)
def test_log_stringio_file(self):
test = "test_log_stringio_file"
text = "This is my stringio file."
src_file = StringIO.StringIO(text)
self.reporter.send_file(src_file, logname=test)
# Check the result.
self.assertTrue(os.path.isdir(self.handler.logdir))
tgt = open("{}/{}".format(self.handler.logdir, test))
content = tgt.read()
tgt.close()
self.assertEqual(content, text)
def test_log_temp_file(self):
test = "test_log_temp_file"
text = "This is my temporary file."
src_file = tempfile.TemporaryFile()
src_file.write(text)
self.reporter.send_file(src_file, logname=test)
src_file.close()
# Check the result.
self.assertTrue(os.path.isdir(self.handler.logdir))
tgt = open("{}/{}".format(self.handler.logdir, test))
content = tgt.read()
tgt.close()
self.assertEqual(content, text)
|
Add basic tests for LoggingHandler.
|
Add basic tests for LoggingHandler.
|
Python
|
lgpl-2.1
|
tlamer/teres,tlamer/teres
|
Add basic tests for LoggingHandler.
|
from __future__ import print_function
import unittest
import teres
import teres.handlers
import logging
import os.path
import shutil
import StringIO
import tempfile
class LoggingHandlerSetUp(unittest.TestCase):
def setUp(self):
self.reporter = teres.Reporter.get_reporter()
self.logger = logging.getLogger("test.logger")
self.loghan_path = "/tmp/logging_handler_test.log"
self.loghan = logging.FileHandler(self.loghan_path,
mode='w')
self.loghan.setLevel(logging.DEBUG)
self.logger.addHandler(self.loghan)
self.handler = teres.handlers.LoggingHandler("logginghandler.test",
self.logger,
dest="/tmp/")
self.reporter.add_handler(self.handler)
def tearDown(self):
teres.Reporter.drop_reporter()
# shutil.rmtree(self.handler.logdir)
class LoggingHandlerTest(LoggingHandlerSetUp):
def test_log_ordinary_file_simple(self):
test = "test_log_ordinary_file"
text = "This is my log file."
src_file = "/tmp/test log file"
fd = open(src_file, "w")
fd.write(text)
fd.close()
self.reporter.send_file(src_file)
# Check the result.
self.assertTrue(os.path.isdir(self.handler.logdir))
tgt = open("{}/{}".format(self.handler.logdir, "test_log_file"))
content = tgt.read()
tgt.close()
self.assertEqual(content, text)
os.remove(src_file)
def test_log_stringio_file(self):
test = "test_log_stringio_file"
text = "This is my stringio file."
src_file = StringIO.StringIO(text)
self.reporter.send_file(src_file, logname=test)
# Check the result.
self.assertTrue(os.path.isdir(self.handler.logdir))
tgt = open("{}/{}".format(self.handler.logdir, test))
content = tgt.read()
tgt.close()
self.assertEqual(content, text)
def test_log_temp_file(self):
test = "test_log_temp_file"
text = "This is my temporary file."
src_file = tempfile.TemporaryFile()
src_file.write(text)
self.reporter.send_file(src_file, logname=test)
src_file.close()
# Check the result.
self.assertTrue(os.path.isdir(self.handler.logdir))
tgt = open("{}/{}".format(self.handler.logdir, test))
content = tgt.read()
tgt.close()
self.assertEqual(content, text)
|
<commit_before><commit_msg>Add basic tests for LoggingHandler.<commit_after>
|
from __future__ import print_function
import unittest
import teres
import teres.handlers
import logging
import os.path
import shutil
import StringIO
import tempfile
class LoggingHandlerSetUp(unittest.TestCase):
def setUp(self):
self.reporter = teres.Reporter.get_reporter()
self.logger = logging.getLogger("test.logger")
self.loghan_path = "/tmp/logging_handler_test.log"
self.loghan = logging.FileHandler(self.loghan_path,
mode='w')
self.loghan.setLevel(logging.DEBUG)
self.logger.addHandler(self.loghan)
self.handler = teres.handlers.LoggingHandler("logginghandler.test",
self.logger,
dest="/tmp/")
self.reporter.add_handler(self.handler)
def tearDown(self):
teres.Reporter.drop_reporter()
# shutil.rmtree(self.handler.logdir)
class LoggingHandlerTest(LoggingHandlerSetUp):
def test_log_ordinary_file_simple(self):
test = "test_log_ordinary_file"
text = "This is my log file."
src_file = "/tmp/test log file"
fd = open(src_file, "w")
fd.write(text)
fd.close()
self.reporter.send_file(src_file)
# Check the result.
self.assertTrue(os.path.isdir(self.handler.logdir))
tgt = open("{}/{}".format(self.handler.logdir, "test_log_file"))
content = tgt.read()
tgt.close()
self.assertEqual(content, text)
os.remove(src_file)
def test_log_stringio_file(self):
test = "test_log_stringio_file"
text = "This is my stringio file."
src_file = StringIO.StringIO(text)
self.reporter.send_file(src_file, logname=test)
# Check the result.
self.assertTrue(os.path.isdir(self.handler.logdir))
tgt = open("{}/{}".format(self.handler.logdir, test))
content = tgt.read()
tgt.close()
self.assertEqual(content, text)
def test_log_temp_file(self):
test = "test_log_temp_file"
text = "This is my temporary file."
src_file = tempfile.TemporaryFile()
src_file.write(text)
self.reporter.send_file(src_file, logname=test)
src_file.close()
# Check the result.
self.assertTrue(os.path.isdir(self.handler.logdir))
tgt = open("{}/{}".format(self.handler.logdir, test))
content = tgt.read()
tgt.close()
self.assertEqual(content, text)
|
Add basic tests for LoggingHandler.from __future__ import print_function
import unittest
import teres
import teres.handlers
import logging
import os.path
import shutil
import StringIO
import tempfile
class LoggingHandlerSetUp(unittest.TestCase):
def setUp(self):
self.reporter = teres.Reporter.get_reporter()
self.logger = logging.getLogger("test.logger")
self.loghan_path = "/tmp/logging_handler_test.log"
self.loghan = logging.FileHandler(self.loghan_path,
mode='w')
self.loghan.setLevel(logging.DEBUG)
self.logger.addHandler(self.loghan)
self.handler = teres.handlers.LoggingHandler("logginghandler.test",
self.logger,
dest="/tmp/")
self.reporter.add_handler(self.handler)
def tearDown(self):
teres.Reporter.drop_reporter()
# shutil.rmtree(self.handler.logdir)
class LoggingHandlerTest(LoggingHandlerSetUp):
def test_log_ordinary_file_simple(self):
test = "test_log_ordinary_file"
text = "This is my log file."
src_file = "/tmp/test log file"
fd = open(src_file, "w")
fd.write(text)
fd.close()
self.reporter.send_file(src_file)
# Check the result.
self.assertTrue(os.path.isdir(self.handler.logdir))
tgt = open("{}/{}".format(self.handler.logdir, "test_log_file"))
content = tgt.read()
tgt.close()
self.assertEqual(content, text)
os.remove(src_file)
def test_log_stringio_file(self):
test = "test_log_stringio_file"
text = "This is my stringio file."
src_file = StringIO.StringIO(text)
self.reporter.send_file(src_file, logname=test)
# Check the result.
self.assertTrue(os.path.isdir(self.handler.logdir))
tgt = open("{}/{}".format(self.handler.logdir, test))
content = tgt.read()
tgt.close()
self.assertEqual(content, text)
def test_log_temp_file(self):
test = "test_log_temp_file"
text = "This is my temporary file."
src_file = tempfile.TemporaryFile()
src_file.write(text)
self.reporter.send_file(src_file, logname=test)
src_file.close()
# Check the result.
self.assertTrue(os.path.isdir(self.handler.logdir))
tgt = open("{}/{}".format(self.handler.logdir, test))
content = tgt.read()
tgt.close()
self.assertEqual(content, text)
|
<commit_before><commit_msg>Add basic tests for LoggingHandler.<commit_after>from __future__ import print_function
import unittest
import teres
import teres.handlers
import logging
import os.path
import shutil
import StringIO
import tempfile
class LoggingHandlerSetUp(unittest.TestCase):
def setUp(self):
self.reporter = teres.Reporter.get_reporter()
self.logger = logging.getLogger("test.logger")
self.loghan_path = "/tmp/logging_handler_test.log"
self.loghan = logging.FileHandler(self.loghan_path,
mode='w')
self.loghan.setLevel(logging.DEBUG)
self.logger.addHandler(self.loghan)
self.handler = teres.handlers.LoggingHandler("logginghandler.test",
self.logger,
dest="/tmp/")
self.reporter.add_handler(self.handler)
def tearDown(self):
teres.Reporter.drop_reporter()
# shutil.rmtree(self.handler.logdir)
class LoggingHandlerTest(LoggingHandlerSetUp):
def test_log_ordinary_file_simple(self):
test = "test_log_ordinary_file"
text = "This is my log file."
src_file = "/tmp/test log file"
fd = open(src_file, "w")
fd.write(text)
fd.close()
self.reporter.send_file(src_file)
# Check the result.
self.assertTrue(os.path.isdir(self.handler.logdir))
tgt = open("{}/{}".format(self.handler.logdir, "test_log_file"))
content = tgt.read()
tgt.close()
self.assertEqual(content, text)
os.remove(src_file)
def test_log_stringio_file(self):
test = "test_log_stringio_file"
text = "This is my stringio file."
src_file = StringIO.StringIO(text)
self.reporter.send_file(src_file, logname=test)
# Check the result.
self.assertTrue(os.path.isdir(self.handler.logdir))
tgt = open("{}/{}".format(self.handler.logdir, test))
content = tgt.read()
tgt.close()
self.assertEqual(content, text)
def test_log_temp_file(self):
test = "test_log_temp_file"
text = "This is my temporary file."
src_file = tempfile.TemporaryFile()
src_file.write(text)
self.reporter.send_file(src_file, logname=test)
src_file.close()
# Check the result.
self.assertTrue(os.path.isdir(self.handler.logdir))
tgt = open("{}/{}".format(self.handler.logdir, test))
content = tgt.read()
tgt.close()
self.assertEqual(content, text)
|
|
a209ee1f7ab3be06d03e0eaa926dca5e4811ecc6
|
lesion/tests/test_lifio.py
|
lesion/tests/test_lifio.py
|
import os
from lesion import lifio
from numpy.testing.decorators import skipif
from numpy.testing import assert_equal, assert_allclose
currdir = os.path.abspath(os.path.dirname(__file__))
test_lif = os.path.join(currdir, 'mouse-kidney.lif')
test_lif_unavailable = not os.path.isfile(test_lif)
@skipif(test_lif_unavailable)
def test_metadata_size():
assert_equal(lifio.lif_metadata_string_size(test_lif), 107192)
@skipif(test_lif_unavailable)
def test_metadata():
names, sizes, reso = lifio.metadata(test_lif)
assert_equal(names, ['Series016', 'Series019'])
assert_equal(sizes, [(1, 25, 512, 512, 4), (1, 46, 512, 512, 4)])
assert_allclose(reso, [(0.9999, 1.5137, 1.5137),
(0.1395, 0.2539, 0.2539)], atol=1e-4)
def test_done():
lifio.done()
assert lifio.VM_KILLED
|
Add functions to test LIF IO
|
Add functions to test LIF IO
|
Python
|
bsd-3-clause
|
jni/lesion
|
Add functions to test LIF IO
|
import os
from lesion import lifio
from numpy.testing.decorators import skipif
from numpy.testing import assert_equal, assert_allclose
currdir = os.path.abspath(os.path.dirname(__file__))
test_lif = os.path.join(currdir, 'mouse-kidney.lif')
test_lif_unavailable = not os.path.isfile(test_lif)
@skipif(test_lif_unavailable)
def test_metadata_size():
assert_equal(lifio.lif_metadata_string_size(test_lif), 107192)
@skipif(test_lif_unavailable)
def test_metadata():
names, sizes, reso = lifio.metadata(test_lif)
assert_equal(names, ['Series016', 'Series019'])
assert_equal(sizes, [(1, 25, 512, 512, 4), (1, 46, 512, 512, 4)])
assert_allclose(reso, [(0.9999, 1.5137, 1.5137),
(0.1395, 0.2539, 0.2539)], atol=1e-4)
def test_done():
lifio.done()
assert lifio.VM_KILLED
|
<commit_before><commit_msg>Add functions to test LIF IO<commit_after>
|
import os
from lesion import lifio
from numpy.testing.decorators import skipif
from numpy.testing import assert_equal, assert_allclose
currdir = os.path.abspath(os.path.dirname(__file__))
test_lif = os.path.join(currdir, 'mouse-kidney.lif')
test_lif_unavailable = not os.path.isfile(test_lif)
@skipif(test_lif_unavailable)
def test_metadata_size():
assert_equal(lifio.lif_metadata_string_size(test_lif), 107192)
@skipif(test_lif_unavailable)
def test_metadata():
names, sizes, reso = lifio.metadata(test_lif)
assert_equal(names, ['Series016', 'Series019'])
assert_equal(sizes, [(1, 25, 512, 512, 4), (1, 46, 512, 512, 4)])
assert_allclose(reso, [(0.9999, 1.5137, 1.5137),
(0.1395, 0.2539, 0.2539)], atol=1e-4)
def test_done():
lifio.done()
assert lifio.VM_KILLED
|
Add functions to test LIF IOimport os
from lesion import lifio
from numpy.testing.decorators import skipif
from numpy.testing import assert_equal, assert_allclose
currdir = os.path.abspath(os.path.dirname(__file__))
test_lif = os.path.join(currdir, 'mouse-kidney.lif')
test_lif_unavailable = not os.path.isfile(test_lif)
@skipif(test_lif_unavailable)
def test_metadata_size():
assert_equal(lifio.lif_metadata_string_size(test_lif), 107192)
@skipif(test_lif_unavailable)
def test_metadata():
names, sizes, reso = lifio.metadata(test_lif)
assert_equal(names, ['Series016', 'Series019'])
assert_equal(sizes, [(1, 25, 512, 512, 4), (1, 46, 512, 512, 4)])
assert_allclose(reso, [(0.9999, 1.5137, 1.5137),
(0.1395, 0.2539, 0.2539)], atol=1e-4)
def test_done():
lifio.done()
assert lifio.VM_KILLED
|
<commit_before><commit_msg>Add functions to test LIF IO<commit_after>import os
from lesion import lifio
from numpy.testing.decorators import skipif
from numpy.testing import assert_equal, assert_allclose
currdir = os.path.abspath(os.path.dirname(__file__))
test_lif = os.path.join(currdir, 'mouse-kidney.lif')
test_lif_unavailable = not os.path.isfile(test_lif)
@skipif(test_lif_unavailable)
def test_metadata_size():
assert_equal(lifio.lif_metadata_string_size(test_lif), 107192)
@skipif(test_lif_unavailable)
def test_metadata():
names, sizes, reso = lifio.metadata(test_lif)
assert_equal(names, ['Series016', 'Series019'])
assert_equal(sizes, [(1, 25, 512, 512, 4), (1, 46, 512, 512, 4)])
assert_allclose(reso, [(0.9999, 1.5137, 1.5137),
(0.1395, 0.2539, 0.2539)], atol=1e-4)
def test_done():
lifio.done()
assert lifio.VM_KILLED
|
|
4b193e59a53717c80c6835374048ba71c5bfa208
|
tests/test_cli_common.py
|
tests/test_cli_common.py
|
from __future__ import absolute_import
import unittest
import os
from mciutil.cli.common import get_config_filename
class CliCommonTests(unittest.TestCase):
def test_get_config_filename(self):
"""
check that package default config exists, otherwise fail
this will show up on remote build when package is installed
rather than pointing to development environment
"""
filename = get_config_filename('mideu.yml')
self.assertTrue(os.path.exists(filename))
if __name__ == '__main__':
unittest.main()
|
Check that mideu package config exists (assumes that home and current dir do not have config present)
|
Check that mideu package config exists (assumes that home and current dir do not have config present)
|
Python
|
bsd-3-clause
|
adelosa/mciutil
|
Check that mideu package config exists (assumes that home and current dir do not have config present)
|
from __future__ import absolute_import
import unittest
import os
from mciutil.cli.common import get_config_filename
class CliCommonTests(unittest.TestCase):
def test_get_config_filename(self):
"""
check that package default config exists, otherwise fail
this will show up on remote build when package is installed
rather than pointing to development environment
"""
filename = get_config_filename('mideu.yml')
self.assertTrue(os.path.exists(filename))
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Check that mideu package config exists (assumes that home and current dir do not have config present)<commit_after>
|
from __future__ import absolute_import
import unittest
import os
from mciutil.cli.common import get_config_filename
class CliCommonTests(unittest.TestCase):
def test_get_config_filename(self):
"""
check that package default config exists, otherwise fail
this will show up on remote build when package is installed
rather than pointing to development environment
"""
filename = get_config_filename('mideu.yml')
self.assertTrue(os.path.exists(filename))
if __name__ == '__main__':
unittest.main()
|
Check that mideu package config exists (assumes that home and current dir do not have config present)from __future__ import absolute_import
import unittest
import os
from mciutil.cli.common import get_config_filename
class CliCommonTests(unittest.TestCase):
def test_get_config_filename(self):
"""
check that package default config exists, otherwise fail
this will show up on remote build when package is installed
rather than pointing to development environment
"""
filename = get_config_filename('mideu.yml')
self.assertTrue(os.path.exists(filename))
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Check that mideu package config exists (assumes that home and current dir do not have config present)<commit_after>from __future__ import absolute_import
import unittest
import os
from mciutil.cli.common import get_config_filename
class CliCommonTests(unittest.TestCase):
def test_get_config_filename(self):
"""
check that package default config exists, otherwise fail
this will show up on remote build when package is installed
rather than pointing to development environment
"""
filename = get_config_filename('mideu.yml')
self.assertTrue(os.path.exists(filename))
if __name__ == '__main__':
unittest.main()
|
|
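The test above relies on get_config_filename falling back to a config file bundled with the package when neither the home directory nor the current directory supplies one. A generic sketch of that lookup pattern; this illustrates the idea and is not mciutil's actual implementation:
import os
def find_config(name, package_dir):
    # Probe the home directory, then the working directory, then the packaged default.
    candidates = [
        os.path.join(os.path.expanduser("~"), name),
        os.path.join(os.getcwd(), name),
        os.path.join(package_dir, name),
    ]
    for path in candidates:
        if os.path.exists(path):
            return path
    return None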
9d02b311504c8babf876ce8c45262b7394650d14
|
tests/test_symmetrize.py
|
tests/test_symmetrize.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) 2015-2018, ETH Zurich, Institut fuer Theoretische Physik
# Author: Dominik Gresch <greschd@gmx.ch>
"""
Tests for the 'symmetrize' method.
"""
import copy
import pytest
import tbmodels
@pytest.fixture
def input_model(sample):
return tbmodels.io.load(sample("InAs_nosym.hdf5"))
@pytest.fixture
def symmetries(sample):
return tbmodels.io.load(sample("InAs_symmetries.hdf5"))
def test_symmetrize(
models_close,
input_model, # pylint: disable=redefined-outer-name
symmetries, # pylint: disable=redefined-outer-name
sample,
):
"""
Test the 'symmetrize' method.
"""
model_res = input_model
for sym in symmetries:
if hasattr(sym, "full_group"):
model_res = model_res.symmetrize(sym.symmetries, full_group=sym.full_group)
else:
model_res = model_res.symmetrize([sym], full_group=False)
model_reference = tbmodels.io.load(sample("InAs_sym_reference.hdf5"))
models_close(model_res, model_reference)
def test_position_tolerance(
models_close,
input_model, # pylint: disable=redefined-outer-name
symmetries, # pylint: disable=redefined-outer-name
sample,
):
"""
Test the 'position_tolerance' argument in the 'symmetrize' method.
"""
model_in = copy.deepcopy(input_model)
model_reference = tbmodels.io.load(sample("InAs_sym_reference.hdf5"))
model_in.pos[0] += 0.01
model_reference.pos[0] += 0.01
# First run without 'position_tolerance' argument - this should raise
with pytest.raises(tbmodels.exceptions.TbmodelsException):
model_res = model_in
for sym in symmetries:
if hasattr(sym, "full_group"):
model_res = model_res.symmetrize(
sym.symmetries, full_group=sym.full_group
)
else:
model_res = model_res.symmetrize([sym], full_group=False)
# Adding the 'position_tolerance' argument suppresses the error
model_res = model_in
for sym in symmetries:
if hasattr(sym, "full_group"):
model_res = model_res.symmetrize(
sym.symmetries, full_group=sym.full_group, position_tolerance=0.05
)
else:
model_res = model_res.symmetrize(
[sym], full_group=False, position_tolerance=0.05
)
models_close(model_res, model_reference)
|
Add test for 'position_tolerance' when symmetrizing.
|
Add test for 'position_tolerance' when symmetrizing.
|
Python
|
apache-2.0
|
Z2PackDev/TBmodels,Z2PackDev/TBmodels
|
Add test for 'position_tolerance' when symmetrizing.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) 2015-2018, ETH Zurich, Institut fuer Theoretische Physik
# Author: Dominik Gresch <greschd@gmx.ch>
"""
Tests for the 'symmetrize' method.
"""
import copy
import pytest
import tbmodels
@pytest.fixture
def input_model(sample):
return tbmodels.io.load(sample("InAs_nosym.hdf5"))
@pytest.fixture
def symmetries(sample):
return tbmodels.io.load(sample("InAs_symmetries.hdf5"))
def test_symmetrize(
models_close,
input_model, # pylint: disable=redefined-outer-name
symmetries, # pylint: disable=redefined-outer-name
sample,
):
"""
Test the 'symmetrize' method.
"""
model_res = input_model
for sym in symmetries:
if hasattr(sym, "full_group"):
model_res = model_res.symmetrize(sym.symmetries, full_group=sym.full_group)
else:
model_res = model_res.symmetrize([sym], full_group=False)
model_reference = tbmodels.io.load(sample("InAs_sym_reference.hdf5"))
models_close(model_res, model_reference)
def test_position_tolerance(
models_close,
input_model, # pylint: disable=redefined-outer-name
symmetries, # pylint: disable=redefined-outer-name
sample,
):
"""
Test the 'position_tolerance' argument in the 'symmetrize' method.
"""
model_in = copy.deepcopy(input_model)
model_reference = tbmodels.io.load(sample("InAs_sym_reference.hdf5"))
model_in.pos[0] += 0.01
model_reference.pos[0] += 0.01
# First run without 'position_tolerance' argument - this should raise
with pytest.raises(tbmodels.exceptions.TbmodelsException):
model_res = model_in
for sym in symmetries:
if hasattr(sym, "full_group"):
model_res = model_res.symmetrize(
sym.symmetries, full_group=sym.full_group
)
else:
model_res = model_res.symmetrize([sym], full_group=False)
# Adding the 'position_tolerance' argument suppresses the error
model_res = model_in
for sym in symmetries:
if hasattr(sym, "full_group"):
model_res = model_res.symmetrize(
sym.symmetries, full_group=sym.full_group, position_tolerance=0.05
)
else:
model_res = model_res.symmetrize(
[sym], full_group=False, position_tolerance=0.05
)
models_close(model_res, model_reference)
|
<commit_before><commit_msg>Add test for 'position_tolerance' when symmetrizing.<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) 2015-2018, ETH Zurich, Institut fuer Theoretische Physik
# Author: Dominik Gresch <greschd@gmx.ch>
"""
Tests for the 'symmetrize' method.
"""
import copy
import pytest
import tbmodels
@pytest.fixture
def input_model(sample):
return tbmodels.io.load(sample("InAs_nosym.hdf5"))
@pytest.fixture
def symmetries(sample):
return tbmodels.io.load(sample("InAs_symmetries.hdf5"))
def test_symmetrize(
models_close,
input_model, # pylint: disable=redefined-outer-name
symmetries, # pylint: disable=redefined-outer-name
sample,
):
"""
Test the 'symmetrize' method.
"""
model_res = input_model
for sym in symmetries:
if hasattr(sym, "full_group"):
model_res = model_res.symmetrize(sym.symmetries, full_group=sym.full_group)
else:
model_res = model_res.symmetrize([sym], full_group=False)
model_reference = tbmodels.io.load(sample("InAs_sym_reference.hdf5"))
models_close(model_res, model_reference)
def test_position_tolerance(
models_close,
input_model, # pylint: disable=redefined-outer-name
symmetries, # pylint: disable=redefined-outer-name
sample,
):
"""
Test the 'position_tolerance' argument in the 'symmetrize' method.
"""
model_in = copy.deepcopy(input_model)
model_reference = tbmodels.io.load(sample("InAs_sym_reference.hdf5"))
model_in.pos[0] += 0.01
model_reference.pos[0] += 0.01
# First run without 'position_tolerance' argument - this should raise
with pytest.raises(tbmodels.exceptions.TbmodelsException):
model_res = model_in
for sym in symmetries:
if hasattr(sym, "full_group"):
model_res = model_res.symmetrize(
sym.symmetries, full_group=sym.full_group
)
else:
model_res = model_res.symmetrize([sym], full_group=False)
# Adding the 'position_tolerance' argument suppresses the error
model_res = model_in
for sym in symmetries:
if hasattr(sym, "full_group"):
model_res = model_res.symmetrize(
sym.symmetries, full_group=sym.full_group, position_tolerance=0.05
)
else:
model_res = model_res.symmetrize(
[sym], full_group=False, position_tolerance=0.05
)
models_close(model_res, model_reference)
|
Add test for 'position_tolerance' when symmetrizing.#!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) 2015-2018, ETH Zurich, Institut fuer Theoretische Physik
# Author: Dominik Gresch <greschd@gmx.ch>
"""
Tests for the 'symmetrize' method.
"""
import copy
import pytest
import tbmodels
@pytest.fixture
def input_model(sample):
return tbmodels.io.load(sample("InAs_nosym.hdf5"))
@pytest.fixture
def symmetries(sample):
return tbmodels.io.load(sample("InAs_symmetries.hdf5"))
def test_symmetrize(
models_close,
input_model, # pylint: disable=redefined-outer-name
symmetries, # pylint: disable=redefined-outer-name
sample,
):
"""
Test the 'symmetrize' method.
"""
model_res = input_model
for sym in symmetries:
if hasattr(sym, "full_group"):
model_res = model_res.symmetrize(sym.symmetries, full_group=sym.full_group)
else:
model_res = model_res.symmetrize([sym], full_group=False)
model_reference = tbmodels.io.load(sample("InAs_sym_reference.hdf5"))
models_close(model_res, model_reference)
def test_position_tolerance(
models_close,
input_model, # pylint: disable=redefined-outer-name
symmetries, # pylint: disable=redefined-outer-name
sample,
):
"""
Test the 'position_tolerance' argument in the 'symmetrize' method.
"""
model_in = copy.deepcopy(input_model)
model_reference = tbmodels.io.load(sample("InAs_sym_reference.hdf5"))
model_in.pos[0] += 0.01
model_reference.pos[0] += 0.01
# First run without 'position_tolerance' argument - this should raise
with pytest.raises(tbmodels.exceptions.TbmodelsException):
model_res = model_in
for sym in symmetries:
if hasattr(sym, "full_group"):
model_res = model_res.symmetrize(
sym.symmetries, full_group=sym.full_group
)
else:
model_res = model_res.symmetrize([sym], full_group=False)
# Adding the 'position_tolerance' argument suppresses the error
model_res = model_in
for sym in symmetries:
if hasattr(sym, "full_group"):
model_res = model_res.symmetrize(
sym.symmetries, full_group=sym.full_group, position_tolerance=0.05
)
else:
model_res = model_res.symmetrize(
[sym], full_group=False, position_tolerance=0.05
)
models_close(model_res, model_reference)
|
<commit_before><commit_msg>Add test for 'position_tolerance' when symmetrizing.<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) 2015-2018, ETH Zurich, Institut fuer Theoretische Physik
# Author: Dominik Gresch <greschd@gmx.ch>
"""
Tests for the 'symmetrize' method.
"""
import copy
import pytest
import tbmodels
@pytest.fixture
def input_model(sample):
return tbmodels.io.load(sample("InAs_nosym.hdf5"))
@pytest.fixture
def symmetries(sample):
return tbmodels.io.load(sample("InAs_symmetries.hdf5"))
def test_symmetrize(
models_close,
input_model, # pylint: disable=redefined-outer-name
symmetries, # pylint: disable=redefined-outer-name
sample,
):
"""
Test the 'symmetrize' method.
"""
model_res = input_model
for sym in symmetries:
if hasattr(sym, "full_group"):
model_res = model_res.symmetrize(sym.symmetries, full_group=sym.full_group)
else:
model_res = model_res.symmetrize([sym], full_group=False)
model_reference = tbmodels.io.load(sample("InAs_sym_reference.hdf5"))
models_close(model_res, model_reference)
def test_position_tolerance(
models_close,
input_model, # pylint: disable=redefined-outer-name
symmetries, # pylint: disable=redefined-outer-name
sample,
):
"""
Test the 'position_tolerance' argument in the 'symmetrize' method.
"""
model_in = copy.deepcopy(input_model)
model_reference = tbmodels.io.load(sample("InAs_sym_reference.hdf5"))
model_in.pos[0] += 0.01
model_reference.pos[0] += 0.01
# First run without 'position_tolerance' argument - this should raise
with pytest.raises(tbmodels.exceptions.TbmodelsException):
model_res = model_in
for sym in symmetries:
if hasattr(sym, "full_group"):
model_res = model_res.symmetrize(
sym.symmetries, full_group=sym.full_group
)
else:
model_res = model_res.symmetrize([sym], full_group=False)
# Adding the 'position_tolerance' argument suppresses the error
model_res = model_in
for sym in symmetries:
if hasattr(sym, "full_group"):
model_res = model_res.symmetrize(
sym.symmetries, full_group=sym.full_group, position_tolerance=0.05
)
else:
model_res = model_res.symmetrize(
[sym], full_group=False, position_tolerance=0.05
)
models_close(model_res, model_reference)
|
|
cd6efedc59924cef44dbddaa304483e09fe62d4e
|
testci/test_featureproducer.py
|
testci/test_featureproducer.py
|
import collections
import datetime
import pytest
import numpy as np
from PIL import Image
#from pelops.datasets.chip import ChipDataset, Chip
from pelops.datasets.featuredataset import FeatureDataset
from pelops.features.feature_producer import FeatureProducer
@pytest.fixture
def chip_producer():
Chip = collections.namedtuple("Chip", ["filepath", "car_id", "cam_id", "time", "img_data", "misc"])
DATA = [[[ 0, 0, 0],
[255, 255, 255],
[ 0, 0, 0]],
[[255, 255, 255],
[ 0, 0, 0],
[255, 255, 255]],
[[ 0, 0, 0],
[255, 255, 255],
[ 0, 0, 0]]]
CHIPS = (
# filepath, car_id, cam_id, time, img_data, misc
("car1_cam1.png", 1, 1, datetime.datetime(2016, 10, 1, 0, 1, 2, microsecond=100), np.array(DATA, dtype=np.uint8), {}),
)
chip_producer = {"chips": {}}
for filepath, car_id, cam_id, time, img_data, misc in CHIPS:
print(img_data.shape)
chip = Chip(filepath, car_id, cam_id, time, img_data, misc)
chip_producer["chips"][filepath] = chip
return chip_producer
@pytest.fixture
def monkey_feature_producer(chip_producer):
# Monkey patch the __init__() function so that it will succeed
def new_init(self, chip_producer):
self.chip_producer = chip_producer
FeatureProducer.__init__ = new_init
return (FeatureProducer(chip_producer))
def test_set_variables_raises():
with pytest.raises(NotImplementedError):
fp = FeatureProducer(None)
def test_produce_features_raises(monkey_feature_producer):
with pytest.raises(NotImplementedError):
monkey_feature_producer.produce_features(None)
def test_get_image_img_data(monkey_feature_producer, chip_producer):
for key, chip in chip_producer["chips"].items():
assert monkey_feature_producer.get_image(chip)
|
Add first tests for `FeatureProducer()`
|
Add first tests for `FeatureProducer()`
These tests do not test all code paths, but it is a start. We still need
to add a test file to read.
|
Python
|
apache-2.0
|
dave-lab41/pelops,dave-lab41/pelops,Lab41/pelops,Lab41/pelops,d-grossman/pelops,d-grossman/pelops
|
Add first tests for `FeatureProducer()`
These tests do not test all code paths, but it is a start. We still need
to add a test file to read.
|
import collections
import datetime
import pytest
import numpy as np
from PIL import Image
#from pelops.datasets.chip import ChipDataset, Chip
from pelops.datasets.featuredataset import FeatureDataset
from pelops.features.feature_producer import FeatureProducer
@pytest.fixture
def chip_producer():
Chip = collections.namedtuple("Chip", ["filepath", "car_id", "cam_id", "time", "img_data", "misc"])
DATA = [[[ 0, 0, 0],
[255, 255, 255],
[ 0, 0, 0]],
[[255, 255, 255],
[ 0, 0, 0],
[255, 255, 255]],
[[ 0, 0, 0],
[255, 255, 255],
[ 0, 0, 0]]]
CHIPS = (
# filepath, car_id, cam_id, time, img_data, misc
("car1_cam1.png", 1, 1, datetime.datetime(2016, 10, 1, 0, 1, 2, microsecond=100), np.array(DATA, dtype=np.uint8), {}),
)
chip_producer = {"chips": {}}
for filepath, car_id, cam_id, time, img_data, misc in CHIPS:
print(img_data.shape)
chip = Chip(filepath, car_id, cam_id, time, img_data, misc)
chip_producer["chips"][filepath] = chip
return chip_producer
@pytest.fixture
def monkey_feature_producer(chip_producer):
# Monkey patch the __init__() function so that it will succeed
def new_init(self, chip_producer):
self.chip_producer = chip_producer
FeatureProducer.__init__ = new_init
return (FeatureProducer(chip_producer))
def test_set_variables_raises():
with pytest.raises(NotImplementedError):
fp = FeatureProducer(None)
def test_produce_features_raises(monkey_feature_producer):
with pytest.raises(NotImplementedError):
monkey_feature_producer.produce_features(None)
def test_get_image_img_data(monkey_feature_producer, chip_producer):
for key, chip in chip_producer["chips"].items():
assert monkey_feature_producer.get_image(chip)
|
<commit_before><commit_msg>Add first tests for `FeatureProducer()`
These tests do not test all code paths, but it is a start. We still need
to add a test file to read.<commit_after>
|
import collections
import datetime
import pytest
import numpy as np
from PIL import Image
#from pelops.datasets.chip import ChipDataset, Chip
from pelops.datasets.featuredataset import FeatureDataset
from pelops.features.feature_producer import FeatureProducer
@pytest.fixture
def chip_producer():
Chip = collections.namedtuple("Chip", ["filepath", "car_id", "cam_id", "time", "img_data", "misc"])
DATA = [[[ 0, 0, 0],
[255, 255, 255],
[ 0, 0, 0]],
[[255, 255, 255],
[ 0, 0, 0],
[255, 255, 255]],
[[ 0, 0, 0],
[255, 255, 255],
[ 0, 0, 0]]]
CHIPS = (
# filepath, car_id, cam_id, time, img_data, misc
("car1_cam1.png", 1, 1, datetime.datetime(2016, 10, 1, 0, 1, 2, microsecond=100), np.array(DATA, dtype=np.uint8), {}),
)
chip_producer = {"chips": {}}
for filepath, car_id, cam_id, time, img_data, misc in CHIPS:
print(img_data.shape)
chip = Chip(filepath, car_id, cam_id, time, img_data, misc)
chip_producer["chips"][filepath] = chip
return chip_producer
@pytest.fixture
def monkey_feature_producer(chip_producer):
# Monkey patch the __init__() function so that it will succeed
def new_init(self, chip_producer):
self.chip_producer = chip_producer
FeatureProducer.__init__ = new_init
return (FeatureProducer(chip_producer))
def test_set_variables_raises():
with pytest.raises(NotImplementedError):
fp = FeatureProducer(None)
def test_produce_features_raises(monkey_feature_producer):
with pytest.raises(NotImplementedError):
monkey_feature_producer.produce_features(None)
def test_get_image_img_data(monkey_feature_producer, chip_producer):
for key, chip in chip_producer["chips"].items():
assert monkey_feature_producer.get_image(chip)
|
Add first tests for `FeatureProducer()`
These tests do not test all code paths, but it is a start. We still need
to add a test file to read.import collections
import datetime
import pytest
import numpy as np
from PIL import Image
#from pelops.datasets.chip import ChipDataset, Chip
from pelops.datasets.featuredataset import FeatureDataset
from pelops.features.feature_producer import FeatureProducer
@pytest.fixture
def chip_producer():
Chip = collections.namedtuple("Chip", ["filepath", "car_id", "cam_id", "time", "img_data", "misc"])
DATA = [[[ 0, 0, 0],
[255, 255, 255],
[ 0, 0, 0]],
[[255, 255, 255],
[ 0, 0, 0],
[255, 255, 255]],
[[ 0, 0, 0],
[255, 255, 255],
[ 0, 0, 0]]]
CHIPS = (
# filepath, car_id, cam_id, time, img_data, misc
("car1_cam1.png", 1, 1, datetime.datetime(2016, 10, 1, 0, 1, 2, microsecond=100), np.array(DATA, dtype=np.uint8), {}),
)
chip_producer = {"chips": {}}
for filepath, car_id, cam_id, time, img_data, misc in CHIPS:
print(img_data.shape)
chip = Chip(filepath, car_id, cam_id, time, img_data, misc)
chip_producer["chips"][filepath] = chip
return chip_producer
@pytest.fixture
def monkey_feature_producer(chip_producer):
# Monkey patch the __init__() function so that it will succeed
def new_init(self, chip_producer):
self.chip_producer = chip_producer
FeatureProducer.__init__ = new_init
return (FeatureProducer(chip_producer))
def test_set_variables_raises():
with pytest.raises(NotImplementedError):
fp = FeatureProducer(None)
def test_produce_features_raises(monkey_feature_producer):
with pytest.raises(NotImplementedError):
monkey_feature_producer.produce_features(None)
def test_get_image_img_data(monkey_feature_producer, chip_producer):
for key, chip in chip_producer["chips"].items():
assert monkey_feature_producer.get_image(chip)
|
<commit_before><commit_msg>Add first tests for `FeatureProducer()`
These tests do not test all code paths, but it is a start. We still need
to add a test file to read.<commit_after>import collections
import datetime
import pytest
import numpy as np
from PIL import Image
#from pelops.datasets.chip import ChipDataset, Chip
from pelops.datasets.featuredataset import FeatureDataset
from pelops.features.feature_producer import FeatureProducer
@pytest.fixture
def chip_producer():
Chip = collections.namedtuple("Chip", ["filepath", "car_id", "cam_id", "time", "img_data", "misc"])
DATA = [[[ 0, 0, 0],
[255, 255, 255],
[ 0, 0, 0]],
[[255, 255, 255],
[ 0, 0, 0],
[255, 255, 255]],
[[ 0, 0, 0],
[255, 255, 255],
[ 0, 0, 0]]]
CHIPS = (
# filepath, car_id, cam_id, time, img_data, misc
("car1_cam1.png", 1, 1, datetime.datetime(2016, 10, 1, 0, 1, 2, microsecond=100), np.array(DATA, dtype=np.uint8), {}),
)
chip_producer = {"chips": {}}
for filepath, car_id, cam_id, time, img_data, misc in CHIPS:
print(img_data.shape)
chip = Chip(filepath, car_id, cam_id, time, img_data, misc)
chip_producer["chips"][filepath] = chip
return chip_producer
@pytest.fixture
def monkey_feature_producer(chip_producer):
# Monkey patch the __init__() function so that it will succeed
def new_init(self, chip_producer):
self.chip_producer = chip_producer
FeatureProducer.__init__ = new_init
return (FeatureProducer(chip_producer))
def test_set_variables_raises():
with pytest.raises(NotImplementedError):
fp = FeatureProducer(None)
def test_produce_features_raises(monkey_feature_producer):
with pytest.raises(NotImplementedError):
monkey_feature_producer.produce_features(None)
def test_get_image_img_data(monkey_feature_producer, chip_producer):
for key, chip in chip_producer["chips"].items():
assert monkey_feature_producer.get_image(chip)
|
|
ea1b55dcd3d174888b7d0214fb1fa780cb9a4ebe
|
chstrings/chstrings_test.py
|
chstrings/chstrings_test.py
|
import chstrings
import config
import unittest
class CHStringsTest(unittest.TestCase):
@classmethod
def add_smoke_test(cls, cfg):
def test(self):
# We just want to see if this will blow up
chstrings.get_localized_strings(cfg, cfg.lang_code)
name = 'test_' + cfg.lang_code + '_smoke_test'
setattr(cls, name, test)
if __name__ == '__main__':
for lc in config.LANG_CODES_TO_LANG_NAMES:
cfg = config.get_localized_config(lc)
CHStringsTest.add_smoke_test(cfg)
unittest.main()
|
Add a quick check that we can preprocess strings.
|
Add a quick check that we can preprocess strings.
|
Python
|
mit
|
guilherme-pg/citationhunt,eggpi/citationhunt,eggpi/citationhunt,eggpi/citationhunt,guilherme-pg/citationhunt,guilherme-pg/citationhunt,eggpi/citationhunt,guilherme-pg/citationhunt
|
Add a quick check that we can preprocess strings.
|
import chstrings
import config
import unittest
class CHStringsTest(unittest.TestCase):
@classmethod
def add_smoke_test(cls, cfg):
def test(self):
# We just want to see if this will blow up
chstrings.get_localized_strings(cfg, cfg.lang_code)
name = 'test_' + cfg.lang_code + '_smoke_test'
setattr(cls, name, test)
if __name__ == '__main__':
for lc in config.LANG_CODES_TO_LANG_NAMES:
cfg = config.get_localized_config(lc)
CHStringsTest.add_smoke_test(cfg)
unittest.main()
|
<commit_before><commit_msg>Add a quick check that we can preprocess strings.<commit_after>
|
import chstrings
import config
import unittest
class CHStringsTest(unittest.TestCase):
@classmethod
def add_smoke_test(cls, cfg):
def test(self):
# We just want to see if this will blow up
chstrings.get_localized_strings(cfg, cfg.lang_code)
name = 'test_' + cfg.lang_code + '_smoke_test'
setattr(cls, name, test)
if __name__ == '__main__':
for lc in config.LANG_CODES_TO_LANG_NAMES:
cfg = config.get_localized_config(lc)
CHStringsTest.add_smoke_test(cfg)
unittest.main()
|
Add a quick check that we can preprocess strings.import chstrings
import config
import unittest
class CHStringsTest(unittest.TestCase):
@classmethod
def add_smoke_test(cls, cfg):
def test(self):
# We just want to see if this will blow up
chstrings.get_localized_strings(cfg, cfg.lang_code)
name = 'test_' + cfg.lang_code + '_smoke_test'
setattr(cls, name, test)
if __name__ == '__main__':
for lc in config.LANG_CODES_TO_LANG_NAMES:
cfg = config.get_localized_config(lc)
CHStringsTest.add_smoke_test(cfg)
unittest.main()
|
<commit_before><commit_msg>Add a quick check that we can preprocess strings.<commit_after>import chstrings
import config
import unittest
class CHStringsTest(unittest.TestCase):
@classmethod
def add_smoke_test(cls, cfg):
def test(self):
# We just want to see if this will blow up
chstrings.get_localized_strings(cfg, cfg.lang_code)
name = 'test_' + cfg.lang_code + '_smoke_test'
setattr(cls, name, test)
if __name__ == '__main__':
for lc in config.LANG_CODES_TO_LANG_NAMES:
cfg = config.get_localized_config(lc)
CHStringsTest.add_smoke_test(cfg)
unittest.main()
|
|
05db0c76a2eefd2a490573973d417f29eb1303c9
|
testcases/Controllers/ONOS/Teston/CI/adapters/foundation.py
|
testcases/Controllers/ONOS/Teston/CI/adapters/foundation.py
|
"""
Description:
            This file includes basic functions
lanqinglong@huawei.com
"""
import logging
import os
import time
class foundation:
def __init__(self):
self.dir = os.path.join( os.getcwd(), 'log' )
def log (self, loginfo):
"""
Record log in log directory for deploying test environment
parameters:
loginfo(input): record info
"""
filename = time.strftime( '%Y-%m-%d-%H-%M-%S' ) + '.log'
filepath = os.path.join( self.dir, filename )
logging.basicConfig( level=logging.INFO,
format = '%(asctime)s %(filename)s:%(message)s',
datefmt = '%d %b %Y %H:%M:%S',
filename = filepath,
filemode = 'w')
filelog = logging.FileHandler( filepath )
logging.getLogger( 'Functest' ).addHandler( filelog )
print loginfo
logging.info(loginfo)
|
Write a simple adapter in functest project to run TESTON JIRA:FUNCTEST-46
|
Write a simple adapter in functest project to run TESTON
JIRA:FUNCTEST-46
Change-Id: I146ec926da6cbbd2535d0997326a13bd545f42c6
Signed-off-by: Qinglong Lan <0cd11e1cd771fa5a3124ae83fdff3084638adefb@huawei.com>
|
Python
|
apache-2.0
|
opnfv/functest,mywulin/functest,mywulin/functest,opnfv/functest
|
Write a simple adapter in functest project to run TESTON
JIRA:FUNCTEST-46
Change-Id: I146ec926da6cbbd2535d0997326a13bd545f42c6
Signed-off-by: Qinglong Lan <0cd11e1cd771fa5a3124ae83fdff3084638adefb@huawei.com>
|
"""
Description:
            This file includes basic functions
lanqinglong@huawei.com
"""
import logging
import os
import time
class foundation:
def __init__(self):
self.dir = os.path.join( os.getcwd(), 'log' )
def log (self, loginfo):
"""
Record log in log directory for deploying test environment
parameters:
loginfo(input): record info
"""
filename = time.strftime( '%Y-%m-%d-%H-%M-%S' ) + '.log'
filepath = os.path.join( self.dir, filename )
logging.basicConfig( level=logging.INFO,
format = '%(asctime)s %(filename)s:%(message)s',
datefmt = '%d %b %Y %H:%M:%S',
filename = filepath,
filemode = 'w')
filelog = logging.FileHandler( filepath )
logging.getLogger( 'Functest' ).addHandler( filelog )
print loginfo
logging.info(loginfo)
|
<commit_before><commit_msg>Write a simple adapter in functest project to run TESTON
JIRA:FUNCTEST-46
Change-Id: I146ec926da6cbbd2535d0997326a13bd545f42c6
Signed-off-by: Qinglong Lan <0cd11e1cd771fa5a3124ae83fdff3084638adefb@huawei.com><commit_after>
|
"""
Description:
            This file includes basic functions
lanqinglong@huawei.com
"""
import logging
import os
import time
class foundation:
def __init__(self):
self.dir = os.path.join( os.getcwd(), 'log' )
def log (self, loginfo):
"""
Record log in log directory for deploying test environment
parameters:
loginfo(input): record info
"""
filename = time.strftime( '%Y-%m-%d-%H-%M-%S' ) + '.log'
filepath = os.path.join( self.dir, filename )
logging.basicConfig( level=logging.INFO,
format = '%(asctime)s %(filename)s:%(message)s',
datefmt = '%d %b %Y %H:%M:%S',
filename = filepath,
filemode = 'w')
filelog = logging.FileHandler( filepath )
logging.getLogger( 'Functest' ).addHandler( filelog )
print loginfo
logging.info(loginfo)
|
Write a simple adapter in functest project to run TESTON
JIRA:FUNCTEST-46
Change-Id: I146ec926da6cbbd2535d0997326a13bd545f42c6
Signed-off-by: Qinglong Lan <0cd11e1cd771fa5a3124ae83fdff3084638adefb@huawei.com>"""
Description:
            This file includes basic functions
lanqinglong@huawei.com
"""
import logging
import os
import time
class foundation:
def __init__(self):
self.dir = os.path.join( os.getcwd(), 'log' )
def log (self, loginfo):
"""
Record log in log directory for deploying test environment
parameters:
loginfo(input): record info
"""
filename = time.strftime( '%Y-%m-%d-%H-%M-%S' ) + '.log'
filepath = os.path.join( self.dir, filename )
logging.basicConfig( level=logging.INFO,
format = '%(asctime)s %(filename)s:%(message)s',
datefmt = '%d %b %Y %H:%M:%S',
filename = filepath,
filemode = 'w')
filelog = logging.FileHandler( filepath )
logging.getLogger( 'Functest' ).addHandler( filelog )
print loginfo
logging.info(loginfo)
|
<commit_before><commit_msg>Write a simple adapter in functest project to run TESTON
JIRA:FUNCTEST-46
Change-Id: I146ec926da6cbbd2535d0997326a13bd545f42c6
Signed-off-by: Qinglong Lan <0cd11e1cd771fa5a3124ae83fdff3084638adefb@huawei.com><commit_after>"""
Description:
            This file includes basic functions
lanqinglong@huawei.com
"""
import logging
import os
import time
class foundation:
def __init__(self):
self.dir = os.path.join( os.getcwd(), 'log' )
def log (self, loginfo):
"""
Record log in log directory for deploying test environment
parameters:
loginfo(input): record info
"""
filename = time.strftime( '%Y-%m-%d-%H-%M-%S' ) + '.log'
filepath = os.path.join( self.dir, filename )
logging.basicConfig( level=logging.INFO,
format = '%(asctime)s %(filename)s:%(message)s',
datefmt = '%d %b %Y %H:%M:%S',
filename = filepath,
filemode = 'w')
filelog = logging.FileHandler( filepath )
logging.getLogger( 'Functest' ).addHandler( filelog )
print loginfo
logging.info(loginfo)
|
|
46b7667bf704e98c52dbd9eb41a7ccbf6de9229e
|
trump/tools/tests/test_sqla.py
|
trump/tools/tests/test_sqla.py
|
from ...orm import SetupTrump, SymbolManager
class TestToolsSQLA(object):
def setup_method(self, test_method):
self.eng = SetupTrump()
self.sm = SymbolManager(self.eng)
def test_repr_mixin(self):
sym = self.sm.create("testsym", overwrite=True)
assert repr(sym) == """Symbol(name=u'testsym', description=None, units=None, agg_method=u'priority_fill')"""
|
Add test for repr mixin
|
TST: Add test for repr mixin
|
Python
|
bsd-3-clause
|
Equitable/trump,Asiant/trump,jnmclarty/trump
|
TST: Add test for repr mixin
|
from ...orm import SetupTrump, SymbolManager
class TestToolsSQLA(object):
def setup_method(self, test_method):
self.eng = SetupTrump()
self.sm = SymbolManager(self.eng)
def test_repr_mixin(self):
sym = self.sm.create("testsym", overwrite=True)
assert repr(sym) == """Symbol(name=u'testsym', description=None, units=None, agg_method=u'priority_fill')"""
|
<commit_before><commit_msg>TST: Add test for repr mixin<commit_after>
|
from ...orm import SetupTrump, SymbolManager
class TestToolsSQLA(object):
def setup_method(self, test_method):
self.eng = SetupTrump()
self.sm = SymbolManager(self.eng)
def test_repr_mixin(self):
sym = self.sm.create("testsym", overwrite=True)
assert repr(sym) == """Symbol(name=u'testsym', description=None, units=None, agg_method=u'priority_fill')"""
|
TST: Add test for repr mixinfrom ...orm import SetupTrump, SymbolManager
class TestToolsSQLA(object):
def setup_method(self, test_method):
self.eng = SetupTrump()
self.sm = SymbolManager(self.eng)
def test_repr_mixin(self):
sym = self.sm.create("testsym", overwrite=True)
assert repr(sym) == """Symbol(name=u'testsym', description=None, units=None, agg_method=u'priority_fill')"""
|
<commit_before><commit_msg>TST: Add test for repr mixin<commit_after>from ...orm import SetupTrump, SymbolManager
class TestToolsSQLA(object):
def setup_method(self, test_method):
self.eng = SetupTrump()
self.sm = SymbolManager(self.eng)
def test_repr_mixin(self):
sym = self.sm.create("testsym", overwrite=True)
assert repr(sym) == """Symbol(name=u'testsym', description=None, units=None, agg_method=u'priority_fill')"""
|
|
af5b12f4fdda3f9ccfee19d2a7589fed6a7faee5
|
news/migrations/0004_auto_20160301_2235.py
|
news/migrations/0004_auto_20160301_2235.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def make_articles_boring(apps, schema_editor):
Article = apps.get_model("news", "Article")
for article in Article.objects.filter(url__contains='http://www.nu.nl/gadgets/'):
for version in article.version_set.all():
version.boring = True
version.save()
for article in Article.objects.filter(url__contains='http://www.telegraaf.nl/prive/'):
for version in article.version_set.all():
version.boring = True
version.save()
for article in Article.objects.filter(url__contains='http://www.telegraaf.nl/autovisie/'):
for version in article.version_set.all():
version.boring = True
version.save()
for article in Article.objects.filter(url__contains='http://www.telegraaf.nl/reiskrant/'):
for version in article.version_set.all():
version.boring = True
version.save()
class Migration(migrations.Migration):
dependencies = [
('news', '0003_auto_20160123_1510'),
]
operations = [
]
|
Add migrations for boring articles.
|
Add migrations for boring articles.
|
Python
|
mit
|
flupzor/newsdiffs,flupzor/newsdiffs,flupzor/newsdiffs,flupzor/bijgeschaafd,flupzor/bijgeschaafd,flupzor/bijgeschaafd,flupzor/bijgeschaafd,flupzor/newsdiffs
|
Add migrations for boring articles.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def make_articles_boring(apps, schema_editor):
Article = apps.get_model("news", "Article")
for article in Article.objects.filter(url__contains='http://www.nu.nl/gadgets/'):
for version in article.version_set.all():
version.boring = True
version.save()
for article in Article.objects.filter(url__contains='http://www.telegraaf.nl/prive/'):
for version in article.version_set.all():
version.boring = True
version.save()
for article in Article.objects.filter(url__contains='http://www.telegraaf.nl/autovisie/'):
for version in article.version_set.all():
version.boring = True
version.save()
for article in Article.objects.filter(url__contains='http://www.telegraaf.nl/reiskrant/'):
for version in article.version_set.all():
version.boring = True
version.save()
class Migration(migrations.Migration):
dependencies = [
('news', '0003_auto_20160123_1510'),
]
operations = [
]
|
<commit_before><commit_msg>Add migrations for boring articles.<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def make_articles_boring(apps, schema_editor):
Article = apps.get_model("news", "Article")
for article in Article.objects.filter(url__contains='http://www.nu.nl/gadgets/'):
for version in article.version_set.all():
version.boring = True
version.save()
for article in Article.objects.filter(url__contains='http://www.telegraaf.nl/prive/'):
for version in article.version_set.all():
version.boring = True
version.save()
for article in Article.objects.filter(url__contains='http://www.telegraaf.nl/autovisie/'):
for version in article.version_set.all():
version.boring = True
version.save()
for article in Article.objects.filter(url__contains='http://www.telegraaf.nl/reiskrant/'):
for version in article.version_set.all():
version.boring = True
version.save()
class Migration(migrations.Migration):
dependencies = [
('news', '0003_auto_20160123_1510'),
]
operations = [
]
|
Add migrations for boring articles.# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def make_articles_boring(apps, schema_editor):
Article = apps.get_model("news", "Article")
for article in Article.objects.filter(url__contains='http://www.nu.nl/gadgets/'):
for version in article.version_set.all():
version.boring = True
version.save()
for article in Article.objects.filter(url__contains='http://www.telegraaf.nl/prive/'):
for version in article.version_set.all():
version.boring = True
version.save()
for article in Article.objects.filter(url__contains='http://www.telegraaf.nl/autovisie/'):
for version in article.version_set.all():
version.boring = True
version.save()
for article in Article.objects.filter(url__contains='http://www.telegraaf.nl/reiskrant/'):
for version in article.version_set.all():
version.boring = True
version.save()
class Migration(migrations.Migration):
dependencies = [
('news', '0003_auto_20160123_1510'),
]
operations = [
]
|
<commit_before><commit_msg>Add migrations for boring articles.<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def make_articles_boring(apps, schema_editor):
Article = apps.get_model("news", "Article")
for article in Article.objects.filter(url__contains='http://www.nu.nl/gadgets/'):
for version in article.version_set.all():
version.boring = True
version.save()
for article in Article.objects.filter(url__contains='http://www.telegraaf.nl/prive/'):
for version in article.version_set.all():
version.boring = True
version.save()
for article in Article.objects.filter(url__contains='http://www.telegraaf.nl/autovisie/'):
for version in article.version_set.all():
version.boring = True
version.save()
for article in Article.objects.filter(url__contains='http://www.telegraaf.nl/reiskrant/'):
for version in article.version_set.all():
version.boring = True
version.save()
class Migration(migrations.Migration):
dependencies = [
('news', '0003_auto_20160123_1510'),
]
operations = [
]
|
|
fb0e9e0d6de608a255cac00acbecdc445b2da93c
|
examples/act_experience_update.py
|
examples/act_experience_update.py
|
# Copyright 2020 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from tensorforce import Agent, Environment
def main():
environment = Environment.create(environment='benchmarks/configs/cartpole.json')
agent = Agent.create(agent='benchmarks/configs/ppo.json', environment=environment, config=dict(eager_mode=True))
# Train for 100 episodes
for episode in range(100):
# Record episode experience
episode_states = list()
episode_internals = list()
episode_actions = list()
episode_terminal = list()
episode_reward = list()
        # Episode using independent-act and agent.initial_internals()
states = environment.reset()
internals = agent.initial_internals()
terminal = False
sum_reward = 0.0
while not terminal:
episode_states.append(states)
episode_internals.append(internals)
actions, internals = agent.act(states=states, internals=internals, independent=True)
episode_actions.append(actions)
states, terminal, reward = environment.execute(actions=actions)
episode_terminal.append(terminal)
episode_reward.append(reward)
sum_reward += reward
print('Episode {}: {}'.format(episode, sum_reward))
# Feed recorded experience to agent
agent.experience(
states=episode_states, internals=episode_internals, actions=episode_actions,
terminal=episode_terminal, reward=episode_reward
)
# Perform update
agent.update()
agent.close()
environment.close()
if __name__ == '__main__':
main()
|
Update last commit: add act-experience-update example
|
Update last commit: add act-experience-update example
|
Python
|
apache-2.0
|
reinforceio/tensorforce
|
Update last commit: add act-experience-update example
|
# Copyright 2020 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from tensorforce import Agent, Environment
def main():
environment = Environment.create(environment='benchmarks/configs/cartpole.json')
agent = Agent.create(agent='benchmarks/configs/ppo.json', environment=environment, config=dict(eager_mode=True))
# Train for 100 episodes
for episode in range(100):
# Record episode experience
episode_states = list()
episode_internals = list()
episode_actions = list()
episode_terminal = list()
episode_reward = list()
        # Episode using independent-act and agent.initial_internals()
states = environment.reset()
internals = agent.initial_internals()
terminal = False
sum_reward = 0.0
while not terminal:
episode_states.append(states)
episode_internals.append(internals)
actions, internals = agent.act(states=states, internals=internals, independent=True)
episode_actions.append(actions)
states, terminal, reward = environment.execute(actions=actions)
episode_terminal.append(terminal)
episode_reward.append(reward)
sum_reward += reward
print('Episode {}: {}'.format(episode, sum_reward))
# Feed recorded experience to agent
agent.experience(
states=episode_states, internals=episode_internals, actions=episode_actions,
terminal=episode_terminal, reward=episode_reward
)
# Perform update
agent.update()
agent.close()
environment.close()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Update last commit: add act-experience-update example<commit_after>
|
# Copyright 2020 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from tensorforce import Agent, Environment
def main():
environment = Environment.create(environment='benchmarks/configs/cartpole.json')
agent = Agent.create(agent='benchmarks/configs/ppo.json', environment=environment, config=dict(eager_mode=True))
# Train for 100 episodes
for episode in range(100):
# Record episode experience
episode_states = list()
episode_internals = list()
episode_actions = list()
episode_terminal = list()
episode_reward = list()
        # Episode using independent-act and agent.initial_internals()
states = environment.reset()
internals = agent.initial_internals()
terminal = False
sum_reward = 0.0
while not terminal:
episode_states.append(states)
episode_internals.append(internals)
actions, internals = agent.act(states=states, internals=internals, independent=True)
episode_actions.append(actions)
states, terminal, reward = environment.execute(actions=actions)
episode_terminal.append(terminal)
episode_reward.append(reward)
sum_reward += reward
print('Episode {}: {}'.format(episode, sum_reward))
# Feed recorded experience to agent
agent.experience(
states=episode_states, internals=episode_internals, actions=episode_actions,
terminal=episode_terminal, reward=episode_reward
)
# Perform update
agent.update()
agent.close()
environment.close()
if __name__ == '__main__':
main()
|
Update last commit: add act-experience-update example# Copyright 2020 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from tensorforce import Agent, Environment
def main():
environment = Environment.create(environment='benchmarks/configs/cartpole.json')
agent = Agent.create(agent='benchmarks/configs/ppo.json', environment=environment, config=dict(eager_mode=True))
# Train for 100 episodes
for episode in range(100):
# Record episode experience
episode_states = list()
episode_internals = list()
episode_actions = list()
episode_terminal = list()
episode_reward = list()
        # Episode using independent-act and agent.initial_internals()
states = environment.reset()
internals = agent.initial_internals()
terminal = False
sum_reward = 0.0
while not terminal:
episode_states.append(states)
episode_internals.append(internals)
actions, internals = agent.act(states=states, internals=internals, independent=True)
episode_actions.append(actions)
states, terminal, reward = environment.execute(actions=actions)
episode_terminal.append(terminal)
episode_reward.append(reward)
sum_reward += reward
print('Episode {}: {}'.format(episode, sum_reward))
# Feed recorded experience to agent
agent.experience(
states=episode_states, internals=episode_internals, actions=episode_actions,
terminal=episode_terminal, reward=episode_reward
)
# Perform update
agent.update()
agent.close()
environment.close()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Update last commit: add act-experience-update example<commit_after># Copyright 2020 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from tensorforce import Agent, Environment
def main():
environment = Environment.create(environment='benchmarks/configs/cartpole.json')
agent = Agent.create(agent='benchmarks/configs/ppo.json', environment=environment, config=dict(eager_mode=True))
# Train for 100 episodes
for episode in range(100):
# Record episode experience
episode_states = list()
episode_internals = list()
episode_actions = list()
episode_terminal = list()
episode_reward = list()
        # Episode using independent-act and agent.initial_internals()
states = environment.reset()
internals = agent.initial_internals()
terminal = False
sum_reward = 0.0
while not terminal:
episode_states.append(states)
episode_internals.append(internals)
actions, internals = agent.act(states=states, internals=internals, independent=True)
episode_actions.append(actions)
states, terminal, reward = environment.execute(actions=actions)
episode_terminal.append(terminal)
episode_reward.append(reward)
sum_reward += reward
print('Episode {}: {}'.format(episode, sum_reward))
# Feed recorded experience to agent
agent.experience(
states=episode_states, internals=episode_internals, actions=episode_actions,
terminal=episode_terminal, reward=episode_reward
)
# Perform update
agent.update()
agent.close()
environment.close()
if __name__ == '__main__':
main()
|
|
8ac380905f969eab9be64cc97d8e5ec4d7c53e26
|
sorting/merge_sort.py
|
sorting/merge_sort.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Merge Sort.
Best, Average, Worst: O(nlogn).
'''
def merge_sort(array, result, left, right, order):
if right - left < 2:
return
if (right - left) == 2:
if order == 'asc':
if result[left] > result[right-1]:
result[left], result[right-1] = result[right-1], result[left]
else:
if result[left] < result[right-1]:
result[left], result[right-1] = result[right-1], result[left]
return
mid = (right + left + 1) / 2
merge_sort(result, array, left, mid, order)
merge_sort(result, array, mid, right, order)
i = left
j = mid
if order == 'asc':
for x in xrange(left, right):
if i >= mid or (j < right and array[i] > array[j]):
result[x] = array[j]
j += 1
else:
result[x] = array[i]
i += 1
else:
for x in xrange(left, right):
if i >= mid or (j < right and array[i] < array[j]):
result[x] = array[j]
j += 1
else:
result[x] = array[i]
i += 1
def sort(array, order='asc'):
'''
    Sort the array in place using the merge sort algorithm, in ascending or descending
order.
Return the sorted array.
'''
if not array:
raise Exception('No element to sort.')
copy = list(array)
merge_sort(copy, array, 0, len(array), order)
return array
if __name__ == '__main__':
import random
from argparse import ArgumentParser
parser = ArgumentParser(description='Sort array use Merge Sort algorithm.')
parser.add_argument('random', type=int, help='max random number count')
parser.add_argument('--order', type=str, default='asc', choices=['asc', 'desc'],
help='sort in ascending or descending.')
args = parser.parse_args()
    # To avoid 'ValueError("sample larger than population")' when calling
# random.sample().
r_end = 1000
if args.random >= r_end:
r_end = args.random + 10
randoms = random.sample(xrange(1, r_end), args.random)
print 'before sort:\t', randoms
sort(randoms, args.order)
print 'after sort:\t', randoms
|
Implement the merge sort algorithm.
|
Implement the merge sort algorithm.
|
Python
|
mit
|
weichen2046/algorithm-study,weichen2046/algorithm-study
|
Implement the merge sort algorithm.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Merge Sort.
Best, Average, Worst: O(nlogn).
'''
def merge_sort(array, result, left, right, order):
if right - left < 2:
return
if (right - left) == 2:
if order == 'asc':
if result[left] > result[right-1]:
result[left], result[right-1] = result[right-1], result[left]
else:
if result[left] < result[right-1]:
result[left], result[right-1] = result[right-1], result[left]
return
mid = (right + left + 1) / 2
merge_sort(result, array, left, mid, order)
merge_sort(result, array, mid, right, order)
i = left
j = mid
if order == 'asc':
for x in xrange(left, right):
if i >= mid or (j < right and array[i] > array[j]):
result[x] = array[j]
j += 1
else:
result[x] = array[i]
i += 1
else:
for x in xrange(left, right):
if i >= mid or (j < right and array[i] < array[j]):
result[x] = array[j]
j += 1
else:
result[x] = array[i]
i += 1
def sort(array, order='asc'):
'''
    Sort the array in place using the merge sort algorithm, in ascending or descending
order.
Return the sorted array.
'''
if not array:
raise Exception('No element to sort.')
copy = list(array)
merge_sort(copy, array, 0, len(array), order)
return array
if __name__ == '__main__':
import random
from argparse import ArgumentParser
parser = ArgumentParser(description='Sort array use Merge Sort algorithm.')
parser.add_argument('random', type=int, help='max random number count')
parser.add_argument('--order', type=str, default='asc', choices=['asc', 'desc'],
help='sort in ascending or descending.')
args = parser.parse_args()
    # To avoid 'ValueError("sample larger than population")' when calling
# random.sample().
r_end = 1000
if args.random >= r_end:
r_end = args.random + 10
randoms = random.sample(xrange(1, r_end), args.random)
print 'before sort:\t', randoms
sort(randoms, args.order)
print 'after sort:\t', randoms
|
<commit_before><commit_msg>Implement the merge sort algorithm.<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Merge Sort.
Best, Average, Worst: O(nlogn).
'''
def merge_sort(array, result, left, right, order):
if right - left < 2:
return
if (right - left) == 2:
if order == 'asc':
if result[left] > result[right-1]:
result[left], result[right-1] = result[right-1], result[left]
else:
if result[left] < result[right-1]:
result[left], result[right-1] = result[right-1], result[left]
return
mid = (right + left + 1) / 2
merge_sort(result, array, left, mid, order)
merge_sort(result, array, mid, right, order)
i = left
j = mid
if order == 'asc':
for x in xrange(left, right):
if i >= mid or (j < right and array[i] > array[j]):
result[x] = array[j]
j += 1
else:
result[x] = array[i]
i += 1
else:
for x in xrange(left, right):
if i >= mid or (j < right and array[i] < array[j]):
result[x] = array[j]
j += 1
else:
result[x] = array[i]
i += 1
def sort(array, order='asc'):
'''
    Sort the array in place using the merge sort algorithm, in ascending or descending
order.
Return the sorted array.
'''
if not array:
raise Exception('No element to sort.')
copy = list(array)
merge_sort(copy, array, 0, len(array), order)
return array
if __name__ == '__main__':
import random
from argparse import ArgumentParser
parser = ArgumentParser(description='Sort array use Merge Sort algorithm.')
parser.add_argument('random', type=int, help='max random number count')
parser.add_argument('--order', type=str, default='asc', choices=['asc', 'desc'],
help='sort in ascending or descending.')
args = parser.parse_args()
    # To avoid 'ValueError("sample larger than population")' when calling
# random.sample().
r_end = 1000
if args.random >= r_end:
r_end = args.random + 10
randoms = random.sample(xrange(1, r_end), args.random)
print 'before sort:\t', randoms
sort(randoms, args.order)
print 'after sort:\t', randoms
|
Implement the merge sort algorithm.#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Merge Sort.
Best, Average, Worst: O(nlogn).
'''
def merge_sort(array, result, left, right, order):
if right - left < 2:
return
if (right - left) == 2:
if order == 'asc':
if result[left] > result[right-1]:
result[left], result[right-1] = result[right-1], result[left]
else:
if result[left] < result[right-1]:
result[left], result[right-1] = result[right-1], result[left]
return
mid = (right + left + 1) / 2
merge_sort(result, array, left, mid, order)
merge_sort(result, array, mid, right, order)
i = left
j = mid
if order == 'asc':
for x in xrange(left, right):
if i >= mid or (j < right and array[i] > array[j]):
result[x] = array[j]
j += 1
else:
result[x] = array[i]
i += 1
else:
for x in xrange(left, right):
if i >= mid or (j < right and array[i] < array[j]):
result[x] = array[j]
j += 1
else:
result[x] = array[i]
i += 1
def sort(array, order='asc'):
'''
    Sort the array in place using the merge sort algorithm, in ascending or descending
order.
Return the sorted array.
'''
if not array:
raise Exception('No element to sort.')
copy = list(array)
merge_sort(copy, array, 0, len(array), order)
return array
if __name__ == '__main__':
import random
from argparse import ArgumentParser
parser = ArgumentParser(description='Sort array use Merge Sort algorithm.')
parser.add_argument('random', type=int, help='max random number count')
parser.add_argument('--order', type=str, default='asc', choices=['asc', 'desc'],
help='sort in ascending or descending.')
args = parser.parse_args()
    # To avoid 'ValueError("sample larger than population")' when calling
# random.sample().
r_end = 1000
if args.random >= r_end:
r_end = args.random + 10
randoms = random.sample(xrange(1, r_end), args.random)
print 'before sort:\t', randoms
sort(randoms, args.order)
print 'after sort:\t', randoms
|
<commit_before><commit_msg>Implement the merge sort algorithm.<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Merge Sort.
Best, Average, Worst: O(nlogn).
'''
def merge_sort(array, result, left, right, order):
if right - left < 2:
return
if (right - left) == 2:
if order == 'asc':
if result[left] > result[right-1]:
result[left], result[right-1] = result[right-1], result[left]
else:
if result[left] < result[right-1]:
result[left], result[right-1] = result[right-1], result[left]
return
mid = (right + left + 1) / 2
merge_sort(result, array, left, mid, order)
merge_sort(result, array, mid, right, order)
i = left
j = mid
if order == 'asc':
for x in xrange(left, right):
if i >= mid or (j < right and array[i] > array[j]):
result[x] = array[j]
j += 1
else:
result[x] = array[i]
i += 1
else:
for x in xrange(left, right):
if i >= mid or (j < right and array[i] < array[j]):
result[x] = array[j]
j += 1
else:
result[x] = array[i]
i += 1
def sort(array, order='asc'):
'''
    Sort the array in place using the merge sort algorithm, in ascending or descending
order.
Return the sorted array.
'''
if not array:
raise Exception('No element to sort.')
copy = list(array)
merge_sort(copy, array, 0, len(array), order)
return array
if __name__ == '__main__':
import random
from argparse import ArgumentParser
parser = ArgumentParser(description='Sort array use Merge Sort algorithm.')
parser.add_argument('random', type=int, help='max random number count')
parser.add_argument('--order', type=str, default='asc', choices=['asc', 'desc'],
help='sort in ascending or descending.')
args = parser.parse_args()
    # To avoid 'ValueError("sample larger than population")' when calling
# random.sample().
r_end = 1000
if args.random >= r_end:
r_end = args.random + 10
randoms = random.sample(xrange(1, r_end), args.random)
print 'before sort:\t', randoms
sort(randoms, args.order)
print 'after sort:\t', randoms
|
|
bfbeee63deecb424a528b44225b2dd4c67bbbc23
|
manage.py
|
manage.py
|
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from app import db, create_app
from app.models import User, BucketList, Item
app = create_app("development")
manager = Manager(app)
migrate = Migrate(app, db)
@manager.command
def createdb():
db.create_all()
print("database tables created successfully")
@manager.command
def dropdb():
db.drop_all()
manager.add_command('db', MigrateCommand)
if __name__ == "__main__":
manager.run()
|
Add database migrations and creation script
|
Add database migrations and creation script
|
Python
|
mit
|
brayoh/bucket-list-api
|
Add database migrations and creation script
|
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from app import db, create_app
from app.models import User, BucketList, Item
app = create_app("development")
manager = Manager(app)
migrate = Migrate(app, db)
@manager.command
def createdb():
db.create_all()
print("database tables created successfully")
@manager.command
def dropdb():
db.drop_all()
manager.add_command('db', MigrateCommand)
if __name__ == "__main__":
manager.run()
|
<commit_before><commit_msg>Add database migrations and creation script<commit_after>
|
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from app import db, create_app
from app.models import User, BucketList, Item
app = create_app("development")
manager = Manager(app)
migrate = Migrate(app, db)
@manager.command
def createdb():
db.create_all()
print("database tables created successfully")
@manager.command
def dropdb():
db.drop_all()
manager.add_command('db', MigrateCommand)
if __name__ == "__main__":
manager.run()
|
Add database migrations and creation scriptfrom flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from app import db, create_app
from app.models import User, BucketList, Item
app = create_app("development")
manager = Manager(app)
migrate = Migrate(app, db)
@manager.command
def createdb():
db.create_all()
print("database tables created successfully")
@manager.command
def dropdb():
db.drop_all()
manager.add_command('db', MigrateCommand)
if __name__ == "__main__":
manager.run()
|
<commit_before><commit_msg>Add database migrations and creation script<commit_after>from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from app import db, create_app
from app.models import User, BucketList, Item
app = create_app("development")
manager = Manager(app)
migrate = Migrate(app, db)
@manager.command
def createdb():
db.create_all()
print("database tables created successfully")
@manager.command
def dropdb():
db.drop_all()
manager.add_command('db', MigrateCommand)
if __name__ == "__main__":
manager.run()
|
|
77917be83586e1963fb3b11cdb9631a766acb294
|
pyagg/tests/test_canvas.py
|
pyagg/tests/test_canvas.py
|
import numpy as np
from numpy.testing import assert_equal
from pyagg import GraphicsState, Color, ndarray_canvas_rgb24
def test_line():
expected = np.zeros((100, 100, 3), dtype=np.uint8)
buffer = np.zeros((100, 100, 3), dtype=np.uint8)
canvas = ndarray_canvas_rgb24(buffer)
gs = GraphicsState()
gs.anti_aliased = False
gs.line_color = Color(10, 10, 10)
gs.line_width = 1.0
expected[1, ...] = 10
canvas.draw_line(0, 1.5, 100, 1.5, gs)
assert_equal(expected, canvas.image)
buffer[:] = 0
expected[:] = 0
expected[:, 1, :] = 10
canvas.draw_line(1.5, 0, 1.5, 100, gs)
assert_equal(expected, buffer)
|
Add a test module where canvas unit tests will go
|
Add a test module where canvas unit tests will go
Only a simple line drawing test there for now...
|
Python
|
mit
|
celiagg/celiagg,celiagg/celiagg,celiagg/celiagg,celiagg/celiagg,celiagg/celiagg
|
Add a test module where canvas unit tests will go
Only a simple line drawing test there for now...
|
import numpy as np
from numpy.testing import assert_equal
from pyagg import GraphicsState, Color, ndarray_canvas_rgb24
def test_line():
expected = np.zeros((100, 100, 3), dtype=np.uint8)
buffer = np.zeros((100, 100, 3), dtype=np.uint8)
canvas = ndarray_canvas_rgb24(buffer)
gs = GraphicsState()
gs.anti_aliased = False
gs.line_color = Color(10, 10, 10)
gs.line_width = 1.0
expected[1, ...] = 10
canvas.draw_line(0, 1.5, 100, 1.5, gs)
assert_equal(expected, canvas.image)
buffer[:] = 0
expected[:] = 0
expected[:, 1, :] = 10
canvas.draw_line(1.5, 0, 1.5, 100, gs)
assert_equal(expected, buffer)
|
<commit_before><commit_msg>Add a test module where canvas unit tests will go
Only a simple line drawing test there for now...<commit_after>
|
import numpy as np
from numpy.testing import assert_equal
from pyagg import GraphicsState, Color, ndarray_canvas_rgb24
def test_line():
expected = np.zeros((100, 100, 3), dtype=np.uint8)
buffer = np.zeros((100, 100, 3), dtype=np.uint8)
canvas = ndarray_canvas_rgb24(buffer)
gs = GraphicsState()
gs.anti_aliased = False
gs.line_color = Color(10, 10, 10)
gs.line_width = 1.0
expected[1, ...] = 10
canvas.draw_line(0, 1.5, 100, 1.5, gs)
assert_equal(expected, canvas.image)
buffer[:] = 0
expected[:] = 0
expected[:, 1, :] = 10
canvas.draw_line(1.5, 0, 1.5, 100, gs)
assert_equal(expected, buffer)
|
Add a test module where canvas unit tests will go
Only a simple line drawing test there for now...import numpy as np
from numpy.testing import assert_equal
from pyagg import GraphicsState, Color, ndarray_canvas_rgb24
def test_line():
expected = np.zeros((100, 100, 3), dtype=np.uint8)
buffer = np.zeros((100, 100, 3), dtype=np.uint8)
canvas = ndarray_canvas_rgb24(buffer)
gs = GraphicsState()
gs.anti_aliased = False
gs.line_color = Color(10, 10, 10)
gs.line_width = 1.0
expected[1, ...] = 10
canvas.draw_line(0, 1.5, 100, 1.5, gs)
assert_equal(expected, canvas.image)
buffer[:] = 0
expected[:] = 0
expected[:, 1, :] = 10
canvas.draw_line(1.5, 0, 1.5, 100, gs)
assert_equal(expected, buffer)
|
<commit_before><commit_msg>Add a test module where canvas unit tests will go
Only a simple line drawing test there for now...<commit_after>import numpy as np
from numpy.testing import assert_equal
from pyagg import GraphicsState, Color, ndarray_canvas_rgb24
def test_line():
expected = np.zeros((100, 100, 3), dtype=np.uint8)
buffer = np.zeros((100, 100, 3), dtype=np.uint8)
canvas = ndarray_canvas_rgb24(buffer)
gs = GraphicsState()
gs.anti_aliased = False
gs.line_color = Color(10, 10, 10)
gs.line_width = 1.0
expected[1, ...] = 10
canvas.draw_line(0, 1.5, 100, 1.5, gs)
assert_equal(expected, canvas.image)
buffer[:] = 0
expected[:] = 0
expected[:, 1, :] = 10
canvas.draw_line(1.5, 0, 1.5, 100, gs)
assert_equal(expected, buffer)
|
|
f118339ae2e55ed5d07480094f5706eef6ed858d
|
pmdarima/preprocessing/endog/tests/test_log.py
|
pmdarima/preprocessing/endog/tests/test_log.py
|
# -*- coding: utf-8 -*-
import numpy as np
from numpy.testing import assert_array_almost_equal
from scipy import stats
import pytest
from pmdarima.preprocessing import LogEndogTransformer
from pmdarima.preprocessing import BoxCoxEndogTransformer
def test_same():
y = [1, 2, 3]
trans = BoxCoxEndogTransformer()
log_trans = LogEndogTransformer()
y_t, _ = trans.fit_transform(y)
log_y_t, _ = log_trans.fit_transform(y)
assert_array_almost_equal(log_y_t, y_t)
|
Add test for empty LogEndogTransformer
|
Add test for empty LogEndogTransformer
|
Python
|
mit
|
tgsmith61591/pyramid,tgsmith61591/pyramid,alkaline-ml/pmdarima,alkaline-ml/pmdarima,tgsmith61591/pyramid,alkaline-ml/pmdarima
|
Add test for empty LogEndogTransformer
|
# -*- coding: utf-8 -*-
import numpy as np
from numpy.testing import assert_array_almost_equal
from scipy import stats
import pytest
from pmdarima.preprocessing import LogEndogTransformer
from pmdarima.preprocessing import BoxCoxEndogTransformer
def test_same():
y = [1, 2, 3]
trans = BoxCoxEndogTransformer()
log_trans = LogEndogTransformer()
y_t, _ = trans.fit_transform(y)
log_y_t, _ = log_trans.fit_transform(y)
assert_array_almost_equal(log_y_t, y_t)
|
<commit_before><commit_msg>Add test for empty LogEndogTransformer<commit_after>
|
# -*- coding: utf-8 -*-
import numpy as np
from numpy.testing import assert_array_almost_equal
from scipy import stats
import pytest
from pmdarima.preprocessing import LogEndogTransformer
from pmdarima.preprocessing import BoxCoxEndogTransformer
def test_same():
y = [1, 2, 3]
trans = BoxCoxEndogTransformer()
log_trans = LogEndogTransformer()
y_t, _ = trans.fit_transform(y)
log_y_t, _ = log_trans.fit_transform(y)
assert_array_almost_equal(log_y_t, y_t)
|
Add test for empty LogEndogTransformer# -*- coding: utf-8 -*-
import numpy as np
from numpy.testing import assert_array_almost_equal
from scipy import stats
import pytest
from pmdarima.preprocessing import LogEndogTransformer
from pmdarima.preprocessing import BoxCoxEndogTransformer
def test_same():
y = [1, 2, 3]
trans = BoxCoxEndogTransformer()
log_trans = LogEndogTransformer()
y_t, _ = trans.fit_transform(y)
log_y_t, _ = log_trans.fit_transform(y)
assert_array_almost_equal(log_y_t, y_t)
|
<commit_before><commit_msg>Add test for empty LogEndogTransformer<commit_after># -*- coding: utf-8 -*-
import numpy as np
from numpy.testing import assert_array_almost_equal
from scipy import stats
import pytest
from pmdarima.preprocessing import LogEndogTransformer
from pmdarima.preprocessing import BoxCoxEndogTransformer
def test_same():
y = [1, 2, 3]
trans = BoxCoxEndogTransformer()
log_trans = LogEndogTransformer()
y_t, _ = trans.fit_transform(y)
log_y_t, _ = log_trans.fit_transform(y)
assert_array_almost_equal(log_y_t, y_t)
|
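The record above leans on a standard identity: the Box-Cox transform (y**lmbda - 1) / lmbda tends to log(y) as lmbda -> 0, and is defined as log(y) at lmbda == 0, which is why a log transformer can be compared against (or built on top of) a Box-Cox transformer. A minimal sketch of that limit using scipy directly rather than pmdarima, so only the identity itself is being checked, not the library's defaults:

import numpy as np
from scipy.special import boxcox

y = np.array([1.0, 2.0, 3.0])
# lmbda == 0 is defined as the natural log ...
assert np.allclose(boxcox(y, 0.0), np.log(y))
# ... and the lmbda != 0 branch converges to it as lmbda -> 0
assert np.allclose(boxcox(y, 1e-8), np.log(y), atol=1e-6)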
|
099c0af192cabb11fad220296c327654b4c10b3e
|
tools/upload_build.py
|
tools/upload_build.py
|
"""This script upload a newly-build version of CocoMUD for Windows.
The Download wiki page on Redmine are updated.
Requirements:
This script needs 'python-redmine', which you can obtain with
pip install python-redmine
"""
import argparse
from json import dumps
import os
import re
import sys
import urllib2
from redmine import Redmine
from redmine.exceptions import ResourceNotFoundError
# Create an argument parser
parser = argparse.ArgumentParser(
description="upload a new CocoMUD build")
parser.add_argument("key", help="the API key to upload to Redmine")
args = parser.parse_args()
# Configure the system
key = args.key
# Connects to the REST API
redmine = Redmine("https://cocomud.plan.io", key=key)
# Check that the file exists
path = os.path.abspath("../src/build/CocoMUD.zip")
if not os.path.exists(path):
print "The file {} cannot be found.".format(path)
sys.exit(1)
# Then upload this file
print "Retrieving the Download wiki page on 'cocomud-client'..."
page = redmine.wiki_page.get("Download", project_id="cocomud-client")
print "Uploading {}...".format(path)
text = page.text
page.uploads = [{"path": path, "filename": "CocoMUD.zip"}]
page.text = text
print "Saving the page...", page.save()
# Get the new resource URL
url = list(page.attachments)[-1].content_url
# Retrieve the version number
with open("../src/version.py") as file:
content = file.read()
version = content.partition("=")[2].strip()
# Now we get ALL wiki pages with the title 'Download' and replace the URL
for project in redmine.project.all():
identifier = project.identifier
# Try to get the 'Download' page
try:
page = redmine.wiki_page.get("Download", project_id=identifier)
except ResourceNotFoundError:
pass
else:
print "Updating the Download page for the {} project...".format(
identifier)
text = page.text
text = re.sub(r"https\://cocomud\.plan\.io/attachments/" \
r"download/\d+/CocoMUD\.zip", url, text)
text = re.sub(r"\+\*\d+\*\+", "+*" + version + "*+", text)
page.text = text
success = page.save()
if success:
print "Correctly saved the wiki page."
else:
print "Error while saving the wiki page."
# Update the build information in the custom field
build = dumps({version: {"windows": url}})
print "Updating the custom field"
redmine.project.update(resource_id=2,
custom_fields=[{"id": 3, "value": build}])
print "URL", url
|
Add a tool to upload a build
|
Add a tool to upload a build
|
Python
|
bsd-3-clause
|
vlegoff/cocomud
|
Add a tool to upload a build
|
"""This script upload a newly-build version of CocoMUD for Windows.
The Download wiki page on Redmine are updated.
Requirements:
This script needs 'python-redmine', which you can obtain with
pip install python-redmine
"""
import argparse
from json import dumps
import os
import re
import sys
import urllib2
from redmine import Redmine
from redmine.exceptions import ResourceNotFoundError
# Create an argument parser
parser = argparse.ArgumentParser(
description="upload a new CocoMUD build")
parser.add_argument("key", help="the API key to upload to Redmine")
args = parser.parse_args()
# Configure the system
key = args.key
# Connects to the REST API
redmine = Redmine("https://cocomud.plan.io", key=key)
# Check that the file exists
path = os.path.abspath("../src/build/CocoMUD.zip")
if not os.path.exists(path):
print "The file {} cannot be found.".format(path)
sys.exit(1)
# Then upload this file
print "Retrieving the Download wiki page on 'cocomud-client'..."
page = redmine.wiki_page.get("Download", project_id="cocomud-client")
print "Uploading {}...".format(path)
text = page.text
page.uploads = [{"path": path, "filename": "CocoMUD.zip"}]
page.text = text
print "Saving the page...", page.save()
# Get the new resource URL
url = list(page.attachments)[-1].content_url
# Retrieve the version number
with open("../src/version.py") as file:
content = file.read()
version = content.partition("=")[2].strip()
# Now we get ALL wiki pages with the title 'Download' and replace the URL
for project in redmine.project.all():
identifier = project.identifier
# Try to get the 'Download' page
try:
page = redmine.wiki_page.get("Download", project_id=identifier)
except ResourceNotFoundError:
pass
else:
print "Updating the Download page for the {} project...".format(
identifier)
text = page.text
text = re.sub(r"https\://cocomud\.plan\.io/attachments/" \
r"download/\d+/CocoMUD\.zip", url, text)
text = re.sub(r"\+\*\d+\*\+", "+*" + version + "*+", text)
page.text = text
success = page.save()
if success:
print "Correctly saved the wiki page."
else:
print "Error while saving the wiki page."
# Update the build information in the custom field
build = dumps({version: {"windows": url}})
print "Updating the custom field"
redmine.project.update(resource_id=2,
custom_fields=[{"id": 3, "value": build}])
print "URL", url
|
<commit_before><commit_msg>Add a tool to upload a build<commit_after>
|
"""This script upload a newly-build version of CocoMUD for Windows.
The Download wiki page on Redmine are updated.
Requirements:
This script needs 'python-redmine', which you can obtain with
pip install python-redmine
"""
import argparse
from json import dumps
import os
import re
import sys
import urllib2
from redmine import Redmine
from redmine.exceptions import ResourceNotFoundError
# Create an argument parser
parser = argparse.ArgumentParser(
description="upload a new CocoMUD build")
parser.add_argument("key", help="the API key to upload to Redmine")
args = parser.parse_args()
# Configure the system
key = args.key
# Connects to the REST API
redmine = Redmine("https://cocomud.plan.io", key=key)
# Check that the file exists
path = os.path.abspath("../src/build/CocoMUD.zip")
if not os.path.exists(path):
print "The file {} cannot be found.".format(path)
sys.exit(1)
# Then upload this file
print "Retrieving the Download wiki page on 'cocomud-client'..."
page = redmine.wiki_page.get("Download", project_id="cocomud-client")
print "Uploading {}...".format(path)
text = page.text
page.uploads = [{"path": path, "filename": "CocoMUD.zip"}]
page.text = text
print "Saving the page...", page.save()
# Get the new resource URL
url = list(page.attachments)[-1].content_url
# Retrieve the version number
with open("../src/version.py") as file:
content = file.read()
version = content.partition("=")[2].strip()
# Now we get ALL wiki pages with the title 'Download' and replace the URL
for project in redmine.project.all():
identifier = project.identifier
# Try to get the 'Download' page
try:
page = redmine.wiki_page.get("Download", project_id=identifier)
except ResourceNotFoundError:
pass
else:
print "Updating the Download page for the {} project...".format(
identifier)
text = page.text
text = re.sub(r"https\://cocomud\.plan\.io/attachments/" \
r"download/\d+/CocoMUD\.zip", url, text)
text = re.sub(r"\+\*\d+\*\+", "+*" + version + "*+", text)
page.text = text
success = page.save()
if success:
print "Correctly saved the wiki page."
else:
print "Error while saving the wiki page."
# Update the build information in the custom field
build = dumps({version: {"windows": url}})
print "Updating the custom field"
redmine.project.update(resource_id=2,
custom_fields=[{"id": 3, "value": build}])
print "URL", url
|
Add a tool to upload a build"""This script upload a newly-build version of CocoMUD for Windows.
The Download wiki page on Redmine are updated.
Requirements:
This script needs 'python-redmine', which you can obtain with
pip install python-redmine
"""
import argparse
from json import dumps
import os
import re
import sys
import urllib2
from redmine import Redmine
from redmine.exceptions import ResourceNotFoundError
# Create an argument parser
parser = argparse.ArgumentParser(
description="upload a new CocoMUD build")
parser.add_argument("key", help="the API key to upload to Redmine")
args = parser.parse_args()
# Configure the system
key = args.key
# Connects to the REST API
redmine = Redmine("https://cocomud.plan.io", key=key)
# Check that the file exists
path = os.path.abspath("../src/build/CocoMUD.zip")
if not os.path.exists(path):
print "The file {} cannot be found.".format(path)
sys.exit(1)
# Then upload this file
print "Retrieving the Download wiki page on 'cocomud-client'..."
page = redmine.wiki_page.get("Download", project_id="cocomud-client")
print "Uploading {}...".format(path)
text = page.text
page.uploads = [{"path": path, "filename": "CocoMUD.zip"}]
page.text = text
print "Saving the page...", page.save()
# Get the new resource URL
url = list(page.attachments)[-1].content_url
# Retrieve the version number
with open("../src/version.py") as file:
content = file.read()
version = content.partition("=")[2].strip()
# Now we get ALL wiki pages with the title 'Download' and replace the URL
for project in redmine.project.all():
identifier = project.identifier
# Try to get the 'Download' page
try:
page = redmine.wiki_page.get("Download", project_id=identifier)
except ResourceNotFoundError:
pass
else:
print "Updating the Download page for the {} project...".format(
identifier)
text = page.text
text = re.sub(r"https\://cocomud\.plan\.io/attachments/" \
r"download/\d+/CocoMUD\.zip", url, text)
text = re.sub(r"\+\*\d+\*\+", "+*" + version + "*+", text)
page.text = text
success = page.save()
if success:
print "Correctly saved the wiki page."
else:
print "Error while saving the wiki page."
# Update the build information in the custom field
build = dumps({version: {"windows": url}})
print "Updating the custom field"
redmine.project.update(resource_id=2,
custom_fields=[{"id": 3, "value": build}])
print "URL", url
|
<commit_before><commit_msg>Add a tool to upload a build<commit_after>"""This script upload a newly-build version of CocoMUD for Windows.
The Download wiki page on Redmine are updated.
Requirements:
This script needs 'python-redmine', which you can obtain with
pip install python-redmine
"""
import argparse
from json import dumps
import os
import re
import sys
import urllib2
from redmine import Redmine
from redmine.exceptions import ResourceNotFoundError
# Create an argument parser
parser = argparse.ArgumentParser(
description="upload a new CocoMUD build")
parser.add_argument("key", help="the API key to upload to Redmine")
args = parser.parse_args()
# Configure the system
key = args.key
# Connects to the REST API
redmine = Redmine("https://cocomud.plan.io", key=key)
# Check that the file exists
path = os.path.abspath("../src/build/CocoMUD.zip")
if not os.path.exists(path):
print "The file {} cannot be found.".format(path)
sys.exit(1)
# Then upload this file
print "Retrieving the Download wiki page on 'cocomud-client'..."
page = redmine.wiki_page.get("Download", project_id="cocomud-client")
print "Uploading {}...".format(path)
text = page.text
page.uploads = [{"path": path, "filename": "CocoMUD.zip"}]
page.text = text
print "Saving the page...", page.save()
# Get the new resource URL
url = list(page.attachments)[-1].content_url
# Retrieve the version number
with open("../src/version.py") as file:
content = file.read()
version = content.partition("=")[2].strip()
# Now we get ALL wiki pages with the title 'Download' and replace the URL
for project in redmine.project.all():
identifier = project.identifier
# Try to get the 'Download' page
try:
page = redmine.wiki_page.get("Download", project_id=identifier)
except ResourceNotFoundError:
pass
else:
print "Updating the Download page for the {} project...".format(
identifier)
text = page.text
text = re.sub(r"https\://cocomud\.plan\.io/attachments/" \
r"download/\d+/CocoMUD\.zip", url, text)
text = re.sub(r"\+\*\d+\*\+", "+*" + version + "*+", text)
page.text = text
success = page.save()
if success:
print "Correctly saved the wiki page."
else:
print "Error while saving the wiki page."
# Update the build information in the custom field
build = dumps({version: {"windows": url}})
print "Updating the custom field"
redmine.project.update(resource_id=2,
custom_fields=[{"id": 3, "value": build}])
print "URL", url
|
|
911cb1838dcb974dfa1e28712a1f884c89c2b600
|
migrations/versions/577ad345788e_.py
|
migrations/versions/577ad345788e_.py
|
"""empty message
Revision ID: 577ad345788e
Revises: 3b655c2d1a85
Create Date: 2015-11-17 11:18:22.685983
"""
# revision identifiers, used by Alembic.
revision = '577ad345788e'
down_revision = '3b655c2d1a85'
from alembic import op
import sqlalchemy as sa
def upgrade():
command = """
ALTER TABLE auth_providers
DROP CONSTRAINT auth_providers_user_id_fkey,
ADD CONSTRAINT auth_providers_user_id_fkey
FOREIGN KEY (user_id) REFERENCES users(id)
ON DELETE CASCADE
"""
op.execute(command)
def downgrade():
command = """
ALTER TABLE auth_providers
DROP CONSTRAINT auth_providers_user_id_fkey,
ADD CONSTRAINT auth_providers_user_id_fkey
FOREIGN KEY (user_id) REFERENCES users(id)
"""
op.execute(command)
|
Add cascade for easier deletion of users from database.
|
Add cascade for easier deletion of users from database.
|
Python
|
bsd-3-clause
|
uwcirg/true_nth_usa_portal,uwcirg/true_nth_usa_portal,uwcirg/true_nth_usa_portal,uwcirg/true_nth_usa_portal
|
Add cascade for easier deletion of users from database.
|
"""empty message
Revision ID: 577ad345788e
Revises: 3b655c2d1a85
Create Date: 2015-11-17 11:18:22.685983
"""
# revision identifiers, used by Alembic.
revision = '577ad345788e'
down_revision = '3b655c2d1a85'
from alembic import op
import sqlalchemy as sa
def upgrade():
command = """
ALTER TABLE auth_providers
DROP CONSTRAINT auth_providers_user_id_fkey,
ADD CONSTRAINT auth_providers_user_id_fkey
FOREIGN KEY (user_id) REFERENCES users(id)
ON DELETE CASCADE
"""
op.execute(command)
def downgrade():
command = """
ALTER TABLE auth_providers
DROP CONSTRAINT auth_providers_user_id_fkey,
ADD CONSTRAINT auth_providers_user_id_fkey
FOREIGN KEY (user_id) REFERENCES users(id)
"""
op.execute(command)
|
<commit_before><commit_msg>Add cascade for easier deletion of users from database.<commit_after>
|
"""empty message
Revision ID: 577ad345788e
Revises: 3b655c2d1a85
Create Date: 2015-11-17 11:18:22.685983
"""
# revision identifiers, used by Alembic.
revision = '577ad345788e'
down_revision = '3b655c2d1a85'
from alembic import op
import sqlalchemy as sa
def upgrade():
command = """
ALTER TABLE auth_providers
DROP CONSTRAINT auth_providers_user_id_fkey,
ADD CONSTRAINT auth_providers_user_id_fkey
FOREIGN KEY (user_id) REFERENCES users(id)
ON DELETE CASCADE
"""
op.execute(command)
def downgrade():
command = """
ALTER TABLE auth_providers
DROP CONSTRAINT auth_providers_user_id_fkey,
ADD CONSTRAINT auth_providers_user_id_fkey
FOREIGN KEY (user_id) REFERENCES users(id)
"""
op.execute(command)
|
Add cascade for easier deletion of users from database."""empty message
Revision ID: 577ad345788e
Revises: 3b655c2d1a85
Create Date: 2015-11-17 11:18:22.685983
"""
# revision identifiers, used by Alembic.
revision = '577ad345788e'
down_revision = '3b655c2d1a85'
from alembic import op
import sqlalchemy as sa
def upgrade():
command = """
ALTER TABLE auth_providers
DROP CONSTRAINT auth_providers_user_id_fkey,
ADD CONSTRAINT auth_providers_user_id_fkey
FOREIGN KEY (user_id) REFERENCES users(id)
ON DELETE CASCADE
"""
op.execute(command)
def downgrade():
command = """
ALTER TABLE auth_providers
DROP CONSTRAINT auth_providers_user_id_fkey,
ADD CONSTRAINT auth_providers_user_id_fkey
FOREIGN KEY (user_id) REFERENCES users(id)
"""
op.execute(command)
|
<commit_before><commit_msg>Add cascade for easier deletion of users from database.<commit_after>"""empty message
Revision ID: 577ad345788e
Revises: 3b655c2d1a85
Create Date: 2015-11-17 11:18:22.685983
"""
# revision identifiers, used by Alembic.
revision = '577ad345788e'
down_revision = '3b655c2d1a85'
from alembic import op
import sqlalchemy as sa
def upgrade():
command = """
ALTER TABLE auth_providers
DROP CONSTRAINT auth_providers_user_id_fkey,
ADD CONSTRAINT auth_providers_user_id_fkey
FOREIGN KEY (user_id) REFERENCES users(id)
ON DELETE CASCADE
"""
op.execute(command)
def downgrade():
command = """
ALTER TABLE auth_providers
DROP CONSTRAINT auth_providers_user_id_fkey,
ADD CONSTRAINT auth_providers_user_id_fkey
FOREIGN KEY (user_id) REFERENCES users(id)
"""
op.execute(command)
|
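The migration in the record above reaches for raw SQL because it rewrites an existing foreign key in place. A minimal sketch of the same change expressed with Alembic's constraint helpers instead (constraint, table and column names copied from the raw SQL above; PostgreSQL assumed):

from alembic import op


def upgrade():
    # Drop the existing FK and recreate it with ON DELETE CASCADE.
    op.drop_constraint('auth_providers_user_id_fkey', 'auth_providers', type_='foreignkey')
    op.create_foreign_key('auth_providers_user_id_fkey', 'auth_providers', 'users',
                          ['user_id'], ['id'], ondelete='CASCADE')


def downgrade():
    # Recreate the FK without the cascade behaviour.
    op.drop_constraint('auth_providers_user_id_fkey', 'auth_providers', type_='foreignkey')
    op.create_foreign_key('auth_providers_user_id_fkey', 'auth_providers', 'users',
                          ['user_id'], ['id'])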
|
e8844c62571e85b38287048bac9666972ae674de
|
migrations/versions/888b56ed5dcb_.py
|
migrations/versions/888b56ed5dcb_.py
|
"""Add table for additional user attributes
Revision ID: 888b56ed5dcb
Revises: d5870fd2f2a4
Create Date: 2021-02-10 12:17:40.880224
"""
# revision identifiers, used by Alembic.
revision = '888b56ed5dcb'
down_revision = 'd5870fd2f2a4'
from alembic import op
import sqlalchemy as sa
def upgrade():
try:
op.create_table('userattribute',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Unicode(length=320), nullable=True),
sa.Column('resolver', sa.Unicode(length=120), nullable=True),
sa.Column('realm_id', sa.Integer(), nullable=True),
sa.Column('Key', sa.Unicode(length=255), nullable=False),
sa.Column('Value', sa.UnicodeText(), nullable=True),
sa.Column('Type', sa.Unicode(length=100), nullable=True),
sa.ForeignKeyConstraint(['realm_id'], ['realm.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_userattribute_resolver'), 'userattribute', ['resolver'], unique=False)
op.create_index(op.f('ix_userattribute_user_id'), 'userattribute', ['user_id'], unique=False)
except Exception as exx:
print("Could not add table 'userattribute'.")
print(exx)
def downgrade():
op.drop_index(op.f('ix_userattribute_user_id'), table_name='userattribute')
op.drop_index(op.f('ix_userattribute_resolver'), table_name='userattribute')
op.drop_table('userattribute')
|
Add DB migration script for attributes table
|
Add DB migration script for attributes table
|
Python
|
agpl-3.0
|
privacyidea/privacyidea,privacyidea/privacyidea,privacyidea/privacyidea,privacyidea/privacyidea,privacyidea/privacyidea,privacyidea/privacyidea
|
Add DB migration script for attributes table
|
"""Add table for additional user attributes
Revision ID: 888b56ed5dcb
Revises: d5870fd2f2a4
Create Date: 2021-02-10 12:17:40.880224
"""
# revision identifiers, used by Alembic.
revision = '888b56ed5dcb'
down_revision = 'd5870fd2f2a4'
from alembic import op
import sqlalchemy as sa
def upgrade():
try:
op.create_table('userattribute',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Unicode(length=320), nullable=True),
sa.Column('resolver', sa.Unicode(length=120), nullable=True),
sa.Column('realm_id', sa.Integer(), nullable=True),
sa.Column('Key', sa.Unicode(length=255), nullable=False),
sa.Column('Value', sa.UnicodeText(), nullable=True),
sa.Column('Type', sa.Unicode(length=100), nullable=True),
sa.ForeignKeyConstraint(['realm_id'], ['realm.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_userattribute_resolver'), 'userattribute', ['resolver'], unique=False)
op.create_index(op.f('ix_userattribute_user_id'), 'userattribute', ['user_id'], unique=False)
except Exception as exx:
print("Could not add table 'userattribute'.")
print(exx)
def downgrade():
op.drop_index(op.f('ix_userattribute_user_id'), table_name='userattribute')
op.drop_index(op.f('ix_userattribute_resolver'), table_name='userattribute')
op.drop_table('userattribute')
|
<commit_before><commit_msg>Add DB migration script for attributes table<commit_after>
|
"""Add table for additional user attributes
Revision ID: 888b56ed5dcb
Revises: d5870fd2f2a4
Create Date: 2021-02-10 12:17:40.880224
"""
# revision identifiers, used by Alembic.
revision = '888b56ed5dcb'
down_revision = 'd5870fd2f2a4'
from alembic import op
import sqlalchemy as sa
def upgrade():
try:
op.create_table('userattribute',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Unicode(length=320), nullable=True),
sa.Column('resolver', sa.Unicode(length=120), nullable=True),
sa.Column('realm_id', sa.Integer(), nullable=True),
sa.Column('Key', sa.Unicode(length=255), nullable=False),
sa.Column('Value', sa.UnicodeText(), nullable=True),
sa.Column('Type', sa.Unicode(length=100), nullable=True),
sa.ForeignKeyConstraint(['realm_id'], ['realm.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_userattribute_resolver'), 'userattribute', ['resolver'], unique=False)
op.create_index(op.f('ix_userattribute_user_id'), 'userattribute', ['user_id'], unique=False)
except Exception as exx:
print("Could not add table 'userattribute'.")
print(exx)
def downgrade():
op.drop_index(op.f('ix_userattribute_user_id'), table_name='userattribute')
op.drop_index(op.f('ix_userattribute_resolver'), table_name='userattribute')
op.drop_table('userattribute')
|
Add DB migration script for attributes table"""Add table for additional user attributes
Revision ID: 888b56ed5dcb
Revises: d5870fd2f2a4
Create Date: 2021-02-10 12:17:40.880224
"""
# revision identifiers, used by Alembic.
revision = '888b56ed5dcb'
down_revision = 'd5870fd2f2a4'
from alembic import op
import sqlalchemy as sa
def upgrade():
try:
op.create_table('userattribute',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Unicode(length=320), nullable=True),
sa.Column('resolver', sa.Unicode(length=120), nullable=True),
sa.Column('realm_id', sa.Integer(), nullable=True),
sa.Column('Key', sa.Unicode(length=255), nullable=False),
sa.Column('Value', sa.UnicodeText(), nullable=True),
sa.Column('Type', sa.Unicode(length=100), nullable=True),
sa.ForeignKeyConstraint(['realm_id'], ['realm.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_userattribute_resolver'), 'userattribute', ['resolver'], unique=False)
op.create_index(op.f('ix_userattribute_user_id'), 'userattribute', ['user_id'], unique=False)
except Exception as exx:
print("Could not add table 'userattribute'.")
print(exx)
def downgrade():
op.drop_index(op.f('ix_userattribute_user_id'), table_name='userattribute')
op.drop_index(op.f('ix_userattribute_resolver'), table_name='userattribute')
op.drop_table('userattribute')
|
<commit_before><commit_msg>Add DB migration script for attributes table<commit_after>"""Add table for additional user attributes
Revision ID: 888b56ed5dcb
Revises: d5870fd2f2a4
Create Date: 2021-02-10 12:17:40.880224
"""
# revision identifiers, used by Alembic.
revision = '888b56ed5dcb'
down_revision = 'd5870fd2f2a4'
from alembic import op
import sqlalchemy as sa
def upgrade():
try:
op.create_table('userattribute',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Unicode(length=320), nullable=True),
sa.Column('resolver', sa.Unicode(length=120), nullable=True),
sa.Column('realm_id', sa.Integer(), nullable=True),
sa.Column('Key', sa.Unicode(length=255), nullable=False),
sa.Column('Value', sa.UnicodeText(), nullable=True),
sa.Column('Type', sa.Unicode(length=100), nullable=True),
sa.ForeignKeyConstraint(['realm_id'], ['realm.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_userattribute_resolver'), 'userattribute', ['resolver'], unique=False)
op.create_index(op.f('ix_userattribute_user_id'), 'userattribute', ['user_id'], unique=False)
except Exception as exx:
print("Could not add table 'userattribute'.")
print(exx)
def downgrade():
op.drop_index(op.f('ix_userattribute_user_id'), table_name='userattribute')
op.drop_index(op.f('ix_userattribute_resolver'), table_name='userattribute')
op.drop_table('userattribute')
|
|
067c256fb9df4170c129b2bc36d1b1323fad6d25
|
generate_PSC.py
|
generate_PSC.py
|
from datetime import datetime as dt
from datetime import timedelta as tdelta
import numpy as np
from astropy.io import fits
import astropy.units as u
from astropy.coordinates import SkyCoord
sky_background = 1000.
sky_sigma = 5.
nx = 12
ny = 16
nt = 42
data_cube = np.random.normal(sky_background, sky_sigma, (nt,ny,nx))
obstime = dt.utcnow()
unit = 'PAN000' #I've given this the ID of PAN000 just in case it gets confused for data from a real unit.
camera = '0x2A'
target_name = 'faketarg'
seq_id = '{}{}_{}'.format(unit, camera, obstime.strftime('%Y%m%d_%H%M%SUT'))
xpixorg = 1042
ypixorg = 2042
exptime = 100. # seconds
c = SkyCoord.from_name('HR8799', frame='fk5')
hdu = fits.PrimaryHDU(data_cube)
metadata = {'SEQID': seq_id,\
'FIELD': target_name,\
'RA': c.ra.to(u.degree).value,\
'DEC': c.dec.to(u.degree).value,\
'EQUINOX': c.equinox.value,\
'OBSTIME': obstime.isoformat(),\
'XPIXORG': xpixorg,\
'YPIXORG': ypixorg,\
}
for t in range(nt):
# slightly randomize time gap between images
gap = tdelta(0,exptime + np.random.normal(5,1))
obstime = obstime + t*gap
metadata['TIME{:04d}'.format(t)] = obstime.isoformat()
hdu.header.extend(metadata)
hdu.writeto('PSC_0001.fits', clobber=True)
|
Add quick and dirty script to generate a fake postage stamp cue (PSC).
|
Add quick and dirty script to generate a fake postage stamp cue (PSC).
|
Python
|
mit
|
panoptes/PIAA
|
Add quick and dirty script to generate a fake postage stamp cue (PSC).
|
from datetime import datetime as dt
from datetime import timedelta as tdelta
import numpy as np
from astropy.io import fits
import astropy.units as u
from astropy.coordinates import SkyCoord
sky_background = 1000.
sky_sigma = 5.
nx = 12
ny = 16
nt = 42
data_cube = np.random.normal(sky_background, sky_sigma, (nt,ny,nx))
obstime = dt.utcnow()
unit = 'PAN000' #I've given this the ID of PAN000 just in case it gets confused for data from a real unit.
camera = '0x2A'
target_name = 'faketarg'
seq_id = '{}{}_{}'.format(unit, camera, obstime.strftime('%Y%m%d_%H%M%SUT'))
xpixorg = 1042
ypixorg = 2042
exptime = 100. # seconds
c = SkyCoord.from_name('HR8799', frame='fk5')
hdu = fits.PrimaryHDU(data_cube)
metadata = {'SEQID': seq_id,\
'FIELD': target_name,\
'RA': c.ra.to(u.degree).value,\
'DEC': c.dec.to(u.degree).value,\
'EQUINOX': c.equinox.value,\
'OBSTIME': obstime.isoformat(),\
'XPIXORG': xpixorg,\
'YPIXORG': ypixorg,\
}
for t in range(nt):
# slightly randomize time gap between images
gap = tdelta(0,exptime + np.random.normal(5,1))
obstime = obstime + t*gap
metadata['TIME{:04d}'.format(t)] = obstime.isoformat()
hdu.header.extend(metadata)
hdu.writeto('PSC_0001.fits', clobber=True)
|
<commit_before><commit_msg>Add quick and dirty script to generate a fake postage stamp cue (PSC).<commit_after>
|
from datetime import datetime as dt
from datetime import timedelta as tdelta
import numpy as np
from astropy.io import fits
import astropy.units as u
from astropy.coordinates import SkyCoord
sky_background = 1000.
sky_sigma = 5.
nx = 12
ny = 16
nt = 42
data_cube = np.random.normal(sky_background, sky_sigma, (nt,ny,nx))
obstime = dt.utcnow()
unit = 'PAN000' #I've given this the ID of PAN000 just in case it gets confused for data from a real unit.
camera = '0x2A'
target_name = 'faketarg'
seq_id = '{}{}_{}'.format(unit, camera, obstime.strftime('%Y%m%d_%H%M%SUT'))
xpixorg = 1042
ypixorg = 2042
exptime = 100. # seconds
c = SkyCoord.from_name('HR8799', frame='fk5')
hdu = fits.PrimaryHDU(data_cube)
metadata = {'SEQID': seq_id,\
'FIELD': target_name,\
'RA': c.ra.to(u.degree).value,\
'DEC': c.dec.to(u.degree).value,\
'EQUINOX': c.equinox.value,\
'OBSTIME': obstime.isoformat(),\
'XPIXORG': xpixorg,\
'YPIXORG': ypixorg,\
}
for t in range(nt):
# slightly randomize time gap between images
gap = tdelta(0,exptime + np.random.normal(5,1))
obstime = obstime + t*gap
metadata['TIME{:04d}'.format(t)] = obstime.isoformat()
hdu.header.extend(metadata)
hdu.writeto('PSC_0001.fits', clobber=True)
|
Add quick and dirty script to generate a fake postage stamp cue (PSC).from datetime import datetime as dt
from datetime import timedelta as tdelta
import numpy as np
from astropy.io import fits
import astropy.units as u
from astropy.coordinates import SkyCoord
sky_background = 1000.
sky_sigma = 5.
nx = 12
ny = 16
nt = 42
data_cube = np.random.normal(sky_background, sky_sigma, (nt,ny,nx))
obstime = dt.utcnow()
unit = 'PAN000' #I've given this the ID of PAN000 just in case it gets confused for data from a real unit.
camera = '0x2A'
target_name = 'faketarg'
seq_id = '{}{}_{}'.format(unit, camera, obstime.strftime('%Y%m%d_%H%M%SUT'))
xpixorg = 1042
ypixorg = 2042
exptime = 100. # seconds
c = SkyCoord.from_name('HR8799', frame='fk5')
hdu = fits.PrimaryHDU(data_cube)
metadata = {'SEQID': seq_id,\
'FIELD': target_name,\
'RA': c.ra.to(u.degree).value,\
'DEC': c.dec.to(u.degree).value,\
'EQUINOX': c.equinox.value,\
'OBSTIME': obstime.isoformat(),\
'XPIXORG': xpixorg,\
'YPIXORG': ypixorg,\
}
for t in range(nt):
# slightly randomize time gap between images
gap = tdelta(0,exptime + np.random.normal(5,1))
obstime = obstime + t*gap
metadata['TIME{:04d}'.format(t)] = obstime.isoformat()
hdu.header.extend(metadata)
hdu.writeto('PSC_0001.fits', clobber=True)
|
<commit_before><commit_msg>Add quick and dirty script to generate a fake postage stamp cue (PSC).<commit_after>from datetime import datetime as dt
from datetime import timedelta as tdelta
import numpy as np
from astropy.io import fits
import astropy.units as u
from astropy.coordinates import SkyCoord
sky_background = 1000.
sky_sigma = 5.
nx = 12
ny = 16
nt = 42
data_cube = np.random.normal(sky_background, sky_sigma, (nt,ny,nx))
obstime = dt.utcnow()
unit = 'PAN000' #I've given this the ID of PAN000 just in case it gets confused for data from a real unit.
camera = '0x2A'
target_name = 'faketarg'
seq_id = '{}{}_{}'.format(unit, camera, obstime.strftime('%Y%m%d_%H%M%SUT'))
xpixorg = 1042
ypixorg = 2042
exptime = 100. # seconds
c = SkyCoord.from_name('HR8799', frame='fk5')
hdu = fits.PrimaryHDU(data_cube)
metadata = {'SEQID': seq_id,\
'FIELD': target_name,\
'RA': c.ra.to(u.degree).value,\
'DEC': c.dec.to(u.degree).value,\
'EQUINOX': c.equinox.value,\
'OBSTIME': obstime.isoformat(),\
'XPIXORG': xpixorg,\
'YPIXORG': ypixorg,\
}
for t in range(nt):
# slightly randomize time gap between images
gap = tdelta(0,exptime + np.random.normal(5,1))
obstime = obstime + t*gap
metadata['TIME{:04d}'.format(t)] = obstime.isoformat()
hdu.header.extend(metadata)
hdu.writeto('PSC_0001.fits', clobber=True)
|
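One detail worth flagging in the script above: inside the loop, obstime = obstime + t*gap adds t*gap rather than gap, so the spacing between consecutive frames grows with the frame index instead of staying near one exposure time. A minimal sketch (variable names reuse the script's; the intent of one randomized exposure gap per frame is assumed) of a loop that accumulates timestamps monotonically:

from datetime import datetime as dt
from datetime import timedelta as tdelta
import numpy as np

obstime = dt.utcnow()
exptime = 100.  # seconds
nt = 42
timestamps = []
for t in range(nt):
    # advance by exactly one exposure plus a small random readout gap
    obstime += tdelta(seconds=exptime + np.random.normal(5, 1))
    timestamps.append(obstime.isoformat())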
|
a4aea5e34f44d5f9183258978c85e20fb0fbc0b7
|
SMSFlyCRM/SMSApp/management/commands/sms_campaign_scheduler.py
|
SMSFlyCRM/SMSApp/management/commands/sms_campaign_scheduler.py
|
from datetime import datetime
from django.core.management.base import BaseCommand
from SMSFlyCRM.SMSApp.tasks import scheduleRecurringCampaignTasksFor
class Command(BaseCommand):
help = 'Schedules campaign sending for specified interval'
def add_arguments(self, parser):
parser.add_argument('min_interval', type=int)
def handle(self, *args, **options):
min_interval = options['min_interval']
scheduleRecurringCampaignTasksFor.delay(min_interval)
self.stdout.write(self.style.SUCCESS(
'Campaign scheduler has been executed at {}'.format(datetime.now())))
|
Add django command for triggering campaigns scheduler
|
Add django command for triggering campaigns scheduler
|
Python
|
mit
|
wk-tech/crm-smsfly,wk-tech/crm-smsfly,wk-tech/crm-smsfly
|
Add django command for triggering campaigns scheduler
|
from datetime import datetime
from django.core.management.base import BaseCommand
from SMSFlyCRM.SMSApp.tasks import scheduleRecurringCampaignTasksFor
class Command(BaseCommand):
help = 'Schedules campaign sending for specified interval'
def add_arguments(self, parser):
parser.add_argument('min_interval', type=int)
def handle(self, *args, **options):
min_interval = options['min_interval']
scheduleRecurringCampaignTasksFor.delay(min_interval)
self.stdout.write(self.style.SUCCESS(
'Campaign scheduler has been executed at {}'.format(datetime.now())))
|
<commit_before><commit_msg>Add django command for triggering campaigns scheduler<commit_after>
|
from datetime import datetime
from django.core.management.base import BaseCommand
from SMSFlyCRM.SMSApp.tasks import scheduleRecurringCampaignTasksFor
class Command(BaseCommand):
help = 'Schedules campaign sending for specified interval'
def add_arguments(self, parser):
parser.add_argument('min_interval', type=int)
def handle(self, *args, **options):
min_interval = options['min_interval']
scheduleRecurringCampaignTasksFor.delay(min_interval)
self.stdout.write(self.style.SUCCESS(
'Campaign scheduler has been executed at {}'.format(datetime.now())))
|
Add django command for triggering campaigns schedulerfrom datetime import datetime
from django.core.management.base import BaseCommand
from SMSFlyCRM.SMSApp.tasks import scheduleRecurringCampaignTasksFor
class Command(BaseCommand):
help = 'Schedules campaign sending for specified interval'
def add_arguments(self, parser):
parser.add_argument('min_interval', type=int)
def handle(self, *args, **options):
min_interval = options['min_interval']
scheduleRecurringCampaignTasksFor.delay(min_interval)
self.stdout.write(self.style.SUCCESS(
'Campaign scheduler has been executed at {}'.format(datetime.now())))
|
<commit_before><commit_msg>Add django command for triggering campaigns scheduler<commit_after>from datetime import datetime
from django.core.management.base import BaseCommand
from SMSFlyCRM.SMSApp.tasks import scheduleRecurringCampaignTasksFor
class Command(BaseCommand):
help = 'Schedules campaign sending for specified interval'
def add_arguments(self, parser):
parser.add_argument('min_interval', type=int)
def handle(self, *args, **options):
min_interval = options['min_interval']
scheduleRecurringCampaignTasksFor.delay(min_interval)
self.stdout.write(self.style.SUCCESS(
'Campaign scheduler has been executed at {}'.format(datetime.now())))
|
|
9515bf68e15099582946f9765b2fa081d2570701
|
course_discovery/apps/course_metadata/migrations/0220_leveltype_remove_order.py
|
course_discovery/apps/course_metadata/migrations/0220_leveltype_remove_order.py
|
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('course_metadata', '0219_leveltype_ordering'),
]
operations = [
migrations.RemoveField(
model_name='leveltype',
name='order',
),
]
|
Rename order field in LevelType (3/3)
|
Rename order field in LevelType (3/3)
Stage 3: ONLY remove the old column via a migration.
DE-1829
|
Python
|
agpl-3.0
|
edx/course-discovery,edx/course-discovery,edx/course-discovery,edx/course-discovery
|
Rename order field in LevelType (3/3)
Stage 3: ONLY remove the old column via a migration.
DE-1829
|
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('course_metadata', '0219_leveltype_ordering'),
]
operations = [
migrations.RemoveField(
model_name='leveltype',
name='order',
),
]
|
<commit_before><commit_msg>Rename order field in LevelType (3/3)
Stage 3: ONLY remove the old column via a migration.
DE-1829<commit_after>
|
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('course_metadata', '0219_leveltype_ordering'),
]
operations = [
migrations.RemoveField(
model_name='leveltype',
name='order',
),
]
|
Rename order field in LevelType (3/3)
Stage 3: ONLY remove the old column via a migration.
DE-1829from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('course_metadata', '0219_leveltype_ordering'),
]
operations = [
migrations.RemoveField(
model_name='leveltype',
name='order',
),
]
|
<commit_before><commit_msg>Rename order field in LevelType (3/3)
Stage 3: ONLY remove the old column via a migration.
DE-1829<commit_after>from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('course_metadata', '0219_leveltype_ordering'),
]
operations = [
migrations.RemoveField(
model_name='leveltype',
name='order',
),
]
|
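The "(3/3)" in the record above is the last step of a staged, zero-downtime column rename: stage 1 adds the new column, stage 2 backfills it while old and new code coexist, and only stage 3 drops the original column once nothing reads it any more. A minimal single-file sketch of the pattern (field and migration names here are illustrative, not the actual course-discovery ones; in practice each stage ships as its own migration):

from django.db import migrations, models


def copy_order(apps, schema_editor):
    # Stage 2 backfill: runs while both columns still exist.
    LevelType = apps.get_model('course_metadata', 'LevelType')
    for level_type in LevelType.objects.all():
        level_type.sort_value = level_type.order
        level_type.save(update_fields=['sort_value'])


class Migration(migrations.Migration):
    dependencies = [('course_metadata', '0218_some_previous_migration')]

    operations = [
        # Stage 1: add the replacement column next to the old one.
        migrations.AddField(model_name='leveltype', name='sort_value',
                            field=models.PositiveIntegerField(default=0)),
        # Stage 2: copy the data across.
        migrations.RunPython(copy_order, migrations.RunPython.noop),
        # Stage 3 is the migration shown above: RemoveField on the old column,
        # applied only after deployed code has stopped reading it.
    ]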
|
302e267c0f1374a11a661418c49aa63f25a38bf9
|
corehq/apps/export/migrations/0006_delete_dailysavedexportnotification.py
|
corehq/apps/export/migrations/0006_delete_dailysavedexportnotification.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-10-09 19:24
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('export', '0005_datafile_blobmeta'),
]
operations = [
migrations.DeleteModel(
name='DailySavedExportNotification',
),
]
|
Revert "Hold off on migration"
|
Revert "Hold off on migration"
This reverts commit d0cd939e90917bc06b71fb6a6a5aedd881f24840.
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
|
Revert "Hold off on migration"
This reverts commit d0cd939e90917bc06b71fb6a6a5aedd881f24840.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-10-09 19:24
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('export', '0005_datafile_blobmeta'),
]
operations = [
migrations.DeleteModel(
name='DailySavedExportNotification',
),
]
|
<commit_before><commit_msg>Revert "Hold off on migration"
This reverts commit d0cd939e90917bc06b71fb6a6a5aedd881f24840.<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-10-09 19:24
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('export', '0005_datafile_blobmeta'),
]
operations = [
migrations.DeleteModel(
name='DailySavedExportNotification',
),
]
|
Revert "Hold off on migration"
This reverts commit d0cd939e90917bc06b71fb6a6a5aedd881f24840.# -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-10-09 19:24
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('export', '0005_datafile_blobmeta'),
]
operations = [
migrations.DeleteModel(
name='DailySavedExportNotification',
),
]
|
<commit_before><commit_msg>Revert "Hold off on migration"
This reverts commit d0cd939e90917bc06b71fb6a6a5aedd881f24840.<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-10-09 19:24
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('export', '0005_datafile_blobmeta'),
]
operations = [
migrations.DeleteModel(
name='DailySavedExportNotification',
),
]
|
|
5e19645b4917bf2f0bc9c987ef6148a5833ff05c
|
aioriak/tests/test_datatypes.py
|
aioriak/tests/test_datatypes.py
|
import unittest
from aioriak.bucket import Bucket, BucketType
from aioriak import datatypes
from aioriak.tests.base import IntegrationTest, AsyncUnitTestCase
class DatatypeUnitTestBase:
dtype = None
bucket = Bucket(None, 'test', BucketType(None, 'datatypes'))
def op(self, dtype):
raise NotImplementedError
def check_op_output(self, op):
raise NotImplementedError
def test_new_type_is_clean(self):
newtype = self.dtype(self.bucket, 'key')
self.assertIsNone(newtype.to_op())
def test_modified_type_has_op(self):
newtype = self.dtype(self.bucket, 'key')
self.op(newtype)
self.assertIsNotNone(newtype.to_op())
def test_protected_attrs_not_settable(self):
newtype = self.dtype(self.bucket, 'key')
for i in ('value', 'context'):
with self.assertRaises(AttributeError):
setattr(newtype, i, 'foo')
def test_modified_type_has_unmodified_value(self):
newtype = self.dtype(self.bucket, 'key')
oldvalue = newtype.value
self.op(newtype)
self.assertEqual(oldvalue, newtype.value)
def test_op_output(self):
newtype = self.dtype(self.bucket, 'key')
self.op(newtype)
op = newtype.to_op()
self.check_op_output(op)
class CounterUnitTests(DatatypeUnitTestBase, unittest.TestCase):
dtype = datatypes.Counter
def op(self, dtype):
dtype.increment(5)
def check_op_output(self, op):
self.assertEqual(('increment', 5), op)
class DatatypeIntegrationTests(IntegrationTest,
AsyncUnitTestCase):
def test_dt_counter(self):
async def go():
btype = self.client.bucket_type('pytest-counters')
bucket = btype.bucket(self.bucket_name)
mycount = datatypes.Counter(bucket, self.key_name)
mycount.increment(5)
await mycount.store()
othercount = await bucket.get(self.key_name)
self.assertEqual(5, othercount.value)
othercount.decrement(3)
await othercount.store()
await mycount.reload()
self.assertEqual(2, mycount.value)
self.loop.run_until_complete(go())
|
Add Riak counter datatype tests
|
Add Riak counter datatype tests
|
Python
|
mit
|
rambler-digital-solutions/aioriak
|
Add Riak counter datatype tests
|
import unittest
from aioriak.bucket import Bucket, BucketType
from aioriak import datatypes
from aioriak.tests.base import IntegrationTest, AsyncUnitTestCase
class DatatypeUnitTestBase:
dtype = None
bucket = Bucket(None, 'test', BucketType(None, 'datatypes'))
def op(self, dtype):
raise NotImplementedError
def check_op_output(self, op):
raise NotImplementedError
def test_new_type_is_clean(self):
newtype = self.dtype(self.bucket, 'key')
self.assertIsNone(newtype.to_op())
def test_modified_type_has_op(self):
newtype = self.dtype(self.bucket, 'key')
self.op(newtype)
self.assertIsNotNone(newtype.to_op())
def test_protected_attrs_not_settable(self):
newtype = self.dtype(self.bucket, 'key')
for i in ('value', 'context'):
with self.assertRaises(AttributeError):
setattr(newtype, i, 'foo')
def test_modified_type_has_unmodified_value(self):
newtype = self.dtype(self.bucket, 'key')
oldvalue = newtype.value
self.op(newtype)
self.assertEqual(oldvalue, newtype.value)
def test_op_output(self):
newtype = self.dtype(self.bucket, 'key')
self.op(newtype)
op = newtype.to_op()
self.check_op_output(op)
class CounterUnitTests(DatatypeUnitTestBase, unittest.TestCase):
dtype = datatypes.Counter
def op(self, dtype):
dtype.increment(5)
def check_op_output(self, op):
self.assertEqual(('increment', 5), op)
class DatatypeIntegrationTests(IntegrationTest,
AsyncUnitTestCase):
def test_dt_counter(self):
async def go():
btype = self.client.bucket_type('pytest-counters')
bucket = btype.bucket(self.bucket_name)
mycount = datatypes.Counter(bucket, self.key_name)
mycount.increment(5)
await mycount.store()
othercount = await bucket.get(self.key_name)
self.assertEqual(5, othercount.value)
othercount.decrement(3)
await othercount.store()
await mycount.reload()
self.assertEqual(2, mycount.value)
self.loop.run_until_complete(go())
|
<commit_before><commit_msg>Add Riak counter datatype tests<commit_after>
|
import unittest
from aioriak.bucket import Bucket, BucketType
from aioriak import datatypes
from aioriak.tests.base import IntegrationTest, AsyncUnitTestCase
class DatatypeUnitTestBase:
dtype = None
bucket = Bucket(None, 'test', BucketType(None, 'datatypes'))
def op(self, dtype):
raise NotImplementedError
def check_op_output(self, op):
raise NotImplementedError
def test_new_type_is_clean(self):
newtype = self.dtype(self.bucket, 'key')
self.assertIsNone(newtype.to_op())
def test_modified_type_has_op(self):
newtype = self.dtype(self.bucket, 'key')
self.op(newtype)
self.assertIsNotNone(newtype.to_op())
def test_protected_attrs_not_settable(self):
newtype = self.dtype(self.bucket, 'key')
for i in ('value', 'context'):
with self.assertRaises(AttributeError):
setattr(newtype, i, 'foo')
def test_modified_type_has_unmodified_value(self):
newtype = self.dtype(self.bucket, 'key')
oldvalue = newtype.value
self.op(newtype)
self.assertEqual(oldvalue, newtype.value)
def test_op_output(self):
newtype = self.dtype(self.bucket, 'key')
self.op(newtype)
op = newtype.to_op()
self.check_op_output(op)
class CounterUnitTests(DatatypeUnitTestBase, unittest.TestCase):
dtype = datatypes.Counter
def op(self, dtype):
dtype.increment(5)
def check_op_output(self, op):
self.assertEqual(('increment', 5), op)
class DatatypeIntegrationTests(IntegrationTest,
AsyncUnitTestCase):
def test_dt_counter(self):
async def go():
btype = self.client.bucket_type('pytest-counters')
bucket = btype.bucket(self.bucket_name)
mycount = datatypes.Counter(bucket, self.key_name)
mycount.increment(5)
await mycount.store()
othercount = await bucket.get(self.key_name)
self.assertEqual(5, othercount.value)
othercount.decrement(3)
await othercount.store()
await mycount.reload()
self.assertEqual(2, mycount.value)
self.loop.run_until_complete(go())
|
Add Riak counter datatype testsimport unittest
from aioriak.bucket import Bucket, BucketType
from aioriak import datatypes
from aioriak.tests.base import IntegrationTest, AsyncUnitTestCase
class DatatypeUnitTestBase:
dtype = None
bucket = Bucket(None, 'test', BucketType(None, 'datatypes'))
def op(self, dtype):
raise NotImplementedError
def check_op_output(self, op):
raise NotImplementedError
def test_new_type_is_clean(self):
newtype = self.dtype(self.bucket, 'key')
self.assertIsNone(newtype.to_op())
def test_modified_type_has_op(self):
newtype = self.dtype(self.bucket, 'key')
self.op(newtype)
self.assertIsNotNone(newtype.to_op())
def test_protected_attrs_not_settable(self):
newtype = self.dtype(self.bucket, 'key')
for i in ('value', 'context'):
with self.assertRaises(AttributeError):
setattr(newtype, i, 'foo')
def test_modified_type_has_unmodified_value(self):
newtype = self.dtype(self.bucket, 'key')
oldvalue = newtype.value
self.op(newtype)
self.assertEqual(oldvalue, newtype.value)
def test_op_output(self):
newtype = self.dtype(self.bucket, 'key')
self.op(newtype)
op = newtype.to_op()
self.check_op_output(op)
class CounterUnitTests(DatatypeUnitTestBase, unittest.TestCase):
dtype = datatypes.Counter
def op(self, dtype):
dtype.increment(5)
def check_op_output(self, op):
self.assertEqual(('increment', 5), op)
class DatatypeIntegrationTests(IntegrationTest,
AsyncUnitTestCase):
def test_dt_counter(self):
async def go():
btype = self.client.bucket_type('pytest-counters')
bucket = btype.bucket(self.bucket_name)
mycount = datatypes.Counter(bucket, self.key_name)
mycount.increment(5)
await mycount.store()
othercount = await bucket.get(self.key_name)
self.assertEqual(5, othercount.value)
othercount.decrement(3)
await othercount.store()
await mycount.reload()
self.assertEqual(2, mycount.value)
self.loop.run_until_complete(go())
|
<commit_before><commit_msg>Add Riak counter datatype tests<commit_after>import unittest
from aioriak.bucket import Bucket, BucketType
from aioriak import datatypes
from aioriak.tests.base import IntegrationTest, AsyncUnitTestCase
class DatatypeUnitTestBase:
dtype = None
bucket = Bucket(None, 'test', BucketType(None, 'datatypes'))
def op(self, dtype):
raise NotImplementedError
def check_op_output(self, op):
raise NotImplementedError
def test_new_type_is_clean(self):
newtype = self.dtype(self.bucket, 'key')
self.assertIsNone(newtype.to_op())
def test_modified_type_has_op(self):
newtype = self.dtype(self.bucket, 'key')
self.op(newtype)
self.assertIsNotNone(newtype.to_op())
def test_protected_attrs_not_settable(self):
newtype = self.dtype(self.bucket, 'key')
for i in ('value', 'context'):
with self.assertRaises(AttributeError):
setattr(newtype, i, 'foo')
def test_modified_type_has_unmodified_value(self):
newtype = self.dtype(self.bucket, 'key')
oldvalue = newtype.value
self.op(newtype)
self.assertEqual(oldvalue, newtype.value)
def test_op_output(self):
newtype = self.dtype(self.bucket, 'key')
self.op(newtype)
op = newtype.to_op()
self.check_op_output(op)
class CounterUnitTests(DatatypeUnitTestBase, unittest.TestCase):
dtype = datatypes.Counter
def op(self, dtype):
dtype.increment(5)
def check_op_output(self, op):
self.assertEqual(('increment', 5), op)
class DatatypeIntegrationTests(IntegrationTest,
AsyncUnitTestCase):
def test_dt_counter(self):
async def go():
btype = self.client.bucket_type('pytest-counters')
bucket = btype.bucket(self.bucket_name)
mycount = datatypes.Counter(bucket, self.key_name)
mycount.increment(5)
await mycount.store()
othercount = await bucket.get(self.key_name)
self.assertEqual(5, othercount.value)
othercount.decrement(3)
await othercount.store()
await mycount.reload()
self.assertEqual(2, mycount.value)
self.loop.run_until_complete(go())
|
|
342da37f14c6ac991bd3f7c904bd5b7f1196493a
|
src/sentry/digests/backends/dummy.py
|
src/sentry/digests/backends/dummy.py
|
from __future__ import absolute_import
from contextlib import contextmanager
from sentry.digests.backends.base import Backend
class DummyBackend(Backend):
def add(self, key, record):
pass
@contextmanager
def digest(self, key):
yield []
def schedule(self, deadline):
return
yield # make this a generator
def maintenance(self, deadline):
pass
|
from __future__ import absolute_import
from contextlib import contextmanager
from sentry.digests.backends.base import Backend
class DummyBackend(Backend):
def add(self, key, record, increment_delay=None, maximum_delay=None):
pass
@contextmanager
def digest(self, key, minimum_delay=None):
yield []
def schedule(self, deadline):
return
yield # make this a generator
def maintenance(self, deadline):
pass
|
Fix digests `DummyBackend` method signatures.
|
Fix digests `DummyBackend` method signatures.
This makes them consistent with the base backend API.
RIP @tkaemming, stung by dynamic typing.
|
Python
|
bsd-3-clause
|
looker/sentry,mvaled/sentry,JamesMura/sentry,mvaled/sentry,zenefits/sentry,zenefits/sentry,fotinakis/sentry,alexm92/sentry,JamesMura/sentry,BuildingLink/sentry,mvaled/sentry,jean/sentry,JamesMura/sentry,BuildingLink/sentry,JackDanger/sentry,gencer/sentry,fotinakis/sentry,gencer/sentry,daevaorn/sentry,JackDanger/sentry,nicholasserra/sentry,zenefits/sentry,beeftornado/sentry,beeftornado/sentry,BuildingLink/sentry,ifduyue/sentry,daevaorn/sentry,BuildingLink/sentry,looker/sentry,gencer/sentry,JackDanger/sentry,jean/sentry,ifduyue/sentry,nicholasserra/sentry,looker/sentry,looker/sentry,JamesMura/sentry,alexm92/sentry,daevaorn/sentry,gencer/sentry,jean/sentry,nicholasserra/sentry,mitsuhiko/sentry,gencer/sentry,jean/sentry,daevaorn/sentry,fotinakis/sentry,JamesMura/sentry,zenefits/sentry,ifduyue/sentry,ifduyue/sentry,mitsuhiko/sentry,alexm92/sentry,mvaled/sentry,mvaled/sentry,zenefits/sentry,fotinakis/sentry,BuildingLink/sentry,looker/sentry,mvaled/sentry,beeftornado/sentry,jean/sentry,ifduyue/sentry
|
from __future__ import absolute_import
from contextlib import contextmanager
from sentry.digests.backends.base import Backend
class DummyBackend(Backend):
def add(self, key, record):
pass
@contextmanager
def digest(self, key):
yield []
def schedule(self, deadline):
return
yield # make this a generator
def maintenance(self, deadline):
pass
Fix digests `DummyBackend` method signatures.
This makes them consistent with the base backend API.
RIP @tkaemming, stung by dynamic typing.
|
from __future__ import absolute_import
from contextlib import contextmanager
from sentry.digests.backends.base import Backend
class DummyBackend(Backend):
def add(self, key, record, increment_delay=None, maximum_delay=None):
pass
@contextmanager
def digest(self, key, minimum_delay=None):
yield []
def schedule(self, deadline):
return
yield # make this a generator
def maintenance(self, deadline):
pass
|
<commit_before>from __future__ import absolute_import
from contextlib import contextmanager
from sentry.digests.backends.base import Backend
class DummyBackend(Backend):
def add(self, key, record):
pass
@contextmanager
def digest(self, key):
yield []
def schedule(self, deadline):
return
yield # make this a generator
def maintenance(self, deadline):
pass
<commit_msg>Fix digests `DummyBackend` method signatures.
This makes them consistent with the base backend API.
RIP @tkaemming, stung by dynamic typing.<commit_after>
|
from __future__ import absolute_import
from contextlib import contextmanager
from sentry.digests.backends.base import Backend
class DummyBackend(Backend):
def add(self, key, record, increment_delay=None, maximum_delay=None):
pass
@contextmanager
def digest(self, key, minimum_delay=None):
yield []
def schedule(self, deadline):
return
yield # make this a generator
def maintenance(self, deadline):
pass
|
from __future__ import absolute_import
from contextlib import contextmanager
from sentry.digests.backends.base import Backend
class DummyBackend(Backend):
def add(self, key, record):
pass
@contextmanager
def digest(self, key):
yield []
def schedule(self, deadline):
return
yield # make this a generator
def maintenance(self, deadline):
pass
Fix digests `DummyBackend` method signatures.
This makes them consistent with the base backend API.
RIP @tkaemming, stung by dynamic typing.from __future__ import absolute_import
from contextlib import contextmanager
from sentry.digests.backends.base import Backend
class DummyBackend(Backend):
def add(self, key, record, increment_delay=None, maximum_delay=None):
pass
@contextmanager
def digest(self, key, minimum_delay=None):
yield []
def schedule(self, deadline):
return
yield # make this a generator
def maintenance(self, deadline):
pass
|
<commit_before>from __future__ import absolute_import
from contextlib import contextmanager
from sentry.digests.backends.base import Backend
class DummyBackend(Backend):
def add(self, key, record):
pass
@contextmanager
def digest(self, key):
yield []
def schedule(self, deadline):
return
yield # make this a generator
def maintenance(self, deadline):
pass
<commit_msg>Fix digests `DummyBackend` method signatures.
This makes them consistent with the base backend API.
RIP @tkaemming, stung by dynamic typing.<commit_after>from __future__ import absolute_import
from contextlib import contextmanager
from sentry.digests.backends.base import Backend
class DummyBackend(Backend):
def add(self, key, record, increment_delay=None, maximum_delay=None):
pass
@contextmanager
def digest(self, key, minimum_delay=None):
yield []
def schedule(self, deadline):
return
yield # make this a generator
def maintenance(self, deadline):
pass
|
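The "stung by dynamic typing" remark in the last record points at why this kind of bug slips through: Python never checks an override's signature against its base class, so a stub that omits keyword arguments imports and instantiates fine and only fails when a caller actually passes them. A minimal sketch of the failure mode (class names invented for illustration, not Sentry's real backend):

class Backend(object):
    def add(self, key, record, increment_delay=None, maximum_delay=None):
        raise NotImplementedError


class BadDummy(Backend):
    # Narrower signature than the base class -- nothing complains here.
    def add(self, key, record):
        pass


dummy = BadDummy()
dummy.add('k', 'r')  # works
try:
    dummy.add('k', 'r', increment_delay=30)  # callers using the base API break at call time
except TypeError as exc:
    print(exc)  # add() got an unexpected keyword argument 'increment_delay'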