Dataset schema (per-column string-length statistics as reported by the viewer):

| column | type | min length | max length |
|---|---|---|---|
| commit | string | 40 | 40 |
| old_file | string | 4 | 118 |
| new_file | string | 4 | 118 |
| old_contents | string | 0 | 2.94k |
| new_contents | string | 1 | 4.43k |
| subject | string | 15 | 444 |
| message | string | 16 | 3.45k |
| lang | string (1 class) | n/a | n/a |
| license | string (13 classes) | n/a | n/a |
| repos | string | 5 | 43.2k |
| prompt | string | 17 | 4.58k |
| response | string | 1 | 4.43k |
| prompt_tagged | string | 58 | 4.62k |
| response_tagged | string | 1 | 4.43k |
| text | string | 132 | 7.29k |
| text_tagged | string | 173 | 7.33k |
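Each row below holds one commit: the pre- and post-commit file contents (`old_contents`/`new_contents`), the commit subject and message, and model-ready concatenations of those fields (`prompt`, `response`, `text`, and their tagged variants appear to repeat the same code and message). A minimal sketch of loading and inspecting a dataset with this layout via the Hugging Face `datasets` library follows; the dataset path is a placeholder assumption, since the real repository id is not given here.

```python
# Minimal sketch: load a commit dataset with this schema and inspect one row.
# "org/commit-dataset" is a hypothetical path, not the actual dataset id.
from datasets import load_dataset

ds = load_dataset("org/commit-dataset", split="train")
print(ds.column_names)            # commit, old_file, new_file, old_contents, ...
row = ds[0]
print(row["subject"])             # one-line commit subject
print(row["new_contents"][:200])  # start of the post-commit file contents
```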
d4ed2619bb7f1d49df7a6add98309de5f2201a8d
|
tests/destination_finder_test.py
|
tests/destination_finder_test.py
|
import unittest
import datetime
import json
import sys
sys.path.append('..')

import sabre_dev_studio
import sabre_dev_studio.sabre_exceptions as sabre_exceptions

'''
requires config.json in the same directory for api authentication
{
    "sabre_client_id": -----,
    "sabre_client_secret": -----
}
'''

class TestBasicDestinationFinder(unittest.TestCase):
    def read_config(self):
        raw_data = open('config.json').read()
        data = json.loads(raw_data)
        client_secret = data['sabre_client_secret']
        client_id = data['sabre_client_id']
        return (client_id, client_secret)

    def setUp(self):
        # Read from config
        self.client_id, self.client_secret = self.read_config()
        self.sds = sabre_dev_studio.SabreDevStudio()
        self.sds.set_credentials(self.client_id, self.client_secret)
        self.sds.authenticate()

    def test_request_with_args(self):
        prices = self.sds.destination_finder('SFO',
                                             length_of_stay=[1, 2],
                                             cost_per_mile=0.20,
                                             point_of_sale='US')
        self.assertIsNotNone(prices)

    def test_basic_request(self):
        opts = {
            'origin': 'SFO',
            'lengthofstay': [3, 4],
            'pointofsalecountry': 'US'
        }
        prices = self.sds.destination_finder_opts(opts)
        self.assertIsNotNone(prices)

if __name__ == '__main__':
    unittest.main()
|
Fix destination finder test to actually produce results
|
Fix destination finder test to actually produce results
|
Python
|
mit
|
Jamil/sabre_dev_studio
|
0448c74dd655dcc871fd870c6785295973ba4139
|
examples/kiwilist.py
|
examples/kiwilist.py
|
import gtk

from kiwi.ui.widgets.list import Column, List, SequentialColumn

class Person:
    """The parameters need to be of the same name of the column headers"""
    def __init__(self, name, age, city, present):
        (self.name, self.age,
         self.city, self.present) = name, age, city, present

    def __repr__(self):
        return '<Person %s>' % self.name

class MyColumn(Column):
    pass

def format_func(age):
    if age % 2 == 0:
        return float(age)
    return age

columns = [
    SequentialColumn(),
    MyColumn('name', tooltip='What about a stupid tooltip?', editable=True),
    Column('age', format_func=format_func, editable=True),
    Column('city', visible=True, sorted=True),
]

data = (Person('Evandro', 23, 'Belo Horizonte', True),
        Person('Daniel', 22, 'Sao Carlos', False),
        Person('Henrique', 21, 'Sao Carlos', True),
        Person('Gustavo', 23, 'San Jose do Santos', False),
        Person('Johan', 23, 'Goteborg', True),
        Person('Lorenzo', 26, 'Granada', False))

win = gtk.Window()
win.set_default_size(300, 150)
win.connect('destroy', gtk.main_quit)

l = List(columns, data)
l.add_list([Person('Nando', 29 + len(l), 'Santos', True)], False)
# add an extra person

win.add(l)
win.show_all()
gtk.main()
|
Add a small kiwi list example
|
Add a small kiwi list example
|
Python
|
lgpl-2.1
|
Schevo/kiwi,Schevo/kiwi,Schevo/kiwi
|
feebfc2d084227a015521de2fe4eea31db1fb09d
|
examples/list_dir.py
|
examples/list_dir.py
|
import asyncio

import ampdclient

MPD_HOST = '192.168.1.5'
MPD_PORT = 6600

def onchange(message):
    print('Message received ' + str(message))

@asyncio.coroutine
def start():
    mpd_client = yield from ampdclient.connect(MPD_HOST, MPD_PORT)
    mpd_client.cb_onchange = onchange

    resp = yield from mpd_client.lsinfo('nas-samba')
    print('Response {}'.format(resp))
    yield from asyncio.sleep(1)

    resp = yield from mpd_client.lsinfo('nas-samba/testpl')
    print('Response {}'.format(resp))
    yield from asyncio.sleep(1)

    resp = yield from mpd_client.lsinfo('nas-samba/Albums/Alternative '
                                        'Rock/Arcade Fire/2004 - '
                                        'Funeral')
    print('Response {}'.format(resp))

    yield from mpd_client.stop()

loop = asyncio.get_event_loop()
loop.run_until_complete(start())
|
Add example for listing directory
|
Add example for listing directory
|
Python
|
apache-2.0
|
PierreRust/ampdclient
|
ad5181b36a51a0ac2ab4aaec829359711afdeda9
|
tests/test_executors.py
|
tests/test_executors.py
|
import asyncio
import concurrent.futures

from uvloop import _testbase as tb

def fib(n):
    if n < 2:
        return 1
    return fib(n - 2) + fib(n - 1)

class _TestExecutors:
    def run_pool_test(self, pool_factory):
        async def run():
            pool = pool_factory()
            with pool:
                coros = []
                for i in range(0, 10):
                    coros.append(self.loop.run_in_executor(pool, fib, i))
                res = await asyncio.gather(*coros, loop=self.loop)
                self.assertEqual(res, fib10)

        fib10 = [fib(i) for i in range(10)]
        self.loop.run_until_complete(run())

    def test_executors_process_pool_01(self):
        self.run_pool_test(concurrent.futures.ProcessPoolExecutor)

    def test_executors_process_pool_02(self):
        self.run_pool_test(concurrent.futures.ThreadPoolExecutor)

class TestUVExecutors(_TestExecutors, tb.UVTestCase):
    pass

class TestAIOExecutors(_TestExecutors, tb.AIOTestCase):
    pass
|
Add tests for process/thread pool executors
|
tests: Add tests for process/thread pool executors
|
Python
|
apache-2.0
|
1st1/uvloop,MagicStack/uvloop,MagicStack/uvloop
|
3e483c2dcfd89227d9a2c56578a6532439b8fca4
|
core/data/DataTransformer.py
|
core/data/DataTransformer.py
|
"""
DataTransformer
:Authors:
Berend Klein Haneveld
"""
from vtk import vtkImageReslice
class DataTransformer(object):
"""DataTransformer is a class that can transform a given dataset"""
def __init__(self):
super(DataTransformer, self).__init__()
def TransformImageData(self, imageData, transform):
"""
:type imageData: vtkImageData
:type transform: vtkTransform
"""
reslicer = vtkImageReslice()
reslicer.SetInterpolationModeToCubic()
reslicer.SetAutoCropOutput(1) # Not sure if this is what we want
reslicer.SetInputData(imageData)
reslicer.SetResliceTransform(transform.GetInverse())
return reslicer.GetOutput()
|
Add a simple transform helper class.
|
Add a simple transform helper class.
|
Python
|
mit
|
berendkleinhaneveld/Registrationshop,berendkleinhaneveld/Registrationshop
|
1b1bc020f3e37c10072ed45271e92348a8e2fcad
|
pi/plot_temperature.py
|
pi/plot_temperature.py
|
import datetime
import matplotlib.pyplot as pyplot
import os
import re
import sys

def main():
    """Main."""
    if sys.version_info.major <= 2:
        print('Use Python 3')
        return

    lines = []
    for file_name in sorted(os.listdir('.{}temperatures'.format(os.sep))):
        if file_name.endswith('csv'):
            # Do a line at a time because some lines might be corrupted due to
            # power loss
            file_name = '.{sep}temperatures{sep}{f}'.format(sep=os.sep, f=file_name)
            with open(file_name) as file_:
                for line in file_.readlines():
                    if line.startswith('"') and line.endswith('\n'):
                        lines.append(line)

    def parse_time(line):
        """Returns the time from a line."""
        time_str = line.split(',')[0].replace('"', '')
        parts = [int(i) for i in re.split('[: -]', time_str)]
        return datetime.datetime(*parts)

    def parse_temperature(line):
        """Returns the temperature from a line."""
        return float(line.split(',')[1][:-1])

    initial_time = parse_time(lines[0])
    seconds = [(parse_time(line) - initial_time).total_seconds() for line in lines]
    temperatures = [parse_temperature(line) for line in lines]
    hours = [sec / 3600. for sec in seconds]

    pyplot.plot(hours, temperatures)
    pyplot.xlabel('time (hours)')
    pyplot.ylabel('temperature (degrees C)')
    pyplot.grid(True)
    pyplot.show()

if __name__ == '__main__':
    main()
|
Add script to plot temperatures
|
Add script to plot temperatures
|
Python
|
mit
|
bskari/eclipse-2017-hab,bskari/eclipse-2017-hab
|
90aebb2fe3c4605798148adbff57deedba0ad175
|
test_user_operations.py
|
test_user_operations.py
|
import unittest
import user
from users import UserDatabase

class FakeDatabaseSession:
    def __init__(self):
        self.didCommit = False
        self.things = []

    def commit(self):
        self.didCommit = True

    def add(self, thingToAdd):
        self.things.append(thingToAdd)

class FakeDatabase:
    def __init__(self):
        self.session = FakeDatabaseSession()

class TestUserOperations(unittest.TestCase):
    def setUp(self):
        self.userDB = UserDatabase()
        self.appDB = FakeDatabase()
        self.appDBSession = self.appDB.session

    def tearDown(self):
        self.userDB = None
        self.appDB = None
        self.appDBSession = None

    def register_ash(self):
        self.userDB.registerUser("AshKetchum", "pallettown123", "000000000000", self.appDB)

    def test_registration(self):
        self.register_ash()
        user = self.appDBSession.things[0]
        self.assertEqual(user.username, "AshKetchum")
        self.assertTrue(self.appDB.session.didCommit)

    def test_catching_pokemon_works(self):
        self.register_ash()
        user = self.appDBSession.things[0]
        # Catch Pikachu
        self.userDB.catchPokemonForUser(user, 25, self.appDB)
        # Grab the state of the Pokemon @ 25
        state = self.userDB._stateOfPokemonForUser(user, 25, self.appDB)
        state = int(state)
        self.assertEqual(state, 1)

    def test_new_user_hasnt_caught_pokemon(self):
        self.register_ash()
        user = self.appDBSession.things[0]
        # Grab the state of the Pokemon @ 25
        # It should be 0
        state = self.userDB._stateOfPokemonForUser(user, 25, self.appDB)
        state = int(state)
        self.assertEqual(state, 0)

    def test_catching_then_releasing_pokemon_works(self):
        self.register_ash()
        user = self.appDBSession.things[0]
        # Catch Pikachu
        self.userDB.catchPokemonForUser(user, 25, self.appDB)
        # Release Pikachu :-(
        self.userDB.uncatchPokemonForUser(user, 25, self.appDB)
        # Grab the state of the Pokemon @ 25
        state = self.userDB._stateOfPokemonForUser(user, 25, self.appDB)
        state = int(state)
        self.assertEqual(state, 0)

if __name__ == '__main__':
    unittest.main()
|
Add some unit tests for common user operations
|
Add some unit tests for common user operations
|
Python
|
bsd-2-clause
|
peterhajas/LivingDex,peterhajas/LivingDex,peterhajas/LivingDex,peterhajas/LivingDex
|
4ff2635c54d59b4dbeaff87f369c0046f35e159a
|
tests.py
|
tests.py
|
from django.core.exceptions import ImproperlyConfigured
from django_mailgun import MailgunBackend
from pytest import raises

def test_configuration():
    with raises(ImproperlyConfigured):
        MailgunBackend()
|
Add super simple test case
|
Add super simple test case
|
Python
|
mit
|
vangale/django-mailgun,rollokb/django-mailgun,BradWhittington/django-mailgun
|
328525f8435f8c97545f8d4fea85173e480f11f2
|
setup.py
|
setup.py
|
# -*- coding: utf-8 -*-
import os
from setuptools import setup

def read(fname):
    try:
        return open(os.path.join(os.path.dirname(__file__), fname)).read()
    except:
        return ''

setup(
    name='todoist-python',
    version='0.2.4',
    packages=['todoist', 'todoist.managers'],
    author='Doist Team',
    author_email='info@todoist.com',
    license='BSD',
    description='todoist-python - The official Todoist Python API library',
    long_description=read('README.md'),
    install_requires=[
        'requests',
    ],
    # see here for complete list of classifiers
    # http://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=(
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python',
    ),
)
|
# -*- coding: utf-8 -*-
import os
from setuptools import setup

def read(fname):
    try:
        return open(os.path.join(os.path.dirname(__file__), fname)).read()
    except:
        return ''

setup(
    name='todoist-python',
    version='0.2.5',
    packages=['todoist', 'todoist.managers'],
    author='Doist Team',
    author_email='info@todoist.com',
    license='BSD',
    description='todoist-python - The official Todoist Python API library',
    long_description=read('README.md'),
    install_requires=[
        'requests',
    ],
    # see here for complete list of classifiers
    # http://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=(
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python',
    ),
)
|
Update the PyPI version to 0.2.5.
|
Update the PyPI version to 0.2.5.
|
Python
|
mit
|
electronick1/todoist-python,Doist/todoist-python
|
b00fef938e2fac4599bb22ef110038d76dc88f79
|
setup.py
|
setup.py
|
from setuptools import setup

setup(
    name='tox-travis',
    description='Seamless integration of Tox into Travis CI',
    long_description=open('README.rst', 'rb').read().decode('utf-8'),
    author='Ryan Hiebert',
    author_email='ryan@ryanhiebert.com',
    url='https://github.com/ryanhiebert/tox-travis',
    license='MIT',
    version='0.1',
    py_modules=['tox_travis'],
    entry_points={
        'tox': ['travis = tox_travis'],
    },
    install_requires=['tox>=2.0'],
    classifiers=[
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
    ],
)
|
from setuptools import setup

def fread(fn):
    return open(fn, 'rb').read().decode('utf-8')

setup(
    name='tox-travis',
    description='Seamless integration of Tox into Travis CI',
    long_description=fread('README.rst') + '\n\n' + fread('HISTORY.rst'),
    author='Ryan Hiebert',
    author_email='ryan@ryanhiebert.com',
    url='https://github.com/ryanhiebert/tox-travis',
    license='MIT',
    version='0.1',
    py_modules=['tox_travis'],
    entry_points={
        'tox': ['travis = tox_travis'],
    },
    install_requires=['tox>=2.0'],
    classifiers=[
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
    ],
)
|
Append HISTORY to long description on PyPI
|
Append HISTORY to long description on PyPI
|
Python
|
mit
|
rpkilby/tox-travis,ryanhiebert/tox-travis,tox-dev/tox-travis
|
daa7273b00056d5748687eba525a8715e6228a6a
|
test/dataset_test.py
|
test/dataset_test.py
|
import numpy as np
import theanets
class TestDataset:
def setUp(self):
self.dataset = theanets.dataset.Dataset(
np.arange(101)[:, None],
label='foo',
batches=4,
size=10,
)
def test_setup(self):
assert self.dataset.label == 'foo'
assert len(self.dataset.batches) == 10
assert self.dataset.number_batches == 4
def test_iterate(self):
batches_ = list(self.dataset.batches)
# check we iterate over the correct number of batches.
assert sum(1 for _ in self.dataset) == 4
# check the dataset didn't get shuffled (yet).
assert all(a is b for a, b in zip(self.dataset.batches, batches_))
assert sum(1 for _ in self.dataset) == 4
assert sum(1 for _ in self.dataset) == 4
# check the dataset did get shuffled.
assert not all(a is b for a, b in zip(self.dataset.batches, batches_))
|
Add tests for dataset class.
|
Add tests for dataset class.
|
Python
|
mit
|
chrinide/theanets,lmjohns3/theanets,devdoer/theanets
|
Add tests for dataset class.
|
import numpy as np
import theanets
class TestDataset:
def setUp(self):
self.dataset = theanets.dataset.Dataset(
np.arange(101)[:, None],
label='foo',
batches=4,
size=10,
)
def test_setup(self):
assert self.dataset.label == 'foo'
assert len(self.dataset.batches) == 10
assert self.dataset.number_batches == 4
def test_iterate(self):
batches_ = list(self.dataset.batches)
# check we iterate over the correct number of batches.
assert sum(1 for _ in self.dataset) == 4
# check the dataset didn't get shuffled (yet).
assert all(a is b for a, b in zip(self.dataset.batches, batches_))
assert sum(1 for _ in self.dataset) == 4
assert sum(1 for _ in self.dataset) == 4
# check the dataset did get shuffled.
assert not all(a is b for a, b in zip(self.dataset.batches, batches_))
|
<commit_before><commit_msg>Add tests for dataset class.<commit_after>
|
import numpy as np
import theanets
class TestDataset:
def setUp(self):
self.dataset = theanets.dataset.Dataset(
np.arange(101)[:, None],
label='foo',
batches=4,
size=10,
)
def test_setup(self):
assert self.dataset.label == 'foo'
assert len(self.dataset.batches) == 10
assert self.dataset.number_batches == 4
def test_iterate(self):
batches_ = list(self.dataset.batches)
# check we iterate over the correct number of batches.
assert sum(1 for _ in self.dataset) == 4
# check the dataset didn't get shuffled (yet).
assert all(a is b for a, b in zip(self.dataset.batches, batches_))
assert sum(1 for _ in self.dataset) == 4
assert sum(1 for _ in self.dataset) == 4
# check the dataset did get shuffled.
assert not all(a is b for a, b in zip(self.dataset.batches, batches_))
|
Add tests for dataset class.import numpy as np
import theanets
class TestDataset:
def setUp(self):
self.dataset = theanets.dataset.Dataset(
np.arange(101)[:, None],
label='foo',
batches=4,
size=10,
)
def test_setup(self):
assert self.dataset.label == 'foo'
assert len(self.dataset.batches) == 10
assert self.dataset.number_batches == 4
def test_iterate(self):
batches_ = list(self.dataset.batches)
# check we iterate over the correct number of batches.
assert sum(1 for _ in self.dataset) == 4
# check the dataset didn't get shuffled (yet).
assert all(a is b for a, b in zip(self.dataset.batches, batches_))
assert sum(1 for _ in self.dataset) == 4
assert sum(1 for _ in self.dataset) == 4
# check the dataset did get shuffled.
assert not all(a is b for a, b in zip(self.dataset.batches, batches_))
|
<commit_before><commit_msg>Add tests for dataset class.<commit_after>import numpy as np
import theanets
class TestDataset:
def setUp(self):
self.dataset = theanets.dataset.Dataset(
np.arange(101)[:, None],
label='foo',
batches=4,
size=10,
)
def test_setup(self):
assert self.dataset.label == 'foo'
assert len(self.dataset.batches) == 10
assert self.dataset.number_batches == 4
def test_iterate(self):
batches_ = list(self.dataset.batches)
# check we iterate over the correct number of batches.
assert sum(1 for _ in self.dataset) == 4
# check the dataset didn't get shuffled (yet).
assert all(a is b for a, b in zip(self.dataset.batches, batches_))
assert sum(1 for _ in self.dataset) == 4
assert sum(1 for _ in self.dataset) == 4
# check the dataset did get shuffled.
assert not all(a is b for a, b in zip(self.dataset.batches, batches_))
|
|
5f0e8dccb11f889cbc217ab7ce1408b978da8ef0
|
bin/deskew-and-unpaper.py
|
bin/deskew-and-unpaper.py
|
#!/usr/bin/env python
# This script walks through all files under the current directory,
# looking for those called page-001.png, page-002.png, etc. (If a
# version called page-001.rotated.png, etc. is also present, that is
# used as input in preference.) For each page the script uses
# "convert -deskew '40%'" and then "unpaper" to remove scanning
# artefacts. (ImageMagick does better than unpaper on large skews.)
import os, re, sys
from subprocess import check_call
original_page_re = re.compile('^(page-[0-9]+)\.png')
for root, dirs, files in os.walk('.'):
def full(filename):
return os.path.join(root, filename)
def exists(filename):
return os.path.exists(full(filename))
for filename in sorted(files):
m = original_page_re.search(filename)
if not m:
continue
print "====", full(filename)
filename_to_use = filename
basename = m.group(1)
rotated_filename = "%s.rotated.png" % (basename,)
if exists(os.path.join(rotated_filename)):
filename_to_use = rotated_filename
deskewed_filename = "%s-deskewed.png" % (basename,)
if not exists(deskewed_filename):
print "converting", filename_to_use, "to", deskewed_filename
check_call(["convert",
"-deskew",
"40%",
full(filename_to_use),
full(deskewed_filename)])
pnm_version = "%s-deskewed.pnm" % (basename,)
unpapered_pnm_version = "%s-deskewed-unpapered.pnm" % (basename,)
unpapered_filename = "%s-deskewed-unpapered.png" % (basename,)
if not exists(pnm_version):
print "converting", deskewed_filename, "to", pnm_version
with open(full(pnm_version), "w") as fp:
check_call(["pngtopnm",
full(deskewed_filename)],
stdout=fp)
if not exists(unpapered_pnm_version):
print "unpapering", pnm_version, "to", unpapered_pnm_version
check_call(["unpaper",
full(pnm_version),
full(unpapered_pnm_version)])
if not exists(unpapered_filename):
print "converting", unpapered_pnm_version, "to", unpapered_filename
with open(full(unpapered_filename), "w") as fp:
check_call(["pnmtopng",
full(unpapered_pnm_version)],
stdout=fp)
os.remove(full(pnm_version))
os.remove(full(unpapered_pnm_version))
|
Add a helper script to deskew and unpaper scanned pages
|
Add a helper script to deskew and unpaper scanned pages
|
Python
|
agpl-3.0
|
ken-muturi/pombola,hzj123/56th,mysociety/pombola,geoffkilpin/pombola,ken-muturi/pombola,mysociety/pombola,hzj123/56th,geoffkilpin/pombola,ken-muturi/pombola,patricmutwiri/pombola,patricmutwiri/pombola,hzj123/56th,geoffkilpin/pombola,patricmutwiri/pombola,ken-muturi/pombola,mysociety/pombola,hzj123/56th,mysociety/pombola,patricmutwiri/pombola,patricmutwiri/pombola,geoffkilpin/pombola,patricmutwiri/pombola,hzj123/56th,geoffkilpin/pombola,mysociety/pombola,ken-muturi/pombola,hzj123/56th,geoffkilpin/pombola,mysociety/pombola,ken-muturi/pombola
|
Add a helper script to deskew and unpaper scanned pages
|
#!/usr/bin/env python
# This script walks through all files under the current directory,
# looking for those called page-001.png, page-002.png, etc. (If a
# version called page-001.rotated.png, etc. is also present, that is
# used as input in preference.) For each page the script uses
# "convert -deskew '40%'" and then "unpaper" to remove scanning
# artefacts. (ImageMagick does better than unpaper on large skews.)
import os, re, sys
from subprocess import check_call
original_page_re = re.compile('^(page-[0-9]+)\.png')
for root, dirs, files in os.walk('.'):
def full(filename):
return os.path.join(root, filename)
def exists(filename):
return os.path.exists(full(filename))
for filename in sorted(files):
m = original_page_re.search(filename)
if not m:
continue
print "====", full(filename)
filename_to_use = filename
basename = m.group(1)
rotated_filename = "%s.rotated.png" % (basename,)
if exists(os.path.join(rotated_filename)):
filename_to_use = rotated_filename
deskewed_filename = "%s-deskewed.png" % (basename,)
if not exists(deskewed_filename):
print "converting", filename_to_use, "to", deskewed_filename
check_call(["convert",
"-deskew",
"40%",
full(filename_to_use),
full(deskewed_filename)])
pnm_version = "%s-deskewed.pnm" % (basename,)
unpapered_pnm_version = "%s-deskewed-unpapered.pnm" % (basename,)
unpapered_filename = "%s-deskewed-unpapered.png" % (basename,)
if not exists(pnm_version):
print "converting", deskewed_filename, "to", pnm_version
with open(full(pnm_version), "w") as fp:
check_call(["pngtopnm",
full(deskewed_filename)],
stdout=fp)
if not exists(unpapered_pnm_version):
print "unpapering", pnm_version, "to", unpapered_pnm_version
check_call(["unpaper",
full(pnm_version),
full(unpapered_pnm_version)])
if not exists(unpapered_filename):
print "converting", unpapered_pnm_version, "to", unpapered_filename
with open(full(unpapered_filename), "w") as fp:
check_call(["pnmtopng",
full(unpapered_pnm_version)],
stdout=fp)
os.remove(full(pnm_version))
os.remove(full(unpapered_pnm_version))
|
<commit_before><commit_msg>Add a helper script to deskew and unpaper scanned pages<commit_after>
|
#!/usr/bin/env python
# This script walks through all files under the current directory,
# looking for those called page-001.png, page-002.png, etc. (If a
# version called page-001.rotated.png, etc. is also present, that is
# used as input in preference.) For each page the script uses
# "convert -deskew '40%'" and then "unpaper" to remove scanning
# artefacts. (ImageMagick does better than unpaper on large skews.)
import os, re, sys
from subprocess import check_call
original_page_re = re.compile('^(page-[0-9]+)\.png')
for root, dirs, files in os.walk('.'):
def full(filename):
return os.path.join(root, filename)
def exists(filename):
return os.path.exists(full(filename))
for filename in sorted(files):
m = original_page_re.search(filename)
if not m:
continue
print "====", full(filename)
filename_to_use = filename
basename = m.group(1)
rotated_filename = "%s.rotated.png" % (basename,)
if exists(os.path.join(rotated_filename)):
filename_to_use = rotated_filename
deskewed_filename = "%s-deskewed.png" % (basename,)
if not exists(deskewed_filename):
print "converting", filename_to_use, "to", deskewed_filename
check_call(["convert",
"-deskew",
"40%",
full(filename_to_use),
full(deskewed_filename)])
pnm_version = "%s-deskewed.pnm" % (basename,)
unpapered_pnm_version = "%s-deskewed-unpapered.pnm" % (basename,)
unpapered_filename = "%s-deskewed-unpapered.png" % (basename,)
if not exists(pnm_version):
print "converting", deskewed_filename, "to", pnm_version
with open(full(pnm_version), "w") as fp:
check_call(["pngtopnm",
full(deskewed_filename)],
stdout=fp)
if not exists(unpapered_pnm_version):
print "unpapering", pnm_version, "to", unpapered_pnm_version
check_call(["unpaper",
full(pnm_version),
full(unpapered_pnm_version)])
if not exists(unpapered_filename):
print "converting", unpapered_pnm_version, "to", unpapered_filename
with open(full(unpapered_filename), "w") as fp:
check_call(["pnmtopng",
full(unpapered_pnm_version)],
stdout=fp)
os.remove(full(pnm_version))
os.remove(full(unpapered_pnm_version))
|
Add a helper script to deskew and unpaper scanned pages#!/usr/bin/env python
# This script walks through all files under the current directory,
# looking for those called page-001.png, page-002.png, etc. (If a
# version called page-001.rotated.png, etc. is also present, that is
# used as input in preference.) For each page the script uses
# "convert -deskew '40%'" and then "unpaper" to remove scanning
# artefacts. (ImageMagick does better than unpaper on large skews.)
import os, re, sys
from subprocess import check_call
original_page_re = re.compile('^(page-[0-9]+)\.png')
for root, dirs, files in os.walk('.'):
def full(filename):
return os.path.join(root, filename)
def exists(filename):
return os.path.exists(full(filename))
for filename in sorted(files):
m = original_page_re.search(filename)
if not m:
continue
print "====", full(filename)
filename_to_use = filename
basename = m.group(1)
rotated_filename = "%s.rotated.png" % (basename,)
if exists(os.path.join(rotated_filename)):
filename_to_use = rotated_filename
deskewed_filename = "%s-deskewed.png" % (basename,)
if not exists(deskewed_filename):
print "converting", filename_to_use, "to", deskewed_filename
check_call(["convert",
"-deskew",
"40%",
full(filename_to_use),
full(deskewed_filename)])
pnm_version = "%s-deskewed.pnm" % (basename,)
unpapered_pnm_version = "%s-deskewed-unpapered.pnm" % (basename,)
unpapered_filename = "%s-deskewed-unpapered.png" % (basename,)
if not exists(pnm_version):
print "converting", deskewed_filename, "to", pnm_version
with open(full(pnm_version), "w") as fp:
check_call(["pngtopnm",
full(deskewed_filename)],
stdout=fp)
if not exists(unpapered_pnm_version):
print "unpapering", pnm_version, "to", unpapered_pnm_version
check_call(["unpaper",
full(pnm_version),
full(unpapered_pnm_version)])
if not exists(unpapered_filename):
print "converting", unpapered_pnm_version, "to", unpapered_filename
with open(full(unpapered_filename), "w") as fp:
check_call(["pnmtopng",
full(unpapered_pnm_version)],
stdout=fp)
os.remove(full(pnm_version))
os.remove(full(unpapered_pnm_version))
|
<commit_before><commit_msg>Add a helper script to deskew and unpaper scanned pages<commit_after>#!/usr/bin/env python
# This script walks through all files under the current directory,
# looking for those called page-001.png, page-002.png, etc. (If a
# version called page-001.rotated.png, etc. is also present, that is
# used as input in preference.) For each page the script uses
# "convert -deskew '40%'" and then "unpaper" to remove scanning
# artefacts. (ImageMagick does better than unpaper on large skews.)
import os, re, sys
from subprocess import check_call
original_page_re = re.compile('^(page-[0-9]+)\.png')
for root, dirs, files in os.walk('.'):
def full(filename):
return os.path.join(root, filename)
def exists(filename):
return os.path.exists(full(filename))
for filename in sorted(files):
m = original_page_re.search(filename)
if not m:
continue
print "====", full(filename)
filename_to_use = filename
basename = m.group(1)
rotated_filename = "%s.rotated.png" % (basename,)
if exists(os.path.join(rotated_filename)):
filename_to_use = rotated_filename
deskewed_filename = "%s-deskewed.png" % (basename,)
if not exists(deskewed_filename):
print "converting", filename_to_use, "to", deskewed_filename
check_call(["convert",
"-deskew",
"40%",
full(filename_to_use),
full(deskewed_filename)])
pnm_version = "%s-deskewed.pnm" % (basename,)
unpapered_pnm_version = "%s-deskewed-unpapered.pnm" % (basename,)
unpapered_filename = "%s-deskewed-unpapered.png" % (basename,)
if not exists(pnm_version):
print "converting", deskewed_filename, "to", pnm_version
with open(full(pnm_version), "w") as fp:
check_call(["pngtopnm",
full(deskewed_filename)],
stdout=fp)
if not exists(unpapered_pnm_version):
print "unpapering", pnm_version, "to", unpapered_pnm_version
check_call(["unpaper",
full(pnm_version),
full(unpapered_pnm_version)])
if not exists(unpapered_filename):
print "converting", unpapered_pnm_version, "to", unpapered_filename
with open(full(unpapered_filename), "w") as fp:
check_call(["pnmtopng",
full(unpapered_pnm_version)],
stdout=fp)
os.remove(full(pnm_version))
os.remove(full(unpapered_pnm_version))
|
|
632bc12fee8a709f1bc0600085001c4e91c077ac
|
storage/test/test_kv_storages_read_only.py
|
storage/test/test_kv_storages_read_only.py
|
import pytest
from storage.kv_store_leveldb import KeyValueStorageLeveldb
from storage.kv_store_rocksdb import KeyValueStorageRocksdb
from storage.kv_store import KeyValueStorage
i = 0
@pytest.yield_fixture(scope="function", params=['rocksdb', 'leveldb'])
def kv(request, tempdir) -> KeyValueStorage:
global i
if request.param == 'leveldb':
kv = KeyValueStorageLeveldb(tempdir, 'kv{}'.format(i))
else:
kv = KeyValueStorageRocksdb(tempdir, 'kv{}'.format(i))
assert kv.read_only == False
kv.put('k1', 'v1')
kv.put('k2', 'v2')
kv.put('k3', 'v3')
kv.close()
if request.param == 'leveldb':
kv = KeyValueStorageLeveldb(tempdir, 'kv{}'.format(i), read_only=True)
else:
kv = KeyValueStorageRocksdb(tempdir, 'kv{}'.format(i), read_only=True)
i += 1
yield kv
kv.close()
def test_read_only_get(kv):
assert kv.read_only == True
v = kv.get('k1')
assert b'v1' == v
v = kv.get('k2')
assert b'v2' == v
v = kv.get('k3')
assert b'v3' == v
def test_read_only_put(kv):
assert kv.read_only == True
with pytest.raises(Exception, match="Not supported operation in read only mode."):
kv.put('k4', 'v4')
def test_read_only_remove(kv):
assert kv.read_only == True
with pytest.raises(Exception, match="Not supported operation in read only mode."):
kv.remove('k1')
|
Add test of read-only mode for key-value DB storages.
|
Add test of read-only mode for key-value DB storages.
Signed-off-by: Sergey Shilov <064bbdfaeb89a0bebebfe7b388747a73c8941704@dsr-company.com>
|
Python
|
apache-2.0
|
evernym/zeno,evernym/plenum
|
Add test of read-only mode for key-value DB storages.
Signed-off-by: Sergey Shilov <064bbdfaeb89a0bebebfe7b388747a73c8941704@dsr-company.com>
|
import pytest
from storage.kv_store_leveldb import KeyValueStorageLeveldb
from storage.kv_store_rocksdb import KeyValueStorageRocksdb
from storage.kv_store import KeyValueStorage
i = 0
@pytest.yield_fixture(scope="function", params=['rocksdb', 'leveldb'])
def kv(request, tempdir) -> KeyValueStorage:
global i
if request.param == 'leveldb':
kv = KeyValueStorageLeveldb(tempdir, 'kv{}'.format(i))
else:
kv = KeyValueStorageRocksdb(tempdir, 'kv{}'.format(i))
assert kv.read_only == False
kv.put('k1', 'v1')
kv.put('k2', 'v2')
kv.put('k3', 'v3')
kv.close()
if request.param == 'leveldb':
kv = KeyValueStorageLeveldb(tempdir, 'kv{}'.format(i), read_only=True)
else:
kv = KeyValueStorageRocksdb(tempdir, 'kv{}'.format(i), read_only=True)
i += 1
yield kv
kv.close()
def test_read_only_get(kv):
assert kv.read_only == True
v = kv.get('k1')
assert b'v1' == v
v = kv.get('k2')
assert b'v2' == v
v = kv.get('k3')
assert b'v3' == v
def test_read_only_put(kv):
assert kv.read_only == True
with pytest.raises(Exception, match="Not supported operation in read only mode."):
kv.put('k4', 'v4')
def test_read_only_remove(kv):
assert kv.read_only == True
with pytest.raises(Exception, match="Not supported operation in read only mode."):
kv.remove('k1')
|
<commit_before><commit_msg>Add test of read-only mode for key-value DB storages.
Signed-off-by: Sergey Shilov <064bbdfaeb89a0bebebfe7b388747a73c8941704@dsr-company.com><commit_after>
|
import pytest
from storage.kv_store_leveldb import KeyValueStorageLeveldb
from storage.kv_store_rocksdb import KeyValueStorageRocksdb
from storage.kv_store import KeyValueStorage
i = 0
@pytest.yield_fixture(scope="function", params=['rocksdb', 'leveldb'])
def kv(request, tempdir) -> KeyValueStorage:
global i
if request.param == 'leveldb':
kv = KeyValueStorageLeveldb(tempdir, 'kv{}'.format(i))
else:
kv = KeyValueStorageRocksdb(tempdir, 'kv{}'.format(i))
assert kv.read_only == False
kv.put('k1', 'v1')
kv.put('k2', 'v2')
kv.put('k3', 'v3')
kv.close()
if request.param == 'leveldb':
kv = KeyValueStorageLeveldb(tempdir, 'kv{}'.format(i), read_only=True)
else:
kv = KeyValueStorageRocksdb(tempdir, 'kv{}'.format(i), read_only=True)
i += 1
yield kv
kv.close()
def test_read_only_get(kv):
assert kv.read_only == True
v = kv.get('k1')
assert b'v1' == v
v = kv.get('k2')
assert b'v2' == v
v = kv.get('k3')
assert b'v3' == v
def test_read_only_put(kv):
assert kv.read_only == True
with pytest.raises(Exception, match="Not supported operation in read only mode."):
kv.put('k4', 'v4')
def test_read_only_remove(kv):
assert kv.read_only == True
with pytest.raises(Exception, match="Not supported operation in read only mode."):
kv.remove('k1')
|
Add test of read-only mode for key-value DB storages.
Signed-off-by: Sergey Shilov <064bbdfaeb89a0bebebfe7b388747a73c8941704@dsr-company.com>import pytest
from storage.kv_store_leveldb import KeyValueStorageLeveldb
from storage.kv_store_rocksdb import KeyValueStorageRocksdb
from storage.kv_store import KeyValueStorage
i = 0
@pytest.yield_fixture(scope="function", params=['rocksdb', 'leveldb'])
def kv(request, tempdir) -> KeyValueStorage:
global i
if request.param == 'leveldb':
kv = KeyValueStorageLeveldb(tempdir, 'kv{}'.format(i))
else:
kv = KeyValueStorageRocksdb(tempdir, 'kv{}'.format(i))
assert kv.read_only == False
kv.put('k1', 'v1')
kv.put('k2', 'v2')
kv.put('k3', 'v3')
kv.close()
if request.param == 'leveldb':
kv = KeyValueStorageLeveldb(tempdir, 'kv{}'.format(i), read_only=True)
else:
kv = KeyValueStorageRocksdb(tempdir, 'kv{}'.format(i), read_only=True)
i += 1
yield kv
kv.close()
def test_read_only_get(kv):
assert kv.read_only == True
v = kv.get('k1')
assert b'v1' == v
v = kv.get('k2')
assert b'v2' == v
v = kv.get('k3')
assert b'v3' == v
def test_read_only_put(kv):
assert kv.read_only == True
with pytest.raises(Exception, match="Not supported operation in read only mode."):
kv.put('k4', 'v4')
def test_read_only_remove(kv):
assert kv.read_only == True
with pytest.raises(Exception, match="Not supported operation in read only mode."):
kv.remove('k1')
|
<commit_before><commit_msg>Add test of read-only mode for key-value DB storages.
Signed-off-by: Sergey Shilov <064bbdfaeb89a0bebebfe7b388747a73c8941704@dsr-company.com><commit_after>import pytest
from storage.kv_store_leveldb import KeyValueStorageLeveldb
from storage.kv_store_rocksdb import KeyValueStorageRocksdb
from storage.kv_store import KeyValueStorage
i = 0
@pytest.yield_fixture(scope="function", params=['rocksdb', 'leveldb'])
def kv(request, tempdir) -> KeyValueStorage:
global i
if request.param == 'leveldb':
kv = KeyValueStorageLeveldb(tempdir, 'kv{}'.format(i))
else:
kv = KeyValueStorageRocksdb(tempdir, 'kv{}'.format(i))
assert kv.read_only == False
kv.put('k1', 'v1')
kv.put('k2', 'v2')
kv.put('k3', 'v3')
kv.close()
if request.param == 'leveldb':
kv = KeyValueStorageLeveldb(tempdir, 'kv{}'.format(i), read_only=True)
else:
kv = KeyValueStorageRocksdb(tempdir, 'kv{}'.format(i), read_only=True)
i += 1
yield kv
kv.close()
def test_read_only_get(kv):
assert kv.read_only == True
v = kv.get('k1')
assert b'v1' == v
v = kv.get('k2')
assert b'v2' == v
v = kv.get('k3')
assert b'v3' == v
def test_read_only_put(kv):
assert kv.read_only == True
with pytest.raises(Exception, match="Not supported operation in read only mode."):
kv.put('k4', 'v4')
def test_read_only_remove(kv):
assert kv.read_only == True
with pytest.raises(Exception, match="Not supported operation in read only mode."):
kv.remove('k1')
|
|
a6293fd84b1b393f5a2ed00f07131dc13371554b
|
viewer_examples/plugins/collection_plugin.py
|
viewer_examples/plugins/collection_plugin.py
|
"""
=================
Collection plugin
=================
Demo of a CollectionViewer for viewing collections of images with the
`autolevel` rank filter connected as a plugin.
"""
from skimage import data
from skimage.filter import rank
from skimage.morphology import disk
from skimage.viewer import CollectionViewer
from skimage.viewer.widgets import Slider
from skimage.viewer.plugins.base import Plugin
# Wrap autolevel function to make the disk size a filter argument.
def autolevel(image, disk_size):
return rank.autolevel(image, disk(disk_size))
img_collection = [data.camera(), data.coins(), data.text()]
plugin = Plugin(image_filter=autolevel)
plugin += Slider('disk_size', 2, 8, value_type='int', update_on='release')
plugin.name = "Autolevel"
viewer = CollectionViewer(img_collection)
viewer += plugin
viewer.show()
|
Add example of connecting plugins to CollectionViewer
|
Add example of connecting plugins to CollectionViewer
|
Python
|
bsd-3-clause
|
juliusbierk/scikit-image,rjeli/scikit-image,Hiyorimi/scikit-image,michaelaye/scikit-image,ofgulban/scikit-image,chintak/scikit-image,vighneshbirodkar/scikit-image,jwiggins/scikit-image,vighneshbirodkar/scikit-image,dpshelio/scikit-image,robintw/scikit-image,ClinicalGraphics/scikit-image,michaelpacer/scikit-image,almarklein/scikit-image,oew1v07/scikit-image,bennlich/scikit-image,chriscrosscutler/scikit-image,paalge/scikit-image,bennlich/scikit-image,pratapvardhan/scikit-image,bsipocz/scikit-image,dpshelio/scikit-image,chriscrosscutler/scikit-image,warmspringwinds/scikit-image,SamHames/scikit-image,SamHames/scikit-image,oew1v07/scikit-image,youprofit/scikit-image,michaelpacer/scikit-image,michaelaye/scikit-image,ajaybhat/scikit-image,paalge/scikit-image,pratapvardhan/scikit-image,newville/scikit-image,almarklein/scikit-image,vighneshbirodkar/scikit-image,Midafi/scikit-image,keflavich/scikit-image,ofgulban/scikit-image,Hiyorimi/scikit-image,Midafi/scikit-image,emon10005/scikit-image,bsipocz/scikit-image,chintak/scikit-image,almarklein/scikit-image,Britefury/scikit-image,jwiggins/scikit-image,ClinicalGraphics/scikit-image,chintak/scikit-image,almarklein/scikit-image,SamHames/scikit-image,chintak/scikit-image,WarrenWeckesser/scikits-image,GaZ3ll3/scikit-image,juliusbierk/scikit-image,SamHames/scikit-image,Britefury/scikit-image,rjeli/scikit-image,keflavich/scikit-image,warmspringwinds/scikit-image,emon10005/scikit-image,robintw/scikit-image,rjeli/scikit-image,WarrenWeckesser/scikits-image,paalge/scikit-image,ofgulban/scikit-image,GaZ3ll3/scikit-image,newville/scikit-image,blink1073/scikit-image,youprofit/scikit-image,blink1073/scikit-image,ajaybhat/scikit-image
|
Add example of connecting plugins to CollectionViewer
|
"""
=================
Collection plugin
=================
Demo of a CollectionViewer for viewing collections of images with the
`autolevel` rank filter connected as a plugin.
"""
from skimage import data
from skimage.filter import rank
from skimage.morphology import disk
from skimage.viewer import CollectionViewer
from skimage.viewer.widgets import Slider
from skimage.viewer.plugins.base import Plugin
# Wrap autolevel function to make the disk size a filter argument.
def autolevel(image, disk_size):
return rank.autolevel(image, disk(disk_size))
img_collection = [data.camera(), data.coins(), data.text()]
plugin = Plugin(image_filter=autolevel)
plugin += Slider('disk_size', 2, 8, value_type='int', update_on='release')
plugin.name = "Autolevel"
viewer = CollectionViewer(img_collection)
viewer += plugin
viewer.show()
|
<commit_before><commit_msg>Add example of connecting plugins to CollectionViewer<commit_after>
|
"""
=================
Collection plugin
=================
Demo of a CollectionViewer for viewing collections of images with the
`autolevel` rank filter connected as a plugin.
"""
from skimage import data
from skimage.filter import rank
from skimage.morphology import disk
from skimage.viewer import CollectionViewer
from skimage.viewer.widgets import Slider
from skimage.viewer.plugins.base import Plugin
# Wrap autolevel function to make the disk size a filter argument.
def autolevel(image, disk_size):
return rank.autolevel(image, disk(disk_size))
img_collection = [data.camera(), data.coins(), data.text()]
plugin = Plugin(image_filter=autolevel)
plugin += Slider('disk_size', 2, 8, value_type='int', update_on='release')
plugin.name = "Autolevel"
viewer = CollectionViewer(img_collection)
viewer += plugin
viewer.show()
|
Add example of connecting plugins to CollectionViewer"""
=================
Collection plugin
=================
Demo of a CollectionViewer for viewing collections of images with the
`autolevel` rank filter connected as a plugin.
"""
from skimage import data
from skimage.filter import rank
from skimage.morphology import disk
from skimage.viewer import CollectionViewer
from skimage.viewer.widgets import Slider
from skimage.viewer.plugins.base import Plugin
# Wrap autolevel function to make the disk size a filter argument.
def autolevel(image, disk_size):
return rank.autolevel(image, disk(disk_size))
img_collection = [data.camera(), data.coins(), data.text()]
plugin = Plugin(image_filter=autolevel)
plugin += Slider('disk_size', 2, 8, value_type='int', update_on='release')
plugin.name = "Autolevel"
viewer = CollectionViewer(img_collection)
viewer += plugin
viewer.show()
|
<commit_before><commit_msg>Add example of connecting plugins to CollectionViewer<commit_after>"""
=================
Collection plugin
=================
Demo of a CollectionViewer for viewing collections of images with the
`autolevel` rank filter connected as a plugin.
"""
from skimage import data
from skimage.filter import rank
from skimage.morphology import disk
from skimage.viewer import CollectionViewer
from skimage.viewer.widgets import Slider
from skimage.viewer.plugins.base import Plugin
# Wrap autolevel function to make the disk size a filter argument.
def autolevel(image, disk_size):
return rank.autolevel(image, disk(disk_size))
img_collection = [data.camera(), data.coins(), data.text()]
plugin = Plugin(image_filter=autolevel)
plugin += Slider('disk_size', 2, 8, value_type='int', update_on='release')
plugin.name = "Autolevel"
viewer = CollectionViewer(img_collection)
viewer += plugin
viewer.show()
|
|
09472f2cffb5fdd8481508d5a434ef9f1b1cd1a8
|
code/python/knub/thesis/word2vec_converter.py
|
code/python/knub/thesis/word2vec_converter.py
|
import argparse
import logging
from gensim.models import Word2Vec
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
if __name__ == "__main__":
parser = argparse.ArgumentParser("Convert word2vec model from binary to txt")
parser.add_argument("model", type=str)
args = parser.parse_args()
model = Word2Vec.load_word2vec_format(args.model, binary=True)
model.save_word2vec_format(args.model + ".txt", binary=False)
logging.info(model.most_similar(positive=['woman', 'king'], negative=['man']))
logging.info(model.doesnt_match("breakfast cereal dinner lunch".split()))
logging.info(model.similarity('woman', 'man'))
|
Add word2vec binary to txt format converter
|
Add word2vec binary to txt format converter
|
Python
|
apache-2.0
|
knub/master-thesis,knub/master-thesis,knub/master-thesis,knub/master-thesis
|
Add word2vec binary to txt format converter
|
import argparse
import logging
from gensim.models import Word2Vec
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
if __name__ == "__main__":
parser = argparse.ArgumentParser("Convert word2vec model from binary to txt")
parser.add_argument("model", type=str)
args = parser.parse_args()
model = Word2Vec.load_word2vec_format(args.model, binary=True)
model.save_word2vec_format(args.model + ".txt", binary=False)
logging.info(model.most_similar(positive=['woman', 'king'], negative=['man']))
logging.info(model.doesnt_match("breakfast cereal dinner lunch".split()))
logging.info(model.similarity('woman', 'man'))
|
<commit_before><commit_msg>Add word2vec binary to txt format converter<commit_after>
|
import argparse
import logging
from gensim.models import Word2Vec
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
if __name__ == "__main__":
parser = argparse.ArgumentParser("Convert word2vec model from binary to txt")
parser.add_argument("model", type=str)
args = parser.parse_args()
model = Word2Vec.load_word2vec_format(args.model, binary=True)
model.save_word2vec_format(args.model + ".txt", binary=False)
logging.info(model.most_similar(positive=['woman', 'king'], negative=['man']))
logging.info(model.doesnt_match("breakfast cereal dinner lunch".split()))
logging.info(model.similarity('woman', 'man'))
|
Add word2vec binary to txt format converterimport argparse
import logging
from gensim.models import Word2Vec
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
if __name__ == "__main__":
parser = argparse.ArgumentParser("Convert word2vec model from binary to txt")
parser.add_argument("model", type=str)
args = parser.parse_args()
model = Word2Vec.load_word2vec_format(args.model, binary=True)
model.save_word2vec_format(args.model + ".txt", binary=False)
logging.info(model.most_similar(positive=['woman', 'king'], negative=['man']))
logging.info(model.doesnt_match("breakfast cereal dinner lunch".split()))
logging.info(model.similarity('woman', 'man'))
|
<commit_before><commit_msg>Add word2vec binary to txt format converter<commit_after>import argparse
import logging
from gensim.models import Word2Vec
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
if __name__ == "__main__":
parser = argparse.ArgumentParser("Convert word2vec model from binary to txt")
parser.add_argument("model", type=str)
args = parser.parse_args()
model = Word2Vec.load_word2vec_format(args.model, binary=True)
model.save_word2vec_format(args.model + ".txt", binary=False)
logging.info(model.most_similar(positive=['woman', 'king'], negative=['man']))
logging.info(model.doesnt_match("breakfast cereal dinner lunch".split()))
logging.info(model.similarity('woman', 'man'))
|
|
d6f2ee46ea9b56eae5769b51cff48b1c434b829c
|
tests/unit/sts/god_scheduler_test.py
|
tests/unit/sts/god_scheduler_test.py
|
# Copyright 2011-2013 Colin Scott
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import sys
import os.path
sys.path.append(os.path.dirname(__file__) + "/../../..")
# N.B. this import is needed to avoid a circular dependency.
from sts.replay_event import *
from sts.god_scheduler import GodScheduler, PendingReceive
from pox.openflow.libopenflow_01 import *
class MockConnection(object):
def __init__(self):
self.passed_message = False
def allow_message_receipt(self, message):
self.passed_message = True
class GodSchedulerTest(unittest.TestCase):
def test_basic(self):
god = GodScheduler()
message = ofp_flow_mod(match=ofp_match(in_port=1, nw_src="1.1.1.1"),
action=ofp_action_output(port=1))
mock_conn = MockConnection()
god.insert_pending_receipt(1,"c1",message,mock_conn)
pending_receipt = PendingReceive(1,"c1",OFFingerprint.from_pkt(message))
self.assertTrue(god.message_receipt_waiting(pending_receipt))
god.schedule(pending_receipt)
self.assertTrue(mock_conn.passed_message)
self.assertFalse(god.message_receipt_waiting(pending_receipt))
|
Add simple test for GodScheduler
|
Add simple test for GodScheduler
|
Python
|
apache-2.0
|
jmiserez/sts,ucb-sts/sts,ucb-sts/sts,jmiserez/sts
|
Add simple test for GodScheduler
|
# Copyright 2011-2013 Colin Scott
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import sys
import os.path
sys.path.append(os.path.dirname(__file__) + "/../../..")
# N.B. this import is needed to avoid a circular dependency.
from sts.replay_event import *
from sts.god_scheduler import GodScheduler, PendingReceive
from pox.openflow.libopenflow_01 import *
class MockConnection(object):
def __init__(self):
self.passed_message = False
def allow_message_receipt(self, message):
self.passed_message = True
class GodSchedulerTest(unittest.TestCase):
def test_basic(self):
god = GodScheduler()
message = ofp_flow_mod(match=ofp_match(in_port=1, nw_src="1.1.1.1"),
action=ofp_action_output(port=1))
mock_conn = MockConnection()
god.insert_pending_receipt(1,"c1",message,mock_conn)
pending_receipt = PendingReceive(1,"c1",OFFingerprint.from_pkt(message))
self.assertTrue(god.message_receipt_waiting(pending_receipt))
god.schedule(pending_receipt)
self.assertTrue(mock_conn.passed_message)
self.assertFalse(god.message_receipt_waiting(pending_receipt))
|
<commit_before><commit_msg>Add simple test for GodScheduler<commit_after>
|
# Copyright 2011-2013 Colin Scott
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import sys
import os.path
sys.path.append(os.path.dirname(__file__) + "/../../..")
# N.B. this import is needed to avoid a circular dependency.
from sts.replay_event import *
from sts.god_scheduler import GodScheduler, PendingReceive
from pox.openflow.libopenflow_01 import *
class MockConnection(object):
def __init__(self):
self.passed_message = False
def allow_message_receipt(self, message):
self.passed_message = True
class GodSchedulerTest(unittest.TestCase):
def test_basic(self):
god = GodScheduler()
message = ofp_flow_mod(match=ofp_match(in_port=1, nw_src="1.1.1.1"),
action=ofp_action_output(port=1))
mock_conn = MockConnection()
god.insert_pending_receipt(1,"c1",message,mock_conn)
pending_receipt = PendingReceive(1,"c1",OFFingerprint.from_pkt(message))
self.assertTrue(god.message_receipt_waiting(pending_receipt))
god.schedule(pending_receipt)
self.assertTrue(mock_conn.passed_message)
self.assertFalse(god.message_receipt_waiting(pending_receipt))
|
Add simple test for GodScheduler# Copyright 2011-2013 Colin Scott
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import sys
import os.path
sys.path.append(os.path.dirname(__file__) + "/../../..")
# N.B. this import is needed to avoid a circular dependency.
from sts.replay_event import *
from sts.god_scheduler import GodScheduler, PendingReceive
from pox.openflow.libopenflow_01 import *
class MockConnection(object):
def __init__(self):
self.passed_message = False
def allow_message_receipt(self, message):
self.passed_message = True
class GodSchedulerTest(unittest.TestCase):
def test_basic(self):
god = GodScheduler()
message = ofp_flow_mod(match=ofp_match(in_port=1, nw_src="1.1.1.1"),
action=ofp_action_output(port=1))
mock_conn = MockConnection()
god.insert_pending_receipt(1,"c1",message,mock_conn)
pending_receipt = PendingReceive(1,"c1",OFFingerprint.from_pkt(message))
self.assertTrue(god.message_receipt_waiting(pending_receipt))
god.schedule(pending_receipt)
self.assertTrue(mock_conn.passed_message)
self.assertFalse(god.message_receipt_waiting(pending_receipt))
|
<commit_before><commit_msg>Add simple test for GodScheduler<commit_after># Copyright 2011-2013 Colin Scott
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import sys
import os.path
sys.path.append(os.path.dirname(__file__) + "/../../..")
# N.B. this import is needed to avoid a circular dependency.
from sts.replay_event import *
from sts.god_scheduler import GodScheduler, PendingReceive
from pox.openflow.libopenflow_01 import *
class MockConnection(object):
def __init__(self):
self.passed_message = False
def allow_message_receipt(self, message):
self.passed_message = True
class GodSchedulerTest(unittest.TestCase):
def test_basic(self):
god = GodScheduler()
message = ofp_flow_mod(match=ofp_match(in_port=1, nw_src="1.1.1.1"),
action=ofp_action_output(port=1))
mock_conn = MockConnection()
god.insert_pending_receipt(1,"c1",message,mock_conn)
pending_receipt = PendingReceive(1,"c1",OFFingerprint.from_pkt(message))
self.assertTrue(god.message_receipt_waiting(pending_receipt))
god.schedule(pending_receipt)
self.assertTrue(mock_conn.passed_message)
self.assertFalse(god.message_receipt_waiting(pending_receipt))
|
|
b635112d3613d47247ac22390786aaaffcd2a3fd
|
examples/upsidedownternet.py
|
examples/upsidedownternet.py
|
import Image, cStringIO
def response(context, flow):
if flow.response.headers["content-type"] == ["image/png"]:
s = cStringIO.StringIO(flow.response.content)
img = Image.open(s)
img = img.rotate(180)
s2 = cStringIO.StringIO()
img.save(s2, "png")
flow.response.content = s2.getvalue()
|
Add an example script that turns all PNGs upside down.
|
Add an example script that turns all PNGs upside down.
|
Python
|
mit
|
dxq-git/mitmproxy,Kriechi/mitmproxy,tekii/mitmproxy,gzzhanghao/mitmproxy,ADemonisis/mitmproxy,ParthGanatra/mitmproxy,ZeYt/mitmproxy,vhaupert/mitmproxy,scriptmediala/mitmproxy,cortesi/mitmproxy,jvillacorta/mitmproxy,syjzwjj/mitmproxy,azureplus/mitmproxy,StevenVanAcker/mitmproxy,mitmproxy/mitmproxy,ccccccccccc/mitmproxy,ujjwal96/mitmproxy,liorvh/mitmproxy,xaxa89/mitmproxy,pombredanne/mitmproxy,zlorb/mitmproxy,rauburtin/mitmproxy,dxq-git/mitmproxy,legendtang/mitmproxy,0xwindows/InfoLeak,ikoz/mitmproxy,ZeYt/mitmproxy,StevenVanAcker/mitmproxy,jvillacorta/mitmproxy,inscriptionweb/mitmproxy,inscriptionweb/mitmproxy,0x0mar/mitmproxy,fimad/mitmproxy,ccccccccccc/mitmproxy,mitmproxy/mitmproxy,tdickers/mitmproxy,elitest/mitmproxy,dwfreed/mitmproxy,byt3bl33d3r/mitmproxy,tfeagle/mitmproxy,dufferzafar/mitmproxy,xaxa89/mitmproxy,elitest/mitmproxy,mitmproxy/mitmproxy,fimad/mitmproxy,dweinstein/mitmproxy,elitest/mitmproxy,dweinstein/mitmproxy,onlywade/mitmproxy,noikiy/mitmproxy,owers19856/mitmproxy,devasia1000/mitmproxy,ujjwal96/mitmproxy,laurmurclar/mitmproxy,dufferzafar/mitmproxy,azureplus/mitmproxy,0xwindows/InfoLeak,Endika/mitmproxy,zbuc/mitmproxy,tekii/mitmproxy,devasia1000/mitmproxy,dwfreed/mitmproxy,ADemonisis/mitmproxy,inscriptionweb/mitmproxy,xbzbing/mitmproxy,bazzinotti/mitmproxy,jpic/mitmproxy,StevenVanAcker/mitmproxy,jvillacorta/mitmproxy,ryoqun/mitmproxy,ZeYt/mitmproxy,macmantrl/mitmproxy,azureplus/mitmproxy,bltb/mitmproxy,macmantrl/mitmproxy,Kriechi/mitmproxy,tfeagle/mitmproxy,vhaupert/mitmproxy,pombredanne/mitmproxy,bltb/mitmproxy,ZeYt/mitmproxy,dwfreed/mitmproxy,Fuzion24/mitmproxy,guiquanz/mitmproxy,dufferzafar/mitmproxy,syjzwjj/mitmproxy,owers19856/mitmproxy,zlorb/mitmproxy,mosajjal/mitmproxy,scriptmediala/mitmproxy,legendtang/mitmproxy,cortesi/mitmproxy,owers19856/mitmproxy,mosajjal/mitmproxy,mhils/mitmproxy,liorvh/mitmproxy,sethp-jive/mitmproxy,rauburtin/mitmproxy,0x0mar/mitmproxy,vhaupert/mitmproxy,dufferzafar/mitmproxy,cortesi/mitmproxy,Endika/mitmproxy,rauburtin/mitmproxy,devasia1000/mitmproxy,xaxa89/mitmproxy,ParthGanatra/mitmproxy,Kriechi/mitmproxy,onlywade/mitmproxy,tekii/mitmproxy,zbuc/mitmproxy,tfeagle/mitmproxy,legendtang/mitmproxy,dweinstein/mitmproxy,MatthewShao/mitmproxy,syjzwjj/mitmproxy,macmantrl/mitmproxy,noikiy/mitmproxy,guiquanz/mitmproxy,dxq-git/mitmproxy,0x0mar/mitmproxy,0xwindows/InfoLeak,macmantrl/mitmproxy,ikoz/mitmproxy,ddworken/mitmproxy,Kriechi/mitmproxy,ujjwal96/mitmproxy,Fuzion24/mitmproxy,MatthewShao/mitmproxy,ADemonisis/mitmproxy,mosajjal/mitmproxy,tekii/mitmproxy,dweinstein/mitmproxy,onlywade/mitmproxy,MatthewShao/mitmproxy,syjzwjj/mitmproxy,Fuzion24/mitmproxy,Fuzion24/mitmproxy,meizhoubao/mitmproxy,ryoqun/mitmproxy,sethp-jive/mitmproxy,MatthewShao/mitmproxy,bazzinotti/mitmproxy,ddworken/mitmproxy,gzzhanghao/mitmproxy,zlorb/mitmproxy,xbzbing/mitmproxy,liorvh/mitmproxy,fimad/mitmproxy,pombredanne/mitmproxy,xbzbing/mitmproxy,tdickers/mitmproxy,xtso520ok/mitmproxy,tfeagle/mitmproxy,xtso520ok/mitmproxy,devasia1000/anti_adblock,devasia1000/anti_adblock,dwfreed/mitmproxy,mhils/mitmproxy,byt3bl33d3r/mitmproxy,guiquanz/mitmproxy,mosajjal/mitmproxy,claimsmall/mitmproxy,sethp-jive/mitmproxy,laurmurclar/mitmproxy,mhils/mitmproxy,cortesi/mitmproxy,meizhoubao/mitmproxy,scriptmediala/mitmproxy,sethp-jive/mitmproxy,bltb/mitmproxy,zlorb/mitmproxy,byt3bl33d3r/mitmproxy,xbzbing/mitmproxy,bazzinotti/mitmproxy,mhils/mitmproxy,ujjwal96/mitmproxy,jpic/mitmproxy,byt3bl33d3r/mitmproxy,vhaupert/mitmproxy,liorvh/mitmproxy,StevenVanAcker/mitmproxy,elitest/mitmproxy,jvillacorta/mitmproxy
,zbuc/mitmproxy,tdickers/mitmproxy,claimsmall/mitmproxy,fimad/mitmproxy,devasia1000/mitmproxy,ikoz/mitmproxy,Endika/mitmproxy,ADemonisis/mitmproxy,mitmproxy/mitmproxy,meizhoubao/mitmproxy,laurmurclar/mitmproxy,devasia1000/anti_adblock,ParthGanatra/mitmproxy,meizhoubao/mitmproxy,onlywade/mitmproxy,noikiy/mitmproxy,Endika/mitmproxy,0xwindows/InfoLeak,zbuc/mitmproxy,jpic/mitmproxy,ccccccccccc/mitmproxy,ryoqun/mitmproxy,noikiy/mitmproxy,bltb/mitmproxy,owers19856/mitmproxy,ikoz/mitmproxy,ParthGanatra/mitmproxy,rauburtin/mitmproxy,pombredanne/mitmproxy,xtso520ok/mitmproxy,laurmurclar/mitmproxy,claimsmall/mitmproxy,gzzhanghao/mitmproxy,tdickers/mitmproxy,bazzinotti/mitmproxy,jpic/mitmproxy,claimsmall/mitmproxy,scriptmediala/mitmproxy,ddworken/mitmproxy,ddworken/mitmproxy,azureplus/mitmproxy,mitmproxy/mitmproxy,xaxa89/mitmproxy,guiquanz/mitmproxy,gzzhanghao/mitmproxy,dxq-git/mitmproxy,legendtang/mitmproxy,ryoqun/mitmproxy,mhils/mitmproxy,inscriptionweb/mitmproxy,ccccccccccc/mitmproxy
|
Add an example script that turns all PNGs upside down.
|
import Image, cStringIO
def response(context, flow):
if flow.response.headers["content-type"] == ["image/png"]:
s = cStringIO.StringIO(flow.response.content)
img = Image.open(s)
img = img.rotate(180)
s2 = cStringIO.StringIO()
img.save(s2, "png")
flow.response.content = s2.getvalue()
|
<commit_before><commit_msg>Add an example script that turns all PNGs upside down.<commit_after>
|
import Image, cStringIO
def response(context, flow):
if flow.response.headers["content-type"] == ["image/png"]:
s = cStringIO.StringIO(flow.response.content)
img = Image.open(s)
img = img.rotate(180)
s2 = cStringIO.StringIO()
img.save(s2, "png")
flow.response.content = s2.getvalue()
|
Add an example script that turns all PNGs upside down.import Image, cStringIO
def response(context, flow):
if flow.response.headers["content-type"] == ["image/png"]:
s = cStringIO.StringIO(flow.response.content)
img = Image.open(s)
img = img.rotate(180)
s2 = cStringIO.StringIO()
img.save(s2, "png")
flow.response.content = s2.getvalue()
|
<commit_before><commit_msg>Add an example script that turns all PNGs upside down.<commit_after>import Image, cStringIO
def response(context, flow):
if flow.response.headers["content-type"] == ["image/png"]:
s = cStringIO.StringIO(flow.response.content)
img = Image.open(s)
img = img.rotate(180)
s2 = cStringIO.StringIO()
img.save(s2, "png")
flow.response.content = s2.getvalue()
|
|
c145b2cc08b3bbf0d2506afb58116e1a0c2dc4fc
|
tests/core_tests.py
|
tests/core_tests.py
|
from graffiti import core
from graffiti import util
def test_schema():
assert "fn" in core.schema(1)
fn = lambda x: 1
assert core.schema(fn) == util.fninfo(fn)
def t():
return 1
t._schema = { "schema": 1 }
assert core.schema(t) == { "schema": 1 }
def test_dependencies():
g = {
"a": util.fninfo(lambda x: 1),
"b": util.fninfo(lambda y, z: 2),
"c": util.fninfo(lambda: 3),
"d": util.fninfo(lambda o=1: o)
}
assert core.dependencies(g) == {
"a": {"x"},
"b": {"y", "z"},
"c": set(),
"d": set()
}
def test_transitive():
g = {
"a": {"b"},
"b": {"c"},
"c": {"d"}
}
assert core.transitive(g) == {
"a": {"b", "c", "d"},
"b": {"c", "d"},
"c": {"d"}
}
def test_topological():
g = {
"a": {"b", "c", "d"},
"b": {"c", "d"},
"c": {"d"},
"d": {}
}
res = core.topological(g)
assert res.index("d") > res.index("c")
assert res.index("c") > res.index("b")
assert res.index("b") > res.index("a")
|
Add tests for core graph functions
|
Add tests for core graph functions
|
Python
|
mit
|
SegFaultAX/graffiti
|
Add tests for core graph functions
|
from graffiti import core
from graffiti import util
def test_schema():
assert "fn" in core.schema(1)
fn = lambda x: 1
assert core.schema(fn) == util.fninfo(fn)
def t():
return 1
t._schema = { "schema": 1 }
assert core.schema(t) == { "schema": 1 }
def test_dependencies():
g = {
"a": util.fninfo(lambda x: 1),
"b": util.fninfo(lambda y, z: 2),
"c": util.fninfo(lambda: 3),
"d": util.fninfo(lambda o=1: o)
}
assert core.dependencies(g) == {
"a": {"x"},
"b": {"y", "z"},
"c": set(),
"d": set()
}
def test_transitive():
g = {
"a": {"b"},
"b": {"c"},
"c": {"d"}
}
assert core.transitive(g) == {
"a": {"b", "c", "d"},
"b": {"c", "d"},
"c": {"d"}
}
def test_topological():
g = {
"a": {"b", "c", "d"},
"b": {"c", "d"},
"c": {"d"},
"d": {}
}
res = core.topological(g)
assert res.index("d") > res.index("c")
assert res.index("c") > res.index("b")
assert res.index("b") > res.index("a")
|
<commit_before><commit_msg>Add tests for core graph functions<commit_after>
|
from graffiti import core
from graffiti import util
def test_schema():
assert "fn" in core.schema(1)
fn = lambda x: 1
assert core.schema(fn) == util.fninfo(fn)
def t():
return 1
t._schema = { "schema": 1 }
assert core.schema(t) == { "schema": 1 }
def test_dependencies():
g = {
"a": util.fninfo(lambda x: 1),
"b": util.fninfo(lambda y, z: 2),
"c": util.fninfo(lambda: 3),
"d": util.fninfo(lambda o=1: o)
}
assert core.dependencies(g) == {
"a": {"x"},
"b": {"y", "z"},
"c": set(),
"d": set()
}
def test_transitive():
g = {
"a": {"b"},
"b": {"c"},
"c": {"d"}
}
assert core.transitive(g) == {
"a": {"b", "c", "d"},
"b": {"c", "d"},
"c": {"d"}
}
def test_topological():
g = {
"a": {"b", "c", "d"},
"b": {"c", "d"},
"c": {"d"},
"d": {}
}
res = core.topological(g)
assert res.index("d") > res.index("c")
assert res.index("c") > res.index("b")
assert res.index("b") > res.index("a")
|
Add tests for core graph functionsfrom graffiti import core
from graffiti import util
def test_schema():
assert "fn" in core.schema(1)
fn = lambda x: 1
assert core.schema(fn) == util.fninfo(fn)
def t():
return 1
t._schema = { "schema": 1 }
assert core.schema(t) == { "schema": 1 }
def test_dependencies():
g = {
"a": util.fninfo(lambda x: 1),
"b": util.fninfo(lambda y, z: 2),
"c": util.fninfo(lambda: 3),
"d": util.fninfo(lambda o=1: o)
}
assert core.dependencies(g) == {
"a": {"x"},
"b": {"y", "z"},
"c": set(),
"d": set()
}
def test_transitive():
g = {
"a": {"b"},
"b": {"c"},
"c": {"d"}
}
assert core.transitive(g) == {
"a": {"b", "c", "d"},
"b": {"c", "d"},
"c": {"d"}
}
def test_topological():
g = {
"a": {"b", "c", "d"},
"b": {"c", "d"},
"c": {"d"},
"d": {}
}
res = core.topological(g)
assert res.index("d") > res.index("c")
assert res.index("c") > res.index("b")
assert res.index("b") > res.index("a")
|
<commit_before><commit_msg>Add tests for core graph functions<commit_after>from graffiti import core
from graffiti import util
def test_schema():
assert "fn" in core.schema(1)
fn = lambda x: 1
assert core.schema(fn) == util.fninfo(fn)
def t():
return 1
t._schema = { "schema": 1 }
assert core.schema(t) == { "schema": 1 }
def test_dependencies():
g = {
"a": util.fninfo(lambda x: 1),
"b": util.fninfo(lambda y, z: 2),
"c": util.fninfo(lambda: 3),
"d": util.fninfo(lambda o=1: o)
}
assert core.dependencies(g) == {
"a": {"x"},
"b": {"y", "z"},
"c": set(),
"d": set()
}
def test_transitive():
g = {
"a": {"b"},
"b": {"c"},
"c": {"d"}
}
assert core.transitive(g) == {
"a": {"b", "c", "d"},
"b": {"c", "d"},
"c": {"d"}
}
def test_topological():
g = {
"a": {"b", "c", "d"},
"b": {"c", "d"},
"c": {"d"},
"d": {}
}
res = core.topological(g)
assert res.index("d") > res.index("c")
assert res.index("c") > res.index("b")
assert res.index("b") > res.index("a")
|
|
41f6c1c27fb8d3c63d8bb51471a24dcf9d59c1fb
|
tests/test_api.py
|
tests/test_api.py
|
import unittest
from flask import current_app, request, abort, jsonify, g, url_for
from api.api import *
from api.models import User
class TestApi(unittest.TestCase):
def setUp(self):
pass
@unittest.skip("")
def test_login(self):
pass
@unittest.skip("")
def test_register(self):
pass
@unittest.skip("")
def test_add_bucket_list(self):
pass
@unittest.skip("")
def test_get_bucket_lists(self):
pass
@unittest.skip("")
def test_get_bucket_list(self):
pass
@unittest.skip("")
def test_put_bucket_list(self):
pass
@unittest.skip("")
def test_delete_bucket_list(self):
pass
@unittest.skip("")
def test_create_item_in_bucket_list(self):
pass
@unittest.skip("")
def test_get_items_in_bucket_list(self):
pass
@unittest.skip("")
def test_update_bucket_list_item(self):
pass
@unittest.skip("")
def test_delete_bucket_list_item(self):
pass
|
Add test methods for API endpoint methods
|
Add test methods for API endpoint methods
|
Python
|
mit
|
EdwinKato/bucket-list,EdwinKato/bucket-list,EdwinKato/bucket-list,EdwinKato/bucket-list,EdwinKato/bucket-list
|
Add test methods for API endpoint methods
|
import unittest
from flask import current_app, request, abort, jsonify, g, url_for
from api.api import *
from api.models import User
class TestApi(unittest.TestCase):
def setUp(self):
pass
@unittest.skip("")
def test_login(self):
pass
@unittest.skip("")
def test_register(self):
pass
@unittest.skip("")
def test_add_bucket_list(self):
pass
@unittest.skip("")
def test_get_bucket_lists(self):
pass
@unittest.skip("")
def test_get_bucket_list(self):
pass
@unittest.skip("")
def test_put_bucket_list(self):
pass
@unittest.skip("")
def test_delete_bucket_list(self):
pass
@unittest.skip("")
def test_create_item_in_bucket_list(self):
pass
@unittest.skip("")
def test_get_items_in_bucket_list(self):
pass
@unittest.skip("")
def test_update_bucket_list_item(self):
pass
@unittest.skip("")
def test_delete_bucket_list_item(self):
pass
|
<commit_before><commit_msg>Add test methods for api end point methods<commit_after>
|
import unittest
from flask import current_app, request, abort, jsonify, g, url_for
from api.api import *
from api.models import User
class TestApi(unittest.TestCase):
def setUp(self):
pass
@unittest.skip("")
def test_login(self):
pass
@unittest.skip("")
def test_register(self):
pass
@unittest.skip("")
def test_add_bucket_list(self):
pass
@unittest.skip("")
def test_get_bucket_lists(self):
pass
@unittest.skip("")
def test_get_bucket_list(self):
pass
@unittest.skip("")
def test_put_bucket_list(self):
pass
@unittest.skip("")
def test_delete_bucket_list(self):
pass
@unittest.skip("")
def test_create_item_in_bucket_list(self):
pass
@unittest.skip("")
def test_get_items_in_bucket_list(self):
pass
@unittest.skip("")
def test_update_bucket_list_item(self):
pass
@unittest.skip("")
def test_delete_bucket_list_item(self):
pass
|
Add test methods for api end point methods
import unittest
from flask import current_app, request, abort, jsonify, g, url_for
from api.api import *
from api.models import User
class TestApi(unittest.TestCase):
def setUp(self):
pass
@unittest.skip("")
def test_login(self):
pass
@unittest.skip("")
def test_register(self):
pass
@unittest.skip("")
def test_add_bucket_list(self):
pass
@unittest.skip("")
def test_get_bucket_lists(self):
pass
@unittest.skip("")
def test_get_bucket_list(self):
pass
@unittest.skip("")
def test_put_bucket_list(self):
pass
@unittest.skip("")
def test_delete_bucket_list(self):
pass
@unittest.skip("")
def test_create_item_in_bucket_list(self):
pass
@unittest.skip("")
def test_get_items_in_bucket_list(self):
pass
@unittest.skip("")
def test_update_bucket_list_item(self):
pass
@unittest.skip("")
def test_delete_bucket_list_item(self):
pass
|
<commit_before><commit_msg>Add test methods for api end point methods<commit_after>import unittest
from flask import current_app, request, abort, jsonify, g, url_for
from api.api import *
from api.models import User
class TestApi(unittest.TestCase):
def setUp(self):
pass
@unittest.skip("")
def test_login(self):
pass
@unittest.skip("")
def test_register(self):
pass
@unittest.skip("")
def test_add_bucket_list(self):
pass
@unittest.skip("")
def test_get_bucket_lists(self):
pass
@unittest.skip("")
def test_get_bucket_list(self):
pass
@unittest.skip("")
def test_put_bucket_list(self):
pass
@unittest.skip("")
def test_delete_bucket_list(self):
pass
@unittest.skip("")
def test_create_item_in_bucket_list(self):
pass
@unittest.skip("")
def test_get_items_in_bucket_list(self):
pass
@unittest.skip("")
def test_update_bucket_list_item(self):
pass
@unittest.skip("")
def test_delete_bucket_list_item(self):
pass
|
|
954c92db789cf5bde4752c9b46b2c3a549820d75
|
tests/test_api.py
|
tests/test_api.py
|
import mock
import unittest
from testrail.api import API
from testrail.helper import TestRailError
import copy
import ast
class TestHTTPMethod(unittest.TestCase):
def setUp(self):
self.client = API()
@mock.patch('testrail.api.requests.get')
def test_get_ok(self, mock_get):
mock_response = mock.Mock()
return_value = {
"announcement": "..",
"completed_on": None,
"id": 1,
"is_completed": False,
"name": "Datahub",
"show_announcement": True,
"url": "http://<server>/index.php?/projects/overview/1"
}
expected_response = copy.deepcopy(return_value)
mock_response.json.return_value = return_value
mock_response.status_code = 200
mock_get.return_value = mock_response
url = 'https://<server>/index.php?/api/v2/get_project/1'
actual_response = self.client._get('get_project/1')
mock_get.assert_called_once_with(url, headers={'Content-Type': 'application/json'}, params=None, auth=('user@yourdomain.com', 'your_api_key'))
self.assertEqual(1, mock_response.json.call_count)
self.assertEqual(expected_response, actual_response)
@mock.patch('testrail.api.requests.get')
def test_get_bad_no_params(self, mock_get):
mock_response = mock.Mock()
expected_response = {
'url': 'https://<server>/index.php?/api/v2/get_plan/200',
'status_code': 400,
'payload': None,
'error': 'Invalid or unknown test plan'
}
url = 'https://<server>/index.php?/api/v2/get_plan/200'
mock_response.json.return_value = {'error': 'Invalid or unknown test plan'}
mock_response.status_code = 400
mock_response.url = url
mock_get.return_value = mock_response
with self.assertRaises(TestRailError) as e:
self.client._get('get_plan/200')
mock_get.assert_called_once_with(url, headers={'Content-Type': 'application/json'}, params=None, auth=('user@yourdomain.com', 'your_api_key'))
self.assertEqual(1, mock_response.json.call_count)
self.assertEqual(expected_response, ast.literal_eval(str(e.exception)))
if __name__ == "__main__":
unittest.main()
|
Add basic low level HTTP get tests.
|
Add basic low level HTTP get tests.
|
Python
|
mit
|
travispavek/testrail-python,travispavek/testrail
|
Add basic low level HTTP get tests.
|
import mock
import unittest
from testrail.api import API
from testrail.helper import TestRailError
import copy
import ast
class TestHTTPMethod(unittest.TestCase):
def setUp(self):
self.client = API()
@mock.patch('testrail.api.requests.get')
def test_get_ok(self, mock_get):
mock_response = mock.Mock()
return_value = {
"announcement": "..",
"completed_on": None,
"id": 1,
"is_completed": False,
"name": "Datahub",
"show_announcement": True,
"url": "http://<server>/index.php?/projects/overview/1"
}
expected_response = copy.deepcopy(return_value)
mock_response.json.return_value = return_value
mock_response.status_code = 200
mock_get.return_value = mock_response
url = 'https://<server>/index.php?/api/v2/get_project/1'
actual_response = self.client._get('get_project/1')
mock_get.assert_called_once_with(url, headers={'Content-Type': 'application/json'}, params=None, auth=('user@yourdomain.com', 'your_api_key'))
self.assertEqual(1, mock_response.json.call_count)
self.assertEqual(expected_response, actual_response)
@mock.patch('testrail.api.requests.get')
def test_get_bad_no_params(self, mock_get):
mock_response = mock.Mock()
expected_response = {
'url': 'https://<server>/index.php?/api/v2/get_plan/200',
'status_code': 400,
'payload': None,
'error': 'Invalid or unknown test plan'
}
url = 'https://<server>/index.php?/api/v2/get_plan/200'
mock_response.json.return_value = {'error': 'Invalid or unknown test plan'}
mock_response.status_code = 400
mock_response.url = url
mock_get.return_value = mock_response
with self.assertRaises(TestRailError) as e:
self.client._get('get_plan/200')
mock_get.assert_called_once_with(url, headers={'Content-Type': 'application/json'}, params=None, auth=('user@yourdomain.com', 'your_api_key'))
self.assertEqual(1, mock_response.json.call_count)
self.assertEqual(expected_response, ast.literal_eval(str(e.exception)))
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add basic low level HTTP get tests.<commit_after>
|
import mock
import unittest
from testrail.api import API
from testrail.helper import TestRailError
import copy
import ast
class TestHTTPMethod(unittest.TestCase):
def setUp(self):
self.client = API()
@mock.patch('testrail.api.requests.get')
def test_get_ok(self, mock_get):
mock_response = mock.Mock()
return_value = {
"announcement": "..",
"completed_on": None,
"id": 1,
"is_completed": False,
"name": "Datahub",
"show_announcement": True,
"url": "http://<server>/index.php?/projects/overview/1"
}
expected_response = copy.deepcopy(return_value)
mock_response.json.return_value = return_value
mock_response.status_code = 200
mock_get.return_value = mock_response
url = 'https://<server>/index.php?/api/v2/get_project/1'
actual_response = self.client._get('get_project/1')
mock_get.assert_called_once_with(url, headers={'Content-Type': 'application/json'}, params=None, auth=('user@yourdomain.com', 'your_api_key'))
self.assertEqual(1, mock_response.json.call_count)
self.assertEqual(expected_response, actual_response)
@mock.patch('testrail.api.requests.get')
def test_get_bad_no_params(self, mock_get):
mock_response = mock.Mock()
expected_response = {
'url': 'https://<server>/index.php?/api/v2/get_plan/200',
'status_code': 400,
'payload': None,
'error': 'Invalid or unknown test plan'
}
url = 'https://<server>/index.php?/api/v2/get_plan/200'
mock_response.json.return_value = {'error': 'Invalid or unknown test plan'}
mock_response.status_code = 400
mock_response.url = url
mock_get.return_value = mock_response
with self.assertRaises(TestRailError) as e:
self.client._get('get_plan/200')
mock_get.assert_called_once_with(url, headers={'Content-Type': 'application/json'}, params=None, auth=('user@yourdomain.com', 'your_api_key'))
self.assertEqual(1, mock_response.json.call_count)
self.assertEqual(expected_response, ast.literal_eval(str(e.exception)))
if __name__ == "__main__":
unittest.main()
|
Add basic low level HTTP get tests.
import mock
import unittest
from testrail.api import API
from testrail.helper import TestRailError
import copy
import ast
class TestHTTPMethod(unittest.TestCase):
def setUp(self):
self.client = API()
@mock.patch('testrail.api.requests.get')
def test_get_ok(self, mock_get):
mock_response = mock.Mock()
return_value = {
"announcement": "..",
"completed_on": None,
"id": 1,
"is_completed": False,
"name": "Datahub",
"show_announcement": True,
"url": "http://<server>/index.php?/projects/overview/1"
}
expected_response = copy.deepcopy(return_value)
mock_response.json.return_value = return_value
mock_response.status_code = 200
mock_get.return_value = mock_response
url = 'https://<server>/index.php?/api/v2/get_project/1'
actual_response = self.client._get('get_project/1')
mock_get.assert_called_once_with(url, headers={'Content-Type': 'application/json'}, params=None, auth=('user@yourdomain.com', 'your_api_key'))
self.assertEqual(1, mock_response.json.call_count)
self.assertEqual(expected_response, actual_response)
@mock.patch('testrail.api.requests.get')
def test_get_bad_no_params(self, mock_get):
mock_response = mock.Mock()
expected_response = {
'url': 'https://<server>/index.php?/api/v2/get_plan/200',
'status_code': 400,
'payload': None,
'error': 'Invalid or unknown test plan'
}
url = 'https://<server>/index.php?/api/v2/get_plan/200'
mock_response.json.return_value = {'error': 'Invalid or unknown test plan'}
mock_response.status_code = 400
mock_response.url = url
mock_get.return_value = mock_response
with self.assertRaises(TestRailError) as e:
self.client._get('get_plan/200')
mock_get.assert_called_once_with(url, headers={'Content-Type': 'application/json'}, params=None, auth=('user@yourdomain.com', 'your_api_key'))
self.assertEqual(1, mock_response.json.call_count)
self.assertEqual(expected_response, ast.literal_eval(str(e.exception)))
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add basic low level HTTP get tests.<commit_after>import mock
import unittest
from testrail.api import API
from testrail.helper import TestRailError
import copy
import ast
class TestHTTPMethod(unittest.TestCase):
def setUp(self):
self.client = API()
@mock.patch('testrail.api.requests.get')
def test_get_ok(self, mock_get):
mock_response = mock.Mock()
return_value = {
"announcement": "..",
"completed_on": None,
"id": 1,
"is_completed": False,
"name": "Datahub",
"show_announcement": True,
"url": "http://<server>/index.php?/projects/overview/1"
}
expected_response = copy.deepcopy(return_value)
mock_response.json.return_value = return_value
mock_response.status_code = 200
mock_get.return_value = mock_response
url = 'https://<server>/index.php?/api/v2/get_project/1'
actual_response = self.client._get('get_project/1')
mock_get.assert_called_once_with(url, headers={'Content-Type': 'application/json'}, params=None, auth=('user@yourdomain.com', 'your_api_key'))
self.assertEqual(1, mock_response.json.call_count)
self.assertEqual(expected_response, actual_response)
@mock.patch('testrail.api.requests.get')
def test_get_bad_no_params(self, mock_get):
mock_response = mock.Mock()
expected_response = {
'url': 'https://<server>/index.php?/api/v2/get_plan/200',
'status_code': 400,
'payload': None,
'error': 'Invalid or unknown test plan'
}
url = 'https://<server>/index.php?/api/v2/get_plan/200'
mock_response.json.return_value = {'error': 'Invalid or unknown test plan'}
mock_response.status_code = 400
mock_response.url = url
mock_get.return_value = mock_response
with self.assertRaises(TestRailError) as e:
self.client._get('get_plan/200')
mock_get.assert_called_once_with(url, headers={'Content-Type': 'application/json'}, params=None, auth=('user@yourdomain.com', 'your_api_key'))
self.assertEqual(1, mock_response.json.call_count)
self.assertEqual(expected_response, ast.literal_eval(str(e.exception)))
if __name__ == "__main__":
unittest.main()
|
|
503f651e8d0e6aa8ffeabfa0108fe21b4fa73787
|
udpPinger.py
|
udpPinger.py
|
#!/usr/bin/env python
import sys, socket, time
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
if len(sys.argv) > 1:
interval = float(eval(sys.argv[1]))
else:
interval = 1.0
if len(sys.argv) > 2:
size = int(eval(sys.argv[2]))
else:
size = 1420
s.settimeout(interval)
try:
while True:
s.sendto(b"\xa5"*size, ("172.31.1.1", 5551))
tick = time.time()
try:
r, a = s.recvfrom(1500)
except socket.timeout:
sys.stdout.write("Timeout\r\n")
else:
delta = time.time()-tick
sys.stdout.write("{:f}ms\r\n".format(delta*1000))
except KeyboardInterrupt:
sys.exit(0)
|
Add python script for testing with.
|
Add python script for testing with.
|
Python
|
bsd-3-clause
|
DanielCasner/esp8266-udp-throughput-test,DanielCasner/esp8266-udp-throughput-test,DanielCasner/esp8266-udp-throughput-test
|
Add python script for testing with.
|
#!/usr/bin/env python
import sys, socket, time
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
if len(sys.argv) > 1:
interval = float(eval(sys.argv[1]))
else:
interval = 1.0
if len(sys.argv) > 2:
size = int(eval(sys.argv[2]))
else:
size = 1420
s.settimeout(interval)
try:
while True:
s.sendto(b"\xa5"*size, ("172.31.1.1", 5551))
tick = time.time()
try:
r, a = s.recvfrom(1500)
except socket.timeout:
sys.stdout.write("Timeout\r\n")
else:
delta = time.time()-tick
sys.stdout.write("{:f}ms\r\n".format(delta*1000))
except KeyboardInterrupt:
sys.exit(0)
|
<commit_before><commit_msg>Add python script for testing with.<commit_after>
|
#!/usr/bin/env python
import sys, socket, time
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
if len(sys.argv) > 1:
interval = float(eval(sys.argv[1]))
else:
interval = 1.0
if len(sys.argv) > 2:
size = int(eval(sys.argv[2]))
else:
size = 1420
s.settimeout(interval)
try:
while True:
s.sendto(b"\xa5"*size, ("172.31.1.1", 5551))
tick = time.time()
try:
r, a = s.recvfrom(1500)
except socket.timeout:
sys.stdout.write("Timeout\r\n")
else:
delta = time.time()-tick
sys.stdout.write("{:f}ms\r\n".format(delta*1000))
except KeyboardInterrupt:
sys.exit(0)
|
Add python script for testing with.
#!/usr/bin/env python
import sys, socket, time
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
if len(sys.argv) > 1:
interval = float(eval(sys.argv[1]))
else:
interval = 1.0
if len(sys.argv) > 2:
size = int(eval(sys.argv[2]))
else:
size = 1420
s.settimeout(interval)
try:
while True:
s.sendto(b"\xa5"*size, ("172.31.1.1", 5551))
tick = time.time()
try:
r, a = s.recvfrom(1500)
except socket.timeout:
sys.stdout.write("Timeout\r\n")
else:
delta = time.time()-tick
sys.stdout.write("{:f}ms\r\n".format(delta*1000))
except KeyboardInterrupt:
sys.exit(0)
|
<commit_before><commit_msg>Add python script for testing with.<commit_after>#!/usr/bin/env python
import sys, socket, time
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
if len(sys.argv) > 1:
interval = float(eval(sys.argv[1]))
else:
interval = 1.0
if len(sys.argv) > 2:
size = int(eval(sys.argv[2]))
else:
size = 1420
s.settimeout(interval)
try:
while True:
s.sendto(b"\xa5"*size, ("172.31.1.1", 5551))
tick = time.time()
try:
r, a = s.recvfrom(1500)
except socket.timeout:
sys.stdout.write("Timeout\r\n")
else:
delta = time.time()-tick
sys.stdout.write("{:f}ms\r\n".format(delta*1000))
except KeyboardInterrupt:
sys.exit(0)
|
|
4e6773000326076f13f5d9eaa0c95103fe8511e4
|
scripts/process_logs.py
|
scripts/process_logs.py
|
#!/usr/bin/python3
import sys
import collections
sums = collections.defaultdict(lambda: 0)
lens = collections.defaultdict(lambda: 0)
def mva(n_a, s_lb, s_a, avg_len, clients, z):
n_centers = n_a + 2
V = [2] + [1 / n_a] * n_a + [1]
S = [s_lb] + [s_a] * n_a + [z]
is_delay = [False] * (n_centers - 1) + [True]
ns = [0] * n_centers
for i in range(1, clients + 1):
r = []
for j in range(n_centers):
if is_delay[j]:
r.append(S[j])
else:
r.append(V[j] * S[j] * (1 + ns[j]))
total_r = sum(r)
throughput = i / total_r
throughputs = []
utilizations = []
for j in range(n_centers):
throughputs.append(V[j] * throughput)
utilizations.append(V[j] * throughput * S[j])
ns[j] = throughput * r[j]
print("LB Utilization:", utilizations[0])
print("AC Utilization:", utilizations[1])
print("R:", total_r - z)
for f in sys.argv[1:]:
for l in open(f):
try:
a, b = l.split()
b = int(b)
sums[a] += b
lens[a] += 1
except:
print("Error:", l)
for k, v in sums.items():
print("{} {}".format(k, v / lens[k]))
|
Add script to process log outputs and do Mean Value Analysis (MVA).
|
Add script to process log outputs and do Mean Value Analysis (MVA).
|
Python
|
mit
|
gpoesia/autocomplete,gpoesia/autocomplete,gpoesia/autocomplete,gpoesia/autocomplete
|
Add script to process log outputs and do Mean Value Analysis (MVA).
|
#!/usr/bin/python3
import sys
import collections
sums = collections.defaultdict(lambda: 0)
lens = collections.defaultdict(lambda: 0)
def mva(n_a, s_lb, s_a, avg_len, clients, z):
n_centers = n_a + 2
V = [2] + [1 / n_a] * n_a + [1]
S = [s_lb] + [s_a] * n_a + [z]
is_delay = [False] * (n_centers - 1) + [True]
ns = [0] * n_centers
for i in range(1, clients + 1):
r = []
for j in range(n_centers):
if is_delay[j]:
r.append(S[j])
else:
r.append(V[j] * S[j] * (1 + ns[j]))
total_r = sum(r)
throughput = i / total_r
throughputs = []
utilizations = []
for j in range(n_centers):
throughputs.append(V[j] * throughput)
utilizations.append(V[j] * throughput * S[j])
ns[j] = throughput * r[j]
print("LB Utilization:", utilizations[0])
print("AC Utilization:", utilizations[1])
print("R:", total_r - z)
for f in sys.argv[1:]:
for l in open(f):
try:
a, b = l.split()
b = int(b)
sums[a] += b
lens[a] += 1
except:
print("Error:", l)
for k, v in sums.items():
print("{} {}".format(k, v / lens[k]))
|
<commit_before><commit_msg>Add script to process log outputs and do Mean Value Analysis (MVA).<commit_after>
|
#!/usr/bin/python3
import sys
import collections
sums = collections.defaultdict(lambda: 0)
lens = collections.defaultdict(lambda: 0)
def mva(n_a, s_lb, s_a, avg_len, clients, z):
n_centers = n_a + 2
V = [2] + [1 / n_a] * n_a + [1]
S = [s_lb] + [s_a] * n_a + [z]
is_delay = [False] * (n_centers - 1) + [True]
ns = [0] * n_centers
for i in range(1, clients + 1):
r = []
for j in range(n_centers):
if is_delay[j]:
r.append(S[j])
else:
r.append(V[j] * S[j] * (1 + ns[j]))
total_r = sum(r)
throughput = i / total_r
throughputs = []
utilizations = []
for j in range(n_centers):
throughputs.append(V[j] * throughput)
utilizations.append(V[j] * throughput * S[j])
ns[j] = throughput * r[j]
print("LB Utilization:", utilizations[0])
print("AC Utilization:", utilizations[1])
print("R:", total_r - z)
for f in sys.argv[1:]:
for l in open(f):
try:
a, b = l.split()
b = int(b)
sums[a] += b
lens[a] += 1
except:
print("Error:", l)
for k, v in sums.items():
print("{} {}".format(k, v / lens[k]))
|
Add script to process log outputs and do Mean Value Analysis (MVA).
#!/usr/bin/python3
import sys
import collections
sums = collections.defaultdict(lambda: 0)
lens = collections.defaultdict(lambda: 0)
def mva(n_a, s_lb, s_a, avg_len, clients, z):
n_centers = n_a + 2
V = [2] + [1 / n_a] * n_a + [1]
S = [s_lb] + [s_a] * n_a + [z]
is_delay = [False] * (n_centers - 1) + [True]
ns = [0] * n_centers
for i in range(1, clients + 1):
r = []
for j in range(n_centers):
if is_delay[j]:
r.append(S[j])
else:
r.append(V[j] * S[j] * (1 + ns[j]))
total_r = sum(r)
throughput = i / total_r
throughputs = []
utilizations = []
for j in range(n_centers):
throughputs.append(V[j] * throughput)
utilizations.append(V[j] * throughput * S[j])
ns[j] = throughput * r[j]
print("LB Utilization:", utilizations[0])
print("AC Utilization:", utilizations[1])
print("R:", total_r - z)
for f in sys.argv[1:]:
for l in open(f):
try:
a, b = l.split()
b = int(b)
sums[a] += b
lens[a] += 1
except:
print("Error:", l)
for k, v in sums.items():
print("{} {}".format(k, v / lens[k]))
|
<commit_before><commit_msg>Add script to process log outputs and do Mean Value Analysis (MVA).<commit_after>#!/usr/bin/python3
import sys
import collections
sums = collections.defaultdict(lambda: 0)
lens = collections.defaultdict(lambda: 0)
def mva(n_a, s_lb, s_a, avg_len, clients, z):
n_centers = n_a + 2
V = [2] + [1 / n_a] * n_a + [1]
S = [s_lb] + [s_a] * n_a + [z]
is_delay = [False] * (n_centers - 1) + [True]
ns = [0] * n_centers
for i in range(1, clients + 1):
r = []
for j in range(n_centers):
if is_delay[j]:
r.append(S[j])
else:
r.append(V[j] * S[j] * (1 + ns[j]))
total_r = sum(r)
throughput = i / total_r
throughputs = []
utilizations = []
for j in range(n_centers):
throughputs.append(V[j] * throughput)
utilizations.append(V[j] * throughput * S[j])
ns[j] = throughput * r[j]
print("LB Utilization:", utilizations[0])
print("AC Utilization:", utilizations[1])
print("R:", total_r - z)
for f in sys.argv[1:]:
for l in open(f):
try:
a, b = l.split()
b = int(b)
sums[a] += b
lens[a] += 1
except:
print("Error:", l)
for k, v in sums.items():
print("{} {}".format(k, v / lens[k]))
|
|
f6303b46ee4b7a648bef01f8c6a171c4e1573cee
|
Scripts/process_files.py
|
Scripts/process_files.py
|
import os
from subprocess import call
inputpath = 'originals'
outputpath = 'segmentations'
for filename in os.listdir(inputpath):
current = os.path.join(inputpath, filename)
if os.path.isfile(current):
call([segment_exe, current, result])
|
Add skeleton of python script to process multiple images.
|
Add skeleton of python script to process multiple images.
|
Python
|
apache-2.0
|
HackTheStacks/darwin-notes-image-processing,HackTheStacks/darwin-notes-image-processing
|
Add skeleton of python script to process multiple images.
|
import os
from subprocess import call
inputpath = 'originals'
outputpath = 'segmentations'
for filename in os.listdir(inputpath):
current = os.path.join(inputpath, filename)
if os.path.isfile(current):
call([segment_exe, current, result])
|
<commit_before><commit_msg>Add skeleton of python script to process multiple images.<commit_after>
|
import os
from subprocess import call
inputpath = 'originals'
outputpath = 'segmentations'
for filename in os.listdir(inputpath):
current = os.path.join(inputpath, filename)
if os.path.isfile(current):
call([segment_exe, current, result])
|
Add skeleton of python script to process multiple images.
import os
from subprocess import call
inputpath = 'originals'
outputpath = 'segmentations'
for filename in os.listdir(inputpath):
current = os.path.join(inputpath, filename)
if os.path.isfile(current):
call([segment_exe, current, result])
|
<commit_before><commit_msg>Add skeleton of python script to process multiple images.<commit_after>import os
from subprocess import call
inputpath = 'originals'
outputpath = 'segmentations'
for filename in os.listdir(inputpath):
current = os.path.join(inputpath, filename)
if os.path.isfile(current):
call([segment_exe, current, result])
|
|
a06c38b486264477e2dd741badd4a2936e80299f
|
tests/io/open_append.py
|
tests/io/open_append.py
|
import sys
try:
import _os as os
except ImportError:
import os
if not hasattr(os, "unlink"):
print("SKIP")
sys.exit()
try:
os.unlink("testfile")
except OSError:
pass
# Should create a file
f = open("testfile", "a")
f.write("foo")
f.close()
f = open("testfile")
print(f.read())
f.close()
f = open("testfile", "a")
f.write("bar")
f.close()
f = open("testfile")
print(f.read())
f.close()
|
Add testcase for open(..., "a").
|
tests: Add testcase for open(..., "a").
|
Python
|
mit
|
orionrobots/micropython,ChuckM/micropython,adafruit/circuitpython,martinribelotta/micropython,Peetz0r/micropython-esp32,mpalomer/micropython,Peetz0r/micropython-esp32,tuc-osg/micropython,SHA2017-badge/micropython-esp32,ceramos/micropython,deshipu/micropython,TDAbboud/micropython,puuu/micropython,hiway/micropython,firstval/micropython,dxxb/micropython,misterdanb/micropython,neilh10/micropython,EcmaXp/micropython,blazewicz/micropython,EcmaXp/micropython,orionrobots/micropython,EcmaXp/micropython,mianos/micropython,misterdanb/micropython,noahchense/micropython,ryannathans/micropython,mianos/micropython,PappaPeppar/micropython,turbinenreiter/micropython,pramasoul/micropython,supergis/micropython,adafruit/micropython,xhat/micropython,drrk/micropython,dhylands/micropython,tobbad/micropython,ChuckM/micropython,drrk/micropython,ernesto-g/micropython,kostyll/micropython,selste/micropython,Peetz0r/micropython-esp32,adafruit/circuitpython,ganshun666/micropython,MrSurly/micropython-esp32,ernesto-g/micropython,vriera/micropython,suda/micropython,supergis/micropython,infinnovation/micropython,cloudformdesign/micropython,firstval/micropython,adafruit/circuitpython,suda/micropython,jmarcelino/pycom-micropython,MrSurly/micropython-esp32,danicampora/micropython,stonegithubs/micropython,dxxb/micropython,heisewangluo/micropython,blmorris/micropython,ericsnowcurrently/micropython,vriera/micropython,redbear/micropython,HenrikSolver/micropython,trezor/micropython,henriknelson/micropython,puuu/micropython,MrSurly/micropython,firstval/micropython,ernesto-g/micropython,TDAbboud/micropython,mhoffma/micropython,SHA2017-badge/micropython-esp32,dmazzella/micropython,supergis/micropython,tralamazza/micropython,pozetroninc/micropython,blmorris/micropython,matthewelse/micropython,noahchense/micropython,utopiaprince/micropython,noahchense/micropython,EcmaXp/micropython,bvernoux/micropython,adamkh/micropython,galenhz/micropython,cloudformdesign/micropython,danicampora/micropython,selste/micropython,tuc-osg/micropython,jlillest/micropython,redbear/micropython,lbattraw/micropython,TDAbboud/micropython,pozetroninc/micropython,cloudformdesign/micropython,ruffy91/micropython,swegener/micropython,ahotam/micropython,galenhz/micropython,adafruit/circuitpython,MrSurly/micropython,tuc-osg/micropython,swegener/micropython,praemdonck/micropython,Peetz0r/micropython-esp32,matthewelse/micropython,AriZuu/micropython,tdautc19841202/micropython,ericsnowcurrently/micropython,pfalcon/micropython,cnoviello/micropython,matthewelse/micropython,hiway/micropython,chrisdearman/micropython,cwyark/micropython,chrisdearman/micropython,ruffy91/micropython,utopiaprince/micropython,mhoffma/micropython,EcmaXp/micropython,praemdonck/micropython,cwyark/micropython,vriera/micropython,lowRISC/micropython,noahwilliamsson/micropython,supergis/micropython,jmarcelino/pycom-micropython,micropython/micropython-esp32,henriknelson/micropython,AriZuu/micropython,ChuckM/micropython,HenrikSolver/micropython,omtinez/micropython,torwag/micropython,lowRISC/micropython,alex-march/micropython,ernesto-g/micropython,cwyark/micropython,vitiral/micropython,utopiaprince/micropython,misterdanb/micropython,misterdanb/micropython,suda/micropython,heisewangluo/micropython,vitiral/micropython,kerneltask/micropython,dinau/micropython,pfalcon/micropython,ceramos/micropython,oopy/micropython,adafruit/micropython,lowRISC/micropython,deshipu/micropython,skybird6672/micropython,matthewelse/micropython,feilongfl/micropython,HenrikSolver/micropython,pramasoul/micropython,xuxiaoxin/micropython,
cnoviello/micropython,pozetroninc/micropython,lbattraw/micropython,tdautc19841202/micropython,tdautc19841202/micropython,AriZuu/micropython,xuxiaoxin/micropython,PappaPeppar/micropython,chrisdearman/micropython,tobbad/micropython,supergis/micropython,MrSurly/micropython,rubencabrera/micropython,orionrobots/micropython,ahotam/micropython,tralamazza/micropython,feilongfl/micropython,alex-robbins/micropython,ruffy91/micropython,micropython/micropython-esp32,AriZuu/micropython,adafruit/micropython,galenhz/micropython,omtinez/micropython,toolmacher/micropython,noahwilliamsson/micropython,danicampora/micropython,rubencabrera/micropython,vriera/micropython,blmorris/micropython,orionrobots/micropython,kostyll/micropython,stonegithubs/micropython,puuu/micropython,turbinenreiter/micropython,emfcamp/micropython,kerneltask/micropython,ganshun666/micropython,skybird6672/micropython,SHA2017-badge/micropython-esp32,hiway/micropython,cloudformdesign/micropython,tobbad/micropython,ceramos/micropython,vitiral/micropython,PappaPeppar/micropython,deshipu/micropython,kerneltask/micropython,tuc-osg/micropython,pozetroninc/micropython,suda/micropython,ganshun666/micropython,toolmacher/micropython,mianos/micropython,hiway/micropython,skybird6672/micropython,pramasoul/micropython,dxxb/micropython,Timmenem/micropython,galenhz/micropython,mpalomer/micropython,neilh10/micropython,henriknelson/micropython,MrSurly/micropython-esp32,tobbad/micropython,oopy/micropython,turbinenreiter/micropython,oopy/micropython,hosaka/micropython,Timmenem/micropython,selste/micropython,hosaka/micropython,swegener/micropython,Timmenem/micropython,lbattraw/micropython,neilh10/micropython,Timmenem/micropython,vitiral/micropython,matthewelse/micropython,xyb/micropython,emfcamp/micropython,redbear/micropython,bvernoux/micropython,ruffy91/micropython,PappaPeppar/micropython,blazewicz/micropython,mgyenik/micropython,pozetroninc/micropython,ericsnowcurrently/micropython,alex-march/micropython,tobbad/micropython,micropython/micropython-esp32,cwyark/micropython,noahchense/micropython,drrk/micropython,adamkh/micropython,omtinez/micropython,infinnovation/micropython,skybird6672/micropython,infinnovation/micropython,noahwilliamsson/micropython,ahotam/micropython,bvernoux/micropython,dxxb/micropython,lbattraw/micropython,ceramos/micropython,praemdonck/micropython,trezor/micropython,infinnovation/micropython,alex-robbins/micropython,pramasoul/micropython,vriera/micropython,blmorris/micropython,toolmacher/micropython,feilongfl/micropython,ryannathans/micropython,feilongfl/micropython,mpalomer/micropython,ahotam/micropython,ChuckM/micropython,tdautc19841202/micropython,dhylands/micropython,xuxiaoxin/micropython,mpalomer/micropython,henriknelson/micropython,adafruit/micropython,bvernoux/micropython,adafruit/circuitpython,utopiaprince/micropython,ganshun666/micropython,Peetz0r/micropython-esp32,drrk/micropython,torwag/micropython,xyb/micropython,torwag/micropython,micropython/micropython-esp32,xuxiaoxin/micropython,TDAbboud/micropython,xhat/micropython,noahchense/micropython,stonegithubs/micropython,adafruit/circuitpython,hosaka/micropython,ceramos/micropython,trezor/micropython,tdautc19841202/micropython,drrk/micropython,lbattraw/micropython,dmazzella/micropython,xyb/micropython,jlillest/micropython,ChuckM/micropython,rubencabrera/micropython,skybird6672/micropython,kerneltask/micropython,pfalcon/micropython,adamkh/micropython,tuc-osg/micropython,rubencabrera/micropython,infinnovation/micropython,ryannathans/micropython,micropython/micropython-esp32,mgyen
ik/micropython,puuu/micropython,ahotam/micropython,praemdonck/micropython,pramasoul/micropython,emfcamp/micropython,jlillest/micropython,alex-robbins/micropython,selste/micropython,martinribelotta/micropython,slzatz/micropython,deshipu/micropython,jmarcelino/pycom-micropython,SHA2017-badge/micropython-esp32,torwag/micropython,kostyll/micropython,emfcamp/micropython,chrisdearman/micropython,pfalcon/micropython,blmorris/micropython,martinribelotta/micropython,blazewicz/micropython,SHA2017-badge/micropython-esp32,xyb/micropython,toolmacher/micropython,emfcamp/micropython,xuxiaoxin/micropython,swegener/micropython,firstval/micropython,vitiral/micropython,swegener/micropython,AriZuu/micropython,stonegithubs/micropython,rubencabrera/micropython,lowRISC/micropython,praemdonck/micropython,slzatz/micropython,turbinenreiter/micropython,slzatz/micropython,trezor/micropython,ryannathans/micropython,danicampora/micropython,omtinez/micropython,lowRISC/micropython,MrSurly/micropython,MrSurly/micropython-esp32,henriknelson/micropython,dinau/micropython,HenrikSolver/micropython,dinau/micropython,hiway/micropython,matthewelse/micropython,suda/micropython,adamkh/micropython,dinau/micropython,xhat/micropython,dhylands/micropython,xhat/micropython,jlillest/micropython,stonegithubs/micropython,danicampora/micropython,tralamazza/micropython,kerneltask/micropython,xyb/micropython,utopiaprince/micropython,firstval/micropython,deshipu/micropython,mpalomer/micropython,blazewicz/micropython,mianos/micropython,toolmacher/micropython,mianos/micropython,ericsnowcurrently/micropython,neilh10/micropython,misterdanb/micropython,hosaka/micropython,jmarcelino/pycom-micropython,alex-march/micropython,pfalcon/micropython,MrSurly/micropython,selste/micropython,blazewicz/micropython,chrisdearman/micropython,trezor/micropython,ganshun666/micropython,jlillest/micropython,adamkh/micropython,ernesto-g/micropython,mgyenik/micropython,dmazzella/micropython,alex-march/micropython,xhat/micropython,dhylands/micropython,cloudformdesign/micropython,slzatz/micropython,alex-march/micropython,torwag/micropython,HenrikSolver/micropython,noahwilliamsson/micropython,tralamazza/micropython,oopy/micropython,martinribelotta/micropython,puuu/micropython,noahwilliamsson/micropython,Timmenem/micropython,mhoffma/micropython,ryannathans/micropython,dinau/micropython,ericsnowcurrently/micropython,martinribelotta/micropython,feilongfl/micropython,galenhz/micropython,redbear/micropython,jmarcelino/pycom-micropython,slzatz/micropython,redbear/micropython,MrSurly/micropython-esp32,mhoffma/micropython,bvernoux/micropython,alex-robbins/micropython,heisewangluo/micropython,neilh10/micropython,cnoviello/micropython,cnoviello/micropython,dhylands/micropython,heisewangluo/micropython,turbinenreiter/micropython,dxxb/micropython,omtinez/micropython,dmazzella/micropython,cwyark/micropython,mgyenik/micropython,mgyenik/micropython,PappaPeppar/micropython,kostyll/micropython,adafruit/micropython,kostyll/micropython,oopy/micropython,TDAbboud/micropython,cnoviello/micropython,heisewangluo/micropython,orionrobots/micropython,alex-robbins/micropython,ruffy91/micropython,hosaka/micropython,mhoffma/micropython
|
tests: Add testcase for open(..., "a").
|
import sys
try:
import _os as os
except ImportError:
import os
if not hasattr(os, "unlink"):
print("SKIP")
sys.exit()
try:
os.unlink("testfile")
except OSError:
pass
# Should create a file
f = open("testfile", "a")
f.write("foo")
f.close()
f = open("testfile")
print(f.read())
f.close()
f = open("testfile", "a")
f.write("bar")
f.close()
f = open("testfile")
print(f.read())
f.close()
|
<commit_before><commit_msg>tests: Add testcase for open(..., "a").<commit_after>
|
import sys
try:
import _os as os
except ImportError:
import os
if not hasattr(os, "unlink"):
print("SKIP")
sys.exit()
try:
os.unlink("testfile")
except OSError:
pass
# Should create a file
f = open("testfile", "a")
f.write("foo")
f.close()
f = open("testfile")
print(f.read())
f.close()
f = open("testfile", "a")
f.write("bar")
f.close()
f = open("testfile")
print(f.read())
f.close()
|
tests: Add testcase for open(..., "a").
import sys
try:
import _os as os
except ImportError:
import os
if not hasattr(os, "unlink"):
print("SKIP")
sys.exit()
try:
os.unlink("testfile")
except OSError:
pass
# Should create a file
f = open("testfile", "a")
f.write("foo")
f.close()
f = open("testfile")
print(f.read())
f.close()
f = open("testfile", "a")
f.write("bar")
f.close()
f = open("testfile")
print(f.read())
f.close()
|
<commit_before><commit_msg>tests: Add testcase for open(..., "a").<commit_after>import sys
try:
import _os as os
except ImportError:
import os
if not hasattr(os, "unlink"):
print("SKIP")
sys.exit()
try:
os.unlink("testfile")
except OSError:
pass
# Should create a file
f = open("testfile", "a")
f.write("foo")
f.close()
f = open("testfile")
print(f.read())
f.close()
f = open("testfile", "a")
f.write("bar")
f.close()
f = open("testfile")
print(f.read())
f.close()
|
|
0336f446393618ba6ab30f4d6ee8f8295e97a87e
|
csunplugged/resources/migrations/0010_auto_20171121_2304.py
|
csunplugged/resources/migrations/0010_auto_20171121_2304.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-11-21 23:04
from __future__ import unicode_literals
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('resources', '0009_auto_20171020_1005'),
]
operations = [
migrations.RemoveField(
model_name='resource',
name='webpage_template',
),
migrations.AddField(
model_name='resource',
name='content',
field=models.TextField(default=''),
),
migrations.AddField(
model_name='resource',
name='content_en',
field=models.TextField(default='', null=True),
),
migrations.AddField(
model_name='resource',
name='languages',
field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=5), default=[], size=None),
),
migrations.AddField(
model_name='resource',
name='name_en',
field=models.CharField(default='', max_length=200, null=True),
),
migrations.AlterField(
model_name='resource',
name='name',
field=models.CharField(default='', max_length=200),
),
]
|
Update Resource migrations to reflect model changes
|
Update Resource migrations to reflect model changes
|
Python
|
mit
|
uccser/cs-unplugged,uccser/cs-unplugged,uccser/cs-unplugged,uccser/cs-unplugged
|
Update Resource migrations to reflect model changes
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-11-21 23:04
from __future__ import unicode_literals
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('resources', '0009_auto_20171020_1005'),
]
operations = [
migrations.RemoveField(
model_name='resource',
name='webpage_template',
),
migrations.AddField(
model_name='resource',
name='content',
field=models.TextField(default=''),
),
migrations.AddField(
model_name='resource',
name='content_en',
field=models.TextField(default='', null=True),
),
migrations.AddField(
model_name='resource',
name='languages',
field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=5), default=[], size=None),
),
migrations.AddField(
model_name='resource',
name='name_en',
field=models.CharField(default='', max_length=200, null=True),
),
migrations.AlterField(
model_name='resource',
name='name',
field=models.CharField(default='', max_length=200),
),
]
|
<commit_before><commit_msg>Update Resource migrations to reflect model changes<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-11-21 23:04
from __future__ import unicode_literals
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('resources', '0009_auto_20171020_1005'),
]
operations = [
migrations.RemoveField(
model_name='resource',
name='webpage_template',
),
migrations.AddField(
model_name='resource',
name='content',
field=models.TextField(default=''),
),
migrations.AddField(
model_name='resource',
name='content_en',
field=models.TextField(default='', null=True),
),
migrations.AddField(
model_name='resource',
name='languages',
field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=5), default=[], size=None),
),
migrations.AddField(
model_name='resource',
name='name_en',
field=models.CharField(default='', max_length=200, null=True),
),
migrations.AlterField(
model_name='resource',
name='name',
field=models.CharField(default='', max_length=200),
),
]
|
Update Resource migrations to reflect model changes
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-11-21 23:04
from __future__ import unicode_literals
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('resources', '0009_auto_20171020_1005'),
]
operations = [
migrations.RemoveField(
model_name='resource',
name='webpage_template',
),
migrations.AddField(
model_name='resource',
name='content',
field=models.TextField(default=''),
),
migrations.AddField(
model_name='resource',
name='content_en',
field=models.TextField(default='', null=True),
),
migrations.AddField(
model_name='resource',
name='languages',
field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=5), default=[], size=None),
),
migrations.AddField(
model_name='resource',
name='name_en',
field=models.CharField(default='', max_length=200, null=True),
),
migrations.AlterField(
model_name='resource',
name='name',
field=models.CharField(default='', max_length=200),
),
]
|
<commit_before><commit_msg>Update Resource migrations to reflect model changes<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-11-21 23:04
from __future__ import unicode_literals
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('resources', '0009_auto_20171020_1005'),
]
operations = [
migrations.RemoveField(
model_name='resource',
name='webpage_template',
),
migrations.AddField(
model_name='resource',
name='content',
field=models.TextField(default=''),
),
migrations.AddField(
model_name='resource',
name='content_en',
field=models.TextField(default='', null=True),
),
migrations.AddField(
model_name='resource',
name='languages',
field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=5), default=[], size=None),
),
migrations.AddField(
model_name='resource',
name='name_en',
field=models.CharField(default='', max_length=200, null=True),
),
migrations.AlterField(
model_name='resource',
name='name',
field=models.CharField(default='', max_length=200),
),
]
|
|
962ae6e810964a00f825d6f29ec9caa1a2996d3c
|
tests/test_bot_support.py
|
tests/test_bot_support.py
|
import pytest
from .test_bot import TestBot
class TestBotSupport(TestBot):
@pytest.mark.parametrize('url,result', [
('https://google.com', ['https://google.com']),
('google.com', ['google.com']),
('google.com/search?q=instabot', ['google.com/search?q=instabot']),
('https://google.com/search?q=instabot', ['https://google.com/search?q=instabot']),
('мвд.рф', ['мвд.рф']),
('https://мвд.рф', ['https://мвд.рф']),
('http://мвд.рф/news/', ['http://мвд.рф/news/']),
('hello, google.com/search?q=test and bing.com', ['google.com/search?q=test', 'bing.com']),
])
def test_extract_urls(self, url, result):
assert self.BOT.extract_urls(url) == result
|
Add test on extract urls method
|
Add test on extract urls method
|
Python
|
apache-2.0
|
ohld/instabot,instagrambot/instabot,instagrambot/instabot
|
Add test on extract urls method
|
import pytest
from .test_bot import TestBot
class TestBotSupport(TestBot):
@pytest.mark.parametrize('url,result', [
('https://google.com', ['https://google.com']),
('google.com', ['google.com']),
('google.com/search?q=instabot', ['google.com/search?q=instabot']),
('https://google.com/search?q=instabot', ['https://google.com/search?q=instabot']),
('мвд.рф', ['мвд.рф']),
('https://мвд.рф', ['https://мвд.рф']),
('http://мвд.рф/news/', ['http://мвд.рф/news/']),
('hello, google.com/search?q=test and bing.com', ['google.com/search?q=test', 'bing.com']),
])
def test_extract_urls(self, url, result):
assert self.BOT.extract_urls(url) == result
|
<commit_before><commit_msg>Add test on extract urls method<commit_after>
|
import pytest
from .test_bot import TestBot
class TestBotSupport(TestBot):
@pytest.mark.parametrize('url,result', [
('https://google.com', ['https://google.com']),
('google.com', ['google.com']),
('google.com/search?q=instabot', ['google.com/search?q=instabot']),
('https://google.com/search?q=instabot', ['https://google.com/search?q=instabot']),
('мвд.рф', ['мвд.рф']),
('https://мвд.рф', ['https://мвд.рф']),
('http://мвд.рф/news/', ['http://мвд.рф/news/']),
('hello, google.com/search?q=test and bing.com', ['google.com/search?q=test', 'bing.com']),
])
def test_extract_urls(self, url, result):
assert self.BOT.extract_urls(url) == result
|
Add test on extract urls method
import pytest
from .test_bot import TestBot
class TestBotSupport(TestBot):
@pytest.mark.parametrize('url,result', [
('https://google.com', ['https://google.com']),
('google.com', ['google.com']),
('google.com/search?q=instabot', ['google.com/search?q=instabot']),
('https://google.com/search?q=instabot', ['https://google.com/search?q=instabot']),
('мвд.рф', ['мвд.рф']),
('https://мвд.рф', ['https://мвд.рф']),
('http://мвд.рф/news/', ['http://мвд.рф/news/']),
('hello, google.com/search?q=test and bing.com', ['google.com/search?q=test', 'bing.com']),
])
def test_extract_urls(self, url, result):
assert self.BOT.extract_urls(url) == result
|
<commit_before><commit_msg>Add test on extract urls method<commit_after>import pytest
from .test_bot import TestBot
class TestBotSupport(TestBot):
@pytest.mark.parametrize('url,result', [
('https://google.com', ['https://google.com']),
('google.com', ['google.com']),
('google.com/search?q=instabot', ['google.com/search?q=instabot']),
('https://google.com/search?q=instabot', ['https://google.com/search?q=instabot']),
('мвд.рф', ['мвд.рф']),
('https://мвд.рф', ['https://мвд.рф']),
('http://мвд.рф/news/', ['http://мвд.рф/news/']),
('hello, google.com/search?q=test and bing.com', ['google.com/search?q=test', 'bing.com']),
])
def test_extract_urls(self, url, result):
assert self.BOT.extract_urls(url) == result
|
|
1b7341748cc98fcb0505cf03081b92f955279d79
|
tests/test_mako_engine.py
|
tests/test_mako_engine.py
|
#!/usr/bin/env python
from __future__ import print_function
import unittest
import engines
HANDLE = 'mako'
class TestStringTemplate(unittest.TestCase):
def setUp(self):
try:
import mako
except ImportError:
self.skipTest("mako module not available")
def test_valid_engine(self):
self.assertIn(HANDLE, engines.engines)
engine = engines.engines[HANDLE]
assert issubclass(engine, engines.Engine)
def test_escape(self):
engine = engines.engines[HANDLE]
template = engine(
'<%text>Heute gibt es\n'
'${essen} mit\n'
'${beilage}.\n</%text>',
)
result = template.apply({
'random': 'value',
'essen': 'Szegediner Gulasch',
'beilage': 'Kartoffeln',
})
self.assertMultiLineEqual(result,
'Heute gibt es\n'
'${essen} mit\n'
'${beilage}.\n',
)
def test_conditional(self):
engine = engines.engines[HANDLE]
template = engine(
'% if value < 10:\n'
'less than ten\n'
'% else:\n'
'greater or equal\n'
'% endif\n',
)
result = template.apply({
'value': 4,
})
self.assertMultiLineEqual(result,
'less than ten\n',
)
def test_curly_identifier(self):
engine = engines.engines[HANDLE]
template = engine(
'Heute gibt es\n'
'${essen} mit\n'
'${beilage}.\n',
)
result = template.apply({
'random': 'value',
'essen': 'Szegediner Gulasch',
'beilage': 'Kartoffeln',
})
self.assertMultiLineEqual(result,
'Heute gibt es\n'
'Szegediner Gulasch mit\n'
'Kartoffeln.\n'
)
def test_strict_template_missing_identifier(self):
engine = engines.engines[HANDLE]
template = engine(
'Heute gibt es\n'
'${essen} mit\n'
'${beilage}.\n',
)
self.assertRaises(Exception, template.apply, ({
'random': 'value',
}))
def test_tolerant_template_missing_identifier(self):
engine = engines.engines[HANDLE]
template = engine(
'Heute gibt es\n'
'${essen} mit\n'
'${beilage}.\n',
tolerant=True,
)
result = template.apply({
'random': 'value',
})
self.assertMultiLineEqual(result,
'Heute gibt es\n'
'<UNDEFINED> mit\n'
'<UNDEFINED>.\n'
)
if __name__ == '__main__':
unittest.main()
|
Add tests to mako engine.
|
Add tests to mako engine.
|
Python
|
mit
|
blubberdiblub/eztemplate
|
Add tests to mako engine.
|
#!/usr/bin/env python
from __future__ import print_function
import unittest
import engines
HANDLE = 'mako'
class TestStringTemplate(unittest.TestCase):
def setUp(self):
try:
import mako
except ImportError:
self.skipTest("mako module not available")
def test_valid_engine(self):
self.assertIn(HANDLE, engines.engines)
engine = engines.engines[HANDLE]
assert issubclass(engine, engines.Engine)
def test_escape(self):
engine = engines.engines[HANDLE]
template = engine(
'<%text>Heute gibt es\n'
'${essen} mit\n'
'${beilage}.\n</%text>',
)
result = template.apply({
'random': 'value',
'essen': 'Szegediner Gulasch',
'beilage': 'Kartoffeln',
})
self.assertMultiLineEqual(result,
'Heute gibt es\n'
'${essen} mit\n'
'${beilage}.\n',
)
def test_conditional(self):
engine = engines.engines[HANDLE]
template = engine(
'% if value < 10:\n'
'less than ten\n'
'% else:\n'
'greater or equal\n'
'% endif\n',
)
result = template.apply({
'value': 4,
})
self.assertMultiLineEqual(result,
'less than ten\n',
)
def test_curly_identifier(self):
engine = engines.engines[HANDLE]
template = engine(
'Heute gibt es\n'
'${essen} mit\n'
'${beilage}.\n',
)
result = template.apply({
'random': 'value',
'essen': 'Szegediner Gulasch',
'beilage': 'Kartoffeln',
})
self.assertMultiLineEqual(result,
'Heute gibt es\n'
'Szegediner Gulasch mit\n'
'Kartoffeln.\n'
)
def test_strict_template_missing_identifier(self):
engine = engines.engines[HANDLE]
template = engine(
'Heute gibt es\n'
'${essen} mit\n'
'${beilage}.\n',
)
self.assertRaises(Exception, template.apply, ({
'random': 'value',
}))
def test_tolerant_template_missing_identifier(self):
engine = engines.engines[HANDLE]
template = engine(
'Heute gibt es\n'
'${essen} mit\n'
'${beilage}.\n',
tolerant=True,
)
result = template.apply({
'random': 'value',
})
self.assertMultiLineEqual(result,
'Heute gibt es\n'
'<UNDEFINED> mit\n'
'<UNDEFINED>.\n'
)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add tests to mako engine.<commit_after>
|
#!/usr/bin/env python
from __future__ import print_function
import unittest
import engines
HANDLE = 'mako'
class TestStringTemplate(unittest.TestCase):
def setUp(self):
try:
import mako
except ImportError:
self.skipTest("mako module not available")
def test_valid_engine(self):
self.assertIn(HANDLE, engines.engines)
engine = engines.engines[HANDLE]
assert issubclass(engine, engines.Engine)
def test_escape(self):
engine = engines.engines[HANDLE]
template = engine(
'<%text>Heute gibt es\n'
'${essen} mit\n'
'${beilage}.\n</%text>',
)
result = template.apply({
'random': 'value',
'essen': 'Szegediner Gulasch',
'beilage': 'Kartoffeln',
})
self.assertMultiLineEqual(result,
'Heute gibt es\n'
'${essen} mit\n'
'${beilage}.\n',
)
def test_conditional(self):
engine = engines.engines[HANDLE]
template = engine(
'% if value < 10:\n'
'less than ten\n'
'% else:\n'
'greater or equal\n'
'% endif\n',
)
result = template.apply({
'value': 4,
})
self.assertMultiLineEqual(result,
'less than ten\n',
)
def test_curly_identifier(self):
engine = engines.engines[HANDLE]
template = engine(
'Heute gibt es\n'
'${essen} mit\n'
'${beilage}.\n',
)
result = template.apply({
'random': 'value',
'essen': 'Szegediner Gulasch',
'beilage': 'Kartoffeln',
})
self.assertMultiLineEqual(result,
'Heute gibt es\n'
'Szegediner Gulasch mit\n'
'Kartoffeln.\n'
)
def test_strict_template_missing_identifier(self):
engine = engines.engines[HANDLE]
template = engine(
'Heute gibt es\n'
'${essen} mit\n'
'${beilage}.\n',
)
self.assertRaises(Exception, template.apply, ({
'random': 'value',
}))
def test_tolerant_template_missing_identifier(self):
engine = engines.engines[HANDLE]
template = engine(
'Heute gibt es\n'
'${essen} mit\n'
'${beilage}.\n',
tolerant=True,
)
result = template.apply({
'random': 'value',
})
self.assertMultiLineEqual(result,
'Heute gibt es\n'
'<UNDEFINED> mit\n'
'<UNDEFINED>.\n'
)
if __name__ == '__main__':
unittest.main()
|
Add tests to mako engine.
#!/usr/bin/env python
from __future__ import print_function
import unittest
import engines
HANDLE = 'mako'
class TestStringTemplate(unittest.TestCase):
def setUp(self):
try:
import mako
except ImportError:
self.skipTest("mako module not available")
def test_valid_engine(self):
self.assertIn(HANDLE, engines.engines)
engine = engines.engines[HANDLE]
assert issubclass(engine, engines.Engine)
def test_escape(self):
engine = engines.engines[HANDLE]
template = engine(
'<%text>Heute gibt es\n'
'${essen} mit\n'
'${beilage}.\n</%text>',
)
result = template.apply({
'random': 'value',
'essen': 'Szegediner Gulasch',
'beilage': 'Kartoffeln',
})
self.assertMultiLineEqual(result,
'Heute gibt es\n'
'${essen} mit\n'
'${beilage}.\n',
)
def test_conditional(self):
engine = engines.engines[HANDLE]
template = engine(
'% if value < 10:\n'
'less than ten\n'
'% else:\n'
'greater or equal\n'
'% endif\n',
)
result = template.apply({
'value': 4,
})
self.assertMultiLineEqual(result,
'less than ten\n',
)
def test_curly_identifier(self):
engine = engines.engines[HANDLE]
template = engine(
'Heute gibt es\n'
'${essen} mit\n'
'${beilage}.\n',
)
result = template.apply({
'random': 'value',
'essen': 'Szegediner Gulasch',
'beilage': 'Kartoffeln',
})
self.assertMultiLineEqual(result,
'Heute gibt es\n'
'Szegediner Gulasch mit\n'
'Kartoffeln.\n'
)
def test_strict_template_missing_identifier(self):
engine = engines.engines[HANDLE]
template = engine(
'Heute gibt es\n'
'${essen} mit\n'
'${beilage}.\n',
)
self.assertRaises(Exception, template.apply, ({
'random': 'value',
}))
def test_tolerant_template_missing_identifier(self):
engine = engines.engines[HANDLE]
template = engine(
'Heute gibt es\n'
'${essen} mit\n'
'${beilage}.\n',
tolerant=True,
)
result = template.apply({
'random': 'value',
})
self.assertMultiLineEqual(result,
'Heute gibt es\n'
'<UNDEFINED> mit\n'
'<UNDEFINED>.\n'
)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add tests to mako engine.<commit_after>#!/usr/bin/env python
from __future__ import print_function
import unittest
import engines
HANDLE = 'mako'
class TestStringTemplate(unittest.TestCase):
def setUp(self):
try:
import mako
except ImportError:
self.skipTest("mako module not available")
def test_valid_engine(self):
self.assertIn(HANDLE, engines.engines)
engine = engines.engines[HANDLE]
assert issubclass(engine, engines.Engine)
def test_escape(self):
engine = engines.engines[HANDLE]
template = engine(
'<%text>Heute gibt es\n'
'${essen} mit\n'
'${beilage}.\n</%text>',
)
result = template.apply({
'random': 'value',
'essen': 'Szegediner Gulasch',
'beilage': 'Kartoffeln',
})
self.assertMultiLineEqual(result,
'Heute gibt es\n'
'${essen} mit\n'
'${beilage}.\n',
)
def test_conditional(self):
engine = engines.engines[HANDLE]
template = engine(
'% if value < 10:\n'
'less than ten\n'
'% else:\n'
'greater or equal\n'
'% endif\n',
)
result = template.apply({
'value': 4,
})
self.assertMultiLineEqual(result,
'less than ten\n',
)
def test_curly_identifier(self):
engine = engines.engines[HANDLE]
template = engine(
'Heute gibt es\n'
'${essen} mit\n'
'${beilage}.\n',
)
result = template.apply({
'random': 'value',
'essen': 'Szegediner Gulasch',
'beilage': 'Kartoffeln',
})
self.assertMultiLineEqual(result,
'Heute gibt es\n'
'Szegediner Gulasch mit\n'
'Kartoffeln.\n'
)
def test_strict_template_missing_identifier(self):
engine = engines.engines[HANDLE]
template = engine(
'Heute gibt es\n'
'${essen} mit\n'
'${beilage}.\n',
)
self.assertRaises(Exception, template.apply, ({
'random': 'value',
}))
def test_tolerant_template_missing_identifier(self):
engine = engines.engines[HANDLE]
template = engine(
'Heute gibt es\n'
'${essen} mit\n'
'${beilage}.\n',
tolerant=True,
)
result = template.apply({
'random': 'value',
})
self.assertMultiLineEqual(result,
'Heute gibt es\n'
'<UNDEFINED> mit\n'
'<UNDEFINED>.\n'
)
if __name__ == '__main__':
unittest.main()
|
|
c349f9a1e199b3909f7f071f25d7c3d8e6d1347d
|
tests/unit/test_public.py
|
tests/unit/test_public.py
|
# Import libnacl libs
import libnacl.public
# Import python libs
import unittest
class TestPublic(unittest.TestCase):
'''
'''
def test_secretkey(self):
'''
'''
msg = 'You\'ve got two empty halves of coconut and you\'re bangin\' \'em together.'
bob = libnacl.public.SecretKey()
alice = libnacl.public.SecretKey()
bob_box = libnacl.public.Box(bob.sk, alice.pk)
alice_box = libnacl.public.Box(alice.sk, bob.pk)
bob_ctxt = bob_box.encrypt(msg)
self.assertNotEqual(msg, bob_ctxt)
bclear = alice_box.decrypt(bob_ctxt)
self.assertEqual(msg, bclear)
alice_ctxt = alice_box.encrypt(msg)
self.assertNotEqual(msg, alice_ctxt)
aclear = alice_box.decrypt(alice_ctxt)
self.assertEqual(msg, aclear)
self.assertNotEqual(bob_ctxt, alice_ctxt)
def test_publickey(self):
'''
'''
msg = 'You\'ve got two empty halves of coconut and you\'re bangin\' \'em together.'
bob = libnacl.public.SecretKey()
alice = libnacl.public.SecretKey()
alice_pk = libnacl.public.PublicKey(alice.pk)
bob_box = libnacl.public.Box(bob.sk, alice_pk)
alice_box = libnacl.public.Box(alice.sk, bob.pk)
bob_ctxt = bob_box.encrypt(msg)
self.assertNotEqual(msg, bob_ctxt)
bclear = alice_box.decrypt(bob_ctxt)
self.assertEqual(msg, bclear)
|
Add high level tests for public key encryption
|
Add high level tests for public key encryption
|
Python
|
apache-2.0
|
RaetProtocol/libnacl,coinkite/libnacl,saltstack/libnacl,johnttan/libnacl,cachedout/libnacl,mindw/libnacl
|
Add high level tests for public key encryption
|
# Import libnacl libs
import libnacl.public
# Import python libs
import unittest
class TestPublic(unittest.TestCase):
'''
'''
def test_secretkey(self):
'''
'''
msg = 'You\'ve got two empty halves of coconut and you\'re bangin\' \'em together.'
bob = libnacl.public.SecretKey()
alice = libnacl.public.SecretKey()
bob_box = libnacl.public.Box(bob.sk, alice.pk)
alice_box = libnacl.public.Box(alice.sk, bob.pk)
bob_ctxt = bob_box.encrypt(msg)
self.assertNotEqual(msg, bob_ctxt)
bclear = alice_box.decrypt(bob_ctxt)
self.assertEqual(msg, bclear)
alice_ctxt = alice_box.encrypt(msg)
self.assertNotEqual(msg, alice_ctxt)
aclear = alice_box.decrypt(alice_ctxt)
self.assertEqual(msg, aclear)
self.assertNotEqual(bob_ctxt, alice_ctxt)
def test_publickey(self):
'''
'''
msg = 'You\'ve got two empty halves of coconut and you\'re bangin\' \'em together.'
bob = libnacl.public.SecretKey()
alice = libnacl.public.SecretKey()
alice_pk = libnacl.public.PublicKey(alice.pk)
bob_box = libnacl.public.Box(bob.sk, alice_pk)
alice_box = libnacl.public.Box(alice.sk, bob.pk)
bob_ctxt = bob_box.encrypt(msg)
self.assertNotEqual(msg, bob_ctxt)
bclear = alice_box.decrypt(bob_ctxt)
self.assertEqual(msg, bclear)
|
<commit_before><commit_msg>Add high level tests for public key encryption<commit_after>
|
# Import libnacl libs
import libnacl.public
# Import python libs
import unittest
class TestPublic(unittest.TestCase):
'''
'''
def test_secretkey(self):
'''
'''
msg = 'You\'ve got two empty halves of coconut and you\'re bangin\' \'em together.'
bob = libnacl.public.SecretKey()
alice = libnacl.public.SecretKey()
bob_box = libnacl.public.Box(bob.sk, alice.pk)
alice_box = libnacl.public.Box(alice.sk, bob.pk)
bob_ctxt = bob_box.encrypt(msg)
self.assertNotEqual(msg, bob_ctxt)
bclear = alice_box.decrypt(bob_ctxt)
self.assertEqual(msg, bclear)
alice_ctxt = alice_box.encrypt(msg)
self.assertNotEqual(msg, alice_ctxt)
aclear = alice_box.decrypt(alice_ctxt)
self.assertEqual(msg, aclear)
self.assertNotEqual(bob_ctxt, alice_ctxt)
def test_publickey(self):
'''
'''
msg = 'You\'ve got two empty halves of coconut and you\'re bangin\' \'em together.'
bob = libnacl.public.SecretKey()
alice = libnacl.public.SecretKey()
alice_pk = libnacl.public.PublicKey(alice.pk)
bob_box = libnacl.public.Box(bob.sk, alice_pk)
alice_box = libnacl.public.Box(alice.sk, bob.pk)
bob_ctxt = bob_box.encrypt(msg)
self.assertNotEqual(msg, bob_ctxt)
bclear = alice_box.decrypt(bob_ctxt)
self.assertEqual(msg, bclear)
|
Add high level tests for public key encryption# Import libnacl libs
import libnacl.public
# Import python libs
import unittest
class TestPublic(unittest.TestCase):
'''
'''
def test_secretkey(self):
'''
'''
msg = 'You\'ve got two empty halves of coconut and you\'re bangin\' \'em together.'
bob = libnacl.public.SecretKey()
alice = libnacl.public.SecretKey()
bob_box = libnacl.public.Box(bob.sk, alice.pk)
alice_box = libnacl.public.Box(alice.sk, bob.pk)
bob_ctxt = bob_box.encrypt(msg)
self.assertNotEqual(msg, bob_ctxt)
bclear = alice_box.decrypt(bob_ctxt)
self.assertEqual(msg, bclear)
alice_ctxt = alice_box.encrypt(msg)
self.assertNotEqual(msg, alice_ctxt)
aclear = alice_box.decrypt(alice_ctxt)
self.assertEqual(msg, aclear)
self.assertNotEqual(bob_ctxt, alice_ctxt)
def test_publickey(self):
'''
'''
msg = 'You\'ve got two empty halves of coconut and you\'re bangin\' \'em together.'
bob = libnacl.public.SecretKey()
alice = libnacl.public.SecretKey()
alice_pk = libnacl.public.PublicKey(alice.pk)
bob_box = libnacl.public.Box(bob.sk, alice_pk)
alice_box = libnacl.public.Box(alice.sk, bob.pk)
bob_ctxt = bob_box.encrypt(msg)
self.assertNotEqual(msg, bob_ctxt)
bclear = alice_box.decrypt(bob_ctxt)
self.assertEqual(msg, bclear)
|
<commit_before><commit_msg>Add high level tests for public key encryption<commit_after># Import libnacl libs
import libnacl.public
# Import python libs
import unittest
class TestPublic(unittest.TestCase):
'''
'''
def test_secretkey(self):
'''
'''
msg = 'You\'ve got two empty halves of coconut and you\'re bangin\' \'em together.'
bob = libnacl.public.SecretKey()
alice = libnacl.public.SecretKey()
bob_box = libnacl.public.Box(bob.sk, alice.pk)
alice_box = libnacl.public.Box(alice.sk, bob.pk)
bob_ctxt = bob_box.encrypt(msg)
self.assertNotEqual(msg, bob_ctxt)
bclear = alice_box.decrypt(bob_ctxt)
self.assertEqual(msg, bclear)
alice_ctxt = alice_box.encrypt(msg)
self.assertNotEqual(msg, alice_ctxt)
aclear = alice_box.decrypt(alice_ctxt)
self.assertEqual(msg, aclear)
self.assertNotEqual(bob_ctxt, alice_ctxt)
def test_publickey(self):
'''
'''
msg = 'You\'ve got two empty halves of coconut and you\'re bangin\' \'em together.'
bob = libnacl.public.SecretKey()
alice = libnacl.public.SecretKey()
alice_pk = libnacl.public.PublicKey(alice.pk)
bob_box = libnacl.public.Box(bob.sk, alice_pk)
alice_box = libnacl.public.Box(alice.sk, bob.pk)
bob_ctxt = bob_box.encrypt(msg)
self.assertNotEqual(msg, bob_ctxt)
bclear = alice_box.decrypt(bob_ctxt)
self.assertEqual(msg, bclear)
|
|
3f5d30b3dd47336be009091e47c20dca265414bf
|
find-non-ascii-char.py
|
find-non-ascii-char.py
|
#!/usr/bin/python3
import string
import sys
import io
asciichars = string.whitespace + string.ascii_letters + string.digits + string.punctuation
reset = '\x1b[0m'
txt_black_bold = '\x1b[30m'
on_yellow = '\x1b[43m'
def print_line(line):
in_non_ascii = False
o = ''
for c in line:
if c not in asciichars:
if not in_non_ascii:
o = o + on_yellow + txt_black_bold
in_non_ascii = True
else:
if in_non_ascii:
o = o + reset
in_non_ascii = False
o = o + c
if in_non_ascii and not o.endswith(reset):
o = o + reset
print(o, end="")
def none_ascii_in_file(filepath):
with open(filepath) as f:
data = io.StringIO(f.read()).getvalue()
try:
data.encode('ascii')
except UnicodeEncodeError:
return True
return False
if __name__ == '__main__':
files = sys.argv[1:]
for infile in files:
if none_ascii_in_file(infile):
with open(infile) as f:
lines = f.readlines()
for line in lines:
print_line(line)
# vim: et sw=4 ts=4
|
Add script to find non-ASCII char in file.
|
Add script to find non-ASCII char in file.
|
Python
|
mit
|
shoma/python.tools
|
Add script to find non-ASCII char in file.
|
#!/usr/bin/python3
import string
import sys
import io
asciichars = string.whitespace + string.ascii_letters + string.digits + string.punctuation
reset = '\x1b[0m'
txt_black_bold = '\x1b[30m'
on_yellow = '\x1b[43m'
def print_line(line):
in_non_ascii = False
o = ''
for c in line:
if c not in asciichars:
if not in_non_ascii:
o = o + on_yellow + txt_black_bold
in_non_ascii = True
else:
if in_non_ascii:
o = o + reset
in_non_ascii = False
o = o + c
if in_non_ascii and not o.endswith(reset):
o = o + reset
print(o, end="")
def none_ascii_in_file(filepath):
with open(filepath) as f:
data = io.StringIO(f.read()).getvalue()
try:
data.encode('ascii')
except UnicodeEncodeError:
return True
return False
if __name__ == '__main__':
files = sys.argv[1:]
for infile in files:
if none_ascii_in_file(infile):
with open(infile) as f:
lines = f.readlines()
for line in lines:
print_line(line)
# vim: et sw=4 ts=4
|
<commit_before><commit_msg>Add script to find non-ASCII char in file.<commit_after>
|
#!/usr/bin/python3
import string
import sys
import io
asciichars = string.whitespace + string.ascii_letters + string.digits + string.punctuation
reset = '\x1b[0m'
txt_black_bold = '\x1b[30m'
on_yellow = '\x1b[43m'
def print_line(line):
in_non_ascii = False
o = ''
for c in line:
if c not in asciichars:
if not in_non_ascii:
o = o + on_yellow + txt_black_bold
in_non_ascii = True
else:
if in_non_ascii:
o = o + reset
in_non_ascii = False
o = o + c
if in_non_ascii and not o.endswith(reset):
o = o + reset
print(o, end="")
def none_ascii_in_file(filepath):
with open(filepath) as f:
data = io.StringIO(f.read()).getvalue()
try:
data.encode('ascii')
except UnicodeEncodeError:
return True
return False
if __name__ == '__main__':
files = sys.argv[1:]
for infile in files:
if none_ascii_in_file(infile):
with open(infile) as f:
lines = f.readlines()
for line in lines:
print_line(line)
# vim: et sw=4 ts=4
|
Add script to find non-ASCII char in file.#!/usr/bin/python3
import string
import sys
import io
asciichars = string.whitespace + string.ascii_letters + string.digits + string.punctuation
reset = '\x1b[0m'
txt_black_bold = '\x1b[30m'
on_yellow = '\x1b[43m'
def print_line(line):
in_non_ascii = False
o = ''
for c in line:
if c not in asciichars:
if not in_non_ascii:
o = o + on_yellow + txt_black_bold
in_non_ascii = True
else:
if in_non_ascii:
o = o + reset
in_non_ascii = False
o = o + c
if in_non_ascii and not o.endswith(reset):
o = o + reset
print(o, end="")
def none_ascii_in_file(filepath):
with open(filepath) as f:
data = io.StringIO(f.read()).getvalue()
try:
data.encode('ascii')
except UnicodeEncodeError:
return True
return False
if __name__ == '__main__':
files = sys.argv[1:]
for infile in files:
if none_ascii_in_file(infile):
with open(infile) as f:
lines = f.readlines()
for line in lines:
print_line(line)
# vim: et sw=4 ts=4
|
<commit_before><commit_msg>Add script to find non-ASCII char in file.<commit_after>#!/usr/bin/python3
import string
import sys
import io
asciichars = string.whitespace + string.ascii_letters + string.digits + string.punctuation
reset = '\x1b[0m'
txt_black_bold = '\x1b[30m'
on_yellow = '\x1b[43m'
def print_line(line):
in_non_ascii = False
o = ''
for c in line:
if c not in asciichars:
if not in_non_ascii:
o = o + on_yellow + txt_black_bold
in_non_ascii = True
else:
if in_non_ascii:
o = o + reset
in_non_ascii = False
o = o + c
if in_non_ascii and not o.endswith(reset):
o = o + reset
print(o, end="")
def none_ascii_in_file(filepath):
with open(filepath) as f:
data = io.StringIO(f.read()).getvalue()
try:
data.encode('ascii')
except UnicodeEncodeError:
return True
return False
if __name__ == '__main__':
files = sys.argv[1:]
for infile in files:
if none_ascii_in_file(infile):
with open(infile) as f:
lines = f.readlines()
for line in lines:
print_line(line)
# vim: et sw=4 ts=4
|
|
c5ca7990aa3eb1abbc14e69e6a7a849db508968e
|
tools/virtualizer_diff.py
|
tools/virtualizer_diff.py
|
#!/usr/bin/env python
# Copyright 2017 Janos Czentye <czentye@tmit.bme.hu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
from os.path import abspath as abspath
from os.path import dirname as dirname
sys.path.append(abspath(dirname(__file__) + "/../unify_virtualizer"))
from virtualizer import Virtualizer
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Convert ESCAPE's config file from JSON into YAML format.",
add_help=True)
parser.add_argument("old", type=str, help="old XML")
parser.add_argument("new", type=str, help="new XML")
args = parser.parse_args()
old_virtualizer = Virtualizer.parse_from_file(filename=args.old)
new_virtualizer = Virtualizer.parse_from_file(filename=args.new)
print old_virtualizer.diff(target=new_virtualizer).xml()
|
Add helper script to create virtualizer diff
|
Add helper script to create virtualizer diff
|
Python
|
apache-2.0
|
hsnlab/escape,5GExchange/escape,5GExchange/escape,5GExchange/escape,hsnlab/escape,hsnlab/escape,5GExchange/escape,5GExchange/escape,hsnlab/escape,hsnlab/escape,5GExchange/escape,hsnlab/escape
|
Add helper script to create virtualizer diff
|
#!/usr/bin/env python
# Copyright 2017 Janos Czentye <czentye@tmit.bme.hu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
from os.path import abspath as abspath
from os.path import dirname as dirname
sys.path.append(abspath(dirname(__file__) + "/../unify_virtualizer"))
from virtualizer import Virtualizer
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Convert ESCAPE's config file from JSON into YAML format.",
add_help=True)
parser.add_argument("old", type=str, help="old XML")
parser.add_argument("new", type=str, help="new XML")
args = parser.parse_args()
old_virtualizer = Virtualizer.parse_from_file(filename=args.old)
new_virtualizer = Virtualizer.parse_from_file(filename=args.new)
print old_virtualizer.diff(target=new_virtualizer).xml()
|
<commit_before><commit_msg>Add helper script to create virtualizer diff<commit_after>
|
#!/usr/bin/env python
# Copyright 2017 Janos Czentye <czentye@tmit.bme.hu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
from os.path import abspath as abspath
from os.path import dirname as dirname
sys.path.append(abspath(dirname(__file__) + "/../unify_virtualizer"))
from virtualizer import Virtualizer
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Convert ESCAPE's config file from JSON into YAML format.",
add_help=True)
parser.add_argument("old", type=str, help="old XML")
parser.add_argument("new", type=str, help="new XML")
args = parser.parse_args()
old_virtualizer = Virtualizer.parse_from_file(filename=args.old)
new_virtualizer = Virtualizer.parse_from_file(filename=args.new)
print old_virtualizer.diff(target=new_virtualizer).xml()
|
Add helper script to create virtualizer diff#!/usr/bin/env python
# Copyright 2017 Janos Czentye <czentye@tmit.bme.hu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
from os.path import abspath as abspath
from os.path import dirname as dirname
sys.path.append(abspath(dirname(__file__) + "/../unify_virtualizer"))
from virtualizer import Virtualizer
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Convert ESCAPE's config file from JSON into YAML format.",
add_help=True)
parser.add_argument("old", type=str, help="old XML")
parser.add_argument("new", type=str, help="new XML")
args = parser.parse_args()
old_virtualizer = Virtualizer.parse_from_file(filename=args.old)
new_virtualizer = Virtualizer.parse_from_file(filename=args.new)
print old_virtualizer.diff(target=new_virtualizer).xml()
|
<commit_before><commit_msg>Add helper script to create virtualizer diff<commit_after>#!/usr/bin/env python
# Copyright 2017 Janos Czentye <czentye@tmit.bme.hu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
from os.path import abspath as abspath
from os.path import dirname as dirname
sys.path.append(abspath(dirname(__file__) + "/../unify_virtualizer"))
from virtualizer import Virtualizer
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Convert ESCAPE's config file from JSON into YAML format.",
add_help=True)
parser.add_argument("old", type=str, help="old XML")
parser.add_argument("new", type=str, help="new XML")
args = parser.parse_args()
old_virtualizer = Virtualizer.parse_from_file(filename=args.old)
new_virtualizer = Virtualizer.parse_from_file(filename=args.new)
print old_virtualizer.diff(target=new_virtualizer).xml()
|
|
96a93627d6e72e2e04bfc2e7f6fdc67f53623288
|
mikrotik_config_parser.py
|
mikrotik_config_parser.py
|
import ConfigParser
from mikrotik_device import MtDevice
class Config(object):
def __init__(self):
self.config = ConfigParser.ConfigParser()
self.config.read('config.ini')
def get_ftp(self):
ftp = {'host' : self.config.get('ftp', 'host'),
'port' : self.config.get('ftp', 'port'),
'username' : self.config.get('ftp', 'username'),
'password' : self.config.get('ftp', 'password')}
return ftp
def get_devices(self):
for section in self.config.sections():
if section != 'general' and section != 'ftp':
device = {'host': self.config.get(section, 'host'),
'username': self.config.get(section, 'username'),
'password': self.config.get(section, 'password'),
'dst-path': self.config.get(section, 'path')}
# Return generator
yield device
|
Add ini file parser and fill up backup commands
|
Add ini file parser and fill up backup commands
|
Python
|
mit
|
voronovim/mikrotik-api-tools
|
Add ini file parser and fill up backup commands
|
import ConfigParser
from mikrotik_device import MtDevice
class Config(object):
def __init__(self):
self.config = ConfigParser.ConfigParser()
self.config.read('config.ini')
def get_ftp(self):
ftp = {'host' : self.config.get('ftp', 'host'),
'port' : self.config.get('ftp', 'port'),
'username' : self.config.get('ftp', 'username'),
'password' : self.config.get('ftp', 'password')}
return ftp
def get_devices(self):
for section in self.config.sections():
if section != 'general' and section != 'ftp':
device = {'host': self.config.get(section, 'host'),
'username': self.config.get(section, 'username'),
'password': self.config.get(section, 'password'),
'dst-path': self.config.get(section, 'path')}
# Return generator
yield device
|
<commit_before><commit_msg>Add ini file parser and fill up backup commands<commit_after>
|
import ConfigParser
from mikrotik_device import MtDevice
class Config(object):
def __init__(self):
self.config = ConfigParser.ConfigParser()
self.config.read('config.ini')
def get_ftp(self):
ftp = {'host' : self.config.get('ftp', 'host'),
'port' : self.config.get('ftp', 'port'),
'username' : self.config.get('ftp', 'username'),
'password' : self.config.get('ftp', 'password')}
return ftp
def get_devices(self):
for section in self.config.sections():
if section != 'general' and section != 'ftp':
device = {'host': self.config.get(section, 'host'),
'username': self.config.get(section, 'username'),
'password': self.config.get(section, 'password'),
'dst-path': self.config.get(section, 'path')}
# Return generator
yield device
|
Add ini file parser and fill up backup commandsimport ConfigParser
from mikrotik_device import MtDevice
class Config(object):
def __init__(self):
self.config = ConfigParser.ConfigParser()
self.config.read('config.ini')
def get_ftp(self):
ftp = {'host' : self.config.get('ftp', 'host'),
'port' : self.config.get('ftp', 'port'),
'username' : self.config.get('ftp', 'username'),
'password' : self.config.get('ftp', 'password')}
return ftp
def get_devices(self):
for section in self.config.sections():
if section != 'general' and section != 'ftp':
device = {'host': self.config.get(section, 'host'),
'username': self.config.get(section, 'username'),
'password': self.config.get(section, 'password'),
'dst-path': self.config.get(section, 'path')}
# Return generator
yield device
|
<commit_before><commit_msg>Add ini file parser and fill up backup commands<commit_after>import ConfigParser
from mikrotik_device import MtDevice
class Config(object):
def __init__(self):
self.config = ConfigParser.ConfigParser()
self.config.read('config.ini')
def get_ftp(self):
ftp = {'host' : self.config.get('ftp', 'host'),
'port' : self.config.get('ftp', 'port'),
'username' : self.config.get('ftp', 'username'),
'password' : self.config.get('ftp', 'password')}
return ftp
def get_devices(self):
for section in self.config.sections():
if section != 'general' and section != 'ftp':
device = {'host': self.config.get(section, 'host'),
'username': self.config.get(section, 'username'),
'password': self.config.get(section, 'password'),
'dst-path': self.config.get(section, 'path')}
# Return generator
yield device
|
|
be5f12fcafe2e382ec65fef864340ae8c13fa4ea
|
tests/unit/modules/inspect_collector_test.py
|
tests/unit/modules/inspect_collector_test.py
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Bo Maryniuk <bo@suse.de>`
'''
# Import Python Libs
from __future__ import absolute_import
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
from salt.modules.inspectlib.collector import Inspector
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
@skipIf(NO_MOCK, NO_MOCK_REASON)
class InspectorCollectorTestCase(TestCase):
'''
Test inspectlib:collector:Inspector
'''
def test_env_loader(self):
'''
Get packages on the different distros.
:return:
'''
inspector = Inspector(cachedir='/foo/cache', piddir='/foo/pid', pidfilename='bar.pid')
self.assertEqual(inspector.dbfile, '/foo/cache/_minion_collector.db')
self.assertEqual(inspector.pidfile, '/foo/pid/bar.pid')
def test_pkg_get(self):
'''
Test if grains switching the pkg get method.
:return:
'''
debian_list = """
g++
g++-4.9
g++-5
gawk
gcc
gcc-4.9
gcc-4.9-base:amd64
gcc-4.9-base:i386
gcc-5
gcc-5-base:amd64
gcc-5-base:i386
gcc-6-base:amd64
gcc-6-base:i386
"""
inspector = Inspector(cachedir='/test', piddir='/test', pidfilename='bar.pid')
inspector.grains_core = MagicMock()
inspector.grains_core.os_data = MagicMock()
inspector.grains_core.os_data.get = MagicMock(return_value='Debian')
with patch.object(inspector, '_Inspector__get_cfg_pkgs_dpkg', MagicMock(return_value='dpkg')):
with patch.object(inspector, '_Inspector__get_cfg_pkgs_rpm', MagicMock(return_value='rpm')):
inspector.grains_core = MagicMock()
inspector.grains_core.os_data = MagicMock()
inspector.grains_core.os_data().get = MagicMock(return_value='Debian')
self.assertEqual(inspector._get_cfg_pkgs(), 'dpkg')
inspector.grains_core.os_data().get = MagicMock(return_value='Suse')
self.assertEqual(inspector._get_cfg_pkgs(), 'rpm')
inspector.grains_core.os_data().get = MagicMock(return_value='redhat')
self.assertEqual(inspector._get_cfg_pkgs(), 'rpm')
|
Add initial unit test for inspectlib.collector.Inspector
|
Add initial unit test for inspectlib.collector.Inspector
|
Python
|
apache-2.0
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
Add initial unit test for inspectlib.collector.Inspector
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Bo Maryniuk <bo@suse.de>`
'''
# Import Python Libs
from __future__ import absolute_import
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
from salt.modules.inspectlib.collector import Inspector
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
@skipIf(NO_MOCK, NO_MOCK_REASON)
class InspectorCollectorTestCase(TestCase):
'''
Test inspectlib:collector:Inspector
'''
def test_env_loader(self):
'''
Get packages on the different distros.
:return:
'''
inspector = Inspector(cachedir='/foo/cache', piddir='/foo/pid', pidfilename='bar.pid')
self.assertEqual(inspector.dbfile, '/foo/cache/_minion_collector.db')
self.assertEqual(inspector.pidfile, '/foo/pid/bar.pid')
def test_pkg_get(self):
'''
Test if grains switching the pkg get method.
:return:
'''
debian_list = """
g++
g++-4.9
g++-5
gawk
gcc
gcc-4.9
gcc-4.9-base:amd64
gcc-4.9-base:i386
gcc-5
gcc-5-base:amd64
gcc-5-base:i386
gcc-6-base:amd64
gcc-6-base:i386
"""
inspector = Inspector(cachedir='/test', piddir='/test', pidfilename='bar.pid')
inspector.grains_core = MagicMock()
inspector.grains_core.os_data = MagicMock()
inspector.grains_core.os_data.get = MagicMock(return_value='Debian')
with patch.object(inspector, '_Inspector__get_cfg_pkgs_dpkg', MagicMock(return_value='dpkg')):
with patch.object(inspector, '_Inspector__get_cfg_pkgs_rpm', MagicMock(return_value='rpm')):
inspector.grains_core = MagicMock()
inspector.grains_core.os_data = MagicMock()
inspector.grains_core.os_data().get = MagicMock(return_value='Debian')
self.assertEqual(inspector._get_cfg_pkgs(), 'dpkg')
inspector.grains_core.os_data().get = MagicMock(return_value='Suse')
self.assertEqual(inspector._get_cfg_pkgs(), 'rpm')
inspector.grains_core.os_data().get = MagicMock(return_value='redhat')
self.assertEqual(inspector._get_cfg_pkgs(), 'rpm')
|
<commit_before><commit_msg>Add initial unit test for inspectlib.collector.Inspector<commit_after>
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Bo Maryniuk <bo@suse.de>`
'''
# Import Python Libs
from __future__ import absolute_import
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
from salt.modules.inspectlib.collector import Inspector
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
@skipIf(NO_MOCK, NO_MOCK_REASON)
class InspectorCollectorTestCase(TestCase):
'''
Test inspectlib:collector:Inspector
'''
def test_env_loader(self):
'''
Get packages on the different distros.
:return:
'''
inspector = Inspector(cachedir='/foo/cache', piddir='/foo/pid', pidfilename='bar.pid')
self.assertEqual(inspector.dbfile, '/foo/cache/_minion_collector.db')
self.assertEqual(inspector.pidfile, '/foo/pid/bar.pid')
def test_pkg_get(self):
'''
Test if grains switching the pkg get method.
:return:
'''
debian_list = """
g++
g++-4.9
g++-5
gawk
gcc
gcc-4.9
gcc-4.9-base:amd64
gcc-4.9-base:i386
gcc-5
gcc-5-base:amd64
gcc-5-base:i386
gcc-6-base:amd64
gcc-6-base:i386
"""
inspector = Inspector(cachedir='/test', piddir='/test', pidfilename='bar.pid')
inspector.grains_core = MagicMock()
inspector.grains_core.os_data = MagicMock()
inspector.grains_core.os_data.get = MagicMock(return_value='Debian')
with patch.object(inspector, '_Inspector__get_cfg_pkgs_dpkg', MagicMock(return_value='dpkg')):
with patch.object(inspector, '_Inspector__get_cfg_pkgs_rpm', MagicMock(return_value='rpm')):
inspector.grains_core = MagicMock()
inspector.grains_core.os_data = MagicMock()
inspector.grains_core.os_data().get = MagicMock(return_value='Debian')
self.assertEqual(inspector._get_cfg_pkgs(), 'dpkg')
inspector.grains_core.os_data().get = MagicMock(return_value='Suse')
self.assertEqual(inspector._get_cfg_pkgs(), 'rpm')
inspector.grains_core.os_data().get = MagicMock(return_value='redhat')
self.assertEqual(inspector._get_cfg_pkgs(), 'rpm')
|
Add initial unit test for inspectlib.collector.Inspector# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Bo Maryniuk <bo@suse.de>`
'''
# Import Python Libs
from __future__ import absolute_import
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
from salt.modules.inspectlib.collector import Inspector
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
@skipIf(NO_MOCK, NO_MOCK_REASON)
class InspectorCollectorTestCase(TestCase):
'''
Test inspectlib:collector:Inspector
'''
def test_env_loader(self):
'''
Get packages on the different distros.
:return:
'''
inspector = Inspector(cachedir='/foo/cache', piddir='/foo/pid', pidfilename='bar.pid')
self.assertEqual(inspector.dbfile, '/foo/cache/_minion_collector.db')
self.assertEqual(inspector.pidfile, '/foo/pid/bar.pid')
def test_pkg_get(self):
'''
Test if grains switching the pkg get method.
:return:
'''
debian_list = """
g++
g++-4.9
g++-5
gawk
gcc
gcc-4.9
gcc-4.9-base:amd64
gcc-4.9-base:i386
gcc-5
gcc-5-base:amd64
gcc-5-base:i386
gcc-6-base:amd64
gcc-6-base:i386
"""
inspector = Inspector(cachedir='/test', piddir='/test', pidfilename='bar.pid')
inspector.grains_core = MagicMock()
inspector.grains_core.os_data = MagicMock()
inspector.grains_core.os_data.get = MagicMock(return_value='Debian')
with patch.object(inspector, '_Inspector__get_cfg_pkgs_dpkg', MagicMock(return_value='dpkg')):
with patch.object(inspector, '_Inspector__get_cfg_pkgs_rpm', MagicMock(return_value='rpm')):
inspector.grains_core = MagicMock()
inspector.grains_core.os_data = MagicMock()
inspector.grains_core.os_data().get = MagicMock(return_value='Debian')
self.assertEqual(inspector._get_cfg_pkgs(), 'dpkg')
inspector.grains_core.os_data().get = MagicMock(return_value='Suse')
self.assertEqual(inspector._get_cfg_pkgs(), 'rpm')
inspector.grains_core.os_data().get = MagicMock(return_value='redhat')
self.assertEqual(inspector._get_cfg_pkgs(), 'rpm')
|
<commit_before><commit_msg>Add initial unit test for inspectlib.collector.Inspector<commit_after># -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Bo Maryniuk <bo@suse.de>`
'''
# Import Python Libs
from __future__ import absolute_import
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
from salt.modules.inspectlib.collector import Inspector
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
@skipIf(NO_MOCK, NO_MOCK_REASON)
class InspectorCollectorTestCase(TestCase):
'''
Test inspectlib:collector:Inspector
'''
def test_env_loader(self):
'''
Get packages on the different distros.
:return:
'''
inspector = Inspector(cachedir='/foo/cache', piddir='/foo/pid', pidfilename='bar.pid')
self.assertEqual(inspector.dbfile, '/foo/cache/_minion_collector.db')
self.assertEqual(inspector.pidfile, '/foo/pid/bar.pid')
def test_pkg_get(self):
'''
Test if grains switching the pkg get method.
:return:
'''
debian_list = """
g++
g++-4.9
g++-5
gawk
gcc
gcc-4.9
gcc-4.9-base:amd64
gcc-4.9-base:i386
gcc-5
gcc-5-base:amd64
gcc-5-base:i386
gcc-6-base:amd64
gcc-6-base:i386
"""
inspector = Inspector(cachedir='/test', piddir='/test', pidfilename='bar.pid')
inspector.grains_core = MagicMock()
inspector.grains_core.os_data = MagicMock()
inspector.grains_core.os_data.get = MagicMock(return_value='Debian')
with patch.object(inspector, '_Inspector__get_cfg_pkgs_dpkg', MagicMock(return_value='dpkg')):
with patch.object(inspector, '_Inspector__get_cfg_pkgs_rpm', MagicMock(return_value='rpm')):
inspector.grains_core = MagicMock()
inspector.grains_core.os_data = MagicMock()
inspector.grains_core.os_data().get = MagicMock(return_value='Debian')
self.assertEqual(inspector._get_cfg_pkgs(), 'dpkg')
inspector.grains_core.os_data().get = MagicMock(return_value='Suse')
self.assertEqual(inspector._get_cfg_pkgs(), 'rpm')
inspector.grains_core.os_data().get = MagicMock(return_value='redhat')
self.assertEqual(inspector._get_cfg_pkgs(), 'rpm')
|
|
4f2743ed845185de718763df6d26db390ee2eb48
|
test_putget.py
|
test_putget.py
|
from multiprocessing import Process, Queue
q = Queue()
iterations = 10000000
def produce(q):
for i in range(iterations):
q.put(i)
if __name__ == "__main__":
t = Process(target=produce, args=(q,))
t.start()
previous = -1
for i in range(iterations):
m = q.get()
if m != previous + 1:
print "Fail at:", previous, m
break
previous = m
print "done"
|
Add equivalent put/get test in python.
|
Add equivalent put/get test in python.
|
Python
|
mit
|
abwilson/L3,abwilson/L3,tempbottle/L3,tempbottle/L3
|
Add equivalent put/get test in python.
|
from multiprocessing import Process, Queue
q = Queue()
iterations = 10000000
def produce(q):
for i in range(iterations):
q.put(i)
if __name__ == "__main__":
t = Process(target=produce, args=(q,))
t.start()
previous = -1
for i in range(iterations):
m = q.get()
if m != previous + 1:
print "Fail at:", previous, m
break
previous = m
print "done"
|
<commit_before><commit_msg>Add equivalent put/get test in python.<commit_after>
|
from multiprocessing import Process, Queue
q = Queue()
iterations = 10000000
def produce(q):
for i in range(iterations):
q.put(i)
if __name__ == "__main__":
t = Process(target=produce, args=(q,))
t.start()
previous = -1
for i in range(iterations):
m = q.get()
if m != previous + 1:
print "Fail at:", previous, m
break
previous = m
print "done"
|
Add equivalent put/get test in python.from multiprocessing import Process, Queue
q = Queue()
iterations = 10000000
def produce(q):
for i in range(iterations):
q.put(i)
if __name__ == "__main__":
t = Process(target=produce, args=(q,))
t.start()
previous = -1
for i in range(iterations):
m = q.get()
if m != previous + 1:
print "Fail at:", previous, m
break
previous = m
print "done"
|
<commit_before><commit_msg>Add equivalent put/get test in python.<commit_after>from multiprocessing import Process, Queue
q = Queue()
iterations = 10000000
def produce(q):
for i in range(iterations):
q.put(i)
if __name__ == "__main__":
t = Process(target=produce, args=(q,))
t.start()
previous = -1
for i in range(iterations):
m = q.get()
if m != previous + 1:
print "Fail at:", previous, m
break
previous = m
print "done"
|
|
5f912542a555621cd259265a5029ee4da15de972
|
tests/utils.py
|
tests/utils.py
|
from django.contrib.sessions.middleware import SessionMiddleware
def add_session_to_request(request):
# Annotate a request object with a session.
middleware = SessionMiddleware()
middleware.process_request(request)
request.session.save()
return request
def setup_view(view, request, *args, **kwargs):
# Mimics .as_view(), but returns the view instance instead of a
# function. The arguments are similar to that of reverse().
view.request = request
view.args = args
view.kwargs = kwargs
return view
|
Add test helpers! Yes, we've gotten this deep into it.
|
Add test helpers! Yes, we've gotten this deep into it.
|
Python
|
apache-2.0
|
hello-base/web,hello-base/web,hello-base/web,hello-base/web
|
Add test helpers! Yes, we've gotten this deep into it.
|
from django.contrib.sessions.middleware import SessionMiddleware
def add_session_to_request(request):
# Annotate a request object with a session.
middleware = SessionMiddleware()
middleware.process_request(request)
request.session.save()
return request
def setup_view(view, request, *args, **kwargs):
# Mimics .as_view(), but returns the view instance instead of a
# function. The arguments are similar to that of reverse().
view.request = request
view.args = args
view.kwargs = kwargs
return view
|
<commit_before><commit_msg>Add test helpers! Yes, we've gotten this deep into it.<commit_after>
|
from django.contrib.sessions.middleware import SessionMiddleware
def add_session_to_request(request):
# Annotate a request object with a session.
middleware = SessionMiddleware()
middleware.process_request(request)
request.session.save()
return request
def setup_view(view, request, *args, **kwargs):
# Mimics .as_view(), but returns the view instance instead of a
# function. The arguments are similar to that of reverse().
view.request = request
view.args = args
view.kwargs = kwargs
return view
|
Add test helpers! Yes, we've gotten this deep into it.from django.contrib.sessions.middleware import SessionMiddleware
def add_session_to_request(request):
# Annotate a request object with a session.
middleware = SessionMiddleware()
middleware.process_request(request)
request.session.save()
return request
def setup_view(view, request, *args, **kwargs):
# Mimics .as_view(), but returns the view instance instead of a
# function. The arguments are similar to that of reverse().
view.request = request
view.args = args
view.kwargs = kwargs
return view
|
<commit_before><commit_msg>Add test helpers! Yes, we've gotten this deep into it.<commit_after>from django.contrib.sessions.middleware import SessionMiddleware
def add_session_to_request(request):
# Annotate a request object with a session.
middleware = SessionMiddleware()
middleware.process_request(request)
request.session.save()
return request
def setup_view(view, request, *args, **kwargs):
# Mimics .as_view(), but returns the view instance instead of a
# function. The arguments are similar to that of reverse().
view.request = request
view.args = args
view.kwargs = kwargs
return view
|
|
f305a445b0e018a4140d5e28cd0a68ba450e7d87
|
tests/unit/test_default_semantic_action.py
|
tests/unit/test_default_semantic_action.py
|
# -*- coding: utf-8 -*-
#######################################################################
# Name: test_default_semantic_action
# Purpose: Default semantic action is applied during semantic analysis
# if no action is given for node type. Default action converts
# terminals to strings, remove StrMatch terminals from sequences.
# Author: Igor R. Dejanović <igor DOT dejanovic AT gmail DOT com>
# Copyright: (c) 2014 Igor R. Dejanović <igor DOT dejanovic AT gmail DOT com>
# License: MIT License
#######################################################################
import pytest
from arpeggio import ParserPython, SemanticAction, ParseTreeNode
from arpeggio import RegExMatch as _
def grammar(): return parentheses, 'strmatch'
def parentheses(): return '(', rulea, ')'
def rulea(): return ['+', '-'], number
def number(): return _(r'\d+')
p_removed = False
number_str = False
parse_tree_node = False
class ParenthesesSA(SemanticAction):
def first_pass(self, parser, node, children):
global p_removed, parse_tree_node
p_removed = str(children[0]) != '('
parse_tree_node = isinstance(children[0], ParseTreeNode)
return children[0] if len(children)==1 else children[1]
class RuleSA(SemanticAction):
def first_pass(self, parser, node, children):
global number_str
number_str = type(children[1]) == str
return children[1]
parentheses.sem = ParenthesesSA()
rulea.sem = RuleSA()
def test_default_action_enabled():
parser = ParserPython(grammar)
parse_tree = parser.parse('(-34) strmatch')
parser.getASG(defaults=True)
assert p_removed
assert number_str
assert not parse_tree_node
def test_default_action_disabled():
parser = ParserPython(grammar)
parse_tree = parser.parse('(-34) strmatch')
parser.getASG(defaults=False)
assert not p_removed
assert not number_str
assert parse_tree_node
|
Test for default semantic action
|
Test for default semantic action
|
Python
|
mit
|
leiyangyou/Arpeggio,leiyangyou/Arpeggio
|
Test for default semantic action
|
# -*- coding: utf-8 -*-
#######################################################################
# Name: test_default_semantic_action
# Purpose: Default semantic action is applied during semantic analysis
# if no action is given for node type. Default action converts
# terminals to strings, remove StrMatch terminals from sequences.
# Author: Igor R. Dejanović <igor DOT dejanovic AT gmail DOT com>
# Copyright: (c) 2014 Igor R. Dejanović <igor DOT dejanovic AT gmail DOT com>
# License: MIT License
#######################################################################
import pytest
from arpeggio import ParserPython, SemanticAction, ParseTreeNode
from arpeggio import RegExMatch as _
def grammar(): return parentheses, 'strmatch'
def parentheses(): return '(', rulea, ')'
def rulea(): return ['+', '-'], number
def number(): return _(r'\d+')
p_removed = False
number_str = False
parse_tree_node = False
class ParenthesesSA(SemanticAction):
def first_pass(self, parser, node, children):
global p_removed, parse_tree_node
p_removed = str(children[0]) != '('
parse_tree_node = isinstance(children[0], ParseTreeNode)
return children[0] if len(children)==1 else children[1]
class RuleSA(SemanticAction):
def first_pass(self, parser, node, children):
global number_str
number_str = type(children[1]) == str
return children[1]
parentheses.sem = ParenthesesSA()
rulea.sem = RuleSA()
def test_default_action_enabled():
parser = ParserPython(grammar)
parse_tree = parser.parse('(-34) strmatch')
parser.getASG(defaults=True)
assert p_removed
assert number_str
assert not parse_tree_node
def test_default_action_disabled():
parser = ParserPython(grammar)
parse_tree = parser.parse('(-34) strmatch')
parser.getASG(defaults=False)
assert not p_removed
assert not number_str
assert parse_tree_node
|
<commit_before><commit_msg>Test for default semantic action<commit_after>
|
# -*- coding: utf-8 -*-
#######################################################################
# Name: test_default_semantic_action
# Purpose: Default semantic action is applied during semantic analysis
# if no action is given for node type. Default action converts
# terminals to strings, remove StrMatch terminals from sequences.
# Author: Igor R. Dejanović <igor DOT dejanovic AT gmail DOT com>
# Copyright: (c) 2014 Igor R. Dejanović <igor DOT dejanovic AT gmail DOT com>
# License: MIT License
#######################################################################
import pytest
from arpeggio import ParserPython, SemanticAction, ParseTreeNode
from arpeggio import RegExMatch as _
def grammar(): return parentheses, 'strmatch'
def parentheses(): return '(', rulea, ')'
def rulea(): return ['+', '-'], number
def number(): return _(r'\d+')
p_removed = False
number_str = False
parse_tree_node = False
class ParenthesesSA(SemanticAction):
def first_pass(self, parser, node, children):
global p_removed, parse_tree_node
p_removed = str(children[0]) != '('
parse_tree_node = isinstance(children[0], ParseTreeNode)
return children[0] if len(children)==1 else children[1]
class RuleSA(SemanticAction):
def first_pass(self, parser, node, children):
global number_str
number_str = type(children[1]) == str
return children[1]
parentheses.sem = ParenthesesSA()
rulea.sem = RuleSA()
def test_default_action_enabled():
parser = ParserPython(grammar)
parse_tree = parser.parse('(-34) strmatch')
parser.getASG(defaults=True)
assert p_removed
assert number_str
assert not parse_tree_node
def test_default_action_disabled():
parser = ParserPython(grammar)
parse_tree = parser.parse('(-34) strmatch')
parser.getASG(defaults=False)
assert not p_removed
assert not number_str
assert parse_tree_node
|
Test for default semantic action# -*- coding: utf-8 -*-
#######################################################################
# Name: test_default_semantic_action
# Purpose: Default semantic action is applied during semantic analysis
# if no action is given for node type. Default action converts
# terminals to strings, remove StrMatch terminals from sequences.
# Author: Igor R. Dejanović <igor DOT dejanovic AT gmail DOT com>
# Copyright: (c) 2014 Igor R. Dejanović <igor DOT dejanovic AT gmail DOT com>
# License: MIT License
#######################################################################
import pytest
from arpeggio import ParserPython, SemanticAction, ParseTreeNode
from arpeggio import RegExMatch as _
def grammar(): return parentheses, 'strmatch'
def parentheses(): return '(', rulea, ')'
def rulea(): return ['+', '-'], number
def number(): return _(r'\d+')
p_removed = False
number_str = False
parse_tree_node = False
class ParenthesesSA(SemanticAction):
def first_pass(self, parser, node, children):
global p_removed, parse_tree_node
p_removed = str(children[0]) != '('
parse_tree_node = isinstance(children[0], ParseTreeNode)
return children[0] if len(children)==1 else children[1]
class RuleSA(SemanticAction):
def first_pass(self, parser, node, children):
global number_str
number_str = type(children[1]) == str
return children[1]
parentheses.sem = ParenthesesSA()
rulea.sem = RuleSA()
def test_default_action_enabled():
parser = ParserPython(grammar)
parse_tree = parser.parse('(-34) strmatch')
parser.getASG(defaults=True)
assert p_removed
assert number_str
assert not parse_tree_node
def test_default_action_disabled():
parser = ParserPython(grammar)
parse_tree = parser.parse('(-34) strmatch')
parser.getASG(defaults=False)
assert not p_removed
assert not number_str
assert parse_tree_node
|
<commit_before><commit_msg>Test for default semantic action<commit_after># -*- coding: utf-8 -*-
#######################################################################
# Name: test_default_semantic_action
# Purpose: Default semantic action is applied during semantic analysis
# if no action is given for node type. Default action converts
# terminals to strings, remove StrMatch terminals from sequences.
# Author: Igor R. Dejanović <igor DOT dejanovic AT gmail DOT com>
# Copyright: (c) 2014 Igor R. Dejanović <igor DOT dejanovic AT gmail DOT com>
# License: MIT License
#######################################################################
import pytest
from arpeggio import ParserPython, SemanticAction, ParseTreeNode
from arpeggio import RegExMatch as _
def grammar(): return parentheses, 'strmatch'
def parentheses(): return '(', rulea, ')'
def rulea(): return ['+', '-'], number
def number(): return _(r'\d+')
p_removed = False
number_str = False
parse_tree_node = False
class ParenthesesSA(SemanticAction):
def first_pass(self, parser, node, children):
global p_removed, parse_tree_node
p_removed = str(children[0]) != '('
parse_tree_node = isinstance(children[0], ParseTreeNode)
return children[0] if len(children)==1 else children[1]
class RuleSA(SemanticAction):
def first_pass(self, parser, node, children):
global number_str
number_str = type(children[1]) == str
return children[1]
parentheses.sem = ParenthesesSA()
rulea.sem = RuleSA()
def test_default_action_enabled():
parser = ParserPython(grammar)
parse_tree = parser.parse('(-34) strmatch')
parser.getASG(defaults=True)
assert p_removed
assert number_str
assert not parse_tree_node
def test_default_action_disabled():
parser = ParserPython(grammar)
parse_tree = parser.parse('(-34) strmatch')
parser.getASG(defaults=False)
assert not p_removed
assert not number_str
assert parse_tree_node
|
|
46fa2821f988dded52ca6086db2beada3ea5eea3
|
examples/set_explore_group_configuration.py
|
examples/set_explore_group_configuration.py
|
#!/usr/bin/env python
#
# Set the group configuration in explore.
#
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), '..'))
from sdcclient import SdcClient
#
# Parse arguments
#
if len(sys.argv) != 2:
print 'usage: %s <sysdig-token>' % sys.argv[0]
print 'You can find your token at https://app.sysdigcloud.com/#/settings/user'
sys.exit(1)
sdc_token = sys.argv[1]
#
# Instantiate the SDC client
#
sdclient = SdcClient(sdc_token)
#
# Fire the request, set the group configuration you need in the example below
#
groupConfig = ['agent.tag.role', 'host.mac']
res = sdclient.set_explore_grouping_hierarchy(groupConfig)
#
# Show the error if there was one
#
if res[0] == False:
print res[1]
|
Set default explore group configuration for a user
|
Set default explore group configuration for a user
|
Python
|
mit
|
draios/python-sdc-client,draios/python-sdc-client
|
Set default explore group configuration for a user
|
#!/usr/bin/env python
#
# Set the group configuration in explore.
#
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), '..'))
from sdcclient import SdcClient
#
# Parse arguments
#
if len(sys.argv) != 2:
print 'usage: %s <sysdig-token>' % sys.argv[0]
print 'You can find your token at https://app.sysdigcloud.com/#/settings/user'
sys.exit(1)
sdc_token = sys.argv[1]
#
# Instantiate the SDC client
#
sdclient = SdcClient(sdc_token)
#
# Fire the request, set the group configuration you need in the example below
#
groupConfig = ['agent.tag.role', 'host.mac']
res = sdclient.set_explore_grouping_hierarchy(groupConfig)
#
# Show the error if there was one
#
if res[0] == False:
print res[1]
|
<commit_before><commit_msg>Set default explore group configuration for a user<commit_after>
|
#!/usr/bin/env python
#
# Set the group configuration in explore.
#
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), '..'))
from sdcclient import SdcClient
#
# Parse arguments
#
if len(sys.argv) != 2:
print 'usage: %s <sysdig-token>' % sys.argv[0]
print 'You can find your token at https://app.sysdigcloud.com/#/settings/user'
sys.exit(1)
sdc_token = sys.argv[1]
#
# Instantiate the SDC client
#
sdclient = SdcClient(sdc_token)
#
# Fire the request, set the group configuration you need in the example below
#
groupConfig = ['agent.tag.role', 'host.mac']
res = sdclient.set_explore_grouping_hierarchy(groupConfig)
#
# Show the error if there was one
#
if res[0] == False:
print res[1]
|
Set default explore group configuration for a user#!/usr/bin/env python
#
# Set the group configuration in explore.
#
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), '..'))
from sdcclient import SdcClient
#
# Parse arguments
#
if len(sys.argv) != 2:
print 'usage: %s <sysdig-token>' % sys.argv[0]
print 'You can find your token at https://app.sysdigcloud.com/#/settings/user'
sys.exit(1)
sdc_token = sys.argv[1]
#
# Instantiate the SDC client
#
sdclient = SdcClient(sdc_token)
#
# Fire the request, set the group configuration you need in the example below
#
groupConfig = ['agent.tag.role', 'host.mac']
res = sdclient.set_explore_grouping_hierarchy(groupConfig)
#
# Show the error if there was one
#
if res[0] == False:
print res[1]
|
<commit_before><commit_msg>Set default explore group configuration for a user<commit_after>#!/usr/bin/env python
#
# Set the group configuration in explore.
#
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), '..'))
from sdcclient import SdcClient
#
# Parse arguments
#
if len(sys.argv) != 2:
print 'usage: %s <sysdig-token>' % sys.argv[0]
print 'You can find your token at https://app.sysdigcloud.com/#/settings/user'
sys.exit(1)
sdc_token = sys.argv[1]
#
# Instantiate the SDC client
#
sdclient = SdcClient(sdc_token)
#
# Fire the request, set the group configuration you need in the example below
#
groupConfig = ['agent.tag.role', 'host.mac']
res = sdclient.set_explore_grouping_hierarchy(groupConfig)
#
# Show the error if there was one
#
if res[0] == False:
print res[1]
|
|
e9e4d4af705c2c6785ddd63f5e6e94ef2e675a83
|
tests/integration/modules/test_autoruns.py
|
tests/integration/modules/test_autoruns.py
|
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.unit import skipIf
# Import Salt libs
import salt.utils
@skipIf(not salt.utils.is_windows(), 'windows tests only')
class AutoRunsModuleTest(ModuleCase):
'''
Test the autoruns module
'''
def test_win_autoruns_list(self):
'''
test win_autoruns.list module
'''
ret = self.run_function('autoruns.list')
self.assertIn('HKLM', str(ret))
self.assertTrue(isinstance(ret, dict))
|
Add autoruns.list integration test for Windows
|
Add autoruns.list integration test for Windows
|
Python
|
apache-2.0
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
Add autoruns.list integration test for Windows
|
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.unit import skipIf
# Import Salt libs
import salt.utils
@skipIf(not salt.utils.is_windows(), 'windows tests only')
class AutoRunsModuleTest(ModuleCase):
'''
Test the autoruns module
'''
def test_win_autoruns_list(self):
'''
test win_autoruns.list module
'''
ret = self.run_function('autoruns.list')
self.assertIn('HKLM', str(ret))
self.assertTrue(isinstance(ret, dict))
|
<commit_before><commit_msg>Add autoruns.list integration test for Windows<commit_after>
|
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.unit import skipIf
# Import Salt libs
import salt.utils
@skipIf(not salt.utils.is_windows(), 'windows tests only')
class AutoRunsModuleTest(ModuleCase):
'''
Test the autoruns module
'''
def test_win_autoruns_list(self):
'''
test win_autoruns.list module
'''
ret = self.run_function('autoruns.list')
self.assertIn('HKLM', str(ret))
self.assertTrue(isinstance(ret, dict))
|
Add autoruns.list integration test for Windows# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.unit import skipIf
# Import Salt libs
import salt.utils
@skipIf(not salt.utils.is_windows(), 'windows tests only')
class AutoRunsModuleTest(ModuleCase):
'''
Test the autoruns module
'''
def test_win_autoruns_list(self):
'''
test win_autoruns.list module
'''
ret = self.run_function('autoruns.list')
self.assertIn('HKLM', str(ret))
self.assertTrue(isinstance(ret, dict))
|
<commit_before><commit_msg>Add autoruns.list integration test for Windows<commit_after># -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.unit import skipIf
# Import Salt libs
import salt.utils
@skipIf(not salt.utils.is_windows(), 'windows tests only')
class AutoRunsModuleTest(ModuleCase):
'''
Test the autoruns module
'''
def test_win_autoruns_list(self):
'''
test win_autoruns.list module
'''
ret = self.run_function('autoruns.list')
self.assertIn('HKLM', str(ret))
self.assertTrue(isinstance(ret, dict))
|
|
4f66b1662f3b4513bc3ea2eb3d684fc9b60fa9b3
|
bidb/utils/subprocess.py
|
bidb/utils/subprocess.py
|
from __future__ import absolute_import
import subprocess
def check_output2(args, stdin=None):
p = subprocess.Popen(
args,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
out, _ = p.communicate(input=stdin)
retcode = p.wait()
if retcode:
raise subprocess.CalledProcessError(retcode, ' '.join(args), out)
return out
|
Add our own check_output2 wrapper
|
Add our own check_output2 wrapper
Signed-off-by: Chris Lamb <29e6d179a8d73471df7861382db6dd7e64138033@debian.org>
|
Python
|
agpl-3.0
|
lamby/buildinfo.debian.net,lamby/buildinfo.debian.net
|
Add our own check_output2 wrapper
Signed-off-by: Chris Lamb <29e6d179a8d73471df7861382db6dd7e64138033@debian.org>
|
from __future__ import absolute_import
import subprocess
def check_output2(args, stdin=None):
p = subprocess.Popen(
args,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
out, _ = p.communicate(input=stdin)
retcode = p.wait()
if retcode:
raise subprocess.CalledProcessError(retcode, ' '.join(args), out)
return out
|
<commit_before><commit_msg>Add our own check_output2 wrapper
Signed-off-by: Chris Lamb <29e6d179a8d73471df7861382db6dd7e64138033@debian.org><commit_after>
|
from __future__ import absolute_import
import subprocess
def check_output2(args, stdin=None):
p = subprocess.Popen(
args,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
out, _ = p.communicate(input=stdin)
retcode = p.wait()
if retcode:
raise subprocess.CalledProcessError(retcode, ' '.join(args), out)
return out
|
Add our own check_output2 wrapper
Signed-off-by: Chris Lamb <29e6d179a8d73471df7861382db6dd7e64138033@debian.org>from __future__ import absolute_import
import subprocess
def check_output2(args, stdin=None):
p = subprocess.Popen(
args,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
out, _ = p.communicate(input=stdin)
retcode = p.wait()
if retcode:
raise subprocess.CalledProcessError(retcode, ' '.join(args), out)
return out
|
<commit_before><commit_msg>Add our own check_output2 wrapper
Signed-off-by: Chris Lamb <29e6d179a8d73471df7861382db6dd7e64138033@debian.org><commit_after>from __future__ import absolute_import
import subprocess
def check_output2(args, stdin=None):
p = subprocess.Popen(
args,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
out, _ = p.communicate(input=stdin)
retcode = p.wait()
if retcode:
raise subprocess.CalledProcessError(retcode, ' '.join(args), out)
return out
|
|
e262a10e9a0027e8126032551cae8b6c0816ff22
|
build/extra_gitignore.py
|
build/extra_gitignore.py
|
#!/usr/bin/env python
# Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
""" Adds extra patterns to the root .gitignore file.
Reads the contents of the filename given as the first argument and appends
them to the root .gitignore file. The new entries are intended to be additional
ignoring patterns, or negating patterns to override existing entries (man
gitignore for more details).
"""
import os
import sys
MODIFY_STRING = '# The following added by %s\n'
def main(argv):
if not argv[1]:
# Special case; do nothing.
return 0
modify_string = (MODIFY_STRING % argv[0])
gitignore_file = os.path.dirname(argv[0]) + '/../.gitignore'
lines = open(gitignore_file, 'r').readlines()
for i, line in enumerate(lines):
if line == modify_string:
lines = lines[:i]
break
lines.append(modify_string)
f = open(gitignore_file, 'w')
f.write(''.join(lines))
f.write(open(argv[1], 'r').read())
f.close()
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
Add script for appending entries to .gitignore.
|
Add script for appending entries to .gitignore.
TBR=kjellander
Review URL: https://webrtc-codereview.appspot.com/1629004
Cr-Mirrored-From: https://chromium.googlesource.com/external/webrtc
Cr-Mirrored-Commit: b69cc15467456a070333ff00f886f27ca391b85b
|
Python
|
bsd-3-clause
|
sippet/webrtc,sippet/webrtc,sippet/webrtc,sippet/webrtc,sippet/webrtc,sippet/webrtc
|
Add script for appending entries to .gitignore.
TBR=kjellander
Review URL: https://webrtc-codereview.appspot.com/1629004
Cr-Mirrored-From: https://chromium.googlesource.com/external/webrtc
Cr-Mirrored-Commit: b69cc15467456a070333ff00f886f27ca391b85b
|
#!/usr/bin/env python
# Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
""" Adds extra patterns to the root .gitignore file.
Reads the contents of the filename given as the first argument and appends
them to the root .gitignore file. The new entries are intended to be additional
ignoring patterns, or negating patterns to override existing entries (man
gitignore for more details).
"""
import os
import sys
MODIFY_STRING = '# The following added by %s\n'
def main(argv):
if not argv[1]:
# Special case; do nothing.
return 0
modify_string = (MODIFY_STRING % argv[0])
gitignore_file = os.path.dirname(argv[0]) + '/../.gitignore'
lines = open(gitignore_file, 'r').readlines()
for i, line in enumerate(lines):
if line == modify_string:
lines = lines[:i]
break
lines.append(modify_string)
f = open(gitignore_file, 'w')
f.write(''.join(lines))
f.write(open(argv[1], 'r').read())
f.close()
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
<commit_before><commit_msg>Add script for appending entries to .gitignore.
TBR=kjellander
Review URL: https://webrtc-codereview.appspot.com/1629004
Cr-Mirrored-From: https://chromium.googlesource.com/external/webrtc
Cr-Mirrored-Commit: b69cc15467456a070333ff00f886f27ca391b85b<commit_after>
|
#!/usr/bin/env python
# Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
""" Adds extra patterns to the root .gitignore file.
Reads the contents of the filename given as the first argument and appends
them to the root .gitignore file. The new entries are intended to be additional
ignoring patterns, or negating patterns to override existing entries (man
gitignore for more details).
"""
import os
import sys
MODIFY_STRING = '# The following added by %s\n'
def main(argv):
if not argv[1]:
# Special case; do nothing.
return 0
modify_string = (MODIFY_STRING % argv[0])
gitignore_file = os.path.dirname(argv[0]) + '/../.gitignore'
lines = open(gitignore_file, 'r').readlines()
for i, line in enumerate(lines):
if line == modify_string:
lines = lines[:i]
break
lines.append(modify_string)
f = open(gitignore_file, 'w')
f.write(''.join(lines))
f.write(open(argv[1], 'r').read())
f.close()
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
Add script for appending entries to .gitignore.
TBR=kjellander
Review URL: https://webrtc-codereview.appspot.com/1629004
Cr-Mirrored-From: https://chromium.googlesource.com/external/webrtc
Cr-Mirrored-Commit: b69cc15467456a070333ff00f886f27ca391b85b#!/usr/bin/env python
# Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
""" Adds extra patterns to the root .gitignore file.
Reads the contents of the filename given as the first argument and appends
them to the root .gitignore file. The new entries are intended to be additional
ignoring patterns, or negating patterns to override existing entries (man
gitignore for more details).
"""
import os
import sys
MODIFY_STRING = '# The following added by %s\n'
def main(argv):
if not argv[1]:
# Special case; do nothing.
return 0
modify_string = (MODIFY_STRING % argv[0])
gitignore_file = os.path.dirname(argv[0]) + '/../.gitignore'
lines = open(gitignore_file, 'r').readlines()
for i, line in enumerate(lines):
if line == modify_string:
lines = lines[:i]
break
lines.append(modify_string)
f = open(gitignore_file, 'w')
f.write(''.join(lines))
f.write(open(argv[1], 'r').read())
f.close()
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
<commit_before><commit_msg>Add script for appending entries to .gitignore.
TBR=kjellander
Review URL: https://webrtc-codereview.appspot.com/1629004
Cr-Mirrored-From: https://chromium.googlesource.com/external/webrtc
Cr-Mirrored-Commit: b69cc15467456a070333ff00f886f27ca391b85b<commit_after>#!/usr/bin/env python
# Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
""" Adds extra patterns to the root .gitignore file.
Reads the contents of the filename given as the first argument and appends
them to the root .gitignore file. The new entries are intended to be additional
ignoring patterns, or negating patterns to override existing entries (man
gitignore for more details).
"""
import os
import sys
MODIFY_STRING = '# The following added by %s\n'
def main(argv):
if not argv[1]:
# Special case; do nothing.
return 0
modify_string = (MODIFY_STRING % argv[0])
gitignore_file = os.path.dirname(argv[0]) + '/../.gitignore'
lines = open(gitignore_file, 'r').readlines()
for i, line in enumerate(lines):
if line == modify_string:
lines = lines[:i]
break
lines.append(modify_string)
f = open(gitignore_file, 'w')
f.write(''.join(lines))
f.write(open(argv[1], 'r').read())
f.close()
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
|
ea4a77d171e818dede62978ced6a4f1b1b5a2d51
|
mzalendo/core/kenya_import_scripts/import_constituency_county_map.py
|
mzalendo/core/kenya_import_scripts/import_constituency_county_map.py
|
# Takes Paul's Excel file of constituencies to counties and
# imports this into the db.
import sys
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'mzalendo.settings'
import csv
from django.template.defaultfilters import slugify
# Horrible boilerplate - there must be a better way :)
sys.path.append(
os.path.abspath(
os.path.dirname(__file__) + '../..'
)
)
sys.path.append(
os.path.abspath(
os.path.dirname(__file__) + '../../..'
)
)
from mzalendo.core import models
all_constituencies = set([x.slug for x in models.Place.objects.filter(kind__slug='constituency')])
csv_filename = sys.argv[1]
csv_file = open(csv_filename)
csv_reader = csv.reader(csv_file)
# First line is just headers
csv_reader.next()
county_name = None
county_slug = None
constituency_slug = None
for county_text, constituency_name, count in csv_reader:
if not constituency_name:
continue
if county_text:
county_name = county_text
county_slug = slugify(county_name) + '-county'
try:
county = models.Place.objects.get(
slug=county_slug, kind__slug='county')
except models.Place.DoesNotExist:
print "Can't find county %s" % county_slug
break
constituency_slug = slugify(constituency_name)
try:
constituency = models.Place.objects.get(
slug=constituency_slug, kind__slug='constituency')
except models.Place.DoesNotExist:
print "Can't find constituency %s" % constituency_slug
continue
all_constituencies.remove(constituency_slug)
constituency.parent_place = county
constituency.save()
print "Left over constituencies", all_constituencies
|
Add script to sort out counties as constituency parent places.
|
Add script to sort out counties as constituency parent places.
|
Python
|
agpl-3.0
|
mysociety/pombola,geoffkilpin/pombola,Hutspace/odekro,hzj123/56th,mysociety/pombola,hzj123/56th,ken-muturi/pombola,hzj123/56th,patricmutwiri/pombola,geoffkilpin/pombola,Hutspace/odekro,patricmutwiri/pombola,mysociety/pombola,patricmutwiri/pombola,ken-muturi/pombola,patricmutwiri/pombola,Hutspace/odekro,geoffkilpin/pombola,mysociety/pombola,Hutspace/odekro,geoffkilpin/pombola,Hutspace/odekro,hzj123/56th,geoffkilpin/pombola,ken-muturi/pombola,mysociety/pombola,ken-muturi/pombola,hzj123/56th,ken-muturi/pombola,hzj123/56th,ken-muturi/pombola,patricmutwiri/pombola,geoffkilpin/pombola,mysociety/pombola,patricmutwiri/pombola
|
Add script to sort out counties as constituency parent places.
|
# Takes Paul's Excel file of constituencies to counties and
# imports this into the db.
import sys
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'mzalendo.settings'
import csv
from django.template.defaultfilters import slugify
# Horrible boilerplate - there must be a better way :)
sys.path.append(
os.path.abspath(
os.path.dirname(__file__) + '../..'
)
)
sys.path.append(
os.path.abspath(
os.path.dirname(__file__) + '../../..'
)
)
from mzalendo.core import models
all_constituencies = set([x.slug for x in models.Place.objects.filter(kind__slug='constituency')])
csv_filename = sys.argv[1]
csv_file = open(csv_filename)
csv_reader = csv.reader(csv_file)
# First line is just headers
csv_reader.next()
county_name = None
county_slug = None
constituency_slug = None
for county_text, constituency_name, count in csv_reader:
if not constituency_name:
continue
if county_text:
county_name = county_text
county_slug = slugify(county_name) + '-county'
try:
county = models.Place.objects.get(
slug=county_slug, kind__slug='county')
except models.Place.DoesNotExist:
print "Can't find county %s" % county_slug
break
constituency_slug = slugify(constituency_name)
try:
constituency = models.Place.objects.get(
slug=constituency_slug, kind__slug='constituency')
except models.Place.DoesNotExist:
print "Can't find constituency %s" % constituency_slug
continue
all_constituencies.remove(constituency_slug)
constituency.parent_place = county
constituency.save()
print "Left over constituencies", all_constituencies
|
<commit_before><commit_msg>Add script to sort out counties as constituency parent places.<commit_after>
|
# Takes Paul's Excel file of constituencies to counties and
# imports this into the db.
import sys
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'mzalendo.settings'
import csv
from django.template.defaultfilters import slugify
# Horrible boilerplate - there must be a better way :)
sys.path.append(
os.path.abspath(
os.path.dirname(__file__) + '../..'
)
)
sys.path.append(
os.path.abspath(
os.path.dirname(__file__) + '../../..'
)
)
from mzalendo.core import models
all_constituencies = set([x.slug for x in models.Place.objects.filter(kind__slug='constituency')])
csv_filename = sys.argv[1]
csv_file = open(csv_filename)
csv_reader = csv.reader(csv_file)
# First line is just headers
csv_reader.next()
county_name = None
county_slug = None
constituency_slug = None
for county_text, constituency_name, count in csv_reader:
if not constituency_name:
continue
if county_text:
county_name = county_text
county_slug = slugify(county_name) + '-county'
try:
county = models.Place.objects.get(
slug=county_slug, kind__slug='county')
except models.Place.DoesNotExist:
print "Can't find county %s" % county_slug
break
constituency_slug = slugify(constituency_name)
try:
constituency = models.Place.objects.get(
slug=constituency_slug, kind__slug='constituency')
except models.Place.DoesNotExist:
print "Can't find constituency %s" % constituency_slug
continue
all_constituencies.remove(constituency_slug)
constituency.parent_place = county
constituency.save()
print "Left over constituencies", all_constituencies
|
Add script to sort out counties as constituency parent places.# Takes Paul's Excel file of constituencies to counties and
# imports this into the db.
import sys
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'mzalendo.settings'
import csv
from django.template.defaultfilters import slugify
# Horrible boilerplate - there must be a better way :)
sys.path.append(
os.path.abspath(
os.path.dirname(__file__) + '../..'
)
)
sys.path.append(
os.path.abspath(
os.path.dirname(__file__) + '../../..'
)
)
from mzalendo.core import models
all_constituencies = set([x.slug for x in models.Place.objects.filter(kind__slug='constituency')])
csv_filename = sys.argv[1]
csv_file = open(csv_filename)
csv_reader = csv.reader(csv_file)
# First line is just headers
csv_reader.next()
county_name = None
county_slug = None
constituency_slug = None
for county_text, constituency_name, count in csv_reader:
if not constituency_name:
continue
if county_text:
county_name = county_text
county_slug = slugify(county_name) + '-county'
try:
county = models.Place.objects.get(
slug=county_slug, kind__slug='county')
except models.Place.DoesNotExist:
print "Can't find county %s" % county_slug
break
constituency_slug = slugify(constituency_name)
try:
constituency = models.Place.objects.get(
slug=constituency_slug, kind__slug='constituency')
except models.Place.DoesNotExist:
print "Can't find constituency %s" % constituency_slug
continue
all_constituencies.remove(constituency_slug)
constituency.parent_place = county
constituency.save()
print "Left over constituencies", all_constituencies
|
<commit_before><commit_msg>Add script to sort out counties as constituency parent places.<commit_after># Takes Paul's Excel file of constituencies to counties and
# imports this into the db.
import sys
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'mzalendo.settings'
import csv
from django.template.defaultfilters import slugify
# Horrible boilerplate - there must be a better way :)
sys.path.append(
os.path.abspath(
os.path.dirname(__file__) + '../..'
)
)
sys.path.append(
os.path.abspath(
os.path.dirname(__file__) + '../../..'
)
)
from mzalendo.core import models
all_constituencies = set([x.slug for x in models.Place.objects.filter(kind__slug='constituency')])
csv_filename = sys.argv[1]
csv_file = open(csv_filename)
csv_reader = csv.reader(csv_file)
# First line is just headers
csv_reader.next()
county_name = None
county_slug = None
constituency_slug = None
for county_text, constituency_name, count in csv_reader:
if not constituency_name:
continue
if county_text:
county_name = county_text
county_slug = slugify(county_name) + '-county'
try:
county = models.Place.objects.get(
slug=county_slug, kind__slug='county')
except models.Place.DoesNotExist:
print "Can't find county %s" % county_slug
break
constituency_slug = slugify(constituency_name)
try:
constituency = models.Place.objects.get(
slug=constituency_slug, kind__slug='constituency')
except models.Place.DoesNotExist:
print "Can't find constituency %s" % constituency_slug
continue
all_constituencies.remove(constituency_slug)
constituency.parent_place = county
constituency.save()
print "Left over constituencies", all_constituencies
|
|
b7e2391e6627d7884be7def6af9f78a2184ec484
|
Snippets/cmap-format.py
|
Snippets/cmap-format.py
|
#! /usr/bin/env python
# Sample script to convert legacy cmap subtables to format-4
# subtables. Note that this is rarely what one needs. You
# probably need to just drop the legacy subtables if the font
# already has a format-4 subtable.
#
# Other times, you would need to convert a non-Unicode cmap
# legacy subtable to a Unicode one. In those cases, use the
# getEncoding() of subtable and use that encoding to map the
# characters to Unicode... TODO: Extend this script to do that.
from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from fontTools.ttLib import TTFont
from fontTools.ttLib.tables._c_m_a_p import CmapSubtable
import sys
if len(sys.argv) != 3:
print("usage: cmap-format.py fontfile.ttf outfile.ttf")
sys.exit(1)
fontfile = sys.argv[1]
outfile = sys.argv[2]
font = TTFont(fontfile)
cmap = font['cmap']
outtables = []
for table in cmap.tables:
if table.format in [4, 12, 13, 14]:
outtables.append(table)
    # Convert to format4
newtable = CmapSubtable.newSubtable(4)
newtable.platformID = table.platformID
newtable.platEncID = table.platEncID
newtable.language = table.language
newtable.cmap = table.cmap
outtables.append(newtable)
cmap.tables = outtables
font.save(outfile)
|
Add script for cmap subtable format conversion
|
[Snippets] Add script for cmap subtable format conversion
Fixes https://github.com/behdad/fonttools/issues/340
|
Python
|
mit
|
fonttools/fonttools,googlefonts/fonttools
|
[Snippets] Add script for cmap subtable format conversion
Fixes https://github.com/behdad/fonttools/issues/340
|
#! /usr/bin/env python
# Sample script to convert legacy cmap subtables to format-4
# subtables. Note that this is rarely what one needs. You
# probably need to just drop the legacy subtables if the font
# already has a format-4 subtable.
#
# Other times, you would need to convert a non-Unicode cmap
# legacy subtable to a Unicode one. In those cases, use the
# getEncoding() of subtable and use that encoding to map the
# characters to Unicode... TODO: Extend this script to do that.
from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from fontTools.ttLib import TTFont
from fontTools.ttLib.tables._c_m_a_p import CmapSubtable
import sys
if len(sys.argv) != 3:
print("usage: cmap-format.py fontfile.ttf outfile.ttf")
sys.exit(1)
fontfile = sys.argv[1]
outfile = sys.argv[2]
font = TTFont(fontfile)
cmap = font['cmap']
outtables = []
for table in cmap.tables:
if table.format in [4, 12, 13, 14]:
outtables.append(table)
    # Convert to format4
newtable = CmapSubtable.newSubtable(4)
newtable.platformID = table.platformID
newtable.platEncID = table.platEncID
newtable.language = table.language
newtable.cmap = table.cmap
outtables.append(newtable)
cmap.tables = outtables
font.save(outfile)
|
<commit_before><commit_msg>[Snippets] Add script for cmap subtable format conversion
Fixes https://github.com/behdad/fonttools/issues/340<commit_after>
|
#! /usr/bin/env python
# Sample script to convert legacy cmap subtables to format-4
# subtables. Note that this is rarely what one needs. You
# probably need to just drop the legacy subtables if the font
# already has a format-4 subtable.
#
# Other times, you would need to convert a non-Unicode cmap
# legacy subtable to a Unicode one. In those cases, use the
# getEncoding() of subtable and use that encoding to map the
# characters to Unicode... TODO: Extend this script to do that.
from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from fontTools.ttLib import TTFont
from fontTools.ttLib.tables._c_m_a_p import CmapSubtable
import sys
if len(sys.argv) != 3:
print("usage: cmap-format.py fontfile.ttf outfile.ttf")
sys.exit(1)
fontfile = sys.argv[1]
outfile = sys.argv[2]
font = TTFont(fontfile)
cmap = font['cmap']
outtables = []
for table in cmap.tables:
if table.format in [4, 12, 13, 14]:
outtables.append(table)
    # Convert to format4
newtable = CmapSubtable.newSubtable(4)
newtable.platformID = table.platformID
newtable.platEncID = table.platEncID
newtable.language = table.language
newtable.cmap = table.cmap
outtables.append(newtable)
cmap.tables = outtables
font.save(outfile)
|
[Snippets] Add script for cmap subtable format conversion
Fixes https://github.com/behdad/fonttools/issues/340#! /usr/bin/env python
# Sample script to convert legacy cmap subtables to format-4
# subtables. Note that this is rarely what one needs. You
# probably need to just drop the legacy subtables if the font
# already has a format-4 subtable.
#
# Other times, you would need to convert a non-Unicode cmap
# legacy subtable to a Unicode one. In those cases, use the
# getEncoding() of subtable and use that encoding to map the
# characters to Unicode... TODO: Extend this script to do that.
from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from fontTools.ttLib import TTFont
from fontTools.ttLib.tables._c_m_a_p import CmapSubtable
import sys
if len(sys.argv) != 3:
print("usage: cmap-format.py fontfile.ttf outfile.ttf")
sys.exit(1)
fontfile = sys.argv[1]
outfile = sys.argv[2]
font = TTFont(fontfile)
cmap = font['cmap']
outtables = []
for table in cmap.tables:
if table.format in [4, 12, 13, 14]:
outtables.append(table)
    # Convert to format4
newtable = CmapSubtable.newSubtable(4)
newtable.platformID = table.platformID
newtable.platEncID = table.platEncID
newtable.language = table.language
newtable.cmap = table.cmap
outtables.append(newtable)
cmap.tables = outtables
font.save(outfile)
|
<commit_before><commit_msg>[Snippets] Add script for cmap subtable format conversion
Fixes https://github.com/behdad/fonttools/issues/340<commit_after>#! /usr/bin/env python
# Sample script to convert legacy cmap subtables to format-4
# subtables. Note that this is rarely what one needs. You
# probably need to just drop the legacy subtables if the font
# already has a format-4 subtable.
#
# Other times, you would need to convert a non-Unicode cmap
# legacy subtable to a Unicode one. In those cases, use the
# getEncoding() of subtable and use that encoding to map the
# characters to Unicode... TODO: Extend this script to do that.
from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from fontTools.ttLib import TTFont
from fontTools.ttLib.tables._c_m_a_p import CmapSubtable
import sys
if len(sys.argv) != 3:
print("usage: cmap-format.py fontfile.ttf outfile.ttf")
sys.exit(1)
fontfile = sys.argv[1]
outfile = sys.argv[2]
font = TTFont(fontfile)
cmap = font['cmap']
outtables = []
for table in cmap.tables:
if table.format in [4, 12, 13, 14]:
outtables.append(table)
    # Convert to format4
newtable = CmapSubtable.newSubtable(4)
newtable.platformID = table.platformID
newtable.platEncID = table.platEncID
newtable.language = table.language
newtable.cmap = table.cmap
outtables.append(newtable)
cmap.tables = outtables
font.save(outfile)
|
|
721565636b84a1a2bf7d2c89cca2b8206b6530a2
|
recipe-server/normandy/recipes/migrations/0046_reset_signatures.py
|
recipe-server/normandy/recipes/migrations/0046_reset_signatures.py
|
"""
Removes signatures, so they can be easily recreated during deployment.
This migration is intended to be used between "eras" of signatures. As
the serialization format of recipes changes, the signatures need to
also change. This could be handled automatically, but it is easier to
deploy if we just remove everything in a migration, and allow the
normal processes to regenerate the signatures.
"""
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def remove_signatures(apps, schema_editor):
Recipe = apps.get_model('recipes', 'Recipe')
Action = apps.get_model('recipes', 'Action')
Signature = apps.get_model('recipes', 'Signature')
for recipe in Recipe.objects.exclude(signature=None):
sig = recipe.signature
recipe.signature = None
recipe.save()
sig.delete()
for action in Action.objects.exclude(signature=None):
sig = action.signature
action.signature = None
action.save()
        sig.delete()
for sig in Signature.objects.all():
sig.delete()
class Migration(migrations.Migration):
dependencies = [
('recipes', '0045_update_action_hashes'),
]
operations = [
        # This functions as both a forward and reverse migration
migrations.RunPython(remove_signatures, remove_signatures),
]
|
Add another signature reset migration.
|
recipe-server: Add another signature reset migration.
|
Python
|
mpl-2.0
|
mozilla/normandy,mozilla/normandy,mozilla/normandy,mozilla/normandy
|
recipe-server: Add another signature reset migration.
|
"""
Removes signatures, so they can be easily recreated during deployment.
This migration is intended to be used between "eras" of signatures. As
the serialization format of recipes changes, the signatures need to
also change. This could be handled automatically, but it is easier to
deploy if we just remove everything in a migration, and allow the
normal processes to regenerate the signatures.
"""
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def remove_signatures(apps, schema_editor):
Recipe = apps.get_model('recipes', 'Recipe')
Action = apps.get_model('recipes', 'Action')
Signature = apps.get_model('recipes', 'Signature')
for recipe in Recipe.objects.exclude(signature=None):
sig = recipe.signature
recipe.signature = None
recipe.save()
sig.delete()
for action in Action.objects.exclude(signature=None):
sig = action.signature
action.signature = None
action.save()
        sig.delete()
for sig in Signature.objects.all():
sig.delete()
class Migration(migrations.Migration):
dependencies = [
('recipes', '0045_update_action_hashes'),
]
operations = [
        # This functions as both a forward and reverse migration
migrations.RunPython(remove_signatures, remove_signatures),
]
|
<commit_before><commit_msg>recipe-server: Add another signature reset migration.<commit_after>
|
"""
Removes signatures, so they can be easily recreated during deployment.
This migration is intended to be used between "eras" of signatures. As
the serialization format of recipes changes, the signatures need to
also change. This could be handled automatically, but it is easier to
deploy if we just remove everything in a migration, and allow the
normal processes to regenerate the signatures.
"""
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def remove_signatures(apps, schema_editor):
Recipe = apps.get_model('recipes', 'Recipe')
Action = apps.get_model('recipes', 'Action')
Signature = apps.get_model('recipes', 'Signature')
for recipe in Recipe.objects.exclude(signature=None):
sig = recipe.signature
recipe.signature = None
recipe.save()
sig.delete()
for action in Action.objects.exclude(signature=None):
sig = action.signature
action.signature = None
action.save()
        sig.delete()
for sig in Signature.objects.all():
sig.delete()
class Migration(migrations.Migration):
dependencies = [
('recipes', '0045_update_action_hashes'),
]
operations = [
        # This functions as both a forward and reverse migration
migrations.RunPython(remove_signatures, remove_signatures),
]
|
recipe-server: Add another signature reset migration."""
Removes signatures, so they can be easily recreated during deployment.
This migration is intended to be used between "eras" of signatures. As
the serialization format of recipes changes, the signatures need to
also change. This could be handled automatically, but it is easier to
deploy if we just remove everything in a migration, and allow the
normal processes to regenerate the signatures.
"""
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def remove_signatures(apps, schema_editor):
Recipe = apps.get_model('recipes', 'Recipe')
Action = apps.get_model('recipes', 'Action')
Signature = apps.get_model('recipes', 'Signature')
for recipe in Recipe.objects.exclude(signature=None):
sig = recipe.signature
recipe.signature = None
recipe.save()
sig.delete()
for action in Action.objects.exclude(signature=None):
sig = action.signature
action.signature = None
action.save()
        sig.delete()
for sig in Signature.objects.all():
sig.delete()
class Migration(migrations.Migration):
dependencies = [
('recipes', '0045_update_action_hashes'),
]
operations = [
        # This functions as both a forward and reverse migration
migrations.RunPython(remove_signatures, remove_signatures),
]
|
<commit_before><commit_msg>recipe-server: Add another signature reset migration.<commit_after>"""
Removes signatures, so they can be easily recreated during deployment.
This migration is intended to be used between "eras" of signatures. As
the serialization format of recipes changes, the signatures need to
also change. This could be handled automatically, but it is easier to
deploy if we just remove everything in a migration, and allow the
normal processes to regenerate the signatures.
"""
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def remove_signatures(apps, schema_editor):
Recipe = apps.get_model('recipes', 'Recipe')
Action = apps.get_model('recipes', 'Action')
Signature = apps.get_model('recipes', 'Signature')
for recipe in Recipe.objects.exclude(signature=None):
sig = recipe.signature
recipe.signature = None
recipe.save()
sig.delete()
for action in Action.objects.exclude(signature=None):
sig = action.signature
action.signature = None
action.save()
        sig.delete()
for sig in Signature.objects.all():
sig.delete()
class Migration(migrations.Migration):
dependencies = [
('recipes', '0045_update_action_hashes'),
]
operations = [
        # This functions as both a forward and reverse migration
migrations.RunPython(remove_signatures, remove_signatures),
]
|
|
d98d4b41c2ecab5a61f975e1b23b8e06709d4d3f
|
registries/serializers.py
|
registries/serializers.py
|
from rest_framework import serializers
from registries.models import Organization
from gwells.models import ProvinceState
class DrillerListSerializer(serializers.ModelSerializer):
province_state = serializers.ReadOnlyField()
class Meta:
model = Organization
# Using all fields for now
fields = (
#'who_created',
#'when_created',
#'who_updated',
#'when_updated',
'name',
'street_address',
'city',
'province_state',
'postal_code',
'main_tel',
'fax_tel',
'website_url',
'certificate_authority',
)
|
Add simple driller list serializer
|
Add simple driller list serializer
|
Python
|
apache-2.0
|
rstens/gwells,bcgov/gwells,rstens/gwells,rstens/gwells,rstens/gwells,bcgov/gwells,bcgov/gwells,bcgov/gwells
|
Add simple driller list serializer
|
from rest_framework import serializers
from registries.models import Organization
from gwells.models import ProvinceState
class DrillerListSerializer(serializers.ModelSerializer):
province_state = serializers.ReadOnlyField()
class Meta:
model = Organization
# Using all fields for now
fields = (
#'who_created',
#'when_created',
#'who_updated',
#'when_updated',
'name',
'street_address',
'city',
'province_state',
'postal_code',
'main_tel',
'fax_tel',
'website_url',
'certificate_authority',
)
|
<commit_before><commit_msg>Add simple driller list serializer<commit_after>
|
from rest_framework import serializers
from registries.models import Organization
from gwells.models import ProvinceState
class DrillerListSerializer(serializers.ModelSerializer):
province_state = serializers.ReadOnlyField()
class Meta:
model = Organization
# Using all fields for now
fields = (
#'who_created',
#'when_created',
#'who_updated',
#'when_updated',
'name',
'street_address',
'city',
'province_state',
'postal_code',
'main_tel',
'fax_tel',
'website_url',
'certificate_authority',
)
|
Add simple driller list serializerfrom rest_framework import serializers
from registries.models import Organization
from gwells.models import ProvinceState
class DrillerListSerializer(serializers.ModelSerializer):
province_state = serializers.ReadOnlyField()
class Meta:
model = Organization
# Using all fields for now
fields = (
#'who_created',
#'when_created',
#'who_updated',
#'when_updated',
'name',
'street_address',
'city',
'province_state',
'postal_code',
'main_tel',
'fax_tel',
'website_url',
'certificate_authority',
)
|
<commit_before><commit_msg>Add simple driller list serializer<commit_after>from rest_framework import serializers
from registries.models import Organization
from gwells.models import ProvinceState
class DrillerListSerializer(serializers.ModelSerializer):
province_state = serializers.ReadOnlyField()
class Meta:
model = Organization
# Using all fields for now
fields = (
#'who_created',
#'when_created',
#'who_updated',
#'when_updated',
'name',
'street_address',
'city',
'province_state',
'postal_code',
'main_tel',
'fax_tel',
'website_url',
'certificate_authority',
)
|
|
452282391f356366d208bd408e5d9b7315b6d98d
|
polygraph/types/tests/test_input_object.py
|
polygraph/types/tests/test_input_object.py
|
from unittest import TestCase
from polygraph.exceptions import PolygraphSchemaError
from polygraph.types.input_object import (
InputObject,
InputValue,
validate_input_object_schema,
)
from polygraph.types.lazy_type import LazyType
from polygraph.types.scalar import String
from polygraph.types.tests.helper import Person
class SampleInput(InputObject):
name = InputValue(String, name="name")
age = InputValue(LazyType("Int", module_name="polygraph.types.scalar"), name="age")
class NoInput(InputObject):
pass
class DuplicateInputName(InputObject):
name = InputValue(String, name="name")
second_name = InputValue(String, name="name")
class NonInputType(InputObject):
person = InputValue(Person)
class InputObjectTest(TestCase):
def test_validate_good_schema(self):
self.assertIsNone(validate_input_object_schema(SampleInput))
def test_validate_bad_schema(self):
for bad_input_object in [NoInput, DuplicateInputName, NonInputType]:
with self.subTest(input_object=bad_input_object):
with self.assertRaises(PolygraphSchemaError):
validate_input_object_schema(bad_input_object)
|
Add unit tests around InputObject
|
Add unit tests around InputObject
|
Python
|
mit
|
polygraph-python/polygraph
|
Add unit tests around InputObject
|
from unittest import TestCase
from polygraph.exceptions import PolygraphSchemaError
from polygraph.types.input_object import (
InputObject,
InputValue,
validate_input_object_schema,
)
from polygraph.types.lazy_type import LazyType
from polygraph.types.scalar import String
from polygraph.types.tests.helper import Person
class SampleInput(InputObject):
name = InputValue(String, name="name")
age = InputValue(LazyType("Int", module_name="polygraph.types.scalar"), name="age")
class NoInput(InputObject):
pass
class DuplicateInputName(InputObject):
name = InputValue(String, name="name")
second_name = InputValue(String, name="name")
class NonInputType(InputObject):
person = InputValue(Person)
class InputObjectTest(TestCase):
def test_validate_good_schema(self):
self.assertIsNone(validate_input_object_schema(SampleInput))
def test_validate_bad_schema(self):
for bad_input_object in [NoInput, DuplicateInputName, NonInputType]:
with self.subTest(input_object=bad_input_object):
with self.assertRaises(PolygraphSchemaError):
validate_input_object_schema(bad_input_object)
|
<commit_before><commit_msg>Add unit tests around InputObject<commit_after>
|
from unittest import TestCase
from polygraph.exceptions import PolygraphSchemaError
from polygraph.types.input_object import (
InputObject,
InputValue,
validate_input_object_schema,
)
from polygraph.types.lazy_type import LazyType
from polygraph.types.scalar import String
from polygraph.types.tests.helper import Person
class SampleInput(InputObject):
name = InputValue(String, name="name")
age = InputValue(LazyType("Int", module_name="polygraph.types.scalar"), name="age")
class NoInput(InputObject):
pass
class DuplicateInputName(InputObject):
name = InputValue(String, name="name")
second_name = InputValue(String, name="name")
class NonInputType(InputObject):
person = InputValue(Person)
class InputObjectTest(TestCase):
def test_validate_good_schema(self):
self.assertIsNone(validate_input_object_schema(SampleInput))
def test_validate_bad_schema(self):
for bad_input_object in [NoInput, DuplicateInputName, NonInputType]:
with self.subTest(input_object=bad_input_object):
with self.assertRaises(PolygraphSchemaError):
validate_input_object_schema(bad_input_object)
|
Add unit tests around InputObjectfrom unittest import TestCase
from polygraph.exceptions import PolygraphSchemaError
from polygraph.types.input_object import (
InputObject,
InputValue,
validate_input_object_schema,
)
from polygraph.types.lazy_type import LazyType
from polygraph.types.scalar import String
from polygraph.types.tests.helper import Person
class SampleInput(InputObject):
name = InputValue(String, name="name")
age = InputValue(LazyType("Int", module_name="polygraph.types.scalar"), name="age")
class NoInput(InputObject):
pass
class DuplicateInputName(InputObject):
name = InputValue(String, name="name")
second_name = InputValue(String, name="name")
class NonInputType(InputObject):
person = InputValue(Person)
class InputObjectTest(TestCase):
def test_validate_good_schema(self):
self.assertIsNone(validate_input_object_schema(SampleInput))
def test_validate_bad_schema(self):
for bad_input_object in [NoInput, DuplicateInputName, NonInputType]:
with self.subTest(input_object=bad_input_object):
with self.assertRaises(PolygraphSchemaError):
validate_input_object_schema(bad_input_object)
|
<commit_before><commit_msg>Add unit tests around InputObject<commit_after>from unittest import TestCase
from polygraph.exceptions import PolygraphSchemaError
from polygraph.types.input_object import (
InputObject,
InputValue,
validate_input_object_schema,
)
from polygraph.types.lazy_type import LazyType
from polygraph.types.scalar import String
from polygraph.types.tests.helper import Person
class SampleInput(InputObject):
name = InputValue(String, name="name")
age = InputValue(LazyType("Int", module_name="polygraph.types.scalar"), name="age")
class NoInput(InputObject):
pass
class DuplicateInputName(InputObject):
name = InputValue(String, name="name")
second_name = InputValue(String, name="name")
class NonInputType(InputObject):
person = InputValue(Person)
class InputObjectTest(TestCase):
def test_validate_good_schema(self):
self.assertIsNone(validate_input_object_schema(SampleInput))
def test_validate_bad_schema(self):
for bad_input_object in [NoInput, DuplicateInputName, NonInputType]:
with self.subTest(input_object=bad_input_object):
with self.assertRaises(PolygraphSchemaError):
validate_input_object_schema(bad_input_object)
|
|
10ddce342da23c3702c1c0def4534d37cf6769b7
|
tests/test_threading.py
|
tests/test_threading.py
|
from unittest import TestCase
from pydatajson.threading_helper import apply_threading
class ThreadingTests(TestCase):
def test_threading(self):
elements = [1, 2, 3, 4]
def function(x):
return x ** 2
result = apply_threading(elements, function, 3)
self.assertEqual(result, [1, 4, 9, 16])
|
Test case that goes through threading
|
Test case that goes through threading
|
Python
|
mit
|
datosgobar/pydatajson,datosgobar/pydatajson
|
Test case that goes through threading
|
from unittest import TestCase
from pydatajson.threading_helper import apply_threading
class ThreadingTests(TestCase):
def test_threading(self):
elements = [1, 2, 3, 4]
def function(x):
return x ** 2
result = apply_threading(elements, function, 3)
self.assertEqual(result, [1, 4, 9, 16])
|
<commit_before><commit_msg>Test case that goes through threading<commit_after>
|
from unittest import TestCase
from pydatajson.threading_helper import apply_threading
class ThreadingTests(TestCase):
def test_threading(self):
elements = [1, 2, 3, 4]
def function(x):
return x ** 2
result = apply_threading(elements, function, 3)
self.assertEqual(result, [1, 4, 9, 16])
|
Test case that goes through threadingfrom unittest import TestCase
from pydatajson.threading_helper import apply_threading
class ThreadingTests(TestCase):
def test_threading(self):
elements = [1, 2, 3, 4]
def function(x):
return x ** 2
result = apply_threading(elements, function, 3)
self.assertEqual(result, [1, 4, 9, 16])
|
<commit_before><commit_msg>Test case that goes through threading<commit_after>from unittest import TestCase
from pydatajson.threading_helper import apply_threading
class ThreadingTests(TestCase):
def test_threading(self):
elements = [1, 2, 3, 4]
def function(x):
return x ** 2
result = apply_threading(elements, function, 3)
self.assertEqual(result, [1, 4, 9, 16])
|
|
4871896765889576eb0ef2c97d94810f50ffe9d4
|
datasciencebox/tests/salt/test_mesos.py
|
datasciencebox/tests/salt/test_mesos.py
|
import pytest
import requests
import utils
def setup_module(module):
utils.invoke('install', 'mesos')
@utils.vagranttest
def test_salt_formulas():
project = utils.get_test_project()
kwargs = {'test': 'true', '--out': 'json', '--out-indent': '-1'}
out = project.salt('state.sls', args=['cdh5.zookeeper'], kwargs=kwargs)
utils.check_all_true(out, none_is_ok=True)
kwargs = {'test': 'true', '--out': 'json', '--out-indent': '-1'}
out = project.salt('state.sls', args=['cdh5.mesos.cluster'], kwargs=kwargs)
utils.check_all_true(out, none_is_ok=True)
@utils.vagranttest
def test_namenode_ui():
'''
    Note 1: Namenode webpage uses a lot of javascript; requests alone are not good enough
    Note 2: Mesos UI does not bind to 0.0.0.0 so we need an explicit vagrant IP
'''
project = utils.get_test_project()
nn_ip = project.cluster.master.ip
nn_ip = '10.10.10.100'
r = requests.get('http://%s:5050/' % nn_ip)
assert r.status_code == 200
print r.text
|
Add basic tests for mesos
|
Add basic tests for mesos
|
Python
|
apache-2.0
|
danielfrg/datasciencebox,danielfrg/datasciencebox,danielfrg/datasciencebox,danielfrg/datasciencebox
|
Add basic tests for mesos
|
import pytest
import requests
import utils
def setup_module(module):
utils.invoke('install', 'mesos')
@utils.vagranttest
def test_salt_formulas():
project = utils.get_test_project()
kwargs = {'test': 'true', '--out': 'json', '--out-indent': '-1'}
out = project.salt('state.sls', args=['cdh5.zookeeper'], kwargs=kwargs)
utils.check_all_true(out, none_is_ok=True)
kwargs = {'test': 'true', '--out': 'json', '--out-indent': '-1'}
out = project.salt('state.sls', args=['cdh5.mesos.cluster'], kwargs=kwargs)
utils.check_all_true(out, none_is_ok=True)
@utils.vagranttest
def test_namenode_ui():
'''
    Note 1: Namenode webpage uses a lot of javascript; requests alone are not good enough
    Note 2: Mesos UI does not bind to 0.0.0.0 so we need an explicit vagrant IP
'''
project = utils.get_test_project()
nn_ip = project.cluster.master.ip
nn_ip = '10.10.10.100'
r = requests.get('http://%s:5050/' % nn_ip)
assert r.status_code == 200
print r.text
|
<commit_before><commit_msg>Add basic tests for mesos<commit_after>
|
import pytest
import requests
import utils
def setup_module(module):
utils.invoke('install', 'mesos')
@utils.vagranttest
def test_salt_formulas():
project = utils.get_test_project()
kwargs = {'test': 'true', '--out': 'json', '--out-indent': '-1'}
out = project.salt('state.sls', args=['cdh5.zookeeper'], kwargs=kwargs)
utils.check_all_true(out, none_is_ok=True)
kwargs = {'test': 'true', '--out': 'json', '--out-indent': '-1'}
out = project.salt('state.sls', args=['cdh5.mesos.cluster'], kwargs=kwargs)
utils.check_all_true(out, none_is_ok=True)
@utils.vagranttest
def test_namenode_ui():
'''
    Note 1: Namenode webpage uses a lot of javascript; requests alone are not good enough
    Note 2: Mesos UI does not bind to 0.0.0.0 so we need an explicit vagrant IP
'''
project = utils.get_test_project()
nn_ip = project.cluster.master.ip
nn_ip = '10.10.10.100'
r = requests.get('http://%s:5050/' % nn_ip)
assert r.status_code == 200
print r.text
|
Add basic tests for mesosimport pytest
import requests
import utils
def setup_module(module):
utils.invoke('install', 'mesos')
@utils.vagranttest
def test_salt_formulas():
project = utils.get_test_project()
kwargs = {'test': 'true', '--out': 'json', '--out-indent': '-1'}
out = project.salt('state.sls', args=['cdh5.zookeeper'], kwargs=kwargs)
utils.check_all_true(out, none_is_ok=True)
kwargs = {'test': 'true', '--out': 'json', '--out-indent': '-1'}
out = project.salt('state.sls', args=['cdh5.mesos.cluster'], kwargs=kwargs)
utils.check_all_true(out, none_is_ok=True)
@utils.vagranttest
def test_namenode_ui():
'''
    Note 1: Namenode webpage uses a lot of javascript; requests alone are not good enough
    Note 2: Mesos UI does not bind to 0.0.0.0 so we need an explicit vagrant IP
'''
project = utils.get_test_project()
nn_ip = project.cluster.master.ip
nn_ip = '10.10.10.100'
r = requests.get('http://%s:5050/' % nn_ip)
assert r.status_code == 200
print r.text
|
<commit_before><commit_msg>Add basic tests for mesos<commit_after>import pytest
import requests
import utils
def setup_module(module):
utils.invoke('install', 'mesos')
@utils.vagranttest
def test_salt_formulas():
project = utils.get_test_project()
kwargs = {'test': 'true', '--out': 'json', '--out-indent': '-1'}
out = project.salt('state.sls', args=['cdh5.zookeeper'], kwargs=kwargs)
utils.check_all_true(out, none_is_ok=True)
kwargs = {'test': 'true', '--out': 'json', '--out-indent': '-1'}
out = project.salt('state.sls', args=['cdh5.mesos.cluster'], kwargs=kwargs)
utils.check_all_true(out, none_is_ok=True)
@utils.vagranttest
def test_namenode_ui():
'''
    Note 1: Namenode webpage uses a lot of javascript; requests alone are not good enough
    Note 2: Mesos UI does not bind to 0.0.0.0 so we need an explicit vagrant IP
'''
project = utils.get_test_project()
nn_ip = project.cluster.master.ip
nn_ip = '10.10.10.100'
r = requests.get('http://%s:5050/' % nn_ip)
assert r.status_code == 200
print r.text
|
|
a6a701778d615f57be78db494c6adfed10d55c9f
|
tools/dartium/download_multivm.py
|
tools/dartium/download_multivm.py
|
#!/usr/bin/python
# Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
"""Download archived multivm or dartium builds.
Usage: download_multivm.py revision target_directory
"""
import imp
import os
import platform
import shutil
import subprocess
import sys
# We are in [checkout dir]/src/dart/tools/dartium in a dartium/multivm checkout
TOOLS_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SRC_DIR = os.path.dirname(os.path.dirname(TOOLS_DIR))
GS_BUCKET = 'gs://dartium-archive'
if platform.system() == 'Windows':
GSUTIL = 'e:\\b\\build\\scripts\\slave\\gsutil.bat'
else:
GSUTIL = '/b/build/scripts/slave/gsutil'
if not os.path.exists(GSUTIL):
GSUTIL = 'gsutil'
def ExecuteCommand(cmd):
print 'Executing: ' + ' '.join(cmd)
subprocess.check_output(cmd)
def main():
revision = sys.argv[1]
target_dir = sys.argv[2]
archive_dir = (os.environ['BUILDBOT_BUILDERNAME']
.replace('linux', 'lucid64')
.replace('multivm', 'multivm-dartium'))
utils = imp.load_source('utils', os.path.join(TOOLS_DIR, 'utils.py'))
with utils.TempDir() as temp_dir:
archive_file = archive_dir + '-' + revision + '.zip'
gs_source = '/'.join([GS_BUCKET, archive_dir, archive_file])
zip_file = os.path.join(temp_dir, archive_file)
ExecuteCommand([GSUTIL, 'cp', gs_source, zip_file])
unzip_dir = zip_file.replace('.zip', '')
if platform.system() == 'Windows':
executable = os.path.join(SRC_DIR, 'third_party', 'lzma_sdk',
'Executable', '7za.exe')
ExecuteCommand([executable, 'x', '-aoa', '-o' + temp_dir, zip_file])
else:
ExecuteCommand(['unzip', zip_file, '-d', temp_dir])
if os.path.exists(target_dir):
shutil.rmtree(target_dir)
shutil.move(unzip_dir, target_dir)
if __name__ == '__main__':
sys.exit(main())
|
Add multivm archive download script for buildbot use.
|
Add multivm archive download script for buildbot use.
BUG=
R=ricow@google.com
Review URL: https://codereview.chromium.org//291153010
git-svn-id: c93d8a2297af3b929165606efe145742a534bc71@36513 260f80e4-7a28-3924-810f-c04153c831b5
|
Python
|
bsd-3-clause
|
dartino/dart-sdk,dart-lang/sdk,dartino/dart-sdk,dart-lang/sdk,dart-archive/dart-sdk,dart-archive/dart-sdk,dart-archive/dart-sdk,dart-archive/dart-sdk,dart-archive/dart-sdk,dartino/dart-sdk,dart-lang/sdk,dart-lang/sdk,dartino/dart-sdk,dart-lang/sdk,dartino/dart-sdk,dart-archive/dart-sdk,dart-lang/sdk,dartino/dart-sdk,dart-archive/dart-sdk,dart-lang/sdk,dartino/dart-sdk,dartino/dart-sdk,dart-lang/sdk,dart-archive/dart-sdk,dart-archive/dart-sdk,dartino/dart-sdk
|
Add multivm archive download script for buildbot use.
BUG=
R=ricow@google.com
Review URL: https://codereview.chromium.org//291153010
git-svn-id: c93d8a2297af3b929165606efe145742a534bc71@36513 260f80e4-7a28-3924-810f-c04153c831b5
|
#!/usr/bin/python
# Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
"""Download archived multivm or dartium builds.
Usage: download_multivm.py revision target_directory
"""
import imp
import os
import platform
import shutil
import subprocess
import sys
# We are in [checkout dir]/src/dart/tools/dartium in a dartium/multivm checkout
TOOLS_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SRC_DIR = os.path.dirname(os.path.dirname(TOOLS_DIR))
GS_BUCKET = 'gs://dartium-archive'
if platform.system() == 'Windows':
GSUTIL = 'e:\\b\\build\\scripts\\slave\\gsutil.bat'
else:
GSUTIL = '/b/build/scripts/slave/gsutil'
if not os.path.exists(GSUTIL):
GSUTIL = 'gsutil'
def ExecuteCommand(cmd):
print 'Executing: ' + ' '.join(cmd)
subprocess.check_output(cmd)
def main():
revision = sys.argv[1]
target_dir = sys.argv[2]
archive_dir = (os.environ['BUILDBOT_BUILDERNAME']
.replace('linux', 'lucid64')
.replace('multivm', 'multivm-dartium'))
utils = imp.load_source('utils', os.path.join(TOOLS_DIR, 'utils.py'))
with utils.TempDir() as temp_dir:
archive_file = archive_dir + '-' + revision + '.zip'
gs_source = '/'.join([GS_BUCKET, archive_dir, archive_file])
zip_file = os.path.join(temp_dir, archive_file)
ExecuteCommand([GSUTIL, 'cp', gs_source, zip_file])
unzip_dir = zip_file.replace('.zip', '')
if platform.system() == 'Windows':
executable = os.path.join(SRC_DIR, 'third_party', 'lzma_sdk',
'Executable', '7za.exe')
ExecuteCommand([executable, 'x', '-aoa', '-o' + temp_dir, zip_file])
else:
ExecuteCommand(['unzip', zip_file, '-d', temp_dir])
if os.path.exists(target_dir):
shutil.rmtree(target_dir)
shutil.move(unzip_dir, target_dir)
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>Add multivm archive download script for buildbot use.
BUG=
R=ricow@google.com
Review URL: https://codereview.chromium.org//291153010
git-svn-id: c93d8a2297af3b929165606efe145742a534bc71@36513 260f80e4-7a28-3924-810f-c04153c831b5<commit_after>
|
#!/usr/bin/python
# Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
"""Download archived multivm or dartium builds.
Usage: download_multivm.py revision target_directory
"""
import imp
import os
import platform
import shutil
import subprocess
import sys
# We are in [checkout dir]/src/dart/tools/dartium in a dartium/multivm checkout
TOOLS_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SRC_DIR = os.path.dirname(os.path.dirname(TOOLS_DIR))
GS_BUCKET = 'gs://dartium-archive'
if platform.system() == 'Windows':
GSUTIL = 'e:\\b\\build\\scripts\\slave\\gsutil.bat'
else:
GSUTIL = '/b/build/scripts/slave/gsutil'
if not os.path.exists(GSUTIL):
GSUTIL = 'gsutil'
def ExecuteCommand(cmd):
print 'Executing: ' + ' '.join(cmd)
subprocess.check_output(cmd)
def main():
revision = sys.argv[1]
target_dir = sys.argv[2]
archive_dir = (os.environ['BUILDBOT_BUILDERNAME']
.replace('linux', 'lucid64')
.replace('multivm', 'multivm-dartium'))
utils = imp.load_source('utils', os.path.join(TOOLS_DIR, 'utils.py'))
with utils.TempDir() as temp_dir:
archive_file = archive_dir + '-' + revision + '.zip'
gs_source = '/'.join([GS_BUCKET, archive_dir, archive_file])
zip_file = os.path.join(temp_dir, archive_file)
ExecuteCommand([GSUTIL, 'cp', gs_source, zip_file])
unzip_dir = zip_file.replace('.zip', '')
if platform.system() == 'Windows':
executable = os.path.join(SRC_DIR, 'third_party', 'lzma_sdk',
'Executable', '7za.exe')
ExecuteCommand([executable, 'x', '-aoa', '-o' + temp_dir, zip_file])
else:
ExecuteCommand(['unzip', zip_file, '-d', temp_dir])
if os.path.exists(target_dir):
shutil.rmtree(target_dir)
shutil.move(unzip_dir, target_dir)
if __name__ == '__main__':
sys.exit(main())
|
Add multivm archive download script for buildbot use.
BUG=
R=ricow@google.com
Review URL: https://codereview.chromium.org//291153010
git-svn-id: c93d8a2297af3b929165606efe145742a534bc71@36513 260f80e4-7a28-3924-810f-c04153c831b5#!/usr/bin/python
# Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
"""Download archived multivm or dartium builds.
Usage: download_multivm.py revision target_directory
"""
import imp
import os
import platform
import shutil
import subprocess
import sys
# We are in [checkout dir]/src/dart/tools/dartium in a dartium/multivm checkout
TOOLS_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SRC_DIR = os.path.dirname(os.path.dirname(TOOLS_DIR))
GS_BUCKET = 'gs://dartium-archive'
if platform.system() == 'Windows':
GSUTIL = 'e:\\b\\build\\scripts\\slave\\gsutil.bat'
else:
GSUTIL = '/b/build/scripts/slave/gsutil'
if not os.path.exists(GSUTIL):
GSUTIL = 'gsutil'
def ExecuteCommand(cmd):
print 'Executing: ' + ' '.join(cmd)
subprocess.check_output(cmd)
def main():
revision = sys.argv[1]
target_dir = sys.argv[2]
archive_dir = (os.environ['BUILDBOT_BUILDERNAME']
.replace('linux', 'lucid64')
.replace('multivm', 'multivm-dartium'))
utils = imp.load_source('utils', os.path.join(TOOLS_DIR, 'utils.py'))
with utils.TempDir() as temp_dir:
archive_file = archive_dir + '-' + revision + '.zip'
gs_source = '/'.join([GS_BUCKET, archive_dir, archive_file])
zip_file = os.path.join(temp_dir, archive_file)
ExecuteCommand([GSUTIL, 'cp', gs_source, zip_file])
unzip_dir = zip_file.replace('.zip', '')
if platform.system() == 'Windows':
executable = os.path.join(SRC_DIR, 'third_party', 'lzma_sdk',
'Executable', '7za.exe')
ExecuteCommand([executable, 'x', '-aoa', '-o' + temp_dir, zip_file])
else:
ExecuteCommand(['unzip', zip_file, '-d', temp_dir])
if os.path.exists(target_dir):
shutil.rmtree(target_dir)
shutil.move(unzip_dir, target_dir)
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>Add multivm archive download script for buildbot use.
BUG=
R=ricow@google.com
Review URL: https://codereview.chromium.org//291153010
git-svn-id: c93d8a2297af3b929165606efe145742a534bc71@36513 260f80e4-7a28-3924-810f-c04153c831b5<commit_after>#!/usr/bin/python
# Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
"""Download archived multivm or dartium builds.
Usage: download_multivm.py revision target_directory
"""
import imp
import os
import platform
import shutil
import subprocess
import sys
# We are in [checkout dir]/src/dart/tools/dartium in a dartium/multivm checkout
TOOLS_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SRC_DIR = os.path.dirname(os.path.dirname(TOOLS_DIR))
GS_BUCKET = 'gs://dartium-archive'
if platform.system() == 'Windows':
GSUTIL = 'e:\\b\\build\\scripts\\slave\\gsutil.bat'
else:
GSUTIL = '/b/build/scripts/slave/gsutil'
if not os.path.exists(GSUTIL):
GSUTIL = 'gsutil'
def ExecuteCommand(cmd):
print 'Executing: ' + ' '.join(cmd)
subprocess.check_output(cmd)
def main():
revision = sys.argv[1]
target_dir = sys.argv[2]
archive_dir = (os.environ['BUILDBOT_BUILDERNAME']
.replace('linux', 'lucid64')
.replace('multivm', 'multivm-dartium'))
utils = imp.load_source('utils', os.path.join(TOOLS_DIR, 'utils.py'))
with utils.TempDir() as temp_dir:
archive_file = archive_dir + '-' + revision + '.zip'
gs_source = '/'.join([GS_BUCKET, archive_dir, archive_file])
zip_file = os.path.join(temp_dir, archive_file)
ExecuteCommand([GSUTIL, 'cp', gs_source, zip_file])
unzip_dir = zip_file.replace('.zip', '')
if platform.system() == 'Windows':
executable = os.path.join(SRC_DIR, 'third_party', 'lzma_sdk',
'Executable', '7za.exe')
ExecuteCommand([executable, 'x', '-aoa', '-o' + temp_dir, zip_file])
else:
ExecuteCommand(['unzip', zip_file, '-d', temp_dir])
if os.path.exists(target_dir):
shutil.rmtree(target_dir)
shutil.move(unzip_dir, target_dir)
if __name__ == '__main__':
sys.exit(main())
|
|
8b5163b3048f73af86b736f2285408d39698923f
|
create_output_folders.py
|
create_output_folders.py
|
import logging
import os
from settings import CONVERSIONS, LOGGING_FORMAT, OUTPUT_PATH
logger = logging.getLogger(__name__)
def main():
"""
Create the output folder for each of the conversion types.
"""
for xsl_file_name, output_folder, checker in CONVERSIONS:
# Get the conversion output folder.
output_path = os.path.join(OUTPUT_PATH, output_folder)
if os.path.exists(output_path):
logger.info('%s already exists.', output_path)
else:
os.makedirs(output_path)
logger.info('Created %s.', output_path)
# Get the conversion error folder.
error_path = os.path.join(OUTPUT_PATH, output_folder + '_error')
if os.path.exists(error_path):
logger.info('%s already exists.', error_path)
else:
os.makedirs(error_path)
logger.info('Created %s.', error_path)
if '__main__' == __name__:
logging.basicConfig(format=LOGGING_FORMAT, level=logging.DEBUG)
logger.debug('Folder creation started.')
main()
logger.debug('Folder creation finished.')
|
Add output folder creation script
|
Add output folder creation script
|
Python
|
mit
|
AustralianAntarcticDataCentre/metadata_xml_convert,AustralianAntarcticDataCentre/metadata_xml_convert
|
Add output folder creation script
|
import logging
import os
from settings import CONVERSIONS, LOGGING_FORMAT, OUTPUT_PATH
logger = logging.getLogger(__name__)
def main():
"""
Create the output folder for each of the conversion types.
"""
for xsl_file_name, output_folder, checker in CONVERSIONS:
# Get the conversion output folder.
output_path = os.path.join(OUTPUT_PATH, output_folder)
if os.path.exists(output_path):
logger.info('%s already exists.', output_path)
else:
os.makedirs(output_path)
logger.info('Created %s.', output_path)
# Get the conversion error folder.
error_path = os.path.join(OUTPUT_PATH, output_folder + '_error')
if os.path.exists(error_path):
logger.info('%s already exists.', error_path)
else:
os.makedirs(error_path)
logger.info('Created %s.', error_path)
if '__main__' == __name__:
logging.basicConfig(format=LOGGING_FORMAT, level=logging.DEBUG)
logger.debug('Folder creation started.')
main()
logger.debug('Folder creation finished.')
|
<commit_before><commit_msg>Add output folder creation script<commit_after>
|
import logging
import os
from settings import CONVERSIONS, LOGGING_FORMAT, OUTPUT_PATH
logger = logging.getLogger(__name__)
def main():
"""
Create the output folder for each of the conversion types.
"""
for xsl_file_name, output_folder, checker in CONVERSIONS:
# Get the conversion output folder.
output_path = os.path.join(OUTPUT_PATH, output_folder)
if os.path.exists(output_path):
logger.info('%s already exists.', output_path)
else:
os.makedirs(output_path)
logger.info('Created %s.', output_path)
# Get the conversion error folder.
error_path = os.path.join(OUTPUT_PATH, output_folder + '_error')
if os.path.exists(error_path):
logger.info('%s already exists.', error_path)
else:
os.makedirs(error_path)
logger.info('Created %s.', error_path)
if '__main__' == __name__:
logging.basicConfig(format=LOGGING_FORMAT, level=logging.DEBUG)
logger.debug('Folder creation started.')
main()
logger.debug('Folder creation finished.')
|
Add output folder creation script
import logging
import os
from settings import CONVERSIONS, LOGGING_FORMAT, OUTPUT_PATH
logger = logging.getLogger(__name__)
def main():
"""
Create the output folder for each of the conversion types.
"""
for xsl_file_name, output_folder, checker in CONVERSIONS:
# Get the conversion output folder.
output_path = os.path.join(OUTPUT_PATH, output_folder)
if os.path.exists(output_path):
logger.info('%s already exists.', output_path)
else:
os.makedirs(output_path)
logger.info('Created %s.', output_path)
# Get the conversion error folder.
error_path = os.path.join(OUTPUT_PATH, output_folder + '_error')
if os.path.exists(error_path):
logger.info('%s already exists.', error_path)
else:
os.makedirs(error_path)
logger.info('Created %s.', error_path)
if '__main__' == __name__:
logging.basicConfig(format=LOGGING_FORMAT, level=logging.DEBUG)
logger.debug('Folder creation started.')
main()
logger.debug('Folder creation finished.')
|
<commit_before><commit_msg>Add output folder creation script<commit_after>import logging
import os
from settings import CONVERSIONS, LOGGING_FORMAT, OUTPUT_PATH
logger = logging.getLogger(__name__)
def main():
"""
Create the output folder for each of the conversion types.
"""
for xsl_file_name, output_folder, checker in CONVERSIONS:
# Get the conversion output folder.
output_path = os.path.join(OUTPUT_PATH, output_folder)
if os.path.exists(output_path):
logger.info('%s already exists.', output_path)
else:
os.makedirs(output_path)
logger.info('Created %s.', output_path)
# Get the conversion error folder.
error_path = os.path.join(OUTPUT_PATH, output_folder + '_error')
if os.path.exists(error_path):
logger.info('%s already exists.', error_path)
else:
os.makedirs(error_path)
logger.info('Created %s.', error_path)
if '__main__' == __name__:
logging.basicConfig(format=LOGGING_FORMAT, level=logging.DEBUG)
logger.debug('Folder creation started.')
main()
logger.debug('Folder creation finished.')
|
|
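The folder-creation script in the record above imports CONVERSIONS, LOGGING_FORMAT and OUTPUT_PATH from a settings module that is never shown. The loop unpacks each CONVERSIONS entry as (xsl_file_name, output_folder, checker), so a minimal sketch of a compatible settings module could look like the following; every name and path here is an illustrative assumption, not taken from the repository.

# Illustrative settings module; shape inferred from the loop above, values invented.
LOGGING_FORMAT = '%(asctime)s %(levelname)s %(name)s: %(message)s'
OUTPUT_PATH = '/tmp/metadata_output'  # hypothetical output root

# Each entry: (XSL stylesheet file name, output sub-folder, optional checker callable).
CONVERSIONS = [
    ('iso_to_dif.xsl', 'dif', None),
    ('iso_to_rif_cs.xsl', 'rif_cs', None),
]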
cc201158ebaa2d3e6fc75bc3e9a56ef10ba5a28a
|
test/time_relight.py
|
test/time_relight.py
|
import mclevel
from timeit import timeit
#import logging
#logging.basicConfig(level=logging.INFO)
path = "testfiles\\AnvilWorld"
world = mclevel.fromFile(path)
print "Relight: %d chunks in %.02f seconds" % (world.chunkCount, timeit(lambda: world.generateLights(world.allChunks), number=1))
|
Add test to measure time taken for level.generateLights
|
Debug: Add test to measure time taken for level.generateLights
|
Python
|
isc
|
mcedit/pymclevel,mcedit/pymclevel,ahh2131/mchisel,ahh2131/mchisel,arruda/pymclevel,arruda/pymclevel
|
Debug: Add test to measure time taken for level.generateLights
|
import mclevel
from timeit import timeit
#import logging
#logging.basicConfig(level=logging.INFO)
path = "testfiles\\AnvilWorld"
world = mclevel.fromFile(path)
print "Relight: %d chunks in %.02f seconds" % (world.chunkCount, timeit(lambda: world.generateLights(world.allChunks), number=1))
|
<commit_before><commit_msg>Debug: Add test to measure time taken for level.generateLights<commit_after>
|
import mclevel
from timeit import timeit
#import logging
#logging.basicConfig(level=logging.INFO)
path = "testfiles\\AnvilWorld"
world = mclevel.fromFile(path)
print "Relight: %d chunks in %.02f seconds" % (world.chunkCount, timeit(lambda: world.generateLights(world.allChunks), number=1))
|
Debug: Add test to measure time taken for level.generateLights
import mclevel
from timeit import timeit
#import logging
#logging.basicConfig(level=logging.INFO)
path = "testfiles\\AnvilWorld"
world = mclevel.fromFile(path)
print "Relight: %d chunks in %.02f seconds" % (world.chunkCount, timeit(lambda: world.generateLights(world.allChunks), number=1))
|
<commit_before><commit_msg>Debug: Add test to measure time taken for level.generateLights<commit_after>import mclevel
from timeit import timeit
#import logging
#logging.basicConfig(level=logging.INFO)
path = "testfiles\\AnvilWorld"
world = mclevel.fromFile(path)
print "Relight: %d chunks in %.02f seconds" % (world.chunkCount, timeit(lambda: world.generateLights(world.allChunks), number=1))
|
|
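The benchmark above wraps a single relighting pass in timeit with number=1, so the callable runs exactly once and the elapsed wall-clock time comes back as a float. The same pattern in isolation, with a stand-in workload instead of a Minecraft level, looks like this:

from timeit import timeit

def stand_in_workload():
    # placeholder for an expensive call such as world.generateLights(...)
    return sum(i * i for i in range(100000))

# number=1 executes the lambda once and returns the elapsed time in seconds
elapsed = timeit(lambda: stand_in_workload(), number=1)
print("Workload took %.02f seconds" % elapsed)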
e6168d3c73c6de591d2f7646c71cde27f66578ac
|
a3/visualize.py
|
a3/visualize.py
|
import seaborn as sns
from .utils import get_path
class Visualizer(object):
"""
Visualize training and validation loss
"""
@classmethod
def visualize_training(cls, tr, savefig=None, show=False):
sns.plt.plot(tr.data.Epoch.tolist(), tr.data["Training Loss"].tolist(), label="Training Loss")
sns.plt.plot(tr.data.Epoch.tolist(), tr.data["Validation Loss"].tolist(), label="Validation Loss")
sns.plt.xlabel("Epochs")
sns.plt.ylabel("Loss")
sns.plt.legend(loc="best")
if savefig:
sns.plt.savefig(get_path(savefig))
if show:
sns.plt.show()
|
Add barebones visualization of loss
|
Add barebones visualization of loss
|
Python
|
apache-2.0
|
arizona-phonological-imaging-lab/autotres,arizona-phonological-imaging-lab/autotres
|
Add barebones visualization of loss
|
import seaborn as sns
from .utils import get_path
class Visualizer(object):
"""
Visualize training and validation loss
"""
@classmethod
def visualize_training(cls, tr, savefig=None, show=False):
sns.plt.plot(tr.data.Epoch.tolist(), tr.data["Training Loss"].tolist(), label="Training Loss")
sns.plt.plot(tr.data.Epoch.tolist(), tr.data["Validation Loss"].tolist(), label="Validation Loss")
sns.plt.xlabel("Epochs")
sns.plt.ylabel("Loss")
sns.plt.legend(loc="best")
if savefig:
sns.plt.savefig(get_path(savefig))
if show:
sns.plt.show()
|
<commit_before><commit_msg>Add barebones visualization of loss<commit_after>
|
import seaborn as sns
from .utils import get_path
class Visualizer(object):
"""
Visualize training and validation loss
"""
@classmethod
def visualize_training(cls, tr, savefig=None, show=False):
sns.plt.plot(tr.data.Epoch.tolist(), tr.data["Training Loss"].tolist(), label="Training Loss")
sns.plt.plot(tr.data.Epoch.tolist(), tr.data["Validation Loss"].tolist(), label="Validation Loss")
sns.plt.xlabel("Epochs")
sns.plt.ylabel("Loss")
sns.plt.legend(loc="best")
if savefig:
sns.plt.savefig(get_path(savefig))
if show:
sns.plt.show()
|
Add barebones visualization of loss
import seaborn as sns
from .utils import get_path
class Visualizer(object):
"""
Visualize training and validation loss
"""
@classmethod
def visualize_training(cls, tr, savefig=None, show=False):
sns.plt.plot(tr.data.Epoch.tolist(), tr.data["Training Loss"].tolist(), label="Training Loss")
sns.plt.plot(tr.data.Epoch.tolist(), tr.data["Validation Loss"].tolist(), label="Validation Loss")
sns.plt.xlabel("Epochs")
sns.plt.ylabel("Loss")
sns.plt.legend(loc="best")
if savefig:
sns.plt.savefig(get_path(savefig))
if show:
sns.plt.show()
|
<commit_before><commit_msg>Add barebones visualization of loss<commit_after>import seaborn as sns
from .utils import get_path
class Visualizer(object):
"""
Visualize training and validation loss
"""
@classmethod
def visualize_training(cls, tr, savefig=None, show=False):
sns.plt.plot(tr.data.Epoch.tolist(), tr.data["Training Loss"].tolist(), label="Training Loss")
sns.plt.plot(tr.data.Epoch.tolist(), tr.data["Validation Loss"].tolist(), label="Validation Loss")
sns.plt.xlabel("Epochs")
sns.plt.ylabel("Loss")
sns.plt.legend(loc="best")
if savefig:
sns.plt.savefig(get_path(savefig))
if show:
sns.plt.show()
|
|
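Visualizer.visualize_training above expects a training-record object whose data attribute is a DataFrame with Epoch, Training Loss and Validation Loss columns. A hedged usage sketch follows, with a fabricated record standing in for whatever autotres actually passes:

import pandas as pd

class FakeTrainingRecord(object):
    """Stand-in for the real training record; column names match the plot calls above."""
    def __init__(self):
        self.data = pd.DataFrame({
            'Epoch': [1, 2, 3, 4],
            'Training Loss': [0.90, 0.61, 0.44, 0.38],
            'Validation Loss': [0.95, 0.70, 0.55, 0.52],
        })

# With the package importable, the call would be roughly:
# Visualizer.visualize_training(FakeTrainingRecord(), savefig='loss.png', show=False)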
9124f1cf2bc02e39cd215a465d1680f6a4fdd696
|
ObjectTracking/streamer.py
|
ObjectTracking/streamer.py
|
from SimpleCV import *
import time
import serial
cam = JpegStreamCamera('http://192.168.1.6:8080/videofeed')
disp=Display()
ser=serial.Serial('/dev/ttyACM2', 9600)
alpha = 0.8
time.sleep(1)
previous_z = 200;
while True:
img = cam.getImage()
myLayer = DrawingLayer((img.width,img.height))
disk_img = img.hueDistance(color=Color.GREEN).invert().morphClose().morphClose().threshold(200)
disk = disk_img.findBlobs(minsize=2000)
if disk:
disk[0].drawMinRect(layer=myLayer, color=Color.RED)
disk_img.addDrawingLayer(myLayer)
position = disk[0].centroid()
print position
z = alpha*position[1]+(1-alpha)*previous_z
ser.write(str((z-200)*0.03))
previous_z=z
disk_img.save(disp)
time.sleep(0.01)
|
Use to make close loop test on single axis
|
Use to make close loop test on single axis
|
Python
|
mit
|
baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite
|
Use to make close loop test on single axis
|
from SimpleCV import *
import time
import serial
cam = JpegStreamCamera('http://192.168.1.6:8080/videofeed')
disp=Display()
ser=serial.Serial('/dev/ttyACM2', 9600)
alpha = 0.8
time.sleep(1)
previous_z = 200;
while True:
img = cam.getImage()
myLayer = DrawingLayer((img.width,img.height))
disk_img = img.hueDistance(color=Color.GREEN).invert().morphClose().morphClose().threshold(200)
disk = disk_img.findBlobs(minsize=2000)
if disk:
disk[0].drawMinRect(layer=myLayer, color=Color.RED)
disk_img.addDrawingLayer(myLayer)
position = disk[0].centroid()
print position
z = alpha*position[1]+(1-alpha)*previous_z
ser.write(str((z-200)*0.03))
previous_z=z
disk_img.save(disp)
time.sleep(0.01)
|
<commit_before><commit_msg>Use to make close loop test on single axis<commit_after>
|
from SimpleCV import *
import time
import serial
cam = JpegStreamCamera('http://192.168.1.6:8080/videofeed')
disp=Display()
ser=serial.Serial('/dev/ttyACM2', 9600)
alpha = 0.8
time.sleep(1)
previous_z = 200;
while True:
img = cam.getImage()
myLayer = DrawingLayer((img.width,img.height))
disk_img = img.hueDistance(color=Color.GREEN).invert().morphClose().morphClose().threshold(200)
disk = disk_img.findBlobs(minsize=2000)
if disk:
disk[0].drawMinRect(layer=myLayer, color=Color.RED)
disk_img.addDrawingLayer(myLayer)
position = disk[0].centroid()
print position
z = alpha*position[1]+(1-alpha)*previous_z
ser.write(str((z-200)*0.03))
previous_z=z
disk_img.save(disp)
time.sleep(0.01)
|
Use to make close loop test on single axis
from SimpleCV import *
import time
import serial
cam = JpegStreamCamera('http://192.168.1.6:8080/videofeed')
disp=Display()
ser=serial.Serial('/dev/ttyACM2', 9600)
alpha = 0.8
time.sleep(1)
previous_z = 200;
while True:
img = cam.getImage()
myLayer = DrawingLayer((img.width,img.height))
disk_img = img.hueDistance(color=Color.GREEN).invert().morphClose().morphClose().threshold(200)
disk = disk_img.findBlobs(minsize=2000)
if disk:
disk[0].drawMinRect(layer=myLayer, color=Color.RED)
disk_img.addDrawingLayer(myLayer)
position = disk[0].centroid()
print position
z = alpha*position[1]+(1-alpha)*previous_z
ser.write(str((z-200)*0.03))
previous_z=z
disk_img.save(disp)
time.sleep(0.01)
|
<commit_before><commit_msg>Use to make close loop test on single axis<commit_after>from SimpleCV import *
import time
import serial
cam = JpegStreamCamera('http://192.168.1.6:8080/videofeed')
disp=Display()
ser=serial.Serial('/dev/ttyACM2', 9600)
alpha = 0.8
time.sleep(1)
previous_z = 200;
while True:
img = cam.getImage()
myLayer = DrawingLayer((img.width,img.height))
disk_img = img.hueDistance(color=Color.GREEN).invert().morphClose().morphClose().threshold(200)
disk = disk_img.findBlobs(minsize=2000)
if disk:
disk[0].drawMinRect(layer=myLayer, color=Color.RED)
disk_img.addDrawingLayer(myLayer)
position = disk[0].centroid()
print position
z = alpha*position[1]+(1-alpha)*previous_z
ser.write(str((z-200)*0.03))
previous_z=z
disk_img.save(disp)
time.sleep(0.01)
|
|
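The tracking loop above smooths the blob's vertical position with a first-order exponential filter (z = alpha * measurement + (1 - alpha) * previous) before writing a scaled command to the serial port. The filter on its own, with made-up measurements, is only a few lines:

def exponential_smooth(measurements, alpha=0.8, initial=200.0):
    # alpha close to 1 trusts new samples more; close to 0 keeps the old estimate
    previous = initial
    smoothed = []
    for measurement in measurements:
        previous = alpha * measurement + (1 - alpha) * previous
        smoothed.append(previous)
    return smoothed

print(exponential_smooth([210.0, 215.0, 190.0, 205.0]))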
8bdc8418b0093c44947022d3649593f77c471fea
|
tests/test_compat.py
|
tests/test_compat.py
|
from auth_tkt import compat
from unittest import TestCase
class Base64DecodeTestCase(TestCase):
def test_returns_decoded_string(self):
self.assertEqual(
compat.base64decode('ZGVjb2RlZA=='), 'decoded')
class Base64EncodeTestCase(TestCase):
def test_encodes_passed_string(self):
self.assertEqual(
compat.base64encode('decoded'), 'ZGVjb2RlZA==')
class ToBytesTestCase(TestCase):
def test_returns_encoded_byte_string(self):
returns = compat.to_bytes('test')
self.assertIsInstance(returns, bytes)
self.assertEqual(returns, b'test')
def test_encodes_unicode_strings(self):
self.assertEqual(compat.to_bytes(u'\u2603'), b'\xe2\x98\x83')
|
Add tests for compat module
|
Add tests for compat module
|
Python
|
mit
|
yola/auth_tkt
|
Add tests for compat module
|
from auth_tkt import compat
from unittest import TestCase
class Base64DecodeTestCase(TestCase):
def test_returns_decoded_string(self):
self.assertEqual(
compat.base64decode('ZGVjb2RlZA=='), 'decoded')
class Base64EncodeTestCase(TestCase):
def test_encodes_passed_string(self):
self.assertEqual(
compat.base64encode('decoded'), 'ZGVjb2RlZA==')
class ToBytesTestCase(TestCase):
def test_returns_encoded_byte_string(self):
returns = compat.to_bytes('test')
self.assertIsInstance(returns, bytes)
self.assertEqual(returns, b'test')
def test_encodes_unicode_strings(self):
self.assertEqual(compat.to_bytes(u'\u2603'), b'\xe2\x98\x83')
|
<commit_before><commit_msg>Add tests for compat module<commit_after>
|
from auth_tkt import compat
from unittest import TestCase
class Base64DecodeTestCase(TestCase):
def test_returns_decoded_string(self):
self.assertEqual(
compat.base64decode('ZGVjb2RlZA=='), 'decoded')
class Base64EncodeTestCase(TestCase):
def test_encodes_passed_string(self):
self.assertEqual(
compat.base64encode('decoded'), 'ZGVjb2RlZA==')
class ToBytesTestCase(TestCase):
def test_returns_encoded_byte_string(self):
returns = compat.to_bytes('test')
self.assertIsInstance(returns, bytes)
self.assertEqual(returns, b'test')
def test_encodes_unicode_strings(self):
self.assertEqual(compat.to_bytes(u'\u2603'), b'\xe2\x98\x83')
|
Add tests for compat module
from auth_tkt import compat
from unittest import TestCase
class Base64DecodeTestCase(TestCase):
def test_returns_decoded_string(self):
self.assertEqual(
compat.base64decode('ZGVjb2RlZA=='), 'decoded')
class Base64EncodeTestCase(TestCase):
def test_encodes_passed_string(self):
self.assertEqual(
compat.base64encode('decoded'), 'ZGVjb2RlZA==')
class ToBytesTestCase(TestCase):
def test_returns_encoded_byte_string(self):
returns = compat.to_bytes('test')
self.assertIsInstance(returns, bytes)
self.assertEqual(returns, b'test')
def test_encodes_unicode_strings(self):
self.assertEqual(compat.to_bytes(u'\u2603'), b'\xe2\x98\x83')
|
<commit_before><commit_msg>Add tests for compat module<commit_after>from auth_tkt import compat
from unittest import TestCase
class Base64DecodeTestCase(TestCase):
def test_returns_decoded_string(self):
self.assertEqual(
compat.base64decode('ZGVjb2RlZA=='), 'decoded')
class Base64EncodeTestCase(TestCase):
def test_encodes_passed_string(self):
self.assertEqual(
compat.base64encode('decoded'), 'ZGVjb2RlZA==')
class ToBytesTestCase(TestCase):
def test_returns_encoded_byte_string(self):
returns = compat.to_bytes('test')
self.assertIsInstance(returns, bytes)
self.assertEqual(returns, b'test')
def test_encodes_unicode_strings(self):
self.assertEqual(compat.to_bytes(u'\u2603'), b'\xe2\x98\x83')
|
|
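The tests above pin down the behaviour of auth_tkt.compat without showing it. One implementation that would satisfy them is sketched below; this is an assumption about the module's shape, not the project's actual code.

import base64

def base64encode(value):
    # str -> base64 str, e.g. 'decoded' -> 'ZGVjb2RlZA=='
    return base64.b64encode(value.encode('utf-8')).decode('ascii')

def base64decode(value):
    # base64 str -> str, e.g. 'ZGVjb2RlZA==' -> 'decoded'
    return base64.b64decode(value).decode('utf-8')

def to_bytes(value):
    # text -> UTF-8 bytes; byte strings pass through unchanged
    return value if isinstance(value, bytes) else value.encode('utf-8')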
8509659e77b63f2467b0b98064433e083ac32187
|
tinman/transforms.py
|
tinman/transforms.py
|
"""
Tornado Output Transforming Classes
"""
from tornado import web
class StripBlankLines(web.OutputTransform):
def transform_first_chunk(self, status_code, headers, chunk, finishing):
content_type = headers.get("Content-Type", "").split(";")[0]
if content_type.split('/')[0] == 'text':
chunk = self.transform_chunk(chunk, finishing)
if "Content-Length" in headers:
headers["Content-Length"] = str(len(chunk))
return status_code, headers, chunk
def transform_chunk(self, chunk, finishing):
return '\n'.join([line for line in chunk.split('\n') if line])
|
Add a blank line stripping transform
|
Add a blank line stripping transform
|
Python
|
bsd-3-clause
|
gmr/tinman,lucius-feng/tinman,lucius-feng/tinman,lucius-feng/tinman,gmr/tinman
|
Add a blank line stripping transform
|
"""
Tornado Output Transforming Classes
"""
from tornado import web
class StripBlankLines(web.OutputTransform):
def transform_first_chunk(self, status_code, headers, chunk, finishing):
content_type = headers.get("Content-Type", "").split(";")[0]
if content_type.split('/')[0] == 'text':
chunk = self.transform_chunk(chunk, finishing)
if "Content-Length" in headers:
headers["Content-Length"] = str(len(chunk))
return status_code, headers, chunk
def transform_chunk(self, chunk, finishing):
return '\n'.join([line for line in chunk.split('\n') if line])
|
<commit_before><commit_msg>Add a blank line stripping transform<commit_after>
|
"""
Tornado Output Transforming Classes
"""
from tornado import web
class StripBlankLines(web.OutputTransform):
def transform_first_chunk(self, status_code, headers, chunk, finishing):
content_type = headers.get("Content-Type", "").split(";")[0]
if content_type.split('/')[0] == 'text':
chunk = self.transform_chunk(chunk, finishing)
if "Content-Length" in headers:
headers["Content-Length"] = str(len(chunk))
return status_code, headers, chunk
def transform_chunk(self, chunk, finishing):
return '\n'.join([line for line in chunk.split('\n') if line])
|
Add a blank line stripping transform
"""
Tornado Output Transforming Classes
"""
from tornado import web
class StripBlankLines(web.OutputTransform):
def transform_first_chunk(self, status_code, headers, chunk, finishing):
content_type = headers.get("Content-Type", "").split(";")[0]
if content_type.split('/')[0] == 'text':
chunk = self.transform_chunk(chunk, finishing)
if "Content-Length" in headers:
headers["Content-Length"] = str(len(chunk))
return status_code, headers, chunk
def transform_chunk(self, chunk, finishing):
return '\n'.join([line for line in chunk.split('\n') if line])
|
<commit_before><commit_msg>Add a blank line stripping transform<commit_after>"""
Tornado Output Transforming Classes
"""
from tornado import web
class StripBlankLines(web.OutputTransform):
def transform_first_chunk(self, status_code, headers, chunk, finishing):
content_type = headers.get("Content-Type", "").split(";")[0]
if content_type.split('/')[0] == 'text':
chunk = self.transform_chunk(chunk, finishing)
if "Content-Length" in headers:
headers["Content-Length"] = str(len(chunk))
return status_code, headers, chunk
def transform_chunk(self, chunk, finishing):
return '\n'.join([line for line in chunk.split('\n') if line])
|
|
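Tornado only applies an OutputTransform once it is registered on the application; transform classes (not instances) are passed in and Tornado instantiates one per request. A minimal wiring sketch for the transform above, with the handler list left empty for brevity:

from tornado import web
from tinman.transforms import StripBlankLines  # module path as shown in the record

application = web.Application(
    handlers=[],                    # real handlers omitted in this sketch
    transforms=[StripBlankLines],   # passed classes are used instead of Tornado's defaults
)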
d2546864c9c0579b68050ade87a440f392aa6e27
|
class_hierarchy.py
|
class_hierarchy.py
|
SIZES = {'small', 'medium', 'insanely massive'}
class PhysicalThing(object):
""" Base class for physical object
"""
def __init__(self, *args, **kwargs):
""" Validate and set attrs
"""
size = kwargs.pop('size', None)
if size and size not in SIZES:
raise ValueError('Invalid size!')
self.size = size
weight = kwargs.pop('weight', 0)
if weight and weight < 0:
raise ValueError('Weight can\'t be negative!')
self.weight = weight
class Electronic(PhysicalThing):
""" Yeah
"""
def beep(self):
specific_attr_vals = [self.__getattribute__(n) for n in {'size', 'weight'}]
print ('Bleep bloop here are some attr values: %s' %
[v for v in specific_attr_vals if v])
class Computer(PhysicalThing):
""" It's computers
"""
def __init__(self, *args, **kwargs):
self.os = kwargs.pop('os', 'Linux is the default!')
super(Computer, self).__init__(self, *args, **kwargs)
class Laptop(Electronic, Computer):
""" Aye
"""
@staticmethod
def yo():
print 'Yo.'
|
Add class hierarchy w multiple inheritance
|
Add class hierarchy w multiple inheritance
|
Python
|
mit
|
oldhill/halloween,oldhill/halloween,oldhill/halloween,oldhill/halloween
|
Add class hierarchy w multiple inheritance
|
SIZES = {'small', 'medium', 'insanely massive'}
class PhysicalThing(object):
""" Base class for physical object
"""
def __init__(self, *args, **kwargs):
""" Validate and set attrs
"""
size = kwargs.pop('size', None)
if size and size not in SIZES:
raise ValueError('Invalid size!')
self.size = size
weight = kwargs.pop('weight', 0)
if weight and weight < 0:
raise ValueError('Weight can\'t be negative!')
self.weight = weight
class Electronic(PhysicalThing):
""" Yeah
"""
def beep(self):
specific_attr_vals = [self.__getattribute__(n) for n in {'size', 'weight'}]
print ('Bleep bloop here are some attr values: %s' %
[v for v in specific_attr_vals if v])
class Computer(PhysicalThing):
""" It's computers
"""
def __init__(self, *args, **kwargs):
self.os = kwargs.pop('os', 'Linux is the default!')
super(Computer, self).__init__(self, *args, **kwargs)
class Laptop(Electronic, Computer):
""" Aye
"""
@staticmethod
def yo():
print 'Yo.'
|
<commit_before><commit_msg>Add class hierarchy w multiple inheritance<commit_after>
|
SIZES = {'small', 'medium', 'insanely massive'}
class PhysicalThing(object):
""" Base class for physical object
"""
def __init__(self, *args, **kwargs):
""" Validate and set attrs
"""
size = kwargs.pop('size', None)
if size and size not in SIZES:
raise ValueError('Invalid size!')
self.size = size
weight = kwargs.pop('weight', 0)
if weight and weight < 0:
raise ValueError('Weight can\'t be negative!')
self.weight = weight
class Electronic(PhysicalThing):
""" Yeah
"""
def beep(self):
specific_attr_vals = [self.__getattribute__(n) for n in {'size', 'weight'}]
print ('Bleep bloop here are some attr values: %s' %
[v for v in specific_attr_vals if v])
class Computer(PhysicalThing):
""" It's computers
"""
def __init__(self, *args, **kwargs):
self.os = kwargs.pop('os', 'Linux is the default!')
super(Computer, self).__init__(self, *args, **kwargs)
class Laptop(Electronic, Computer):
""" Aye
"""
@staticmethod
def yo():
print 'Yo.'
|
Add class hierarchy w multiple inheritance
SIZES = {'small', 'medium', 'insanely massive'}
class PhysicalThing(object):
""" Base class for physical object
"""
def __init__(self, *args, **kwargs):
""" Validate and set attrs
"""
size = kwargs.pop('size', None)
if size and size not in SIZES:
raise ValueError('Invalid size!')
self.size = size
weight = kwargs.pop('weight', 0)
if weight and weight < 0:
raise ValueError('Weight can\'t be negative!')
self.weight = weight
class Electronic(PhysicalThing):
""" Yeah
"""
def beep(self):
specific_attr_vals = [self.__getattribute__(n) for n in {'size', 'weight'}]
print ('Bleep bloop here are some attr values: %s' %
[v for v in specific_attr_vals if v])
class Computer(PhysicalThing):
""" It's computers
"""
def __init__(self, *args, **kwargs):
self.os = kwargs.pop('os', 'Linux is the default!')
super(Computer, self).__init__(self, *args, **kwargs)
class Laptop(Electronic, Computer):
""" Aye
"""
@staticmethod
def yo():
print 'Yo.'
|
<commit_before><commit_msg>Add class hierarchy w multiple inheritance<commit_after>
SIZES = {'small', 'medium', 'insanely massive'}
class PhysicalThing(object):
""" Base class for physical object
"""
def __init__(self, *args, **kwargs):
""" Validate and set attrs
"""
size = kwargs.pop('size', None)
if size and size not in SIZES:
raise ValueError('Invalid size!')
self.size = size
weight = kwargs.pop('weight', 0)
if weight and weight < 0:
raise ValueError('Weight can\'t be negative!')
self.weight = weight
class Electronic(PhysicalThing):
""" Yeah
"""
def beep(self):
specific_attr_vals = [self.__getattribute__(n) for n in {'size', 'weight'}]
print ('Bleep bloop here are some attr values: %s' %
[v for v in specific_attr_vals if v])
class Computer(PhysicalThing):
""" It's computers
"""
def __init__(self, *args, **kwargs):
self.os = kwargs.pop('os', 'Linux is the default!')
super(Computer, self).__init__(self, *args, **kwargs)
class Laptop(Electronic, Computer):
""" Aye
"""
@staticmethod
def yo():
print 'Yo.'
|
|
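Because Laptop inherits from both Electronic and Computer, and both of those share PhysicalThing as a base, the interesting part of the record above is the method resolution order Python derives via C3 linearization. A stripped-down reproduction makes the order easy to check:

class PhysicalThing(object):
    pass

class Electronic(PhysicalThing):
    pass

class Computer(PhysicalThing):
    pass

class Laptop(Electronic, Computer):
    pass

# C3 linearization: Laptop, Electronic, Computer, PhysicalThing, object
print([cls.__name__ for cls in Laptop.__mro__])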
df7c5c2def8341d73a109426d5289b2e705995ca
|
ceph_deploy/tests/parser/test_calamari.py
|
ceph_deploy/tests/parser/test_calamari.py
|
import pytest
from ceph_deploy.cli import get_parser
class TestParserCalamari(object):
def setup(self):
self.parser = get_parser()
def test_calamari_help(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('calamari --help'.split())
out, err = capsys.readouterr()
assert 'usage: ceph-deploy calamari' in out
assert 'positional arguments:' in out
assert 'optional arguments:' in out
def test_calamari_connect_host_required(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('calamari connect'.split())
out, err = capsys.readouterr()
assert "error: too few arguments" in err
def test_calamari_connect_one_host(self):
args = self.parser.parse_args('calamari connect host1'.split())
assert args.hosts == ['host1']
def test_calamari_connect_multiple_hosts(self):
hostnames = ['host1', 'host2', 'host3']
args = self.parser.parse_args('calamari connect'.split() + hostnames)
assert args.hosts == hostnames
def test_calamari_connect_master_default_is_none(self):
args = self.parser.parse_args('calamari connect host1'.split())
assert args.master is None
def test_calamari_connect_master_custom(self):
args = self.parser.parse_args('calamari connect --master master.ceph.com host1'.split())
assert args.master == "master.ceph.com"
|
Add argparse tests for ceph-deploy calamari
|
[RM-11742] Add argparse tests for ceph-deploy calamari
Signed-off-by: Travis Rhoden <e5e44d6dbac12e32e01c3bb8b67940d8b42e225b@redhat.com>
|
Python
|
mit
|
osynge/ceph-deploy,shenhequnying/ceph-deploy,SUSE/ceph-deploy-to-be-deleted,isyippee/ceph-deploy,ghxandsky/ceph-deploy,shenhequnying/ceph-deploy,imzhulei/ceph-deploy,SUSE/ceph-deploy-to-be-deleted,imzhulei/ceph-deploy,Vicente-Cheng/ceph-deploy,ceph/ceph-deploy,osynge/ceph-deploy,zhouyuan/ceph-deploy,branto1/ceph-deploy,zhouyuan/ceph-deploy,codenrhoden/ceph-deploy,SUSE/ceph-deploy,isyippee/ceph-deploy,trhoden/ceph-deploy,ghxandsky/ceph-deploy,codenrhoden/ceph-deploy,Vicente-Cheng/ceph-deploy,branto1/ceph-deploy,SUSE/ceph-deploy,trhoden/ceph-deploy,ceph/ceph-deploy
|
[RM-11742] Add argparse tests for ceph-deploy calamari
Signed-off-by: Travis Rhoden <e5e44d6dbac12e32e01c3bb8b67940d8b42e225b@redhat.com>
|
import pytest
from ceph_deploy.cli import get_parser
class TestParserCalamari(object):
def setup(self):
self.parser = get_parser()
def test_calamari_help(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('calamari --help'.split())
out, err = capsys.readouterr()
assert 'usage: ceph-deploy calamari' in out
assert 'positional arguments:' in out
assert 'optional arguments:' in out
def test_calamari_connect_host_required(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('calamari connect'.split())
out, err = capsys.readouterr()
assert "error: too few arguments" in err
def test_calamari_connect_one_host(self):
args = self.parser.parse_args('calamari connect host1'.split())
assert args.hosts == ['host1']
def test_calamari_connect_multiple_hosts(self):
hostnames = ['host1', 'host2', 'host3']
args = self.parser.parse_args('calamari connect'.split() + hostnames)
assert args.hosts == hostnames
def test_calamari_connect_master_default_is_none(self):
args = self.parser.parse_args('calamari connect host1'.split())
assert args.master is None
def test_calamari_connect_master_custom(self):
args = self.parser.parse_args('calamari connect --master master.ceph.com host1'.split())
assert args.master == "master.ceph.com"
|
<commit_before><commit_msg>[RM-11742] Add argparse tests for ceph-deploy calamari
Signed-off-by: Travis Rhoden <e5e44d6dbac12e32e01c3bb8b67940d8b42e225b@redhat.com><commit_after>
|
import pytest
from ceph_deploy.cli import get_parser
class TestParserCalamari(object):
def setup(self):
self.parser = get_parser()
def test_calamari_help(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('calamari --help'.split())
out, err = capsys.readouterr()
assert 'usage: ceph-deploy calamari' in out
assert 'positional arguments:' in out
assert 'optional arguments:' in out
def test_calamari_connect_host_required(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('calamari connect'.split())
out, err = capsys.readouterr()
assert "error: too few arguments" in err
def test_calamari_connect_one_host(self):
args = self.parser.parse_args('calamari connect host1'.split())
assert args.hosts == ['host1']
def test_calamari_connect_multiple_hosts(self):
hostnames = ['host1', 'host2', 'host3']
args = self.parser.parse_args('calamari connect'.split() + hostnames)
assert args.hosts == hostnames
def test_calamari_connect_master_default_is_none(self):
args = self.parser.parse_args('calamari connect host1'.split())
assert args.master is None
def test_calamari_connect_master_custom(self):
args = self.parser.parse_args('calamari connect --master master.ceph.com host1'.split())
assert args.master == "master.ceph.com"
|
[RM-11742] Add argparse tests for ceph-deploy calamari
Signed-off-by: Travis Rhoden <e5e44d6dbac12e32e01c3bb8b67940d8b42e225b@redhat.com>
import pytest
from ceph_deploy.cli import get_parser
class TestParserCalamari(object):
def setup(self):
self.parser = get_parser()
def test_calamari_help(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('calamari --help'.split())
out, err = capsys.readouterr()
assert 'usage: ceph-deploy calamari' in out
assert 'positional arguments:' in out
assert 'optional arguments:' in out
def test_calamari_connect_host_required(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('calamari connect'.split())
out, err = capsys.readouterr()
assert "error: too few arguments" in err
def test_calamari_connect_one_host(self):
args = self.parser.parse_args('calamari connect host1'.split())
assert args.hosts == ['host1']
def test_calamari_connect_multiple_hosts(self):
hostnames = ['host1', 'host2', 'host3']
args = self.parser.parse_args('calamari connect'.split() + hostnames)
assert args.hosts == hostnames
def test_calamari_connect_master_default_is_none(self):
args = self.parser.parse_args('calamari connect host1'.split())
assert args.master is None
def test_calamari_connect_master_custom(self):
args = self.parser.parse_args('calamari connect --master master.ceph.com host1'.split())
assert args.master == "master.ceph.com"
|
<commit_before><commit_msg>[RM-11742] Add argparse tests for ceph-deploy calamari
Signed-off-by: Travis Rhoden <e5e44d6dbac12e32e01c3bb8b67940d8b42e225b@redhat.com><commit_after>import pytest
from ceph_deploy.cli import get_parser
class TestParserCalamari(object):
def setup(self):
self.parser = get_parser()
def test_calamari_help(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('calamari --help'.split())
out, err = capsys.readouterr()
assert 'usage: ceph-deploy calamari' in out
assert 'positional arguments:' in out
assert 'optional arguments:' in out
def test_calamari_connect_host_required(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('calamari connect'.split())
out, err = capsys.readouterr()
assert "error: too few arguments" in err
def test_calamari_connect_one_host(self):
args = self.parser.parse_args('calamari connect host1'.split())
assert args.hosts == ['host1']
def test_calamari_connect_multiple_hosts(self):
hostnames = ['host1', 'host2', 'host3']
args = self.parser.parse_args('calamari connect'.split() + hostnames)
assert args.hosts == hostnames
def test_calamari_connect_master_default_is_none(self):
args = self.parser.parse_args('calamari connect host1'.split())
assert args.master is None
def test_calamari_connect_master_custom(self):
args = self.parser.parse_args('calamari connect --master master.ceph.com host1'.split())
assert args.master == "master.ceph.com"
|
|
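The parser tests above imply a particular CLI shape: a calamari subcommand with a connect action that requires at least one host and accepts an optional --master. The real ceph_deploy.cli.get_parser is far larger; the sketch below only mirrors the behaviour the tests assert and invents its own dest names.

import argparse

def get_parser():
    parser = argparse.ArgumentParser(prog='ceph-deploy')
    subparsers = parser.add_subparsers(dest='command')

    calamari = subparsers.add_parser('calamari', help='calamari management')
    calamari_sub = calamari.add_subparsers(dest='subcommand')

    connect = calamari_sub.add_parser('connect', help='connect hosts to a master')
    connect.add_argument('hosts', nargs='+')         # one or more hosts required
    connect.add_argument('--master', default=None)   # defaults to None, as tested
    return parser

args = get_parser().parse_args('calamari connect --master master.ceph.com host1'.split())
print(args.hosts, args.master)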
d40fe9a9739ed7da4a47492124715bf6b720ae1d
|
rally-jobs/plugins/test_relative_import/zzz.py
|
rally-jobs/plugins/test_relative_import/zzz.py
|
# This module is used just for test that relative imports work well
def some_very_important_function():
return 42
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This module is used just for test that relative imports work well
def some_very_important_function():
return 42
|
Add Apache 2.0 license to source file
|
Add Apache 2.0 license to source file
As per OpenStack licensing guide lines [1]:
[H102 H103] Newly contributed Source Code should be licensed under
the Apache 2.0 license.
[H104] Files with no code shouldn't contain any license header nor
comments, and must be left completely empty.
[1] http://docs.openstack.org/developer/hacking/#openstack-licensing
Change-Id: Ic7c1319b81acfadc5d9a3566fee54d08741a7b73
|
Python
|
apache-2.0
|
openstack/rally,openstack/rally,openstack/rally,yeming233/rally,yeming233/rally,openstack/rally
|
# This module is used just for test that relative imports work well
def some_very_important_function():
return 42
Add Apache 2.0 license to source file
As per OpenStack licensing guide lines [1]:
[H102 H103] Newly contributed Source Code should be licensed under
the Apache 2.0 license.
[H104] Files with no code shouldn't contain any license header nor
comments, and must be left completely empty.
[1] http://docs.openstack.org/developer/hacking/#openstack-licensing
Change-Id: Ic7c1319b81acfadc5d9a3566fee54d08741a7b73
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This module is used just for test that relative imports work well
def some_very_important_function():
return 42
|
<commit_before># This module is used just for test that relative imports work well
def some_very_important_function():
return 42
<commit_msg>Add Apache 2.0 license to source file
As per OpenStack licensing guide lines [1]:
[H102 H103] Newly contributed Source Code should be licensed under
the Apache 2.0 license.
[H104] Files with no code shouldn't contain any license header nor
comments, and must be left completely empty.
[1] http://docs.openstack.org/developer/hacking/#openstack-licensing
Change-Id: Ic7c1319b81acfadc5d9a3566fee54d08741a7b73<commit_after>
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This module is used just for test that relative imports work well
def some_very_important_function():
return 42
|
# This module is used just for test that relative imports work well
def some_very_important_function():
return 42
Add Apache 2.0 license to source file
As per OpenStack licensing guide lines [1]:
[H102 H103] Newly contributed Source Code should be licensed under
the Apache 2.0 license.
[H104] Files with no code shouldn't contain any license header nor
comments, and must be left completely empty.
[1] http://docs.openstack.org/developer/hacking/#openstack-licensing
Change-Id: Ic7c1319b81acfadc5d9a3566fee54d08741a7b73
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This module is used just for test that relative imports work well
def some_very_important_function():
return 42
|
<commit_before># This module is used just for test that relative imports work well
def some_very_important_function():
return 42
<commit_msg>Add Apache 2.0 license to source file
As per OpenStack licensing guide lines [1]:
[H102 H103] Newly contributed Source Code should be licensed under
the Apache 2.0 license.
[H104] Files with no code shouldn't contain any license header nor
comments, and must be left completely empty.
[1] http://docs.openstack.org/developer/hacking/#openstack-licensing
Change-Id: Ic7c1319b81acfadc5d9a3566fee54d08741a7b73<commit_after># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This module is used just for test that relative imports work well
def some_very_important_function():
return 42
|
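The whole point of the plugin above is the package-relative import, so a sibling module in the same test_relative_import package would consume it roughly as below. This assumes both files sit inside an importable package; the consuming module itself is hypothetical.

# hypothetical sibling module in the same test_relative_import package
from .zzz import some_very_important_function

def answer():
    # the relative import resolves against the package, which is what the plugin verifies
    return some_very_important_function()  # -> 42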
7dbbef88fedc07ee8cddf690b8c42785ee7241bd
|
astropy_helpers/sphinx/setup_package.py
|
astropy_helpers/sphinx/setup_package.py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
def get_package_data():
# Install the theme files
return {
'astropy_helpers.sphinx': [
'ext/templates/*/*',
'themes/bootstrap-astropy/*.*',
'themes/bootstrap-astropy/static/*.*']}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
def get_package_data():
# Install the theme files
return {
'astropy_helpers.sphinx': [
'ext/templates/*/*',
'local/*.inv',
'themes/bootstrap-astropy/*.*',
'themes/bootstrap-astropy/static/*.*']}
|
Make sure .inv file gets installed
|
Make sure .inv file gets installed
|
Python
|
bsd-3-clause
|
Cadair/astropy-helpers,bsipocz/astropy-helpers,embray/astropy_helpers,Cadair/astropy-helpers,embray/astropy_helpers,dpshelio/astropy-helpers,larrybradley/astropy-helpers,astropy/astropy-helpers,embray/astropy_helpers,astropy/astropy-helpers,bsipocz/astropy-helpers,larrybradley/astropy-helpers,embray/astropy_helpers,bsipocz/astropy-helpers,larrybradley/astropy-helpers,dpshelio/astropy-helpers
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
def get_package_data():
# Install the theme files
return {
'astropy_helpers.sphinx': [
'ext/templates/*/*',
'themes/bootstrap-astropy/*.*',
'themes/bootstrap-astropy/static/*.*']}
Make sure .inv file gets installed
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
def get_package_data():
# Install the theme files
return {
'astropy_helpers.sphinx': [
'ext/templates/*/*',
'local/*.inv',
'themes/bootstrap-astropy/*.*',
'themes/bootstrap-astropy/static/*.*']}
|
<commit_before># Licensed under a 3-clause BSD style license - see LICENSE.rst
def get_package_data():
# Install the theme files
return {
'astropy_helpers.sphinx': [
'ext/templates/*/*',
'themes/bootstrap-astropy/*.*',
'themes/bootstrap-astropy/static/*.*']}
<commit_msg>Make sure .inv file gets installed<commit_after>
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
def get_package_data():
# Install the theme files
return {
'astropy_helpers.sphinx': [
'ext/templates/*/*',
'local/*.inv',
'themes/bootstrap-astropy/*.*',
'themes/bootstrap-astropy/static/*.*']}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
def get_package_data():
# Install the theme files
return {
'astropy_helpers.sphinx': [
'ext/templates/*/*',
'themes/bootstrap-astropy/*.*',
'themes/bootstrap-astropy/static/*.*']}
Make sure .inv file gets installed
# Licensed under a 3-clause BSD style license - see LICENSE.rst
def get_package_data():
# Install the theme files
return {
'astropy_helpers.sphinx': [
'ext/templates/*/*',
'local/*.inv',
'themes/bootstrap-astropy/*.*',
'themes/bootstrap-astropy/static/*.*']}
|
<commit_before># Licensed under a 3-clause BSD style license - see LICENSE.rst
def get_package_data():
# Install the theme files
return {
'astropy_helpers.sphinx': [
'ext/templates/*/*',
'themes/bootstrap-astropy/*.*',
'themes/bootstrap-astropy/static/*.*']}
<commit_msg>Make sure .inv file gets installed<commit_after># Licensed under a 3-clause BSD style license - see LICENSE.rst
def get_package_data():
# Install the theme files
return {
'astropy_helpers.sphinx': [
'ext/templates/*/*',
'local/*.inv',
'themes/bootstrap-astropy/*.*',
'themes/bootstrap-astropy/static/*.*']}
|
4ae27811595ce3c53670df441429bcf4cace4e15
|
StockIndicators/StockIndicators.py
|
StockIndicators/StockIndicators.py
|
#!flask/bin/python
from flask import Blueprint, jsonify
api_si = Blueprint('api_si', __name__)
@api_si.route("/stock_indicators")
def get_stock_indicators():
return jsonify(stock_indicators=[
{"username": "alice", "user_id": 1},
{"username": "bob", "user_id": 2}
])
|
Implement blueprints on stock indicators
|
Implement blueprints on stock indicators
|
Python
|
mit
|
z0rkuM/stockbros,z0rkuM/stockbros,z0rkuM/stockbros,z0rkuM/stockbros
|
Implement blueprints on stock indicators
|
#!flask/bin/python
from flask import Blueprint, jsonify
api_si = Blueprint('api_si', __name__)
@api_si.route("/stock_indicators")
def get_stock_indicators():
return jsonify(stock_indicators=[
{"username": "alice", "user_id": 1},
{"username": "bob", "user_id": 2}
])
|
<commit_before><commit_msg>Implement blueprints on stock indicators<commit_after>
|
#!flask/bin/python
from flask import Blueprint, jsonify
api_si = Blueprint('api_si', __name__)
@api_si.route("/stock_indicators")
def get_stock_indicators():
return jsonify(stock_indicators=[
{"username": "alice", "user_id": 1},
{"username": "bob", "user_id": 2}
])
|
Implement blueprints on stock indicators
#!flask/bin/python
from flask import Blueprint, jsonify
api_si = Blueprint('api_si', __name__)
@api_si.route("/stock_indicators")
def get_stock_indicators():
return jsonify(stock_indicators=[
{"username": "alice", "user_id": 1},
{"username": "bob", "user_id": 2}
])
|
<commit_before><commit_msg>Implement blueprints on stock indicators<commit_after>#!flask/bin/python
from flask import Blueprint, jsonify
api_si = Blueprint('api_si', __name__)
@api_si.route("/stock_indicators")
def get_stock_indicators():
return jsonify(stock_indicators=[
{"username": "alice", "user_id": 1},
{"username": "bob", "user_id": 2}
])
|
|
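A blueprint does nothing until it is registered on an application, so the record above is typically paired with something like the sketch below; the import path and url_prefix are assumptions for illustration.

from flask import Flask
from StockIndicators.StockIndicators import api_si  # path assumed from the record's file name

app = Flask(__name__)
app.register_blueprint(api_si, url_prefix='/api')   # exposes GET /api/stock_indicators

if __name__ == '__main__':
    app.run()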
7d17358e514ad988b2619b8c364338a2058b400b
|
flocker/volume/script.py
|
flocker/volume/script.py
|
# Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""The command-line ``flocker-volume`` tool."""
import sys
from twisted.python.usage import Options
from twisted.python.filepath import FilePath
from twisted.internet.task import react
from twisted.internet.defer import succeed
from .service import VolumeService
from .. import __version__
class FlockerVolumeOptions(Options):
"""flocker-volume - volume management."""
optParameters = [
["config", None, b"/etc/flocker/volume.json",
"The path to the config file."],
]
def postOptions(self):
self["config"] = FilePath(self["config"])
def opt_version(self):
print(__version__)
raise SystemExit(0)
def _main(reactor, *arguments):
"""Parse command-line options and use them to run volume management."""
# Much of this should be moved (and expanded) into shared class:
# https://github.com/hybridlogic/flocker/issues/30
options = FlockerVolumeOptions()
options.parseOptions(arguments)
service = VolumeService(options["config"])
service.startService()
return succeed(None)
def main():
"""Entry point to the ``flocker-volume`` command-line tool."""
react(_main, sys.argv[1:])
|
# Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""The command-line ``flocker-volume`` tool."""
import sys
from twisted.python.usage import Options
from twisted.python.filepath import FilePath
from twisted.internet.task import react
from twisted.internet.defer import succeed
from .service import VolumeService
from .. import __version__
class FlockerVolumeOptions(Options):
"""Command line options for ``flocker-volume`` volume management tool."""
longdesc = """flocker-volume allows you to manage volumes, filesystems
that can be attached to Docker containers.
At the moment no functionality has been implemented.
"""
optParameters = [
["config", None, b"/etc/flocker/volume.json",
"The path to the config file."],
]
def postOptions(self):
self["config"] = FilePath(self["config"])
def opt_version(self):
"""Print the program's version and exit."""
print(__version__)
raise SystemExit(0)
def _main(reactor, *arguments):
"""Parse command-line options and use them to run volume management."""
# Much of this should be moved (and expanded) into shared class:
# https://github.com/hybridlogic/flocker/issues/30
options = FlockerVolumeOptions()
options.parseOptions(arguments)
service = VolumeService(options["config"])
service.startService()
return succeed(None)
def main():
"""Entry point to the ``flocker-volume`` command-line tool."""
react(_main, sys.argv[1:])
|
Address review comment: More useful help output.
|
Address review comment: More useful help output.
|
Python
|
apache-2.0
|
wallnerryan/flocker-profiles,mbrukman/flocker,adamtheturtle/flocker,jml/flocker,mbrukman/flocker,Azulinho/flocker,lukemarsden/flocker,adamtheturtle/flocker,w4ngyi/flocker,lukemarsden/flocker,runcom/flocker,agonzalezro/flocker,1d4Nf6/flocker,AndyHuu/flocker,w4ngyi/flocker,moypray/flocker,achanda/flocker,w4ngyi/flocker,hackday-profilers/flocker,1d4Nf6/flocker,1d4Nf6/flocker,mbrukman/flocker,LaynePeng/flocker,hackday-profilers/flocker,hackday-profilers/flocker,runcom/flocker,agonzalezro/flocker,wallnerryan/flocker-profiles,runcom/flocker,jml/flocker,Azulinho/flocker,AndyHuu/flocker,Azulinho/flocker,beni55/flocker,agonzalezro/flocker,lukemarsden/flocker,moypray/flocker,achanda/flocker,wallnerryan/flocker-profiles,beni55/flocker,jml/flocker,LaynePeng/flocker,moypray/flocker,AndyHuu/flocker,LaynePeng/flocker,achanda/flocker,adamtheturtle/flocker,beni55/flocker
|
# Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""The command-line ``flocker-volume`` tool."""
import sys
from twisted.python.usage import Options
from twisted.python.filepath import FilePath
from twisted.internet.task import react
from twisted.internet.defer import succeed
from .service import VolumeService
from .. import __version__
class FlockerVolumeOptions(Options):
"""flocker-volume - volume management."""
optParameters = [
["config", None, b"/etc/flocker/volume.json",
"The path to the config file."],
]
def postOptions(self):
self["config"] = FilePath(self["config"])
def opt_version(self):
print(__version__)
raise SystemExit(0)
def _main(reactor, *arguments):
"""Parse command-line options and use them to run volume management."""
# Much of this should be moved (and expanded) into shared class:
# https://github.com/hybridlogic/flocker/issues/30
options = FlockerVolumeOptions()
options.parseOptions(arguments)
service = VolumeService(options["config"])
service.startService()
return succeed(None)
def main():
"""Entry point to the ``flocker-volume`` command-line tool."""
react(_main, sys.argv[1:])
Address review comment: More useful help output.
|
# Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""The command-line ``flocker-volume`` tool."""
import sys
from twisted.python.usage import Options
from twisted.python.filepath import FilePath
from twisted.internet.task import react
from twisted.internet.defer import succeed
from .service import VolumeService
from .. import __version__
class FlockerVolumeOptions(Options):
"""Command line options for ``flocker-volume`` volume management tool."""
longdesc = """flocker-volume allows you to manage volumes, filesystems
that can be attached to Docker containers.
At the moment no functionality has been implemented.
"""
optParameters = [
["config", None, b"/etc/flocker/volume.json",
"The path to the config file."],
]
def postOptions(self):
self["config"] = FilePath(self["config"])
def opt_version(self):
"""Print the program's version and exit."""
print(__version__)
raise SystemExit(0)
def _main(reactor, *arguments):
"""Parse command-line options and use them to run volume management."""
# Much of this should be moved (and expanded) into shared class:
# https://github.com/hybridlogic/flocker/issues/30
options = FlockerVolumeOptions()
options.parseOptions(arguments)
service = VolumeService(options["config"])
service.startService()
return succeed(None)
def main():
"""Entry point to the ``flocker-volume`` command-line tool."""
react(_main, sys.argv[1:])
|
<commit_before># Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""The command-line ``flocker-volume`` tool."""
import sys
from twisted.python.usage import Options
from twisted.python.filepath import FilePath
from twisted.internet.task import react
from twisted.internet.defer import succeed
from .service import VolumeService
from .. import __version__
class FlockerVolumeOptions(Options):
"""flocker-volume - volume management."""
optParameters = [
["config", None, b"/etc/flocker/volume.json",
"The path to the config file."],
]
def postOptions(self):
self["config"] = FilePath(self["config"])
def opt_version(self):
print(__version__)
raise SystemExit(0)
def _main(reactor, *arguments):
"""Parse command-line options and use them to run volume management."""
# Much of this should be moved (and expanded) into shared class:
# https://github.com/hybridlogic/flocker/issues/30
options = FlockerVolumeOptions()
options.parseOptions(arguments)
service = VolumeService(options["config"])
service.startService()
return succeed(None)
def main():
"""Entry point to the ``flocker-volume`` command-line tool."""
react(_main, sys.argv[1:])
<commit_msg>Address review comment: More useful help output.<commit_after>
|
# Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""The command-line ``flocker-volume`` tool."""
import sys
from twisted.python.usage import Options
from twisted.python.filepath import FilePath
from twisted.internet.task import react
from twisted.internet.defer import succeed
from .service import VolumeService
from .. import __version__
class FlockerVolumeOptions(Options):
"""Command line options for ``flocker-volume`` volume management tool."""
longdesc = """flocker-volume allows you to manage volumes, filesystems
that can be attached to Docker containers.
At the moment no functionality has been implemented.
"""
optParameters = [
["config", None, b"/etc/flocker/volume.json",
"The path to the config file."],
]
def postOptions(self):
self["config"] = FilePath(self["config"])
def opt_version(self):
"""Print the program's version and exit."""
print(__version__)
raise SystemExit(0)
def _main(reactor, *arguments):
"""Parse command-line options and use them to run volume management."""
# Much of this should be moved (and expanded) into shared class:
# https://github.com/hybridlogic/flocker/issues/30
options = FlockerVolumeOptions()
options.parseOptions(arguments)
service = VolumeService(options["config"])
service.startService()
return succeed(None)
def main():
"""Entry point to the ``flocker-volume`` command-line tool."""
react(_main, sys.argv[1:])
|
# Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""The command-line ``flocker-volume`` tool."""
import sys
from twisted.python.usage import Options
from twisted.python.filepath import FilePath
from twisted.internet.task import react
from twisted.internet.defer import succeed
from .service import VolumeService
from .. import __version__
class FlockerVolumeOptions(Options):
"""flocker-volume - volume management."""
optParameters = [
["config", None, b"/etc/flocker/volume.json",
"The path to the config file."],
]
def postOptions(self):
self["config"] = FilePath(self["config"])
def opt_version(self):
print(__version__)
raise SystemExit(0)
def _main(reactor, *arguments):
"""Parse command-line options and use them to run volume management."""
# Much of this should be moved (and expanded) into shared class:
# https://github.com/hybridlogic/flocker/issues/30
options = FlockerVolumeOptions()
options.parseOptions(arguments)
service = VolumeService(options["config"])
service.startService()
return succeed(None)
def main():
"""Entry point to the ``flocker-volume`` command-line tool."""
react(_main, sys.argv[1:])
Address review comment: More useful help output.# Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""The command-line ``flocker-volume`` tool."""
import sys
from twisted.python.usage import Options
from twisted.python.filepath import FilePath
from twisted.internet.task import react
from twisted.internet.defer import succeed
from .service import VolumeService
from .. import __version__
class FlockerVolumeOptions(Options):
"""Command line options for ``flocker-volume`` volume management tool."""
longdesc = """flocker-volume allows you to manage volumes, filesystems
that can be attached to Docker containers.
At the moment no functionality has been implemented.
"""
optParameters = [
["config", None, b"/etc/flocker/volume.json",
"The path to the config file."],
]
def postOptions(self):
self["config"] = FilePath(self["config"])
def opt_version(self):
"""Print the program's version and exit."""
print(__version__)
raise SystemExit(0)
def _main(reactor, *arguments):
"""Parse command-line options and use them to run volume management."""
# Much of this should be moved (and expanded) into shared class:
# https://github.com/hybridlogic/flocker/issues/30
options = FlockerVolumeOptions()
options.parseOptions(arguments)
service = VolumeService(options["config"])
service.startService()
return succeed(None)
def main():
"""Entry point to the ``flocker-volume`` command-line tool."""
react(_main, sys.argv[1:])
|
<commit_before># Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""The command-line ``flocker-volume`` tool."""
import sys
from twisted.python.usage import Options
from twisted.python.filepath import FilePath
from twisted.internet.task import react
from twisted.internet.defer import succeed
from .service import VolumeService
from .. import __version__
class FlockerVolumeOptions(Options):
"""flocker-volume - volume management."""
optParameters = [
["config", None, b"/etc/flocker/volume.json",
"The path to the config file."],
]
def postOptions(self):
self["config"] = FilePath(self["config"])
def opt_version(self):
print(__version__)
raise SystemExit(0)
def _main(reactor, *arguments):
"""Parse command-line options and use them to run volume management."""
# Much of this should be moved (and expanded) into shared class:
# https://github.com/hybridlogic/flocker/issues/30
options = FlockerVolumeOptions()
options.parseOptions(arguments)
service = VolumeService(options["config"])
service.startService()
return succeed(None)
def main():
"""Entry point to the ``flocker-volume`` command-line tool."""
react(_main, sys.argv[1:])
<commit_msg>Address review comment: More useful help output.<commit_after># Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""The command-line ``flocker-volume`` tool."""
import sys
from twisted.python.usage import Options
from twisted.python.filepath import FilePath
from twisted.internet.task import react
from twisted.internet.defer import succeed
from .service import VolumeService
from .. import __version__
class FlockerVolumeOptions(Options):
"""Command line options for ``flocker-volume`` volume management tool."""
longdesc = """flocker-volume allows you to manage volumes, filesystems
that can be attached to Docker containers.
At the moment no functionality has been implemented.
"""
optParameters = [
["config", None, b"/etc/flocker/volume.json",
"The path to the config file."],
]
def postOptions(self):
self["config"] = FilePath(self["config"])
def opt_version(self):
"""Print the program's version and exit."""
print(__version__)
raise SystemExit(0)
def _main(reactor, *arguments):
"""Parse command-line options and use them to run volume management."""
# Much of this should be moved (and expanded) into shared class:
# https://github.com/hybridlogic/flocker/issues/30
options = FlockerVolumeOptions()
options.parseOptions(arguments)
service = VolumeService(options["config"])
service.startService()
return succeed(None)
def main():
"""Entry point to the ``flocker-volume`` command-line tool."""
react(_main, sys.argv[1:])
|
a2a73049c03f6144e68c4eca36bb70fdb929ac04
|
grab/spider/data/shortcut.py
|
grab/spider/data/shortcut.py
|
import os
from .base import Data
from grab.tools.files import hashed_path
from .. import Task
class MongoObjectImageData(Data):
def handler(self, url, collection, obj, path_field):
path = hashed_path(url, base_dir='media/post_image')
if os.path.exists(path):
if path != getattr(obj, path_field, None):
collection.update({'_id': obj['_id']},
{'$set': {path_field: path}})
else:
yield Task(
callback=self.task_handler,
url=url,
collection=collection,
path=path,
obj=obj,
path_field=path_field,
disable_cache=True,
)
def task_handler(self, grab, task):
grab.response.save(task.path)
task.collection.update({'_id': task.obj['_id']},
{'$set': {task.path_field: task.path}})
|
Add MongoObjectImageData shortcut to handle specific cases when you need to download image and assign its local path to some field of object stored in the mongo database
|
Add MongoObjectImageData shortcut to handle specific cases when you need to download image and assign its local path to some field of object stored in the mongo database
|
Python
|
mit
|
codevlabs/grab,pombredanne/grab-1,lorien/grab,liorvh/grab,giserh/grab,kevinlondon/grab,DDShadoww/grab,subeax/grab,giserh/grab,codevlabs/grab,shaunstanislaus/grab,pombredanne/grab-1,maurobaraldi/grab,alihalabyah/grab,raybuhr/grab,huiyi1990/grab,kevinlondon/grab,istinspring/grab,lorien/grab,alihalabyah/grab,huiyi1990/grab,subeax/grab,SpaceAppsXploration/grab,istinspring/grab,liorvh/grab,subeax/grab,SpaceAppsXploration/grab,raybuhr/grab,maurobaraldi/grab,shaunstanislaus/grab,DDShadoww/grab
|
Add MongoObjectImageData shortcut to handle specific cases when you need to download image and assign its local path to some field of object stored in the mongo database
|
import os
from .base import Data
from grab.tools.files import hashed_path
from .. import Task
class MongoObjectImageData(Data):
def handler(self, url, collection, obj, path_field):
path = hashed_path(url, base_dir='media/post_image')
if os.path.exists(path):
if path != getattr(obj, path_field, None):
collection.update({'_id': obj['_id']},
{'$set': {path_field: path}})
else:
yield Task(
callback=self.task_handler,
url=url,
collection=collection,
path=path,
obj=obj,
path_field=path_field,
disable_cache=True,
)
def task_handler(self, grab, task):
grab.response.save(task.path)
task.collection.update({'_id': task.obj['_id']},
{'$set': {task.path_field: task.path}})
|
<commit_before><commit_msg>Add MongoObjectImageData shortcut to handle specific cases when you need to download image and assign its local path to some field of object stored in the mongo database<commit_after>
|
import os
from .base import Data
from grab.tools.files import hashed_path
from .. import Task
class MongoObjectImageData(Data):
def handler(self, url, collection, obj, path_field):
path = hashed_path(url, base_dir='media/post_image')
if os.path.exists(path):
if path != getattr(obj, path_field, None):
collection.update({'_id': obj['_id']},
{'$set': {path_field: path}})
else:
yield Task(
callback=self.task_handler,
url=url,
collection=collection,
path=path,
obj=obj,
path_field=path_field,
disable_cache=True,
)
def task_handler(self, grab, task):
grab.response.save(task.path)
task.collection.update({'_id': task.obj['_id']},
{'$set': {task.path_field: task.path}})
|
Add MongoObjectImageData shortcut to handle specific cases when you need to download image and assign its local path to some field of object stored in the mongo databaseimport os
from .base import Data
from grab.tools.files import hashed_path
from .. import Task
class MongoObjectImageData(Data):
def handler(self, url, collection, obj, path_field):
path = hashed_path(url, base_dir='media/post_image')
if os.path.exists(path):
if path != getattr(obj, path_field, None):
collection.update({'_id': obj['_id']},
{'$set': {path_field: path}})
else:
yield Task(
callback=self.task_handler,
url=url,
collection=collection,
path=path,
obj=obj,
path_field=path_field,
disable_cache=True,
)
def task_handler(self, grab, task):
grab.response.save(task.path)
task.collection.update({'_id': task.obj['_id']},
{'$set': {task.path_field: task.path}})
|
<commit_before><commit_msg>Add MongoObjectImageData shortcut to handle specific cases when you need to download image and assign its local path to some field of object stored in the mongo database<commit_after>import os
from .base import Data
from grab.tools.files import hashed_path
from .. import Task
class MongoObjectImageData(Data):
def handler(self, url, collection, obj, path_field):
path = hashed_path(url, base_dir='media/post_image')
if os.path.exists(path):
if path != getattr(obj, path_field, None):
collection.update({'_id': obj['_id']},
{'$set': {path_field: path}})
else:
yield Task(
callback=self.task_handler,
url=url,
collection=collection,
path=path,
obj=obj,
path_field=path_field,
disable_cache=True,
)
def task_handler(self, grab, task):
grab.response.save(task.path)
task.collection.update({'_id': task.obj['_id']},
{'$set': {task.path_field: task.path}})
|
|
d790a9e1a83d4a7bc1555c23235c2b0a31a5b69a
|
functest/tests/unit/features/test_domino.py
|
functest/tests/unit/features/test_domino.py
|
#!/usr/bin/env python
# Copyright (c) 2017 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
# pylint: disable=missing-docstring
import logging
import unittest
from functest.opnfv_tests.features import domino
from functest.utils import constants
class DominoTesting(unittest.TestCase):
logging.disable(logging.CRITICAL)
def setUp(self):
self.domino = domino.Domino()
def test_init(self):
self.assertEqual(self.domino.project_name, "domino")
self.assertEqual(self.domino.case_name, "domino-multinode")
self.assertEqual(
self.domino.repo,
constants.CONST.__getattribute__("dir_repo_domino"))
self.assertEqual(
self.domino.cmd,
'cd {} && ./tests/run_multinode.sh'.format(self.domino.repo))
if __name__ == "__main__":
unittest.main(verbosity=2)
|
Add unit tests for domino
|
Add unit tests for domino
Change-Id: Ie6671080a3d38a17da0ee608a362605a6d9df9db
Signed-off-by: Cédric Ollivier <d48310251a4a484d041bc5d09a9ac4d86d20f793@orange.com>
|
Python
|
apache-2.0
|
opnfv/functest,mywulin/functest,opnfv/functest,mywulin/functest
|
Add unit tests for domino
Change-Id: Ie6671080a3d38a17da0ee608a362605a6d9df9db
Signed-off-by: Cédric Ollivier <d48310251a4a484d041bc5d09a9ac4d86d20f793@orange.com>
|
#!/usr/bin/env python
# Copyright (c) 2017 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
# pylint: disable=missing-docstring
import logging
import unittest
from functest.opnfv_tests.features import domino
from functest.utils import constants
class DominoTesting(unittest.TestCase):
logging.disable(logging.CRITICAL)
def setUp(self):
self.domino = domino.Domino()
def test_init(self):
self.assertEqual(self.domino.project_name, "domino")
self.assertEqual(self.domino.case_name, "domino-multinode")
self.assertEqual(
self.domino.repo,
constants.CONST.__getattribute__("dir_repo_domino"))
self.assertEqual(
self.domino.cmd,
'cd {} && ./tests/run_multinode.sh'.format(self.domino.repo))
if __name__ == "__main__":
unittest.main(verbosity=2)
|
<commit_before><commit_msg>Add unit tests for domino
Change-Id: Ie6671080a3d38a17da0ee608a362605a6d9df9db
Signed-off-by: Cédric Ollivier <d48310251a4a484d041bc5d09a9ac4d86d20f793@orange.com><commit_after>
|
#!/usr/bin/env python
# Copyright (c) 2017 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
# pylint: disable=missing-docstring
import logging
import unittest
from functest.opnfv_tests.features import domino
from functest.utils import constants
class DominoTesting(unittest.TestCase):
logging.disable(logging.CRITICAL)
def setUp(self):
self.domino = domino.Domino()
def test_init(self):
self.assertEqual(self.domino.project_name, "domino")
self.assertEqual(self.domino.case_name, "domino-multinode")
self.assertEqual(
self.domino.repo,
constants.CONST.__getattribute__("dir_repo_domino"))
self.assertEqual(
self.domino.cmd,
'cd {} && ./tests/run_multinode.sh'.format(self.domino.repo))
if __name__ == "__main__":
unittest.main(verbosity=2)
|
Add unit tests for domino
Change-Id: Ie6671080a3d38a17da0ee608a362605a6d9df9db
Signed-off-by: Cédric Ollivier <d48310251a4a484d041bc5d09a9ac4d86d20f793@orange.com>#!/usr/bin/env python
# Copyright (c) 2017 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
# pylint: disable=missing-docstring
import logging
import unittest
from functest.opnfv_tests.features import domino
from functest.utils import constants
class DominoTesting(unittest.TestCase):
logging.disable(logging.CRITICAL)
def setUp(self):
self.domino = domino.Domino()
def test_init(self):
self.assertEqual(self.domino.project_name, "domino")
self.assertEqual(self.domino.case_name, "domino-multinode")
self.assertEqual(
self.domino.repo,
constants.CONST.__getattribute__("dir_repo_domino"))
self.assertEqual(
self.domino.cmd,
'cd {} && ./tests/run_multinode.sh'.format(self.domino.repo))
if __name__ == "__main__":
unittest.main(verbosity=2)
|
<commit_before><commit_msg>Add unit tests for domino
Change-Id: Ie6671080a3d38a17da0ee608a362605a6d9df9db
Signed-off-by: Cédric Ollivier <d48310251a4a484d041bc5d09a9ac4d86d20f793@orange.com><commit_after>#!/usr/bin/env python
# Copyright (c) 2017 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
# pylint: disable=missing-docstring
import logging
import unittest
from functest.opnfv_tests.features import domino
from functest.utils import constants
class DominoTesting(unittest.TestCase):
logging.disable(logging.CRITICAL)
def setUp(self):
self.domino = domino.Domino()
def test_init(self):
self.assertEqual(self.domino.project_name, "domino")
self.assertEqual(self.domino.case_name, "domino-multinode")
self.assertEqual(
self.domino.repo,
constants.CONST.__getattribute__("dir_repo_domino"))
self.assertEqual(
self.domino.cmd,
'cd {} && ./tests/run_multinode.sh'.format(self.domino.repo))
if __name__ == "__main__":
unittest.main(verbosity=2)
|
|
88d936f6df9b609e7d6bdfc7d637d860b92da7a7
|
scripts/export_sequences_data.py
|
scripts/export_sequences_data.py
|
import argparse
import csv
import gevent.monkey
gevent.monkey.patch_all()
from closeio_api import Client as CloseIO_API
from gevent.pool import Pool
parser = argparse.ArgumentParser(description='Download a CSV of email sequences and their subscription counts (number of active/paused/finished subscriptions)')
parser.add_argument('--api-key', '-k', required=True, help='API Key')
args = parser.parse_args()
api = CloseIO_API(args.api_key)
org_id = api.get('api_key/' + args.api_key)['organization_id']
org_name = api.get('organization/' + org_id, params={'_fields': 'name'})['name'].replace('/', "")
params = {'_fields': 'id'}
has_more = True
offset = 0
sequence_ids = []
while has_more:
params['_skip'] = offset
resp = api.get('sequence', params=params)
for sequence in resp['data']:
sequence_ids.append(sequence['id'])
offset += len(resp['data'])
has_more = resp['has_more']
def fetch_sequence(sequence_id):
resp_sequence = api.get(f'sequence/{sequence_id}', params=params)
active_subscriptions = resp_sequence['subscription_counts_by_status']['active']
paused_subscriptions = resp_sequence['subscription_counts_by_status']['paused']
finished_subscriptions = resp_sequence['subscription_counts_by_status']['finished']
total_subscriptions = active_subscriptions + paused_subscriptions + finished_subscriptions
sequences.append({
'id': resp_sequence['id'],
'name': resp_sequence['name'],
'is_active': resp_sequence['status'] == 'active',
'total_subscriptions': total_subscriptions,
'active_subscriptions': active_subscriptions,
'paused_subscriptions': paused_subscriptions,
'finished_subscriptions': finished_subscriptions
})
sequences = []
pool = Pool(5)
pool.map(fetch_sequence, sequence_ids)
f = open(f'{org_name} Email Sequences.csv', 'wt', encoding='utf-8')
try:
keys = ['id', 'name', 'is_active', 'total_subscriptions', 'active_subscriptions', 'paused_subscriptions', 'finished_subscriptions']
writer = csv.DictWriter(f, keys)
writer.writeheader()
writer.writerows(sequences)
finally:
f.close()
|
Add a script that exports email sequences stats
|
Add a script that exports email sequences stats
|
Python
|
mit
|
closeio/closeio-api-scripts
|
Add a script that exports email sequences stats
|
import argparse
import csv
import gevent.monkey
gevent.monkey.patch_all()
from closeio_api import Client as CloseIO_API
from gevent.pool import Pool
parser = argparse.ArgumentParser(description='Download a CSV of email sequences and their subscription counts (number of active/paused/finished subscriptions)')
parser.add_argument('--api-key', '-k', required=True, help='API Key')
args = parser.parse_args()
api = CloseIO_API(args.api_key)
org_id = api.get('api_key/' + args.api_key)['organization_id']
org_name = api.get('organization/' + org_id, params={'_fields': 'name'})['name'].replace('/', "")
params = {'_fields': 'id'}
has_more = True
offset = 0
sequence_ids = []
while has_more:
params['_skip'] = offset
resp = api.get('sequence', params=params)
for sequence in resp['data']:
sequence_ids.append(sequence['id'])
offset += len(resp['data'])
has_more = resp['has_more']
def fetch_sequence(sequence_id):
resp_sequence = api.get(f'sequence/{sequence_id}', params=params)
active_subscriptions = resp_sequence['subscription_counts_by_status']['active']
paused_subscriptions = resp_sequence['subscription_counts_by_status']['paused']
finished_subscriptions = resp_sequence['subscription_counts_by_status']['finished']
total_subscriptions = active_subscriptions + paused_subscriptions + finished_subscriptions
sequences.append({
'id': resp_sequence['id'],
'name': resp_sequence['name'],
'is_active': resp_sequence['status'] == 'active',
'total_subscriptions': total_subscriptions,
'active_subscriptions': active_subscriptions,
'paused_subscriptions': paused_subscriptions,
'finished_subscriptions': finished_subscriptions
})
sequences = []
pool = Pool(5)
pool.map(fetch_sequence, sequence_ids)
f = open(f'{org_name} Email Sequences.csv', 'wt', encoding='utf-8')
try:
keys = ['id', 'name', 'is_active', 'total_subscriptions', 'active_subscriptions', 'paused_subscriptions', 'finished_subscriptions']
writer = csv.DictWriter(f, keys)
writer.writeheader()
writer.writerows(sequences)
finally:
f.close()
|
<commit_before><commit_msg>Add a script that exports email sequences stats<commit_after>
|
import argparse
import csv
import gevent.monkey
gevent.monkey.patch_all()
from closeio_api import Client as CloseIO_API
from gevent.pool import Pool
parser = argparse.ArgumentParser(description='Download a CSV of email sequences and their subscription counts (number of active/paused/finished subscriptions)')
parser.add_argument('--api-key', '-k', required=True, help='API Key')
args = parser.parse_args()
api = CloseIO_API(args.api_key)
org_id = api.get('api_key/' + args.api_key)['organization_id']
org_name = api.get('organization/' + org_id, params={'_fields': 'name'})['name'].replace('/', "")
params = {'_fields': 'id'}
has_more = True
offset = 0
sequence_ids = []
while has_more:
params['_skip'] = offset
resp = api.get('sequence', params=params)
for sequence in resp['data']:
sequence_ids.append(sequence['id'])
offset += len(resp['data'])
has_more = resp['has_more']
def fetch_sequence(sequence_id):
resp_sequence = api.get(f'sequence/{sequence_id}', params=params)
active_subscriptions = resp_sequence['subscription_counts_by_status']['active']
paused_subscriptions = resp_sequence['subscription_counts_by_status']['paused']
finished_subscriptions = resp_sequence['subscription_counts_by_status']['finished']
total_subscriptions = active_subscriptions + paused_subscriptions + finished_subscriptions
sequences.append({
'id': resp_sequence['id'],
'name': resp_sequence['name'],
'is_active': resp_sequence['status'] == 'active',
'total_subscriptions': total_subscriptions,
'active_subscriptions': active_subscriptions,
'paused_subscriptions': paused_subscriptions,
'finished_subscriptions': finished_subscriptions
})
sequences = []
pool = Pool(5)
pool.map(fetch_sequence, sequence_ids)
f = open(f'{org_name} Email Sequences.csv', 'wt', encoding='utf-8')
try:
keys = ['id', 'name', 'is_active', 'total_subscriptions', 'active_subscriptions', 'paused_subscriptions', 'finished_subscriptions']
writer = csv.DictWriter(f, keys)
writer.writeheader()
writer.writerows(sequences)
finally:
f.close()
|
Add a script that exports email sequences statsimport argparse
import csv
import gevent.monkey
gevent.monkey.patch_all()
from closeio_api import Client as CloseIO_API
from gevent.pool import Pool
parser = argparse.ArgumentParser(description='Download a CSV of email sequences and their subscription counts (number of active/paused/finished subscriptions)')
parser.add_argument('--api-key', '-k', required=True, help='API Key')
args = parser.parse_args()
api = CloseIO_API(args.api_key)
org_id = api.get('api_key/' + args.api_key)['organization_id']
org_name = api.get('organization/' + org_id, params={'_fields': 'name'})['name'].replace('/', "")
params = {'_fields': 'id'}
has_more = True
offset = 0
sequence_ids = []
while has_more:
params['_skip'] = offset
resp = api.get('sequence', params=params)
for sequence in resp['data']:
sequence_ids.append(sequence['id'])
offset += len(resp['data'])
has_more = resp['has_more']
def fetch_sequence(sequence_id):
resp_sequence = api.get(f'sequence/{sequence_id}', params=params)
active_subscriptions = resp_sequence['subscription_counts_by_status']['active']
paused_subscriptions = resp_sequence['subscription_counts_by_status']['paused']
finished_subscriptions = resp_sequence['subscription_counts_by_status']['finished']
total_subscriptions = active_subscriptions + paused_subscriptions + finished_subscriptions
sequences.append({
'id': resp_sequence['id'],
'name': resp_sequence['name'],
'is_active': resp_sequence['status'] == 'active',
'total_subscriptions': total_subscriptions,
'active_subscriptions': active_subscriptions,
'paused_subscriptions': paused_subscriptions,
'finished_subscriptions': finished_subscriptions
})
sequences = []
pool = Pool(5)
pool.map(fetch_sequence, sequence_ids)
f = open(f'{org_name} Email Sequences.csv', 'wt', encoding='utf-8')
try:
keys = ['id', 'name', 'is_active', 'total_subscriptions', 'active_subscriptions', 'paused_subscriptions', 'finished_subscriptions']
writer = csv.DictWriter(f, keys)
writer.writeheader()
writer.writerows(sequences)
finally:
f.close()
|
<commit_before><commit_msg>Add a script that exports email sequences stats<commit_after>import argparse
import csv
import gevent.monkey
gevent.monkey.patch_all()
from closeio_api import Client as CloseIO_API
from gevent.pool import Pool
parser = argparse.ArgumentParser(description='Download a CSV of email sequences and their subscription counts (number of active/paused/finished subscriptions)')
parser.add_argument('--api-key', '-k', required=True, help='API Key')
args = parser.parse_args()
api = CloseIO_API(args.api_key)
org_id = api.get('api_key/' + args.api_key)['organization_id']
org_name = api.get('organization/' + org_id, params={'_fields': 'name'})['name'].replace('/', "")
params = {'_fields': 'id'}
has_more = True
offset = 0
sequence_ids = []
while has_more:
params['_skip'] = offset
resp = api.get('sequence', params=params)
for sequence in resp['data']:
sequence_ids.append(sequence['id'])
offset += len(resp['data'])
has_more = resp['has_more']
def fetch_sequence(sequence_id):
resp_sequence = api.get(f'sequence/{sequence_id}', params=params)
active_subscriptions = resp_sequence['subscription_counts_by_status']['active']
paused_subscriptions = resp_sequence['subscription_counts_by_status']['paused']
finished_subscriptions = resp_sequence['subscription_counts_by_status']['finished']
total_subscriptions = active_subscriptions + paused_subscriptions + finished_subscriptions
sequences.append({
'id': resp_sequence['id'],
'name': resp_sequence['name'],
'is_active': resp_sequence['status'] == 'active',
'total_subscriptions': total_subscriptions,
'active_subscriptions': active_subscriptions,
'paused_subscriptions': paused_subscriptions,
'finished_subscriptions': finished_subscriptions
})
sequences = []
pool = Pool(5)
pool.map(fetch_sequence, sequence_ids)
f = open(f'{org_name} Email Sequences.csv', 'wt', encoding='utf-8')
try:
keys = ['id', 'name', 'is_active', 'total_subscriptions', 'active_subscriptions', 'paused_subscriptions', 'finished_subscriptions']
writer = csv.DictWriter(f, keys)
writer.writeheader()
writer.writerows(sequences)
finally:
f.close()
|
|
f68b51409b5a2f0ec3ad8720b32cdd1e9174dbd6
|
scripts/linearmodel.py
|
scripts/linearmodel.py
|
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("data/", one_hot=True)
image_dim = 28 * 28
label_count = 10
graph = tf.Graph()
with graph.as_default():
x = tf.placeholder("float", shape=[None, image_dim])
y_ = tf.placeholder("float", shape=[None, label_count])
W = tf.Variable(tf.zeros([ image_dim, label_count ]))
b = tf.Variable(tf.zeros([ label_count ]))
y = tf.nn.softmax(tf.matmul(x, W) + b)
cross_entropy = - tf.reduce_mean(y_ * tf.log(y))
optimizer = tf.train.GradientDescentOptimizer(0.4).minimize(cross_entropy)
# tf.train.write_graph(graph.as_graph_def(), '.', 'linearmodel.pbtxt', as_text=True)
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
for i in range(1000):
print sess.run([ cross_entropy ], feed_dict={x: mnist.test.images, y_: mnist.test.labels})
sess.run([ optimizer ], feed_dict={x: mnist.test.images, y_: mnist.test.labels})
|
Add a linear sample for mnist in python.
|
Add a linear sample for mnist in python.
|
Python
|
apache-2.0
|
LaurentMazare/tensorflow-ocaml,hhugo/tensorflow-ocaml,LaurentMazare/tensorflow-ocaml,hhugo/tensorflow-ocaml
|
Add a linear sample for mnist in python.
|
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("data/", one_hot=True)
image_dim = 28 * 28
label_count = 10
graph = tf.Graph()
with graph.as_default():
x = tf.placeholder("float", shape=[None, image_dim])
y_ = tf.placeholder("float", shape=[None, label_count])
W = tf.Variable(tf.zeros([ image_dim, label_count ]))
b = tf.Variable(tf.zeros([ label_count ]))
y = tf.nn.softmax(tf.matmul(x, W) + b)
cross_entropy = - tf.reduce_mean(y_ * tf.log(y))
optimizer = tf.train.GradientDescentOptimizer(0.4).minimize(cross_entropy)
# tf.train.write_graph(graph.as_graph_def(), '.', 'linearmodel.pbtxt', as_text=True)
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
for i in range(1000):
print sess.run([ cross_entropy ], feed_dict={x: mnist.test.images, y_: mnist.test.labels})
sess.run([ optimizer ], feed_dict={x: mnist.test.images, y_: mnist.test.labels})
|
<commit_before><commit_msg>Add a linear sample for mnist in python.<commit_after>
|
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("data/", one_hot=True)
image_dim = 28 * 28
label_count = 10
graph = tf.Graph()
with graph.as_default():
x = tf.placeholder("float", shape=[None, image_dim])
y_ = tf.placeholder("float", shape=[None, label_count])
W = tf.Variable(tf.zeros([ image_dim, label_count ]))
b = tf.Variable(tf.zeros([ label_count ]))
y = tf.nn.softmax(tf.matmul(x, W) + b)
cross_entropy = - tf.reduce_mean(y_ * tf.log(y))
optimizer = tf.train.GradientDescentOptimizer(0.4).minimize(cross_entropy)
# tf.train.write_graph(graph.as_graph_def(), '.', 'linearmodel.pbtxt', as_text=True)
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
for i in range(1000):
print sess.run([ cross_entropy ], feed_dict={x: mnist.test.images, y_: mnist.test.labels})
sess.run([ optimizer ], feed_dict={x: mnist.test.images, y_: mnist.test.labels})
|
Add a linear sample for mnist in python.import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("data/", one_hot=True)
image_dim = 28 * 28
label_count = 10
graph = tf.Graph()
with graph.as_default():
x = tf.placeholder("float", shape=[None, image_dim])
y_ = tf.placeholder("float", shape=[None, label_count])
W = tf.Variable(tf.zeros([ image_dim, label_count ]))
b = tf.Variable(tf.zeros([ label_count ]))
y = tf.nn.softmax(tf.matmul(x, W) + b)
cross_entropy = - tf.reduce_mean(y_ * tf.log(y))
optimizer = tf.train.GradientDescentOptimizer(0.4).minimize(cross_entropy)
# tf.train.write_graph(graph.as_graph_def(), '.', 'linearmodel.pbtxt', as_text=True)
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
for i in range(1000):
print sess.run([ cross_entropy ], feed_dict={x: mnist.test.images, y_: mnist.test.labels})
sess.run([ optimizer ], feed_dict={x: mnist.test.images, y_: mnist.test.labels})
|
<commit_before><commit_msg>Add a linear sample for mnist in python.<commit_after>import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("data/", one_hot=True)
image_dim = 28 * 28
label_count = 10
graph = tf.Graph()
with graph.as_default():
x = tf.placeholder("float", shape=[None, image_dim])
y_ = tf.placeholder("float", shape=[None, label_count])
W = tf.Variable(tf.zeros([ image_dim, label_count ]))
b = tf.Variable(tf.zeros([ label_count ]))
y = tf.nn.softmax(tf.matmul(x, W) + b)
cross_entropy = - tf.reduce_mean(y_ * tf.log(y))
optimizer = tf.train.GradientDescentOptimizer(0.4).minimize(cross_entropy)
# tf.train.write_graph(graph.as_graph_def(), '.', 'linearmodel.pbtxt', as_text=True)
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
for i in range(1000):
print sess.run([ cross_entropy ], feed_dict={x: mnist.test.images, y_: mnist.test.labels})
sess.run([ optimizer ], feed_dict={x: mnist.test.images, y_: mnist.test.labels})
|
|
6a06cbcb6b3ee52a85dc4bb0eeb952234e05b6d5
|
nototools/drop_hints.py
|
nototools/drop_hints.py
|
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Drop hints from a font."""
__author__ = 'roozbeh@google.com (Roozbeh Pournader)'
import array
import sys
from fontTools import ttLib
def drop_hints_from_glyphs(font):
"""Drops the hints from a font's glyphs."""
glyf_table = font['glyf']
for glyph_index in range(len(glyf_table.glyphOrder)):
glyph_name = glyf_table.glyphOrder[glyph_index]
glyph = glyf_table[glyph_name]
if glyph.numberOfContours > 0:
if glyph.program.bytecode:
glyph.program.bytecode = array.array('B')
def drop_tables(font, tables):
"""Drops the listed tables from a font."""
for table in tables:
if table in font:
del font[table]
def main(argv):
"""Drop the hints from the first file specified and save as second."""
font = ttLib.TTFont(argv[1])
drop_hints_from_glyphs(font)
drop_tables(font, ['cvt', 'fpgm', 'hdmx', 'LTSH', 'prep', 'VDMX'])
font.save(argv[2])
if __name__ == '__main__':
main(sys.argv)
|
Add script to drop hints.
|
[nototools] Add script to drop hints.
|
Python
|
apache-2.0
|
dougfelt/nototools,googlei18n/nototools,pahans/nototools,dougfelt/nototools,googlefonts/nototools,googlei18n/nototools,googlefonts/nototools,anthrotype/nototools,dougfelt/nototools,pathumego/nototools,anthrotype/nototools,pahans/nototools,davelab6/nototools,namemealrady/nototools,googlei18n/nototools,pathumego/nototools,moyogo/nototools,googlefonts/nototools,davelab6/nototools,anthrotype/nototools,namemealrady/nototools,namemealrady/nototools,googlefonts/nototools,googlefonts/nototools,moyogo/nototools,davelab6/nototools,moyogo/nototools,pathumego/nototools,pahans/nototools
|
[nototools] Add script to drop hints.
|
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Drop hints from a font."""
__author__ = 'roozbeh@google.com (Roozbeh Pournader)'
import array
import sys
from fontTools import ttLib
def drop_hints_from_glyphs(font):
"""Drops the hints from a font's glyphs."""
glyf_table = font['glyf']
for glyph_index in range(len(glyf_table.glyphOrder)):
glyph_name = glyf_table.glyphOrder[glyph_index]
glyph = glyf_table[glyph_name]
if glyph.numberOfContours > 0:
if glyph.program.bytecode:
glyph.program.bytecode = array.array('B')
def drop_tables(font, tables):
"""Drops the listed tables from a font."""
for table in tables:
if table in font:
del font[table]
def main(argv):
"""Drop the hints from the first file specified and save as second."""
font = ttLib.TTFont(argv[1])
drop_hints_from_glyphs(font)
drop_tables(font, ['cvt', 'fpgm', 'hdmx', 'LTSH', 'prep', 'VDMX'])
font.save(argv[2])
if __name__ == '__main__':
main(sys.argv)
|
<commit_before><commit_msg>[nototools] Add script to drop hints.<commit_after>
|
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Drop hints from a font."""
__author__ = 'roozbeh@google.com (Roozbeh Pournader)'
import array
import sys
from fontTools import ttLib
def drop_hints_from_glyphs(font):
"""Drops the hints from a font's glyphs."""
glyf_table = font['glyf']
for glyph_index in range(len(glyf_table.glyphOrder)):
glyph_name = glyf_table.glyphOrder[glyph_index]
glyph = glyf_table[glyph_name]
if glyph.numberOfContours > 0:
if glyph.program.bytecode:
glyph.program.bytecode = array.array('B')
def drop_tables(font, tables):
"""Drops the listed tables from a font."""
for table in tables:
if table in font:
del font[table]
def main(argv):
"""Drop the hints from the first file specified and save as second."""
font = ttLib.TTFont(argv[1])
drop_hints_from_glyphs(font)
drop_tables(font, ['cvt', 'fpgm', 'hdmx', 'LTSH', 'prep', 'VDMX'])
font.save(argv[2])
if __name__ == '__main__':
main(sys.argv)
|
[nototools] Add script to drop hints.#!/usr/bin/python
#
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Drop hints from a font."""
__author__ = 'roozbeh@google.com (Roozbeh Pournader)'
import array
import sys
from fontTools import ttLib
def drop_hints_from_glyphs(font):
"""Drops the hints from a font's glyphs."""
glyf_table = font['glyf']
for glyph_index in range(len(glyf_table.glyphOrder)):
glyph_name = glyf_table.glyphOrder[glyph_index]
glyph = glyf_table[glyph_name]
if glyph.numberOfContours > 0:
if glyph.program.bytecode:
glyph.program.bytecode = array.array('B')
def drop_tables(font, tables):
"""Drops the listed tables from a font."""
for table in tables:
if table in font:
del font[table]
def main(argv):
"""Drop the hints from the first file specified and save as second."""
font = ttLib.TTFont(argv[1])
drop_hints_from_glyphs(font)
drop_tables(font, ['cvt', 'fpgm', 'hdmx', 'LTSH', 'prep', 'VDMX'])
font.save(argv[2])
if __name__ == '__main__':
main(sys.argv)
|
<commit_before><commit_msg>[nototools] Add script to drop hints.<commit_after>#!/usr/bin/python
#
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Drop hints from a font."""
__author__ = 'roozbeh@google.com (Roozbeh Pournader)'
import array
import sys
from fontTools import ttLib
def drop_hints_from_glyphs(font):
"""Drops the hints from a font's glyphs."""
glyf_table = font['glyf']
for glyph_index in range(len(glyf_table.glyphOrder)):
glyph_name = glyf_table.glyphOrder[glyph_index]
glyph = glyf_table[glyph_name]
if glyph.numberOfContours > 0:
if glyph.program.bytecode:
glyph.program.bytecode = array.array('B')
def drop_tables(font, tables):
"""Drops the listed tables from a font."""
for table in tables:
if table in font:
del font[table]
def main(argv):
"""Drop the hints from the first file specified and save as second."""
font = ttLib.TTFont(argv[1])
drop_hints_from_glyphs(font)
drop_tables(font, ['cvt', 'fpgm', 'hdmx', 'LTSH', 'prep', 'VDMX'])
font.save(argv[2])
if __name__ == '__main__':
main(sys.argv)
|
|
620b7afd50e93847dc6d9fa08751fd69bec35d95
|
barython/events/__init__.py
|
barython/events/__init__.py
|
#!/usr/bin/env python3
import logging
import threading
logger = logging.getLogger("barython")
class _Hook(threading.Thread):
#: list of callbacks
callbacks = None
def notify(self, *args, **kwargs):
for c in self.callbacks:
try:
threading.Thread(target=c, args=args, kwargs=kwargs).start()
except:
continue
def __init__(self, callbacks=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.callbacks = []
if callbacks is not None:
self.callbacks.extend(callbacks)
|
Add the abstract class _Hook
|
Add the abstract class _Hook
Related to #2
Should be used by the panel to handle events and spread it through the
widgets.
|
Python
|
bsd-3-clause
|
Anthony25/barython
|
Add the abstract class _Hook
Related to #2
Should be used by the panel to handle events and spread it through the
widgets.
|
#!/usr/bin/env python3
import logging
import threading
logger = logging.getLogger("barython")
class _Hook(threading.Thread):
#: list of callbacks
callbacks = None
def notify(self, *args, **kwargs):
for c in self.callbacks:
try:
threading.Thread(target=c, args=args, kwargs=kwargs).start()
except:
continue
def __init__(self, callbacks=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.callbacks = []
if callbacks is not None:
self.callbacks.extend(callbacks)
|
<commit_before><commit_msg>Add the abstract class _Hook
Related to #2
Should be used by the panel to handle events and spread it through the
widgets.<commit_after>
|
#!/usr/bin/env python3
import logging
import threading
logger = logging.getLogger("barython")
class _Hook(threading.Thread):
#: list of callbacks
callbacks = None
def notify(self, *args, **kwargs):
for c in self.callbacks:
try:
threading.Thread(target=c, args=args, kwargs=kwargs).start()
except:
continue
def __init__(self, callbacks=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.callbacks = []
if callbacks is not None:
self.callbacks.extend(callbacks)
|
Add the abstract class _Hook
Related to #2
Should be used by the panel to handle events and spread it through the
widgets.#!/usr/bin/env python3
import logging
import threading
logger = logging.getLogger("barython")
class _Hook(threading.Thread):
#: list of callbacks
callbacks = None
def notify(self, *args, **kwargs):
for c in self.callbacks:
try:
threading.Thread(target=c, args=args, kwargs=kwargs).start()
except:
continue
def __init__(self, callbacks=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.callbacks = []
if callbacks is not None:
self.callbacks.extend(callbacks)
|
<commit_before><commit_msg>Add the abstract class _Hook
Related to #2
Should be used by the panel to handle events and spread it through the
widgets.<commit_after>#!/usr/bin/env python3
import logging
import threading
logger = logging.getLogger("barython")
class _Hook(threading.Thread):
#: list of callbacks
callbacks = None
def notify(self, *args, **kwargs):
for c in self.callbacks:
try:
threading.Thread(target=c, args=args, kwargs=kwargs).start()
except:
continue
def __init__(self, callbacks=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.callbacks = []
if callbacks is not None:
self.callbacks.extend(callbacks)
|
|
ef754c3bb0fd4d026b898fd259632d98f2688ab0
|
test.py
|
test.py
|
#!/usr/bin/env python
import ystockquote as y
x = 'SYK'
a = y.get_all(x)
# 'fifty_two_week_low', 'fifty_day_moving_avg', 'price', 'price_book_ratio', 'volume', 'market_cap', 'dividend_yield', 'ebitda', 'change', 'dividend_per_share', 'stock_exchange', 'two_hundred_day_moving_avg', 'fifty_two_week_high', 'price_sales_ratio', 'price_earnings_growth_ratio', 'earnings_per_share', 'short_ratio', 'avg_daily_volume', 'price_earnings_ratio', 'book_value'
L52 = int(round(float(a['fifty_two_week_low']), 0))
P = round(float(a['price']), 1)
C = a['change']
H52 = int(round(float(a['fifty_two_week_high']), 0))
PE = round(float(a['price_earnings_ratio']), 1)
Cp = int(round(float(C) / float(P) * 100))
ran = round((float(P) - float(L52)) / (float(H52) - float(L52)) * 100)
#printme = '{} {} {} {}% [{} {}] {}% PE {}'.format(x, P, C, Cp, L52, H52, ran, PE)
printme = '{} {} {}% [{} {}] PE {}'.format(x, P, Cp, L52, H52, PE)
print printme
print "_" * 31
print len(printme)
# price(x)
# print y.get_change(x)
# print y.get_52_week_high(x)
# print y.get_52_week_low(x)
# print y.get_price_earnings_ratio(x)
|
Print more stock stuff in small space.
|
Print more stock stuff in small space.
|
Python
|
mit
|
zimolzak/Raspberry-Pi-newbie,zimolzak/Raspberry-Pi-newbie,zimolzak/Raspberry-Pi-newbie,zimolzak/Raspberry-Pi-newbie,zimolzak/Raspberry-Pi-newbie
|
Print more stock stuff in small space.
|
#!/usr/bin/env python
import ystockquote as y
x = 'SYK'
a = y.get_all(x)
# 'fifty_two_week_low', 'fifty_day_moving_avg', 'price', 'price_book_ratio', 'volume', 'market_cap', 'dividend_yield', 'ebitda', 'change', 'dividend_per_share', 'stock_exchange', 'two_hundred_day_moving_avg', 'fifty_two_week_high', 'price_sales_ratio', 'price_earnings_growth_ratio', 'earnings_per_share', 'short_ratio', 'avg_daily_volume', 'price_earnings_ratio', 'book_value'
L52 = int(round(float(a['fifty_two_week_low']), 0))
P = round(float(a['price']), 1)
C = a['change']
H52 = int(round(float(a['fifty_two_week_high']), 0))
PE = round(float(a['price_earnings_ratio']), 1)
Cp = int(round(float(C) / float(P) * 100))
ran = round((float(P) - float(L52)) / (float(H52) - float(L52)) * 100)
#printme = '{} {} {} {}% [{} {}] {}% PE {}'.format(x, P, C, Cp, L52, H52, ran, PE)
printme = '{} {} {}% [{} {}] PE {}'.format(x, P, Cp, L52, H52, PE)
print printme
print "_" * 31
print len(printme)
# price(x)
# print y.get_change(x)
# print y.get_52_week_high(x)
# print y.get_52_week_low(x)
# print y.get_price_earnings_ratio(x)
|
<commit_before><commit_msg>Print more stock stuff in small space.<commit_after>
|
#!/usr/bin/env python
import ystockquote as y
x = 'SYK'
a = y.get_all(x)
# 'fifty_two_week_low', 'fifty_day_moving_avg', 'price', 'price_book_ratio', 'volume', 'market_cap', 'dividend_yield', 'ebitda', 'change', 'dividend_per_share', 'stock_exchange', 'two_hundred_day_moving_avg', 'fifty_two_week_high', 'price_sales_ratio', 'price_earnings_growth_ratio', 'earnings_per_share', 'short_ratio', 'avg_daily_volume', 'price_earnings_ratio', 'book_value'
L52 = int(round(float(a['fifty_two_week_low']), 0))
P = round(float(a['price']), 1)
C = a['change']
H52 = int(round(float(a['fifty_two_week_high']), 0))
PE = round(float(a['price_earnings_ratio']), 1)
Cp = int(round(float(C) / float(P) * 100))
ran = round((float(P) - float(L52)) / (float(H52) - float(L52)) * 100)
#printme = '{} {} {} {}% [{} {}] {}% PE {}'.format(x, P, C, Cp, L52, H52, ran, PE)
printme = '{} {} {}% [{} {}] PE {}'.format(x, P, Cp, L52, H52, PE)
print printme
print "_" * 31
print len(printme)
# price(x)
# print y.get_change(x)
# print y.get_52_week_high(x)
# print y.get_52_week_low(x)
# print y.get_price_earnings_ratio(x)
|
Print more stock stuff in small space.#!/usr/bin/env python
import ystockquote as y
x = 'SYK'
a = y.get_all(x)
# 'fifty_two_week_low', 'fifty_day_moving_avg', 'price', 'price_book_ratio', 'volume', 'market_cap', 'dividend_yield', 'ebitda', 'change', 'dividend_per_share', 'stock_exchange', 'two_hundred_day_moving_avg', 'fifty_two_week_high', 'price_sales_ratio', 'price_earnings_growth_ratio', 'earnings_per_share', 'short_ratio', 'avg_daily_volume', 'price_earnings_ratio', 'book_value'
L52 = int(round(float(a['fifty_two_week_low']), 0))
P = round(float(a['price']), 1)
C = a['change']
H52 = int(round(float(a['fifty_two_week_high']), 0))
PE = round(float(a['price_earnings_ratio']), 1)
Cp = int(round(float(C) / float(P) * 100))
ran = round((float(P) - float(L52)) / (float(H52) - float(L52)) * 100)
#printme = '{} {} {} {}% [{} {}] {}% PE {}'.format(x, P, C, Cp, L52, H52, ran, PE)
printme = '{} {} {}% [{} {}] PE {}'.format(x, P, Cp, L52, H52, PE)
print printme
print "_" * 31
print len(printme)
# price(x)
# print y.get_change(x)
# print y.get_52_week_high(x)
# print y.get_52_week_low(x)
# print y.get_price_earnings_ratio(x)
|
<commit_before><commit_msg>Print more stock stuff in small space.<commit_after>#!/usr/bin/env python
import ystockquote as y
x = 'SYK'
a = y.get_all(x)
# 'fifty_two_week_low', 'fifty_day_moving_avg', 'price', 'price_book_ratio', 'volume', 'market_cap', 'dividend_yield', 'ebitda', 'change', 'dividend_per_share', 'stock_exchange', 'two_hundred_day_moving_avg', 'fifty_two_week_high', 'price_sales_ratio', 'price_earnings_growth_ratio', 'earnings_per_share', 'short_ratio', 'avg_daily_volume', 'price_earnings_ratio', 'book_value'
L52 = int(round(float(a['fifty_two_week_low']), 0))
P = round(float(a['price']), 1)
C = a['change']
H52 = int(round(float(a['fifty_two_week_high']), 0))
PE = round(float(a['price_earnings_ratio']), 1)
Cp = int(round(float(C) / float(P) * 100))
ran = round((float(P) - float(L52)) / (float(H52) - float(L52)) * 100)
#printme = '{} {} {} {}% [{} {}] {}% PE {}'.format(x, P, C, Cp, L52, H52, ran, PE)
printme = '{} {} {}% [{} {}] PE {}'.format(x, P, Cp, L52, H52, PE)
print printme
print "_" * 31
print len(printme)
# price(x)
# print y.get_change(x)
# print y.get_52_week_high(x)
# print y.get_52_week_low(x)
# print y.get_price_earnings_ratio(x)
|
|
22c6455ce5e05e5ec532d17210ef60fed4bb6aba
|
tests/chainer_tests/training_tests/extensions_tests/test_print_report.py
|
tests/chainer_tests/training_tests/extensions_tests/test_print_report.py
|
import sys
import unittest
from chainer import testing
from chainer.training import extensions
class TestPrintReport(unittest.TestCase):
def test_stream_typecheck(self):
report = extensions.PrintReport(['epoch'], out=sys.stderr)
self.assertIsInstance(report, extensions.PrintReport)
with self.assertRaises(TypeError):
report = extensions.PrintReport(['epoch'], out=False)
testing.run_module(__name__, __file__)
|
Test typechecking for an output stream
|
Test typechecking for an output stream
|
Python
|
mit
|
keisuke-umezawa/chainer,hvy/chainer,okuta/chainer,rezoo/chainer,wkentaro/chainer,tkerola/chainer,keisuke-umezawa/chainer,niboshi/chainer,ktnyt/chainer,ktnyt/chainer,niboshi/chainer,jnishi/chainer,pfnet/chainer,chainer/chainer,ktnyt/chainer,hvy/chainer,okuta/chainer,keisuke-umezawa/chainer,niboshi/chainer,jnishi/chainer,wkentaro/chainer,jnishi/chainer,ktnyt/chainer,hvy/chainer,niboshi/chainer,chainer/chainer,keisuke-umezawa/chainer,chainer/chainer,okuta/chainer,jnishi/chainer,chainer/chainer,okuta/chainer,hvy/chainer,wkentaro/chainer,wkentaro/chainer
|
Test typechecking for an output stream
|
import sys
import unittest
from chainer import testing
from chainer.training import extensions
class TestPrintReport(unittest.TestCase):
def test_stream_typecheck(self):
report = extensions.PrintReport(['epoch'], out=sys.stderr)
self.assertIsInstance(report, extensions.PrintReport)
with self.assertRaises(TypeError):
report = extensions.PrintReport(['epoch'], out=False)
testing.run_module(__name__, __file__)
|
<commit_before><commit_msg>Test typechecking for an output stream<commit_after>
|
import sys
import unittest
from chainer import testing
from chainer.training import extensions
class TestPrintReport(unittest.TestCase):
def test_stream_typecheck(self):
report = extensions.PrintReport(['epoch'], out=sys.stderr)
self.assertIsInstance(report, extensions.PrintReport)
with self.assertRaises(TypeError):
report = extensions.PrintReport(['epoch'], out=False)
testing.run_module(__name__, __file__)
|
Test typechecking for an output streamimport sys
import unittest
from chainer import testing
from chainer.training import extensions
class TestPrintReport(unittest.TestCase):
def test_stream_typecheck(self):
report = extensions.PrintReport(['epoch'], out=sys.stderr)
self.assertIsInstance(report, extensions.PrintReport)
with self.assertRaises(TypeError):
report = extensions.PrintReport(['epoch'], out=False)
testing.run_module(__name__, __file__)
|
<commit_before><commit_msg>Test typechecking for an output stream<commit_after>import sys
import unittest
from chainer import testing
from chainer.training import extensions
class TestPrintReport(unittest.TestCase):
def test_stream_typecheck(self):
report = extensions.PrintReport(['epoch'], out=sys.stderr)
self.assertIsInstance(report, extensions.PrintReport)
with self.assertRaises(TypeError):
report = extensions.PrintReport(['epoch'], out=False)
testing.run_module(__name__, __file__)
|
|
c4cfacfb8038b104ff91baf664ef1359a8ebb128
|
games/migrations/0010_auto_20160615_0436.py
|
games/migrations/0010_auto_20160615_0436.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-15 02:36
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('games', '0009_installer_rating'),
]
operations = [
migrations.AlterField(
model_name='installer',
name='rating',
field=models.CharField(blank=True, choices=[(b'platinum', b'Platinum: installs and runs flawlessly'), (b'garbage', b'Garbage: game is not playable'), (b'bronze', b'Bronze: works: but has some issues: even for normal use'), (b'silver', b'Silver: works excellently for "normal" use but some features may be broken'), (b'gold', b'Gold: works flawlessly with some minor tweaking')], max_length=24),
),
]
|
Add migration for rating choices modification
|
Add migration for rating choices modification
|
Python
|
agpl-3.0
|
lutris/website,Turupawn/website,Turupawn/website,lutris/website,Turupawn/website,Turupawn/website,lutris/website,lutris/website
|
Add migration for rating choices modification
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-15 02:36
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('games', '0009_installer_rating'),
]
operations = [
migrations.AlterField(
model_name='installer',
name='rating',
field=models.CharField(blank=True, choices=[(b'platinum', b'Platinum: installs and runs flawlessly'), (b'garbage', b'Garbage: game is not playable'), (b'bronze', b'Bronze: works: but has some issues: even for normal use'), (b'silver', b'Silver: works excellently for "normal" use but some features may be broken'), (b'gold', b'Gold: works flawlessly with some minor tweaking')], max_length=24),
),
]
|
<commit_before><commit_msg>Add migration for rating choices modification<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-15 02:36
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('games', '0009_installer_rating'),
]
operations = [
migrations.AlterField(
model_name='installer',
name='rating',
field=models.CharField(blank=True, choices=[(b'platinum', b'Platinum: installs and runs flawlessly'), (b'garbage', b'Garbage: game is not playable'), (b'bronze', b'Bronze: works: but has some issues: even for normal use'), (b'silver', b'Silver: works excellently for "normal" use but some features may be broken'), (b'gold', b'Gold: works flawlessly with some minor tweaking')], max_length=24),
),
]
|
Add migration for rating choices modification# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-15 02:36
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('games', '0009_installer_rating'),
]
operations = [
migrations.AlterField(
model_name='installer',
name='rating',
field=models.CharField(blank=True, choices=[(b'platinum', b'Platinum: installs and runs flawlessly'), (b'garbage', b'Garbage: game is not playable'), (b'bronze', b'Bronze: works: but has some issues: even for normal use'), (b'silver', b'Silver: works excellently for "normal" use but some features may be broken'), (b'gold', b'Gold: works flawlessly with some minor tweaking')], max_length=24),
),
]
|
<commit_before><commit_msg>Add migration for rating choices modification<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-15 02:36
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('games', '0009_installer_rating'),
]
operations = [
migrations.AlterField(
model_name='installer',
name='rating',
field=models.CharField(blank=True, choices=[(b'platinum', b'Platinum: installs and runs flawlessly'), (b'garbage', b'Garbage: game is not playable'), (b'bronze', b'Bronze: works: but has some issues: even for normal use'), (b'silver', b'Silver: works excellently for "normal" use but some features may be broken'), (b'gold', b'Gold: works flawlessly with some minor tweaking')], max_length=24),
),
]
|
|
7e307fb6eb8246fdec9fe9f3249f8dff9c89ccd3
|
librisxl-tools/blazegraph/lddb-to-import.py
|
librisxl-tools/blazegraph/lddb-to-import.py
|
from __future__ import unicode_literals, print_function
import sys
import os
import re
CONTEXT_PATH = 'context.jsonld'
args = sys.argv[1:]
basepath = args.pop(0) if args else 'data'
chunksize = int(args.pop(0)) if args else 100 * 1000
outfile = None
def next_outfile(i):
global outfile
fpath = "{}-{}.jsonld".format(basepath, i)
dirname = os.path.dirname(fpath)
if not os.path.exists(dirname):
os.makedirs(dirname)
outfile = open(fpath, 'w')
def process_input(line_stream):
try:
for i, line in enumerate(line_stream):
line = line.strip()
if not line:
continue
if i % chunksize == 0:
if outfile:
print(b']}', file=outfile)
next_outfile(i)
print(b'{"@graph": [', file=outfile)
else:
print(', ', end="", file=outfile)
process_record_line(i, line, outfile)
print(b']}', file=outfile)
finally:
if outfile:
outfile.close()
def process_record_line(i, line, outfile):
# find record id
for rec_id in re.findall(r'{"@graph": \[{"@id": "([^"]+)', line):
break
else:
print("Unable to find an IRI in line {0}:".format(i),
file=sys.stderr)
print(line, file=sys.stderr)
return
# add record id to top graph to name it
line = b'{{"@id": "{0}", {1}'.format(rec_id, line[1:])
# add context reference
line = b'{{"@context": "{0}", {1}'.format(CONTEXT_PATH, line[1:])
# * Fix broken @id values:
# Remove Unicode control characters (mostly harmful in terms and ids)
line = re.sub(r'[\x00-\x1F\x7F]', b'', line)
# TODO: @id values, replace(' ', '+') and replace('\\', r'\\')
# Add "marker" for blank nodes to cope with BlazeGraph limitation
line = re.sub(r'{}', b'{"@id": "owl:Nothing"}', line)
# Or remove empty blank nodes entirely?
#line = re.sub(r',{}|{},?', '', line)
print(line, file=outfile)
if __name__ == '__main__':
process_input(sys.stdin)
|
Add script for turning lines of JSON-LD records into chunked datasets
|
Add script for turning lines of JSON-LD records into chunked datasets
This creates a bunch of "reasonably sized" JSON-LD dataset files with a
given count of named graphs.
It also fixes some things for BlazeGraph to be able to parse the data.
|
Python
|
apache-2.0
|
libris/librisxl,libris/librisxl,libris/librisxl
|
Add script for turning lines of JSON-LD records into chunked datasets
This creates a bunch of "reasonably sized" JSON-LD dataset files with a
given count of named graphs.
It also fixes some things for BlazeGraph to be able to parse the data.
|
from __future__ import unicode_literals, print_function
import sys
import os
import re
CONTEXT_PATH = 'context.jsonld'
args = sys.argv[1:]
basepath = args.pop(0) if args else 'data'
chunksize = int(args.pop(0)) if args else 100 * 1000
outfile = None
def next_outfile(i):
global outfile
fpath = "{}-{}.jsonld".format(basepath, i)
dirname = os.path.dirname(fpath)
if not os.path.exists(dirname):
os.makedirs(dirname)
outfile = open(fpath, 'w')
def process_input(line_stream):
try:
for i, line in enumerate(line_stream):
line = line.strip()
if not line:
continue
if i % chunksize == 0:
if outfile:
print(b']}', file=outfile)
next_outfile(i)
print(b'{"@graph": [', file=outfile)
else:
print(', ', end="", file=outfile)
process_record_line(i, line, outfile)
print(b']}', file=outfile)
finally:
if outfile:
outfile.close()
def process_record_line(i, line, outfile):
# find record id
for rec_id in re.findall(r'{"@graph": \[{"@id": "([^"]+)', line):
break
else:
print("Unable to find an IRI in line {0}:".format(i),
file=sys.stderr)
print(line, file=sys.stderr)
return
# add record id to top graph to name it
line = b'{{"@id": "{0}", {1}'.format(rec_id, line[1:])
# add context reference
line = b'{{"@context": "{0}", {1}'.format(CONTEXT_PATH, line[1:])
# * Fix broken @id values:
# Remove Unicode control characters (mostly harmful in terms and ids)
line = re.sub(r'[\x00-\x1F\x7F]', b'', line)
# TODO: @id values, replace(' ', '+') and replace('\\', r'\\')
# Add "marker" for blank nodes to cope with BlazeGraph limitation
line = re.sub(r'{}', b'{"@id": "owl:Nothing"}', line)
# Or remove empty blank nodes entirely?
#line = re.sub(r',{}|{},?', '', line)
print(line, file=outfile)
if __name__ == '__main__':
process_input(sys.stdin)
|
<commit_before><commit_msg>Add script for turning lines of JSON-LD records into chunked datasets
This creates a bunch of "reasonably sized" JSON-LD dataset files with a
given count of named graphs.
It also fixes some things for BlazeGraph to be able to parse the data.<commit_after>
|
from __future__ import unicode_literals, print_function
import sys
import os
import re
CONTEXT_PATH = 'context.jsonld'
args = sys.argv[1:]
basepath = args.pop(0) if args else 'data'
chunksize = int(args.pop(0)) if args else 100 * 1000
outfile = None
def next_outfile(i):
global outfile
fpath = "{}-{}.jsonld".format(basepath, i)
dirname = os.path.dirname(fpath)
if not os.path.exists(dirname):
os.makedirs(dirname)
outfile = open(fpath, 'w')
def process_input(line_stream):
try:
for i, line in enumerate(line_stream):
line = line.strip()
if not line:
continue
if i % chunksize == 0:
if outfile:
print(b']}', file=outfile)
next_outfile(i)
print(b'{"@graph": [', file=outfile)
else:
print(', ', end="", file=outfile)
process_record_line(i, line, outfile)
print(b']}', file=outfile)
finally:
if outfile:
outfile.close()
def process_record_line(i, line, outfile):
# find record id
for rec_id in re.findall(r'{"@graph": \[{"@id": "([^"]+)', line):
break
else:
print("Unable to find an IRI in line {0}:".format(i),
file=sys.stderr)
print(line, file=sys.stderr)
return
# add record id to top graph to name it
line = b'{{"@id": "{0}", {1}'.format(rec_id, line[1:])
# add context reference
line = b'{{"@context": "{0}", {1}'.format(CONTEXT_PATH, line[1:])
# * Fix broken @id values:
# Remove Unicode control characters (mostly harmful in terms and ids)
line = re.sub(r'[\x00-\x1F\x7F]', b'', line)
# TODO: @id values, replace(' ', '+') and replace('\\', r'\\')
# Add "marker" for blank nodes to cope with BlazeGraph limitation
line = re.sub(r'{}', b'{"@id": "owl:Nothing"}', line)
# Or remove empty blank nodes entirely?
#line = re.sub(r',{}|{},?', '', line)
print(line, file=outfile)
if __name__ == '__main__':
process_input(sys.stdin)
|
Add script for turning lines of JSON-LD records into chunked datasets
This creates a bunch of "reasonably sized" JSON-LD dataset files with a
given count of named graphs.
It also fixes some things for BlazeGraph to be able to parse the data.from __future__ import unicode_literals, print_function
import sys
import os
import re
CONTEXT_PATH = 'context.jsonld'
args = sys.argv[1:]
basepath = args.pop(0) if args else 'data'
chunksize = int(args.pop(0)) if args else 100 * 1000
outfile = None
def next_outfile(i):
global outfile
fpath = "{}-{}.jsonld".format(basepath, i)
dirname = os.path.dirname(fpath)
if not os.path.exists(dirname):
os.makedirs(dirname)
outfile = open(fpath, 'w')
def process_input(line_stream):
try:
for i, line in enumerate(line_stream):
line = line.strip()
if not line:
continue
if i % chunksize == 0:
if outfile:
print(b']}', file=outfile)
next_outfile(i)
print(b'{"@graph": [', file=outfile)
else:
print(', ', end="", file=outfile)
process_record_line(i, line, outfile)
print(b']}', file=outfile)
finally:
if outfile:
outfile.close()
def process_record_line(i, line, outfile):
# find record id
for rec_id in re.findall(r'{"@graph": \[{"@id": "([^"]+)', line):
break
else:
print("Unable to find an IRI in line {0}:".format(i),
file=sys.stderr)
print(line, file=sys.stderr)
return
# add record id to top graph to name it
line = b'{{"@id": "{0}", {1}'.format(rec_id, line[1:])
# add context reference
line = b'{{"@context": "{0}", {1}'.format(CONTEXT_PATH, line[1:])
# * Fix broken @id values:
# Remove Unicode control characters (mostly harmful in terms and ids)
line = re.sub(r'[\x00-\x1F\x7F]', b'', line)
# TODO: @id values, replace(' ', '+') and replace('\\', r'\\')
# Add "marker" for blank nodes to cope with BlazeGraph limitation
line = re.sub(r'{}', b'{"@id": "owl:Nothing"}', line)
# Or remove empty blank nodes entirely?
#line = re.sub(r',{}|{},?', '', line)
print(line, file=outfile)
if __name__ == '__main__':
process_input(sys.stdin)
|
<commit_before><commit_msg>Add script for turning lines of JSON-LD records into chunked datasets
This creates a bunch of "reasonably sized" JSON-LD dataset files with a
given count of named graphs.
It also fixes some things for BlazeGraph to be able to parse the data.<commit_after>from __future__ import unicode_literals, print_function
import sys
import os
import re
CONTEXT_PATH = 'context.jsonld'
args = sys.argv[1:]
basepath = args.pop(0) if args else 'data'
chunksize = int(args.pop(0)) if args else 100 * 1000
outfile = None
def next_outfile(i):
global outfile
fpath = "{}-{}.jsonld".format(basepath, i)
dirname = os.path.dirname(fpath)
if not os.path.exists(dirname):
os.makedirs(dirname)
outfile = open(fpath, 'w')
def process_input(line_stream):
try:
for i, line in enumerate(line_stream):
line = line.strip()
if not line:
continue
if i % chunksize == 0:
if outfile:
print(b']}', file=outfile)
next_outfile(i)
print(b'{"@graph": [', file=outfile)
else:
print(', ', end="", file=outfile)
process_record_line(i, line, outfile)
print(b']}', file=outfile)
finally:
if outfile:
outfile.close()
def process_record_line(i, line, outfile):
# find record id
for rec_id in re.findall(r'{"@graph": \[{"@id": "([^"]+)', line):
break
else:
print("Unable to find an IRI in line {0}:".format(i),
file=sys.stderr)
print(line, file=sys.stderr)
return
# add record id to top graph to name it
line = b'{{"@id": "{0}", {1}'.format(rec_id, line[1:])
# add context reference
line = b'{{"@context": "{0}", {1}'.format(CONTEXT_PATH, line[1:])
# * Fix broken @id values:
# Remove Unicode control characters (mostly harmful in terms and ids)
line = re.sub(r'[\x00-\x1F\x7F]', b'', line)
# TODO: @id values, replace(' ', '+') and replace('\\', r'\\')
# Add "marker" for blank nodes to cope with BlazeGraph limitation
line = re.sub(r'{}', b'{"@id": "owl:Nothing"}', line)
# Or remove empty blank nodes entirely?
#line = re.sub(r',{}|{},?', '', line)
print(line, file=outfile)
if __name__ == '__main__':
process_input(sys.stdin)
|
|
25d53a43576753f1aa0cc6fbaf05ae94dcdec564
|
tmp/cacd2000_split_identities.py
|
tmp/cacd2000_split_identities.py
|
import shutil
import argparse
import os
import sys
def main(args):
src_path_exp = os.path.expanduser(args.src_path)
dst_path_exp = os.path.expanduser(args.dst_path)
if not os.path.exists(dst_path_exp):
os.makedirs(dst_path_exp)
files = os.listdir(src_path_exp)
for f in files:
file_name = '.'.join(f.split('.')[0:-1])
x = file_name.split('_')
dir_name = '_'.join(x[1:-1])
class_dst_path = os.path.join(dst_path_exp, dir_name)
if not os.path.exists(class_dst_path):
os.makedirs(class_dst_path)
src_file_path = os.path.join(src_path_exp, f)
dst_file = os.path.join(class_dst_path, f)
print('%s -> %s' % (src_file_path, dst_file))
shutil.copyfile(src_file_path, dst_file)
def parse_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument('src_path', type=str, help='Path to the source directory.')
parser.add_argument('dst_path', type=str, help='Path to the destination directory.')
return parser.parse_args(argv)
if __name__ == '__main__':
main(parse_arguments(sys.argv[1:]))
|
Split CACD2000 dataset into one directory per identity
|
Split CACD2000 dataset into one directory per identity
|
Python
|
mit
|
davidsandberg/facenet,wangxianliang/facenet,wangxianliang/facenet,liuzz1983/open_vision,davidsandberg/facenet
|
Split CACD2000 dataset into one directory per identity
|
import shutil
import argparse
import os
import sys
def main(args):
src_path_exp = os.path.expanduser(args.src_path)
dst_path_exp = os.path.expanduser(args.dst_path)
if not os.path.exists(dst_path_exp):
os.makedirs(dst_path_exp)
files = os.listdir(src_path_exp)
for f in files:
file_name = '.'.join(f.split('.')[0:-1])
x = file_name.split('_')
dir_name = '_'.join(x[1:-1])
class_dst_path = os.path.join(dst_path_exp, dir_name)
if not os.path.exists(class_dst_path):
os.makedirs(class_dst_path)
src_file_path = os.path.join(src_path_exp, f)
dst_file = os.path.join(class_dst_path, f)
print('%s -> %s' % (src_file_path, dst_file))
shutil.copyfile(src_file_path, dst_file)
def parse_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument('src_path', type=str, help='Path to the source directory.')
parser.add_argument('dst_path', type=str, help='Path to the destination directory.')
return parser.parse_args(argv)
if __name__ == '__main__':
main(parse_arguments(sys.argv[1:]))
|
<commit_before><commit_msg>Split CACD2000 dataset into one directory per identity<commit_after>
|
import shutil
import argparse
import os
import sys
def main(args):
src_path_exp = os.path.expanduser(args.src_path)
dst_path_exp = os.path.expanduser(args.dst_path)
if not os.path.exists(dst_path_exp):
os.makedirs(dst_path_exp)
files = os.listdir(src_path_exp)
for f in files:
file_name = '.'.join(f.split('.')[0:-1])
x = file_name.split('_')
dir_name = '_'.join(x[1:-1])
class_dst_path = os.path.join(dst_path_exp, dir_name)
if not os.path.exists(class_dst_path):
os.makedirs(class_dst_path)
src_file_path = os.path.join(src_path_exp, f)
dst_file = os.path.join(class_dst_path, f)
print('%s -> %s' % (src_file_path, dst_file))
shutil.copyfile(src_file_path, dst_file)
def parse_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument('src_path', type=str, help='Path to the source directory.')
parser.add_argument('dst_path', type=str, help='Path to the destination directory.')
return parser.parse_args(argv)
if __name__ == '__main__':
main(parse_arguments(sys.argv[1:]))
|
Split CACD2000 dataset into one directory per identityimport shutil
import argparse
import os
import sys
def main(args):
src_path_exp = os.path.expanduser(args.src_path)
dst_path_exp = os.path.expanduser(args.dst_path)
if not os.path.exists(dst_path_exp):
os.makedirs(dst_path_exp)
files = os.listdir(src_path_exp)
for f in files:
file_name = '.'.join(f.split('.')[0:-1])
x = file_name.split('_')
dir_name = '_'.join(x[1:-1])
class_dst_path = os.path.join(dst_path_exp, dir_name)
if not os.path.exists(class_dst_path):
os.makedirs(class_dst_path)
src_file_path = os.path.join(src_path_exp, f)
dst_file = os.path.join(class_dst_path, f)
print('%s -> %s' % (src_file_path, dst_file))
shutil.copyfile(src_file_path, dst_file)
def parse_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument('src_path', type=str, help='Path to the source directory.')
parser.add_argument('dst_path', type=str, help='Path to the destination directory.')
return parser.parse_args(argv)
if __name__ == '__main__':
main(parse_arguments(sys.argv[1:]))
|
<commit_before><commit_msg>Split CACD2000 dataset into one directory per identity<commit_after>import shutil
import argparse
import os
import sys
def main(args):
src_path_exp = os.path.expanduser(args.src_path)
dst_path_exp = os.path.expanduser(args.dst_path)
if not os.path.exists(dst_path_exp):
os.makedirs(dst_path_exp)
files = os.listdir(src_path_exp)
for f in files:
file_name = '.'.join(f.split('.')[0:-1])
x = file_name.split('_')
dir_name = '_'.join(x[1:-1])
class_dst_path = os.path.join(dst_path_exp, dir_name)
if not os.path.exists(class_dst_path):
os.makedirs(class_dst_path)
src_file_path = os.path.join(src_path_exp, f)
dst_file = os.path.join(class_dst_path, f)
print('%s -> %s' % (src_file_path, dst_file))
shutil.copyfile(src_file_path, dst_file)
def parse_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument('src_path', type=str, help='Path to the source directory.')
parser.add_argument('dst_path', type=str, help='Path to the destination directory.')
return parser.parse_args(argv)
if __name__ == '__main__':
main(parse_arguments(sys.argv[1:]))
|
|
c55d917b28c41d363e2dea8ecaf750a431f016da
|
migrations/versions/0364_drop_old_column.py
|
migrations/versions/0364_drop_old_column.py
|
"""
Revision ID: 0364_drop_old_column
Revises: 0363_cancelled_by_api_key
Create Date: 2022-01-25 18:05:27.750234
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
revision = '0364_drop_old_column'
down_revision = '0363_cancelled_by_api_key'
def upgrade():
# move data over
op.execute("UPDATE broadcast_message SET created_by_api_key_id=api_key_id WHERE created_by_api_key_id IS NULL")
op.create_check_constraint(
"ck_broadcast_message_created_by_not_null",
"broadcast_message",
"created_by_id is not null or created_by_api_key_id is not null"
)
op.drop_column('broadcast_message', 'api_key_id')
def downgrade():
op.add_column('broadcast_message', sa.Column('api_key_id', postgresql.UUID(), autoincrement=False, nullable=True))
op.execute("UPDATE broadcast_message SET api_key_id=created_by_api_key_id") # move data over
op.drop_constraint(
"ck_broadcast_message_created_by_not_null",
"broadcast_message"
)
|
Drop api_key_id column from broadcast_message table
|
Drop api_key_id column from broadcast_message table
This column has been superseded by a new column named
created_by_api_key_id.
Also create constraint checking that we know who created broadcast
Also move data so that constraint is met before instantiating it.
|
Python
|
mit
|
alphagov/notifications-api,alphagov/notifications-api
|
Drop api_key_id column from broadcast_message table
This column has been superseded by a new column named
created_by_api_key_id.
Also create constraint checking that we know who created broadcast
Also move data so that constraint is met before instantiating it.
|
"""
Revision ID: 0364_drop_old_column
Revises: 0363_cancelled_by_api_key
Create Date: 2022-01-25 18:05:27.750234
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
revision = '0364_drop_old_column'
down_revision = '0363_cancelled_by_api_key'
def upgrade():
# move data over
op.execute("UPDATE broadcast_message SET created_by_api_key_id=api_key_id WHERE created_by_api_key_id IS NULL")
op.create_check_constraint(
"ck_broadcast_message_created_by_not_null",
"broadcast_message",
"created_by_id is not null or created_by_api_key_id is not null"
)
op.drop_column('broadcast_message', 'api_key_id')
def downgrade():
op.add_column('broadcast_message', sa.Column('api_key_id', postgresql.UUID(), autoincrement=False, nullable=True))
op.execute("UPDATE broadcast_message SET api_key_id=created_by_api_key_id") # move data over
op.drop_constraint(
"ck_broadcast_message_created_by_not_null",
"broadcast_message"
)
|
<commit_before><commit_msg>Drop api_key_id column from broadcast_message table
This column has been superseded by a new column named
created_by_api_key_id.
Also create constraint checking that we know who created broadcast
Also move data so that constraint is met before instantiating it.<commit_after>
|
"""
Revision ID: 0364_drop_old_column
Revises: 0363_cancelled_by_api_key
Create Date: 2022-01-25 18:05:27.750234
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
revision = '0364_drop_old_column'
down_revision = '0363_cancelled_by_api_key'
def upgrade():
# move data over
op.execute("UPDATE broadcast_message SET created_by_api_key_id=api_key_id WHERE created_by_api_key_id IS NULL")
op.create_check_constraint(
"ck_broadcast_message_created_by_not_null",
"broadcast_message",
"created_by_id is not null or created_by_api_key_id is not null"
)
op.drop_column('broadcast_message', 'api_key_id')
def downgrade():
op.add_column('broadcast_message', sa.Column('api_key_id', postgresql.UUID(), autoincrement=False, nullable=True))
op.execute("UPDATE broadcast_message SET api_key_id=created_by_api_key_id") # move data over
op.drop_constraint(
"ck_broadcast_message_created_by_not_null",
"broadcast_message"
)
|
Drop api_key_id column from broadcast_message table
This column has been superseded by a new column named
created_by_api_key_id.
Also create constraint checking that we know who created broadcast
Also move data so that constraint is met before instatiating it."""
Revision ID: 0364_drop_old_column
Revises: 0363_cancelled_by_api_key
Create Date: 2022-01-25 18:05:27.750234
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
revision = '0364_drop_old_column'
down_revision = '0363_cancelled_by_api_key'
def upgrade():
# move data over
op.execute("UPDATE broadcast_message SET created_by_api_key_id=api_key_id WHERE created_by_api_key_id IS NULL")
op.create_check_constraint(
"ck_broadcast_message_created_by_not_null",
"broadcast_message",
"created_by_id is not null or created_by_api_key_id is not null"
)
op.drop_column('broadcast_message', 'api_key_id')
def downgrade():
op.add_column('broadcast_message', sa.Column('api_key_id', postgresql.UUID(), autoincrement=False, nullable=True))
op.execute("UPDATE broadcast_message SET api_key_id=created_by_api_key_id") # move data over
op.drop_constraint(
"ck_broadcast_message_created_by_not_null",
"broadcast_message"
)
|
<commit_before><commit_msg>Drop api_key_id column from broadcast_message table
This column has been superseded by a new column named
created_by_api_key_id.
Also create constraint checking that we know who created broadcast
Also move data so that constraint is met before instatiating it.<commit_after>"""
Revision ID: 0364_drop_old_column
Revises: 0363_cancelled_by_api_key
Create Date: 2022-01-25 18:05:27.750234
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
revision = '0364_drop_old_column'
down_revision = '0363_cancelled_by_api_key'
def upgrade():
# move data over
op.execute("UPDATE broadcast_message SET created_by_api_key_id=api_key_id WHERE created_by_api_key_id IS NULL")
op.create_check_constraint(
"ck_broadcast_message_created_by_not_null",
"broadcast_message",
"created_by_id is not null or created_by_api_key_id is not null"
)
op.drop_column('broadcast_message', 'api_key_id')
def downgrade():
op.add_column('broadcast_message', sa.Column('api_key_id', postgresql.UUID(), autoincrement=False, nullable=True))
op.execute("UPDATE broadcast_message SET api_key_id=created_by_api_key_id") # move data over
op.drop_constraint(
"ck_broadcast_message_created_by_not_null",
"broadcast_message"
)
|
|
2b9830d89fd1c7aef5deb5bd16a7f6a26ea8e682
|
data/mongorandomgraph.py
|
data/mongorandomgraph.py
|
import bson.json_util
from bson.objectid import ObjectId
import itertools
import random
import string
import sys
def emit_node(name):
oid = ObjectId()
print bson.json_util.dumps({"_id": oid,
"type": "node",
"data": {"name": name}})
return oid
def emit_link(oid1, oid2, undirected=False):
oid = ObjectId()
record = {"_id": oid,
"type": "link",
"source": oid1,
"target": oid2,
"data": {}}
if undirected:
record["undirected"] = True
print bson.json_util.dumps(record)
def main():
table = {letter: emit_node(letter) for letter in string.ascii_lowercase}
for (a, b) in itertools.product(string.ascii_lowercase, repeat=2):
if a == b:
continue
if random.random() > 0.2:
undirected = random.random() > 0.5
emit_link(table[a], table[b], undirected)
if __name__ == "__main__":
sys.exit(main())
|
Add script to generate test mongo data
|
Add script to generate test mongo data
|
Python
|
apache-2.0
|
XDATA-Year-3/clique,XDATA-Year-3/clique,Kitware/clique,Kitware/clique,Kitware/clique,XDATA-Year-3/clique
|
Add script to generate test mongo data
|
import bson.json_util
from bson.objectid import ObjectId
import itertools
import random
import string
import sys
def emit_node(name):
oid = ObjectId()
print bson.json_util.dumps({"_id": oid,
"type": "node",
"data": {"name": name}})
return oid
def emit_link(oid1, oid2, undirected=False):
oid = ObjectId()
record = {"_id": oid,
"type": "link",
"source": oid1,
"target": oid2,
"data": {}}
if undirected:
record["undirected"] = True
print bson.json_util.dumps(record)
def main():
table = {letter: emit_node(letter) for letter in string.ascii_lowercase}
for (a, b) in itertools.product(string.ascii_lowercase, repeat=2):
if a == b:
continue
if random.random() > 0.2:
undirected = random.random() > 0.5
emit_link(table[a], table[b], undirected)
if __name__ == "__main__":
sys.exit(main())
|
<commit_before><commit_msg>Add script to generate test mongo data<commit_after>
|
import bson.json_util
from bson.objectid import ObjectId
import itertools
import random
import string
import sys
def emit_node(name):
oid = ObjectId()
print bson.json_util.dumps({"_id": oid,
"type": "node",
"data": {"name": name}})
return oid
def emit_link(oid1, oid2, undirected=False):
oid = ObjectId()
record = {"_id": oid,
"type": "link",
"source": oid1,
"target": oid2,
"data": {}}
if undirected:
record["undirected"] = True
print bson.json_util.dumps(record)
def main():
table = {letter: emit_node(letter) for letter in string.ascii_lowercase}
for (a, b) in itertools.product(string.ascii_lowercase, repeat=2):
if a == b:
continue
if random.random() > 0.2:
undirected = random.random() > 0.5
emit_link(table[a], table[b], undirected)
if __name__ == "__main__":
sys.exit(main())
|
Add script to generate test mongo dataimport bson.json_util
from bson.objectid import ObjectId
import itertools
import random
import string
import sys
def emit_node(name):
oid = ObjectId()
print bson.json_util.dumps({"_id": oid,
"type": "node",
"data": {"name": name}})
return oid
def emit_link(oid1, oid2, undirected=False):
oid = ObjectId()
record = {"_id": oid,
"type": "link",
"source": oid1,
"target": oid2,
"data": {}}
if undirected:
record["undirected"] = True
print bson.json_util.dumps(record)
def main():
table = {letter: emit_node(letter) for letter in string.ascii_lowercase}
for (a, b) in itertools.product(string.ascii_lowercase, repeat=2):
if a == b:
continue
if random.random() > 0.2:
undirected = random.random() > 0.5
emit_link(table[a], table[b], undirected)
if __name__ == "__main__":
sys.exit(main())
|
<commit_before><commit_msg>Add script to generate test mongo data<commit_after>import bson.json_util
from bson.objectid import ObjectId
import itertools
import random
import string
import sys
def emit_node(name):
oid = ObjectId()
print bson.json_util.dumps({"_id": oid,
"type": "node",
"data": {"name": name}})
return oid
def emit_link(oid1, oid2, undirected=False):
oid = ObjectId()
record = {"_id": oid,
"type": "link",
"source": oid1,
"target": oid2,
"data": {}}
if undirected:
record["undirected"] = True
print bson.json_util.dumps(record)
def main():
table = {letter: emit_node(letter) for letter in string.ascii_lowercase}
for (a, b) in itertools.product(string.ascii_lowercase, repeat=2):
if a == b:
continue
if random.random() > 0.2:
undirected = random.random() > 0.5
emit_link(table[a], table[b], undirected)
if __name__ == "__main__":
sys.exit(main())
|
|
6dba942d41c38d301f225627aae318910d139eb0
|
scripts/create_pca_component_overlay.py
|
scripts/create_pca_component_overlay.py
|
# Generate overlay corresponding to 2nd PCA component
# which serves as a proxy for senescence
import csv
from collections import defaultdict
import dtoolcore
import click
import numpy as np
def calc_pca_components(all_entries):
rgb_matrix = np.transpose(np.array(
[
map(float, [entry['R'], entry['G'], entry['B']])
for entry in all_entries
]
))
cov = np.cov(rgb_matrix)
evalues, evectors = np.linalg.eig(cov)
return evectors.T
def calc_senescence(entry, pca_rotation):
c_R = pca_rotation[0] * float(entry['R'])
c_G = pca_rotation[1] * float(entry['G'])
c_B = pca_rotation[2] * float(entry['B'])
return c_R + c_G + c_B
def find_senescence_values_by_plot_and_date(results):
pca_components = calc_pca_components(results)
pca_component_2 = pca_components[1]
by_plot_then_date = defaultdict(dict)
for entry in results:
senescence = calc_senescence(entry, pca_component_2)
by_plot_then_date[entry['plot']][entry['date']] = senescence
return by_plot_then_date
def generate_pca_overlay(dataset, results):
senescence_values = find_senescence_values_by_plot_and_date(results)
plot_number_overlay = dataset.get_overlay('plot_number')
ordering_overlay = dataset.get_overlay('ordering')
date_overlay = dataset.get_overlay('date')
pca_overlay = {}
for identifier in dataset.identifiers:
label = "{}_{}".format(
plot_number_overlay[identifier],
ordering_overlay[identifier]
)
date = date_overlay[identifier]
try:
senescence = senescence_values[label][date]
except KeyError:
senescence = None
pca_overlay[identifier] = senescence
dataset.put_overlay('pca_component_2', pca_overlay)
def load_output_csv_data(results_file):
with open(results_file, 'r') as fh:
reader = csv.DictReader(fh)
all_entries = [row for row in reader]
return all_entries
@click.command()
@click.argument('dataset_uri')
@click.argument('results_csv_file')
def main(dataset_uri, results_csv_file):
dataset = dtoolcore.DataSet.from_uri(dataset_uri)
results = load_output_csv_data(results_csv_file)
generate_pca_overlay(dataset, results)
if __name__ == '__main__':
main()
|
Add script to create overlay on individual plots for senescence
|
Add script to create overlay on individual plots for senescence
|
Python
|
mit
|
JIC-Image-Analysis/senescence-in-field,JIC-Image-Analysis/senescence-in-field,JIC-Image-Analysis/senescence-in-field
|
Add script to create overlay on individual plots for senescence
|
# Generate overlay corresponding to 2nd PCA component
# which serves as a proxy for senescence
import csv
from collections import defaultdict
import dtoolcore
import click
import numpy as np
def calc_pca_components(all_entries):
rgb_matrix = np.transpose(np.array(
[
map(float, [entry['R'], entry['G'], entry['B']])
for entry in all_entries
]
))
cov = np.cov(rgb_matrix)
evalues, evectors = np.linalg.eig(cov)
return evectors.T
def calc_senescence(entry, pca_rotation):
c_R = pca_rotation[0] * float(entry['R'])
c_G = pca_rotation[1] * float(entry['G'])
c_B = pca_rotation[2] * float(entry['B'])
return c_R + c_G + c_B
def find_senescence_values_by_plot_and_date(results):
pca_components = calc_pca_components(results)
pca_component_2 = pca_components[1]
by_plot_then_date = defaultdict(dict)
for entry in results:
senescence = calc_senescence(entry, pca_component_2)
by_plot_then_date[entry['plot']][entry['date']] = senescence
return by_plot_then_date
def generate_pca_overlay(dataset, results):
senescence_values = find_senescence_values_by_plot_and_date(results)
plot_number_overlay = dataset.get_overlay('plot_number')
ordering_overlay = dataset.get_overlay('ordering')
date_overlay = dataset.get_overlay('date')
pca_overlay = {}
for identifier in dataset.identifiers:
label = "{}_{}".format(
plot_number_overlay[identifier],
ordering_overlay[identifier]
)
date = date_overlay[identifier]
try:
senescence = senescence_values[label][date]
except KeyError:
senescence = None
pca_overlay[identifier] = senescence
dataset.put_overlay('pca_component_2', pca_overlay)
def load_output_csv_data(results_file):
with open(results_file, 'r') as fh:
reader = csv.DictReader(fh)
all_entries = [row for row in reader]
return all_entries
@click.command()
@click.argument('dataset_uri')
@click.argument('results_csv_file')
def main(dataset_uri, results_csv_file):
dataset = dtoolcore.DataSet.from_uri(dataset_uri)
results = load_output_csv_data(results_csv_file)
generate_pca_overlay(dataset, results)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to create overlay on individual plots for senescence<commit_after>
|
# Generate overlay corresponding to 2nd PCA component
# which serves as a proxy for senescence
import csv
from collections import defaultdict
import dtoolcore
import click
import numpy as np
def calc_pca_components(all_entries):
rgb_matrix = np.transpose(np.array(
[
map(float, [entry['R'], entry['G'], entry['B']])
for entry in all_entries
]
))
cov = np.cov(rgb_matrix)
evalues, evectors = np.linalg.eig(cov)
return evectors.T
def calc_senescence(entry, pca_rotation):
c_R = pca_rotation[0] * float(entry['R'])
c_G = pca_rotation[1] * float(entry['G'])
c_B = pca_rotation[2] * float(entry['B'])
return c_R + c_G + c_B
def find_senescence_values_by_plot_and_date(results):
pca_components = calc_pca_components(results)
pca_component_2 = pca_components[1]
by_plot_then_date = defaultdict(dict)
for entry in results:
senescence = calc_senescence(entry, pca_component_2)
by_plot_then_date[entry['plot']][entry['date']] = senescence
return by_plot_then_date
def generate_pca_overlay(dataset, results):
senescence_values = find_senescence_values_by_plot_and_date(results)
plot_number_overlay = dataset.get_overlay('plot_number')
ordering_overlay = dataset.get_overlay('ordering')
date_overlay = dataset.get_overlay('date')
pca_overlay = {}
for identifier in dataset.identifiers:
label = "{}_{}".format(
plot_number_overlay[identifier],
ordering_overlay[identifier]
)
date = date_overlay[identifier]
try:
senescence = senescence_values[label][date]
except KeyError:
senescence = None
pca_overlay[identifier] = senescence
dataset.put_overlay('pca_component_2', pca_overlay)
def load_output_csv_data(results_file):
with open(results_file, 'r') as fh:
reader = csv.DictReader(fh)
all_entries = [row for row in reader]
return all_entries
@click.command()
@click.argument('dataset_uri')
@click.argument('results_csv_file')
def main(dataset_uri, results_csv_file):
dataset = dtoolcore.DataSet.from_uri(dataset_uri)
results = load_output_csv_data(results_csv_file)
generate_pca_overlay(dataset, results)
if __name__ == '__main__':
main()
|
Add script to create overlay on individual plots for senescence# Generate overlay corresponding to 2nd PCA component
# which serves as a proxy for senescence
import csv
from collections import defaultdict
import dtoolcore
import click
import numpy as np
def calc_pca_components(all_entries):
rgb_matrix = np.transpose(np.array(
[
map(float, [entry['R'], entry['G'], entry['B']])
for entry in all_entries
]
))
cov = np.cov(rgb_matrix)
evalues, evectors = np.linalg.eig(cov)
return evectors.T
def calc_senescence(entry, pca_rotation):
c_R = pca_rotation[0] * float(entry['R'])
c_G = pca_rotation[1] * float(entry['G'])
c_B = pca_rotation[2] * float(entry['B'])
return c_R + c_G + c_B
def find_senescence_values_by_plot_and_date(results):
pca_components = calc_pca_components(results)
pca_component_2 = pca_components[1]
by_plot_then_date = defaultdict(dict)
for entry in results:
senescence = calc_senescence(entry, pca_component_2)
by_plot_then_date[entry['plot']][entry['date']] = senescence
return by_plot_then_date
def generate_pca_overlay(dataset, results):
senescence_values = find_senescence_values_by_plot_and_date(results)
plot_number_overlay = dataset.get_overlay('plot_number')
ordering_overlay = dataset.get_overlay('ordering')
date_overlay = dataset.get_overlay('date')
pca_overlay = {}
for identifier in dataset.identifiers:
label = "{}_{}".format(
plot_number_overlay[identifier],
ordering_overlay[identifier]
)
date = date_overlay[identifier]
try:
senescence = senescence_values[label][date]
except KeyError:
senescence = None
pca_overlay[identifier] = senescence
dataset.put_overlay('pca_component_2', pca_overlay)
def load_output_csv_data(results_file):
with open(results_file, 'r') as fh:
reader = csv.DictReader(fh)
all_entries = [row for row in reader]
return all_entries
@click.command()
@click.argument('dataset_uri')
@click.argument('results_csv_file')
def main(dataset_uri, results_csv_file):
dataset = dtoolcore.DataSet.from_uri(dataset_uri)
results = load_output_csv_data(results_csv_file)
generate_pca_overlay(dataset, results)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to create overlay on individual plots for senescence<commit_after># Generate overlay corresponding to 2nd PCA component
# which serves as a proxy for senescence
import csv
from collections import defaultdict
import dtoolcore
import click
import numpy as np
def calc_pca_components(all_entries):
rgb_matrix = np.transpose(np.array(
[
map(float, [entry['R'], entry['G'], entry['B']])
for entry in all_entries
]
))
cov = np.cov(rgb_matrix)
evalues, evectors = np.linalg.eig(cov)
return evectors.T
def calc_senescence(entry, pca_rotation):
c_R = pca_rotation[0] * float(entry['R'])
c_G = pca_rotation[1] * float(entry['G'])
c_B = pca_rotation[2] * float(entry['B'])
return c_R + c_G + c_B
def find_senescence_values_by_plot_and_date(results):
pca_components = calc_pca_components(results)
pca_component_2 = pca_components[1]
by_plot_then_date = defaultdict(dict)
for entry in results:
senescence = calc_senescence(entry, pca_component_2)
by_plot_then_date[entry['plot']][entry['date']] = senescence
return by_plot_then_date
def generate_pca_overlay(dataset, results):
senescence_values = find_senescence_values_by_plot_and_date(results)
plot_number_overlay = dataset.get_overlay('plot_number')
ordering_overlay = dataset.get_overlay('ordering')
date_overlay = dataset.get_overlay('date')
pca_overlay = {}
for identifier in dataset.identifiers:
label = "{}_{}".format(
plot_number_overlay[identifier],
ordering_overlay[identifier]
)
date = date_overlay[identifier]
try:
senescence = senescence_values[label][date]
except KeyError:
senescence = None
pca_overlay[identifier] = senescence
dataset.put_overlay('pca_component_2', pca_overlay)
def load_output_csv_data(results_file):
with open(results_file, 'r') as fh:
reader = csv.DictReader(fh)
all_entries = [row for row in reader]
return all_entries
@click.command()
@click.argument('dataset_uri')
@click.argument('results_csv_file')
def main(dataset_uri, results_csv_file):
dataset = dtoolcore.DataSet.from_uri(dataset_uri)
results = load_output_csv_data(results_csv_file)
generate_pca_overlay(dataset, results)
if __name__ == '__main__':
main()
|
|
4c0273b38437302526457c90a142efd465d8addd
|
tests/algebra/test_abstract_quantum_algebra.py
|
tests/algebra/test_abstract_quantum_algebra.py
|
from qnet import (
One, Zero, ZeroOperator, IdentityOperator, ZeroSuperOperator,
IdentitySuperOperator, ZeroKet, TrivialKet, FullSpace, TrivialSpace,
CIdentity, CircuitZero)
def test_neutral_elements():
"""test the properties of the neutral elements in the quantum algebras.
This tests the resolution of #63
*Only* the scalar neutral elements compare to and hash as 0 and 1. The
neutral elements of all other algebras are "unique" and don't compare to 0
and 1. Elements of a quantum algebra have an is_zero attribute
"""
assert One == 1
assert One is not 1
assert hash(One) == hash(1)
assert Zero == 0
assert Zero is not 0
assert hash(Zero) == hash(0)
assert Zero.is_zero
assert IdentityOperator != 1
assert hash(IdentityOperator) != hash(1)
assert ZeroOperator != 0
assert hash(ZeroOperator) != hash(0)
assert ZeroOperator.is_zero
assert IdentitySuperOperator != 1
assert hash(IdentitySuperOperator) != hash(1)
assert ZeroSuperOperator != 0
assert hash(ZeroSuperOperator) != hash(0)
assert ZeroSuperOperator.is_zero
assert TrivialKet != 1
assert hash(TrivialKet) != hash(1)
assert ZeroKet != 0
assert hash(ZeroKet) != hash(0)
assert ZeroKet.is_zero
# the remainder are not quantum algebra elements, to they don't have
# is_zero
assert FullSpace != 1
assert hash(FullSpace) != hash(1)
assert TrivialSpace != 0
assert hash(TrivialSpace) != hash(0)
assert CIdentity != 1
assert hash(CIdentity) != hash(1)
assert CircuitZero != 0
assert hash(CircuitZero) != hash(0)
|
Test equality and hashing of neutral elements
|
Test equality and hashing of neutral elements
With the implementation of the scalar algebra it turns out that the way
to go is to have only the scalar Zero and One equal to 0 and 1. In every
other algebra, the neutral elements have no relation to the scalar 0 and
1, or to the neutral elements of other algebras.
However, elements of "quantum" algebras (anything that has a Hilbert
space, including in fact the scalar algebra on the TrivialSpace) have an
is_zero attribute to compare to the abstract zero.
This closes #63
|
Python
|
mit
|
mabuchilab/QNET
|
Test equality and hashing of neutral elements
With the implementation of the scalar algebra it turns out that the way
to go is to have only the scalar Zero and One equal to 0 and 1. In every
other algebra, the neutral elements have no relation to the scalar 0 and
1, or to the neutral elements of other algebras.
However, elements of "quantum" algebras (anything that has a Hilbert
space, including in fact the scalar algebra on the TrivialSpace) have an
is_zero attribute to compare to the abstract zero.
This closes #63
|
from qnet import (
One, Zero, ZeroOperator, IdentityOperator, ZeroSuperOperator,
IdentitySuperOperator, ZeroKet, TrivialKet, FullSpace, TrivialSpace,
CIdentity, CircuitZero)
def test_neutral_elements():
"""test the properties of the neutral elements in the quantum algebras.
This tests the resolution of #63
*Only* the scalar neutral elements compare to and hash as 0 and 1. The
neutral elements of all other algebras are "unique" and don't compare to 0
and 1. Elements of a quantum algebra have an is_zero attribute
"""
assert One == 1
assert One is not 1
assert hash(One) == hash(1)
assert Zero == 0
assert Zero is not 0
assert hash(Zero) == hash(0)
assert Zero.is_zero
assert IdentityOperator != 1
assert hash(IdentityOperator) != hash(1)
assert ZeroOperator != 0
assert hash(ZeroOperator) != hash(0)
assert ZeroOperator.is_zero
assert IdentitySuperOperator != 1
assert hash(IdentitySuperOperator) != hash(1)
assert ZeroSuperOperator != 0
assert hash(ZeroSuperOperator) != hash(0)
assert ZeroSuperOperator.is_zero
assert TrivialKet != 1
assert hash(TrivialKet) != hash(1)
assert ZeroKet != 0
assert hash(ZeroKet) != hash(0)
assert ZeroKet.is_zero
# the remainder are not quantum algebra elements, to they don't have
# is_zero
assert FullSpace != 1
assert hash(FullSpace) != hash(1)
assert TrivialSpace != 0
assert hash(TrivialSpace) != hash(0)
assert CIdentity != 1
assert hash(CIdentity) != hash(1)
assert CircuitZero != 0
assert hash(CircuitZero) != hash(0)
|
<commit_before><commit_msg>Test equality and hashing of neutral elements
With the implementation of the scalar algebra it turns out that the way
to go is to have only the scalar Zero and One equal to 0 and 1. In every
other algebra, the neutral elements have no relation to the scalar 0 and
1, or to the neutral elements of other algebras.
However, elements of "quantum" algebras (anything that has a Hilbert
space, including in fact the scalar algebra on the TrivialSpace) have an
is_zero attribute to compare to the abstract zero.
This closes #63<commit_after>
|
from qnet import (
One, Zero, ZeroOperator, IdentityOperator, ZeroSuperOperator,
IdentitySuperOperator, ZeroKet, TrivialKet, FullSpace, TrivialSpace,
CIdentity, CircuitZero)
def test_neutral_elements():
"""test the properties of the neutral elements in the quantum algebras.
This tests the resolution of #63
*Only* the scalar neutral elements compare to and hash as 0 and 1. The
neutral elements of all other algebras are "unique" and don't compare to 0
and 1. Elements of a quantum algebra have an is_zero attribute
"""
assert One == 1
assert One is not 1
assert hash(One) == hash(1)
assert Zero == 0
assert Zero is not 0
assert hash(Zero) == hash(0)
assert Zero.is_zero
assert IdentityOperator != 1
assert hash(IdentityOperator) != hash(1)
assert ZeroOperator != 0
assert hash(ZeroOperator) != hash(0)
assert ZeroOperator.is_zero
assert IdentitySuperOperator != 1
assert hash(IdentitySuperOperator) != hash(1)
assert ZeroSuperOperator != 0
assert hash(ZeroSuperOperator) != hash(0)
assert ZeroSuperOperator.is_zero
assert TrivialKet != 1
assert hash(TrivialKet) != hash(1)
assert ZeroKet != 0
assert hash(ZeroKet) != hash(0)
assert ZeroKet.is_zero
# the remainder are not quantum algebra elements, to they don't have
# is_zero
assert FullSpace != 1
assert hash(FullSpace) != hash(1)
assert TrivialSpace != 0
assert hash(TrivialSpace) != hash(0)
assert CIdentity != 1
assert hash(CIdentity) != hash(1)
assert CircuitZero != 0
assert hash(CircuitZero) != hash(0)
|
Test equality and hashing of neutral elements
With the implementation of the scalar algebra it turns out that the way
to go is to have only the scalar Zero and One equal to 0 and 1. In every
other algebra, the neutral elements have no relation to the scalar 0 and
1, or to the neutral elements of other algebras.
However, elements of "quantum" algebras (anything that has a Hilbert
space, including in fact the scalar algebra on the TrivialSpace) have an
is_zero attribute to compare to the abstract zero.
This closes #63from qnet import (
One, Zero, ZeroOperator, IdentityOperator, ZeroSuperOperator,
IdentitySuperOperator, ZeroKet, TrivialKet, FullSpace, TrivialSpace,
CIdentity, CircuitZero)
def test_neutral_elements():
"""test the properties of the neutral elements in the quantum algebras.
This tests the resolution of #63
*Only* the scalar neutral elements compare to and hash as 0 and 1. The
neutral elements of all other algebras are "unique" and don't compare to 0
and 1. Elements of a quantum algebra have an is_zero attribute
"""
assert One == 1
assert One is not 1
assert hash(One) == hash(1)
assert Zero == 0
assert Zero is not 0
assert hash(Zero) == hash(0)
assert Zero.is_zero
assert IdentityOperator != 1
assert hash(IdentityOperator) != hash(1)
assert ZeroOperator != 0
assert hash(ZeroOperator) != hash(0)
assert ZeroOperator.is_zero
assert IdentitySuperOperator != 1
assert hash(IdentitySuperOperator) != hash(1)
assert ZeroSuperOperator != 0
assert hash(ZeroSuperOperator) != hash(0)
assert ZeroSuperOperator.is_zero
assert TrivialKet != 1
assert hash(TrivialKet) != hash(1)
assert ZeroKet != 0
assert hash(ZeroKet) != hash(0)
assert ZeroKet.is_zero
# the remainder are not quantum algebra elements, to they don't have
# is_zero
assert FullSpace != 1
assert hash(FullSpace) != hash(1)
assert TrivialSpace != 0
assert hash(TrivialSpace) != hash(0)
assert CIdentity != 1
assert hash(CIdentity) != hash(1)
assert CircuitZero != 0
assert hash(CircuitZero) != hash(0)
|
<commit_before><commit_msg>Test equality and hashing of neutral elements
With the implementation of the scalar algebra it turns out that the way
to go is to have only the scalar Zero and One equal to 0 and 1. In every
other algebra, the neutral elements have no relation to the scalar 0 and
1, or to the neutral elements of other algebras.
However, elements of "quantum" algebras (anything that has a Hilbert
space, including in fact the scalar algebra on the TrivialSpace) have an
is_zero attribute to compare to the abstract zero.
This closes #63<commit_after>from qnet import (
One, Zero, ZeroOperator, IdentityOperator, ZeroSuperOperator,
IdentitySuperOperator, ZeroKet, TrivialKet, FullSpace, TrivialSpace,
CIdentity, CircuitZero)
def test_neutral_elements():
"""test the properties of the neutral elements in the quantum algebras.
This tests the resolution of #63
*Only* the scalar neutral elements compare to and hash as 0 and 1. The
neutral elements of all other algebras are "unique" and don't compare to 0
and 1. Elements of a quantum algebra have an is_zero attribute
"""
assert One == 1
assert One is not 1
assert hash(One) == hash(1)
assert Zero == 0
assert Zero is not 0
assert hash(Zero) == hash(0)
assert Zero.is_zero
assert IdentityOperator != 1
assert hash(IdentityOperator) != hash(1)
assert ZeroOperator != 0
assert hash(ZeroOperator) != hash(0)
assert ZeroOperator.is_zero
assert IdentitySuperOperator != 1
assert hash(IdentitySuperOperator) != hash(1)
assert ZeroSuperOperator != 0
assert hash(ZeroSuperOperator) != hash(0)
assert ZeroSuperOperator.is_zero
assert TrivialKet != 1
assert hash(TrivialKet) != hash(1)
assert ZeroKet != 0
assert hash(ZeroKet) != hash(0)
assert ZeroKet.is_zero
# the remainder are not quantum algebra elements, to they don't have
# is_zero
assert FullSpace != 1
assert hash(FullSpace) != hash(1)
assert TrivialSpace != 0
assert hash(TrivialSpace) != hash(0)
assert CIdentity != 1
assert hash(CIdentity) != hash(1)
assert CircuitZero != 0
assert hash(CircuitZero) != hash(0)
|
|
768f98a2b873833b5029f587c869a39697e7683f
|
plenum/test/requests/test_send_audit_txn.py
|
plenum/test/requests/test_send_audit_txn.py
|
import json
import time
import pytest
from plenum.test.helper import sdk_get_and_check_replies
from plenum.test.pool_transactions.helper import sdk_sign_and_send_prepared_request
from plenum.common.exceptions import RequestNackedException
from plenum.common.constants import TXN_TYPE, AUDIT, CURRENT_PROTOCOL_VERSION
from plenum.common.types import OPERATION, f
def test_send_audit_txn(looper, sdk_wallet_client, sdk_pool_handle):
req = {
OPERATION: {
TXN_TYPE: AUDIT,
'data': 'data1'
},
f.IDENTIFIER.nm: sdk_wallet_client[1],
f.REQ_ID.nm: int(time.time()),
f.PROTOCOL_VERSION.nm: CURRENT_PROTOCOL_VERSION
}
rep = sdk_sign_and_send_prepared_request(looper, sdk_wallet_client, sdk_pool_handle, json.dumps(req))
with pytest.raises(RequestNackedException) as e:
sdk_get_and_check_replies(looper, [rep])
e.match('External audit requests are not allowed')
|
Test for audit txn sending
|
Test for audit txn sending
Signed-off-by: ArtObr <24870ba6726087be7be3af8a7040487294fd73a9@gmail.com>
|
Python
|
apache-2.0
|
evernym/plenum,evernym/zeno
|
Test for audit txn sending
Signed-off-by: ArtObr <24870ba6726087be7be3af8a7040487294fd73a9@gmail.com>
|
import json
import time
import pytest
from plenum.test.helper import sdk_get_and_check_replies
from plenum.test.pool_transactions.helper import sdk_sign_and_send_prepared_request
from plenum.common.exceptions import RequestNackedException
from plenum.common.constants import TXN_TYPE, AUDIT, CURRENT_PROTOCOL_VERSION
from plenum.common.types import OPERATION, f
def test_send_audit_txn(looper, sdk_wallet_client, sdk_pool_handle):
req = {
OPERATION: {
TXN_TYPE: AUDIT,
'data': 'data1'
},
f.IDENTIFIER.nm: sdk_wallet_client[1],
f.REQ_ID.nm: int(time.time()),
f.PROTOCOL_VERSION.nm: CURRENT_PROTOCOL_VERSION
}
rep = sdk_sign_and_send_prepared_request(looper, sdk_wallet_client, sdk_pool_handle, json.dumps(req))
with pytest.raises(RequestNackedException) as e:
sdk_get_and_check_replies(looper, [rep])
e.match('External audit requests are not allowed')
|
<commit_before><commit_msg>Test for audit txn sending
Signed-off-by: ArtObr <24870ba6726087be7be3af8a7040487294fd73a9@gmail.com><commit_after>
|
import json
import time
import pytest
from plenum.test.helper import sdk_get_and_check_replies
from plenum.test.pool_transactions.helper import sdk_sign_and_send_prepared_request
from plenum.common.exceptions import RequestNackedException
from plenum.common.constants import TXN_TYPE, AUDIT, CURRENT_PROTOCOL_VERSION
from plenum.common.types import OPERATION, f
def test_send_audit_txn(looper, sdk_wallet_client, sdk_pool_handle):
req = {
OPERATION: {
TXN_TYPE: AUDIT,
'data': 'data1'
},
f.IDENTIFIER.nm: sdk_wallet_client[1],
f.REQ_ID.nm: int(time.time()),
f.PROTOCOL_VERSION.nm: CURRENT_PROTOCOL_VERSION
}
rep = sdk_sign_and_send_prepared_request(looper, sdk_wallet_client, sdk_pool_handle, json.dumps(req))
with pytest.raises(RequestNackedException) as e:
sdk_get_and_check_replies(looper, [rep])
e.match('External audit requests are not allowed')
|
Test for audit txn sending
Signed-off-by: ArtObr <24870ba6726087be7be3af8a7040487294fd73a9@gmail.com>import json
import time
import pytest
from plenum.test.helper import sdk_get_and_check_replies
from plenum.test.pool_transactions.helper import sdk_sign_and_send_prepared_request
from plenum.common.exceptions import RequestNackedException
from plenum.common.constants import TXN_TYPE, AUDIT, CURRENT_PROTOCOL_VERSION
from plenum.common.types import OPERATION, f
def test_send_audit_txn(looper, sdk_wallet_client, sdk_pool_handle):
req = {
OPERATION: {
TXN_TYPE: AUDIT,
'data': 'data1'
},
f.IDENTIFIER.nm: sdk_wallet_client[1],
f.REQ_ID.nm: int(time.time()),
f.PROTOCOL_VERSION.nm: CURRENT_PROTOCOL_VERSION
}
rep = sdk_sign_and_send_prepared_request(looper, sdk_wallet_client, sdk_pool_handle, json.dumps(req))
with pytest.raises(RequestNackedException) as e:
sdk_get_and_check_replies(looper, [rep])
e.match('External audit requests are not allowed')
|
<commit_before><commit_msg>Test for audit txn sending
Signed-off-by: ArtObr <24870ba6726087be7be3af8a7040487294fd73a9@gmail.com><commit_after>import json
import time
import pytest
from plenum.test.helper import sdk_get_and_check_replies
from plenum.test.pool_transactions.helper import sdk_sign_and_send_prepared_request
from plenum.common.exceptions import RequestNackedException
from plenum.common.constants import TXN_TYPE, AUDIT, CURRENT_PROTOCOL_VERSION
from plenum.common.types import OPERATION, f
def test_send_audit_txn(looper, sdk_wallet_client, sdk_pool_handle):
req = {
OPERATION: {
TXN_TYPE: AUDIT,
'data': 'data1'
},
f.IDENTIFIER.nm: sdk_wallet_client[1],
f.REQ_ID.nm: int(time.time()),
f.PROTOCOL_VERSION.nm: CURRENT_PROTOCOL_VERSION
}
rep = sdk_sign_and_send_prepared_request(looper, sdk_wallet_client, sdk_pool_handle, json.dumps(req))
with pytest.raises(RequestNackedException) as e:
sdk_get_and_check_replies(looper, [rep])
e.match('External audit requests are not allowed')
|
|
3e5193f6dee511a8fd082da7e58705d4c825e079
|
utilities/data_migration/sms_import/sms-recovery.py
|
utilities/data_migration/sms_import/sms-recovery.py
|
#!/usr/bin/env python
# encoding: utf-8
"""
sms-recovery.py
Created by Brian DeRenzi on 2010-04-27.
Copyright (c) 2010 __MyCompanyName__. All rights reserved.
"""
import sys
import os
import MySQLdb
from datetime import datetime, timedelta
DB_HOST = "localhost"
DB_USER = "changeme"
DB_PASSWORD = "changeme"
DB_NAME = "changeme"
INSERT = "insert into logger_message set connection_id='%s', is_incoming='1', text='%s', date='%s'"
def german_to_est_time(input_string):
format_string = "%Y-%m-%d %H:%M:%S"
german_date = datetime.strptime(input_string, format_string)
delta = timedelta(hours=6)
est_date = german_date - delta
output_string = est_date.strftime(format_string)
print "%s to %s" % (input_string, output_string)
return output_string
def main():
# connect to DB
db = MySQLdb.connect(DB_HOST, DB_USER, DB_PASSWORD, DB_NAME)
cursor = db.cursor()
counter = 0
error_count = 0
fin = open("sms-logs.txt", 'r')
for line in fin:
parts = line.partition(":")
values = parts[2].split("|")
# hardcode to ignore one we don't care about. this is a one
# time script, it's ok
if values[3] == '123':
continue
# values are in the format:
# timestamp, 0(?), dest#, from, message\n
message = values[4].strip()
date = german_to_est_time(values[0])
print "Adding message '%s' to db" % message
try:
sql = "select id from reporters_persistantconnection \
where identity='%s'" % values[3]
cursor.execute(sql)
results = cursor.fetchall()
conn_id = results[0][0] # first row, first column
sql = INSERT % (conn_id, message, date)
# print " sql: %s" % sql
cursor.execute(sql)
counter = counter + 1
except Exception, e:
print " ERROR adding record '%s' to db.\n %s" % (message, unicode(e))
error_count = error_count + 1
print "SUMMARY"
print "%s of 207 incoming messages added" % counter
print "%s errors logged" % error_count
if __name__ == '__main__':
main()
|
Add brian's sms import scripts + test data
|
Add brian's sms import scripts + test data
|
Python
|
bsd-3-clause
|
puttarajubr/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq,SEL-Columbia/commcare-hq,gmimano/commcaretest,qedsoftware/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,gmimano/commcaretest,qedsoftware/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,SEL-Columbia/commcare-hq,qedsoftware/commcare-hq,SEL-Columbia/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq,gmimano/commcaretest
|
Add brian's sms import scripts + test data
|
#!/usr/bin/env python
# encoding: utf-8
"""
sms-recovery.py
Created by Brian DeRenzi on 2010-04-27.
Copyright (c) 2010 __MyCompanyName__. All rights reserved.
"""
import sys
import os
import MySQLdb
from datetime import datetime, timedelta
DB_HOST = "localhost"
DB_USER = "changeme"
DB_PASSWORD = "changeme"
DB_NAME = "changeme"
INSERT = "insert into logger_message set connection_id='%s', is_incoming='1', text='%s', date='%s'"
def german_to_est_time(input_string):
format_string = "%Y-%m-%d %H:%M:%S"
german_date = datetime.strptime(input_string, format_string)
delta = timedelta(hours=6)
est_date = german_date - delta
output_string = est_date.strftime(format_string)
print "%s to %s" % (input_string, output_string)
return output_string
def main():
# connect to DB
db = MySQLdb.connect(DB_HOST, DB_USER, DB_PASSWORD, DB_NAME)
cursor = db.cursor()
counter = 0
error_count = 0
fin = open("sms-logs.txt", 'r')
for line in fin:
parts = line.partition(":")
values = parts[2].split("|")
# hardcode to ignore one we don't care about. this is a one
# time script, it's ok
if values[3] == '123':
continue
# values are in the format:
# timestamp, 0(?), dest#, from, message\n
message = values[4].strip()
date = german_to_est_time(values[0])
print "Adding message '%s' to db" % message
try:
sql = "select id from reporters_persistantconnection \
where identity='%s'" % values[3]
cursor.execute(sql)
results = cursor.fetchall()
conn_id = results[0][0] # first row, first column
sql = INSERT % (conn_id, message, date)
# print " sql: %s" % sql
cursor.execute(sql)
counter = counter + 1
except Exception, e:
print " ERROR adding record '%s' to db.\n %s" % (message, unicode(e))
error_count = error_count + 1
print "SUMMARY"
print "%s of 207 incoming messages added" % counter
print "%s errors logged" % error_count
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add brian's sms import scripts + test data<commit_after>
|
#!/usr/bin/env python
# encoding: utf-8
"""
sms-recovery.py
Created by Brian DeRenzi on 2010-04-27.
Copyright (c) 2010 __MyCompanyName__. All rights reserved.
"""
import sys
import os
import MySQLdb
from datetime import datetime, timedelta
DB_HOST = "localhost"
DB_USER = "changeme"
DB_PASSWORD = "changeme"
DB_NAME = "changeme"
INSERT = "insert into logger_message set connection_id='%s', is_incoming='1', text='%s', date='%s'"
def german_to_est_time(input_string):
format_string = "%Y-%m-%d %H:%M:%S"
german_date = datetime.strptime(input_string, format_string)
delta = timedelta(hours=6)
est_date = german_date - delta
output_string = est_date.strftime(format_string)
print "%s to %s" % (input_string, output_string)
return output_string
def main():
# connect to DB
db = MySQLdb.connect(DB_HOST, DB_USER, DB_PASSWORD, DB_NAME)
cursor = db.cursor()
counter = 0
error_count = 0
fin = open("sms-logs.txt", 'r')
for line in fin:
parts = line.partition(":")
values = parts[2].split("|")
# hardcode to ignore one we don't care about. this is a one
# time script, it's ok
if values[3] == '123':
continue
# values are in the format:
# timestamp, 0(?), dest#, from, message\n
message = values[4].strip()
date = german_to_est_time(values[0])
print "Adding message '%s' to db" % message
try:
sql = "select id from reporters_persistantconnection \
where identity='%s'" % values[3]
cursor.execute(sql)
results = cursor.fetchall()
conn_id = results[0][0] # first row, first column
sql = INSERT % (conn_id, message, date)
# print " sql: %s" % sql
cursor.execute(sql)
counter = counter + 1
except Exception, e:
print " ERROR adding record '%s' to db.\n %s" % (message, unicode(e))
error_count = error_count + 1
print "SUMMARY"
print "%s of 207 incoming messages added" % counter
print "%s errors logged" % error_count
if __name__ == '__main__':
main()
|
Add brian's sms import scripts + test data#!/usr/bin/env python
# encoding: utf-8
"""
sms-recovery.py
Created by Brian DeRenzi on 2010-04-27.
Copyright (c) 2010 __MyCompanyName__. All rights reserved.
"""
import sys
import os
import MySQLdb
from datetime import datetime, timedelta
DB_HOST = "localhost"
DB_USER = "changeme"
DB_PASSWORD = "changeme"
DB_NAME = "changeme"
INSERT = "insert into logger_message set connection_id='%s', is_incoming='1', text='%s', date='%s'"
def german_to_est_time(input_string):
format_string = "%Y-%m-%d %H:%M:%S"
german_date = datetime.strptime(input_string, format_string)
delta = timedelta(hours=6)
est_date = german_date - delta
output_string = est_date.strftime(format_string)
print "%s to %s" % (input_string, output_string)
return output_string
def main():
# connect to DB
db = MySQLdb.connect(DB_HOST, DB_USER, DB_PASSWORD, DB_NAME)
cursor = db.cursor()
counter = 0
error_count = 0
fin = open("sms-logs.txt", 'r')
for line in fin:
parts = line.partition(":")
values = parts[2].split("|")
# hardcode to ignore one we don't care about. this is a one
# time script, it's ok
if values[3] == '123':
continue
# values are in the format:
# timestamp, 0(?), dest#, from, message\n
message = values[4].strip()
date = german_to_est_time(values[0])
print "Adding message '%s' to db" % message
try:
sql = "select id from reporters_persistantconnection \
where identity='%s'" % values[3]
cursor.execute(sql)
results = cursor.fetchall()
conn_id = results[0][0] # first row, first column
sql = INSERT % (conn_id, message, date)
# print " sql: %s" % sql
cursor.execute(sql)
counter = counter + 1
except Exception, e:
print " ERROR adding record '%s' to db.\n %s" % (message, unicode(e))
error_count = error_count + 1
print "SUMMARY"
print "%s of 207 incoming messages added" % counter
print "%s errors logged" % error_count
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add brian's sms import scripts + test data<commit_after>#!/usr/bin/env python
# encoding: utf-8
"""
sms-recovery.py
Created by Brian DeRenzi on 2010-04-27.
Copyright (c) 2010 __MyCompanyName__. All rights reserved.
"""
import sys
import os
import MySQLdb
from datetime import datetime, timedelta
DB_HOST = "localhost"
DB_USER = "changeme"
DB_PASSWORD = "changeme"
DB_NAME = "changeme"
INSERT = "insert into logger_message set connection_id='%s', is_incoming='1', text='%s', date='%s'"
def german_to_est_time(input_string):
format_string = "%Y-%m-%d %H:%M:%S"
german_date = datetime.strptime(input_string, format_string)
delta = timedelta(hours=6)
est_date = german_date - delta
output_string = est_date.strftime(format_string)
print "%s to %s" % (input_string, output_string)
return output_string
def main():
# connect to DB
db = MySQLdb.connect(DB_HOST, DB_USER, DB_PASSWORD, DB_NAME)
cursor = db.cursor()
counter = 0
error_count = 0
fin = open("sms-logs.txt", 'r')
for line in fin:
parts = line.partition(":")
values = parts[2].split("|")
# hardcode to ignore one we don't care about. this is a one
# time script, it's ok
if values[3] == '123':
continue
# values are in the format:
# timestamp, 0(?), dest#, from, message\n
message = values[4].strip()
date = german_to_est_time(values[0])
print "Adding message '%s' to db" % message
try:
sql = "select id from reporters_persistantconnection \
where identity='%s'" % values[3]
cursor.execute(sql)
results = cursor.fetchall()
conn_id = results[0][0] # first row, first column
sql = INSERT % (conn_id, message, date)
# print " sql: %s" % sql
cursor.execute(sql)
counter = counter + 1
except Exception, e:
print " ERROR adding record '%s' to db.\n %s" % (message, unicode(e))
error_count = error_count + 1
print "SUMMARY"
print "%s of 207 incoming messages added" % counter
print "%s errors logged" % error_count
if __name__ == '__main__':
main()
|
|
5974e5a1518e26ffd1c0d77d8ca1ba1427319567
|
tests/integration/customer/test_dispatcher.py
|
tests/integration/customer/test_dispatcher.py
|
from django.test import TestCase
from django.core import mail
from oscar.core.compat import get_user_model
from oscar.apps.customer.utils import Dispatcher
from oscar.apps.customer.models import CommunicationEventType
from oscar.test.factories import create_order
User = get_user_model()
class TestDispatcher(TestCase):
def test_sending_a_order_related_messages(self):
email = 'testuser@example.com'
user = User.objects.create_user('testuser', email,
'somesimplepassword')
order_number = '12345'
order = create_order(number=order_number, user=user)
et = CommunicationEventType.objects.create(code="ORDER_PLACED",
name="Order Placed",
category="Order related")
messages = et.get_messages({
'order': order,
'lines': order.lines.all()
})
self.assertIn(order_number, messages['body'])
self.assertIn(order_number, messages['html'])
dispatcher = Dispatcher()
dispatcher.dispatch_order_messages(order, messages, et)
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
self.assertIn(order_number, message.body)
# test sending messages to emails without account and text body
messages['body'] = None
dispatcher.dispatch_direct_messages(email, messages)
self.assertEqual(len(mail.outbox), 2)
|
from django.test import TestCase
from django.core import mail
from oscar.core.compat import get_user_model
from oscar.apps.customer.utils import Dispatcher
from oscar.apps.customer.models import CommunicationEventType
from oscar.test.factories import create_order
User = get_user_model()
class TestDispatcher(TestCase):
def test_sending_a_order_related_messages(self):
email = 'testuser@example.com'
user = User.objects.create_user('testuser', email,
'somesimplepassword')
order_number = '12345'
order = create_order(number=order_number, user=user)
et = CommunicationEventType.objects.create(code="ORDER_PLACED",
name="Order Placed",
category="Order related")
messages = et.get_messages({
'order': order,
'lines': order.lines.all()
})
self.assertIn(order_number, messages['body'])
self.assertIn(order_number, messages['html'])
dispatcher = Dispatcher()
dispatcher.dispatch_order_messages(order, messages, et)
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
self.assertIn(order_number, message.body)
# test sending messages to emails without account and text body
messages['body'] = ''
dispatcher.dispatch_direct_messages(email, messages)
self.assertEqual(len(mail.outbox), 2)
|
Use empty message instead of None.
|
Use empty message instead of None.
|
Python
|
bsd-3-clause
|
solarissmoke/django-oscar,sasha0/django-oscar,sonofatailor/django-oscar,django-oscar/django-oscar,solarissmoke/django-oscar,sonofatailor/django-oscar,django-oscar/django-oscar,solarissmoke/django-oscar,okfish/django-oscar,okfish/django-oscar,sonofatailor/django-oscar,okfish/django-oscar,sasha0/django-oscar,sonofatailor/django-oscar,django-oscar/django-oscar,okfish/django-oscar,sasha0/django-oscar,solarissmoke/django-oscar,django-oscar/django-oscar,sasha0/django-oscar
|
from django.test import TestCase
from django.core import mail
from oscar.core.compat import get_user_model
from oscar.apps.customer.utils import Dispatcher
from oscar.apps.customer.models import CommunicationEventType
from oscar.test.factories import create_order
User = get_user_model()
class TestDispatcher(TestCase):
def test_sending_a_order_related_messages(self):
email = 'testuser@example.com'
user = User.objects.create_user('testuser', email,
'somesimplepassword')
order_number = '12345'
order = create_order(number=order_number, user=user)
et = CommunicationEventType.objects.create(code="ORDER_PLACED",
name="Order Placed",
category="Order related")
messages = et.get_messages({
'order': order,
'lines': order.lines.all()
})
self.assertIn(order_number, messages['body'])
self.assertIn(order_number, messages['html'])
dispatcher = Dispatcher()
dispatcher.dispatch_order_messages(order, messages, et)
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
self.assertIn(order_number, message.body)
# test sending messages to emails without account and text body
messages['body'] = None
dispatcher.dispatch_direct_messages(email, messages)
self.assertEqual(len(mail.outbox), 2)
Use empty message instead of None.
|
from django.test import TestCase
from django.core import mail
from oscar.core.compat import get_user_model
from oscar.apps.customer.utils import Dispatcher
from oscar.apps.customer.models import CommunicationEventType
from oscar.test.factories import create_order
User = get_user_model()
class TestDispatcher(TestCase):
def test_sending_a_order_related_messages(self):
email = 'testuser@example.com'
user = User.objects.create_user('testuser', email,
'somesimplepassword')
order_number = '12345'
order = create_order(number=order_number, user=user)
et = CommunicationEventType.objects.create(code="ORDER_PLACED",
name="Order Placed",
category="Order related")
messages = et.get_messages({
'order': order,
'lines': order.lines.all()
})
self.assertIn(order_number, messages['body'])
self.assertIn(order_number, messages['html'])
dispatcher = Dispatcher()
dispatcher.dispatch_order_messages(order, messages, et)
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
self.assertIn(order_number, message.body)
# test sending messages to emails without account and text body
messages['body'] = ''
dispatcher.dispatch_direct_messages(email, messages)
self.assertEqual(len(mail.outbox), 2)
|
<commit_before>from django.test import TestCase
from django.core import mail
from oscar.core.compat import get_user_model
from oscar.apps.customer.utils import Dispatcher
from oscar.apps.customer.models import CommunicationEventType
from oscar.test.factories import create_order
User = get_user_model()
class TestDispatcher(TestCase):
def test_sending_a_order_related_messages(self):
email = 'testuser@example.com'
user = User.objects.create_user('testuser', email,
'somesimplepassword')
order_number = '12345'
order = create_order(number=order_number, user=user)
et = CommunicationEventType.objects.create(code="ORDER_PLACED",
name="Order Placed",
category="Order related")
messages = et.get_messages({
'order': order,
'lines': order.lines.all()
})
self.assertIn(order_number, messages['body'])
self.assertIn(order_number, messages['html'])
dispatcher = Dispatcher()
dispatcher.dispatch_order_messages(order, messages, et)
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
self.assertIn(order_number, message.body)
# test sending messages to emails without account and text body
messages['body'] = None
dispatcher.dispatch_direct_messages(email, messages)
self.assertEqual(len(mail.outbox), 2)
<commit_msg>Use empty message instead of None.<commit_after>
|
from django.test import TestCase
from django.core import mail
from oscar.core.compat import get_user_model
from oscar.apps.customer.utils import Dispatcher
from oscar.apps.customer.models import CommunicationEventType
from oscar.test.factories import create_order
User = get_user_model()
class TestDispatcher(TestCase):
def test_sending_a_order_related_messages(self):
email = 'testuser@example.com'
user = User.objects.create_user('testuser', email,
'somesimplepassword')
order_number = '12345'
order = create_order(number=order_number, user=user)
et = CommunicationEventType.objects.create(code="ORDER_PLACED",
name="Order Placed",
category="Order related")
messages = et.get_messages({
'order': order,
'lines': order.lines.all()
})
self.assertIn(order_number, messages['body'])
self.assertIn(order_number, messages['html'])
dispatcher = Dispatcher()
dispatcher.dispatch_order_messages(order, messages, et)
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
self.assertIn(order_number, message.body)
# test sending messages to emails without account and text body
messages['body'] = ''
dispatcher.dispatch_direct_messages(email, messages)
self.assertEqual(len(mail.outbox), 2)
|
from django.test import TestCase
from django.core import mail
from oscar.core.compat import get_user_model
from oscar.apps.customer.utils import Dispatcher
from oscar.apps.customer.models import CommunicationEventType
from oscar.test.factories import create_order
User = get_user_model()
class TestDispatcher(TestCase):
def test_sending_a_order_related_messages(self):
email = 'testuser@example.com'
user = User.objects.create_user('testuser', email,
'somesimplepassword')
order_number = '12345'
order = create_order(number=order_number, user=user)
et = CommunicationEventType.objects.create(code="ORDER_PLACED",
name="Order Placed",
category="Order related")
messages = et.get_messages({
'order': order,
'lines': order.lines.all()
})
self.assertIn(order_number, messages['body'])
self.assertIn(order_number, messages['html'])
dispatcher = Dispatcher()
dispatcher.dispatch_order_messages(order, messages, et)
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
self.assertIn(order_number, message.body)
# test sending messages to emails without account and text body
messages['body'] = None
dispatcher.dispatch_direct_messages(email, messages)
self.assertEqual(len(mail.outbox), 2)
Use empty message instead of None.from django.test import TestCase
from django.core import mail
from oscar.core.compat import get_user_model
from oscar.apps.customer.utils import Dispatcher
from oscar.apps.customer.models import CommunicationEventType
from oscar.test.factories import create_order
User = get_user_model()
class TestDispatcher(TestCase):
def test_sending_a_order_related_messages(self):
email = 'testuser@example.com'
user = User.objects.create_user('testuser', email,
'somesimplepassword')
order_number = '12345'
order = create_order(number=order_number, user=user)
et = CommunicationEventType.objects.create(code="ORDER_PLACED",
name="Order Placed",
category="Order related")
messages = et.get_messages({
'order': order,
'lines': order.lines.all()
})
self.assertIn(order_number, messages['body'])
self.assertIn(order_number, messages['html'])
dispatcher = Dispatcher()
dispatcher.dispatch_order_messages(order, messages, et)
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
self.assertIn(order_number, message.body)
# test sending messages to emails without account and text body
messages['body'] = ''
dispatcher.dispatch_direct_messages(email, messages)
self.assertEqual(len(mail.outbox), 2)
|
<commit_before>from django.test import TestCase
from django.core import mail
from oscar.core.compat import get_user_model
from oscar.apps.customer.utils import Dispatcher
from oscar.apps.customer.models import CommunicationEventType
from oscar.test.factories import create_order
User = get_user_model()
class TestDispatcher(TestCase):
def test_sending_a_order_related_messages(self):
email = 'testuser@example.com'
user = User.objects.create_user('testuser', email,
'somesimplepassword')
order_number = '12345'
order = create_order(number=order_number, user=user)
et = CommunicationEventType.objects.create(code="ORDER_PLACED",
name="Order Placed",
category="Order related")
messages = et.get_messages({
'order': order,
'lines': order.lines.all()
})
self.assertIn(order_number, messages['body'])
self.assertIn(order_number, messages['html'])
dispatcher = Dispatcher()
dispatcher.dispatch_order_messages(order, messages, et)
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
self.assertIn(order_number, message.body)
# test sending messages to emails without account and text body
messages['body'] = None
dispatcher.dispatch_direct_messages(email, messages)
self.assertEqual(len(mail.outbox), 2)
<commit_msg>Use empty message instead of None.<commit_after>from django.test import TestCase
from django.core import mail
from oscar.core.compat import get_user_model
from oscar.apps.customer.utils import Dispatcher
from oscar.apps.customer.models import CommunicationEventType
from oscar.test.factories import create_order
User = get_user_model()
class TestDispatcher(TestCase):
def test_sending_a_order_related_messages(self):
email = 'testuser@example.com'
user = User.objects.create_user('testuser', email,
'somesimplepassword')
order_number = '12345'
order = create_order(number=order_number, user=user)
et = CommunicationEventType.objects.create(code="ORDER_PLACED",
name="Order Placed",
category="Order related")
messages = et.get_messages({
'order': order,
'lines': order.lines.all()
})
self.assertIn(order_number, messages['body'])
self.assertIn(order_number, messages['html'])
dispatcher = Dispatcher()
dispatcher.dispatch_order_messages(order, messages, et)
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
self.assertIn(order_number, message.body)
# test sending messages to emails without account and text body
messages['body'] = ''
dispatcher.dispatch_direct_messages(email, messages)
self.assertEqual(len(mail.outbox), 2)
|
810aee1682f16f8697943baf622abead57c707eb
|
portal/migrations/versions/d0b40bc8d7e6_.py
|
portal/migrations/versions/d0b40bc8d7e6_.py
|
from alembic import op
import sqlalchemy as sa
"""empty message
Revision ID: d0b40bc8d7e6
Revises: 8ffec90e68a7
Create Date: 2017-09-20 05:59:45.168324
"""
# revision identifiers, used by Alembic.
revision = 'd0b40bc8d7e6'
down_revision = '8ffec90e68a7'
def upgrade():
# Work around site_persistence fragility. Replace a couple names
# as delete and recreate on these fails due to FK constraints
op.execute("UPDATE questionnaire_banks SET name = 'IRONMAN_baseline' "
" WHERE name = 'IRONMAN baseline'")
op.execute("UPDATE questionnaire_banks SET name = 'CRV_baseline' "
" WHERE name = 'CRV baseline'")
def downgrade():
op.execute("UPDATE questionnaire_banks SET name = 'IRONMAN baseline' "
" WHERE name = 'IRONMAN_baseline'")
op.execute("UPDATE questionnaire_banks SET name = 'CRV baseline' "
" WHERE name = 'CRV_baseline'")
|
Work around site_persistence fragility. Replace a couple names as delete and recreate on these fails due to FK constraints
|
Work around site_persistence fragility. Replace a couple names
as delete and recreate on these fails due to FK constraints
|
Python
|
bsd-3-clause
|
uwcirg/true_nth_usa_portal,uwcirg/true_nth_usa_portal,uwcirg/true_nth_usa_portal,uwcirg/true_nth_usa_portal
|
Work around site_persistence fragility. Replace a couple names
as delete and recreate on these fails due to FK constraints
|
from alembic import op
import sqlalchemy as sa
"""empty message
Revision ID: d0b40bc8d7e6
Revises: 8ffec90e68a7
Create Date: 2017-09-20 05:59:45.168324
"""
# revision identifiers, used by Alembic.
revision = 'd0b40bc8d7e6'
down_revision = '8ffec90e68a7'
def upgrade():
# Work around site_persistence fragility. Replace a couple names
# as delete and recreate on these fails due to FK constraints
op.execute("UPDATE questionnaire_banks SET name = 'IRONMAN_baseline' "
" WHERE name = 'IRONMAN baseline'")
op.execute("UPDATE questionnaire_banks SET name = 'CRV_baseline' "
" WHERE name = 'CRV baseline'")
def downgrade():
op.execute("UPDATE questionnaire_banks SET name = 'IRONMAN baseline' "
" WHERE name = 'IRONMAN_baseline'")
op.execute("UPDATE questionnaire_banks SET name = 'CRV baseline' "
" WHERE name = 'CRV_baseline'")
|
<commit_before><commit_msg>Work around site_persistence fragility. Replace a couple names
as delete and recreate on these fails due to FK constraints<commit_after>
|
from alembic import op
import sqlalchemy as sa
"""empty message
Revision ID: d0b40bc8d7e6
Revises: 8ffec90e68a7
Create Date: 2017-09-20 05:59:45.168324
"""
# revision identifiers, used by Alembic.
revision = 'd0b40bc8d7e6'
down_revision = '8ffec90e68a7'
def upgrade():
# Work around site_persistence fragility. Replace a couple names
# as delete and recreate on these fails due to FK constraints
op.execute("UPDATE questionnaire_banks SET name = 'IRONMAN_baseline' "
" WHERE name = 'IRONMAN baseline'")
op.execute("UPDATE questionnaire_banks SET name = 'CRV_baseline' "
" WHERE name = 'CRV baseline'")
def downgrade():
op.execute("UPDATE questionnaire_banks SET name = 'IRONMAN baseline' "
" WHERE name = 'IRONMAN_baseline'")
op.execute("UPDATE questionnaire_banks SET name = 'CRV baseline' "
" WHERE name = 'CRV_baseline'")
|
Work around site_persistence fragility. Replace a couple names
as delete and recreate on these fails due to FK constraintsfrom alembic import op
import sqlalchemy as sa
"""empty message
Revision ID: d0b40bc8d7e6
Revises: 8ffec90e68a7
Create Date: 2017-09-20 05:59:45.168324
"""
# revision identifiers, used by Alembic.
revision = 'd0b40bc8d7e6'
down_revision = '8ffec90e68a7'
def upgrade():
# Work around site_persistence fragility. Replace a couple names
# as delete and recreate on these fails due to FK constraints
op.execute("UPDATE questionnaire_banks SET name = 'IRONMAN_baseline' "
" WHERE name = 'IRONMAN baseline'")
op.execute("UPDATE questionnaire_banks SET name = 'CRV_baseline' "
" WHERE name = 'CRV baseline'")
def downgrade():
op.execute("UPDATE questionnaire_banks SET name = 'IRONMAN baseline' "
" WHERE name = 'IRONMAN_baseline'")
op.execute("UPDATE questionnaire_banks SET name = 'CRV baseline' "
" WHERE name = 'CRV_baseline'")
|
<commit_before><commit_msg>Work around site_persistence fragility. Replace a couple names
as delete and recreate on these fails due to FK constraints<commit_after>from alembic import op
import sqlalchemy as sa
"""empty message
Revision ID: d0b40bc8d7e6
Revises: 8ffec90e68a7
Create Date: 2017-09-20 05:59:45.168324
"""
# revision identifiers, used by Alembic.
revision = 'd0b40bc8d7e6'
down_revision = '8ffec90e68a7'
def upgrade():
# Work around site_persistence fragility. Replace a couple names
# as delete and recreate on these fails due to FK constraints
op.execute("UPDATE questionnaire_banks SET name = 'IRONMAN_baseline' "
" WHERE name = 'IRONMAN baseline'")
op.execute("UPDATE questionnaire_banks SET name = 'CRV_baseline' "
" WHERE name = 'CRV baseline'")
def downgrade():
op.execute("UPDATE questionnaire_banks SET name = 'IRONMAN baseline' "
" WHERE name = 'IRONMAN_baseline'")
op.execute("UPDATE questionnaire_banks SET name = 'CRV baseline' "
" WHERE name = 'CRV_baseline'")
|
|
8c2eb34d1a1f70150b3f3e7c9bc7255e5178bda6
|
accounts/migrations/0003_migrate_api_keys.py
|
accounts/migrations/0003_migrate_api_keys.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def migrate_keys(apps, schema_editor):
Token = apps.get_model("authtoken", "Token")
ApiKey = apps.get_model("tastypie", "ApiKey")
for key in ApiKey.objects.all():
Token.objects.create(
user=key.user,
key=key.key,
created=key.created
)
class Migration(migrations.Migration):
dependencies = [
('accounts', '0002_auto_20150717_2226'),
('tastypie', '0001_initial'),
('authtoken', '0001_initial'),
]
operations = [
migrations.RunPython(migrate_keys),
]
|
Write migration for API keys
|
Write migration for API keys
|
Python
|
agpl-3.0
|
lutris/website,Turupawn/website,lutris/website,Turupawn/website,Turupawn/website,lutris/website,Turupawn/website,lutris/website
|
Write migration for API keys
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def migrate_keys(apps, schema_editor):
Token = apps.get_model("authtoken", "Token")
ApiKey = apps.get_model("tastypie", "ApiKey")
for key in ApiKey.objects.all():
Token.objects.create(
user=key.user,
key=key.key,
created=key.created
)
class Migration(migrations.Migration):
dependencies = [
('accounts', '0002_auto_20150717_2226'),
('tastypie', '0001_initial'),
('authtoken', '0001_initial'),
]
operations = [
migrations.RunPython(migrate_keys),
]
|
<commit_before><commit_msg>Write migration for API keys<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def migrate_keys(apps, schema_editor):
Token = apps.get_model("authtoken", "Token")
ApiKey = apps.get_model("tastypie", "ApiKey")
for key in ApiKey.objects.all():
Token.objects.create(
user=key.user,
key=key.key,
created=key.created
)
class Migration(migrations.Migration):
dependencies = [
('accounts', '0002_auto_20150717_2226'),
('tastypie', '0001_initial'),
('authtoken', '0001_initial'),
]
operations = [
migrations.RunPython(migrate_keys),
]
|
Write migration for API keys# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def migrate_keys(apps, schema_editor):
Token = apps.get_model("authtoken", "Token")
ApiKey = apps.get_model("tastypie", "ApiKey")
for key in ApiKey.objects.all():
Token.objects.create(
user=key.user,
key=key.key,
created=key.created
)
class Migration(migrations.Migration):
dependencies = [
('accounts', '0002_auto_20150717_2226'),
('tastypie', '0001_initial'),
('authtoken', '0001_initial'),
]
operations = [
migrations.RunPython(migrate_keys),
]
|
<commit_before><commit_msg>Write migration for API keys<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def migrate_keys(apps, schema_editor):
Token = apps.get_model("authtoken", "Token")
ApiKey = apps.get_model("tastypie", "ApiKey")
for key in ApiKey.objects.all():
Token.objects.create(
user=key.user,
key=key.key,
created=key.created
)
class Migration(migrations.Migration):
dependencies = [
('accounts', '0002_auto_20150717_2226'),
('tastypie', '0001_initial'),
('authtoken', '0001_initial'),
]
operations = [
migrations.RunPython(migrate_keys),
]
|
|
1e83e4a47d0f97e0f20ab64b465c23483503d598
|
samples/magicbot_simple/tests/pyfrc_test.py
|
samples/magicbot_simple/tests/pyfrc_test.py
|
'''
This test module imports tests that come with pyfrc, and can be used
to test basic functionality of just about any robot.
'''
from pyfrc.tests import *
from magicbot.magicbot_tests import *
|
Add tests to magicbot example
|
Add tests to magicbot example
|
Python
|
bsd-3-clause
|
Twinters007/robotpy-wpilib-utilities,robotpy/robotpy-wpilib-utilities,robotpy/robotpy-wpilib-utilities,Twinters007/robotpy-wpilib-utilities
|
Add tests to magicbot example
|
'''
This test module imports tests that come with pyfrc, and can be used
to test basic functionality of just about any robot.
'''
from pyfrc.tests import *
from magicbot.magicbot_tests import *
|
<commit_before><commit_msg>Add tests to magicbot example<commit_after>
|
'''
This test module imports tests that come with pyfrc, and can be used
to test basic functionality of just about any robot.
'''
from pyfrc.tests import *
from magicbot.magicbot_tests import *
|
Add tests to magicbot example'''
This test module imports tests that come with pyfrc, and can be used
to test basic functionality of just about any robot.
'''
from pyfrc.tests import *
from magicbot.magicbot_tests import *
|
<commit_before><commit_msg>Add tests to magicbot example<commit_after>'''
This test module imports tests that come with pyfrc, and can be used
to test basic functionality of just about any robot.
'''
from pyfrc.tests import *
from magicbot.magicbot_tests import *
|
|
4460aee67c1d95fd896d131add5c99151b24573e
|
fileapi/tests/test_qunit.py
|
fileapi/tests/test_qunit.py
|
import os
from django.conf import settings
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from django.test.utils import override_settings
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.ui import WebDriverWait
@override_settings(STATICFILES_DIRS=(os.path.join(os.path.dirname(__file__), 'static'), ))
class QunitTests(StaticLiveServerTestCase):
"""Iteractive tests with selenium."""
@classmethod
def setUpClass(cls):
cls.browser = webdriver.PhantomJS()
super().setUpClass()
@classmethod
def tearDownClass(cls):
cls.browser.quit()
super().tearDownClass()
def test_qunit(self):
"""Load the QUnit tests and check for failures."""
self.browser.get(self.live_server_url + settings.STATIC_URL + 'index.html')
results = WebDriverWait(self.browser, 5).until(
expected_conditions.visibility_of_element_located((By.ID, 'qunit-testresult')))
total = int(results.find_element_by_class_name('total').text)
failed = int(results.find_element_by_class_name('failed').text)
self.assertTrue(total and not failed, results.text)
|
Add Django testcase to load QUnit test suite and assert there are no failures.
|
Add Django testcase to load QUnit test suite and assert there are no failures.
|
Python
|
bsd-2-clause
|
mlavin/fileapi,mlavin/fileapi,mlavin/fileapi
|
Add Django testcase to load QUnit test suite and assert there are no failures.
|
import os
from django.conf import settings
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from django.test.utils import override_settings
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.ui import WebDriverWait
@override_settings(STATICFILES_DIRS=(os.path.join(os.path.dirname(__file__), 'static'), ))
class QunitTests(StaticLiveServerTestCase):
"""Iteractive tests with selenium."""
@classmethod
def setUpClass(cls):
cls.browser = webdriver.PhantomJS()
super().setUpClass()
@classmethod
def tearDownClass(cls):
cls.browser.quit()
super().tearDownClass()
def test_qunit(self):
"""Load the QUnit tests and check for failures."""
self.browser.get(self.live_server_url + settings.STATIC_URL + 'index.html')
results = WebDriverWait(self.browser, 5).until(
expected_conditions.visibility_of_element_located((By.ID, 'qunit-testresult')))
total = int(results.find_element_by_class_name('total').text)
failed = int(results.find_element_by_class_name('failed').text)
self.assertTrue(total and not failed, results.text)
|
<commit_before><commit_msg>Add Django testcase to load QUnit test suite and assert there are no failures.<commit_after>
|
import os
from django.conf import settings
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from django.test.utils import override_settings
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.ui import WebDriverWait
@override_settings(STATICFILES_DIRS=(os.path.join(os.path.dirname(__file__), 'static'), ))
class QunitTests(StaticLiveServerTestCase):
"""Iteractive tests with selenium."""
@classmethod
def setUpClass(cls):
cls.browser = webdriver.PhantomJS()
super().setUpClass()
@classmethod
def tearDownClass(cls):
cls.browser.quit()
super().tearDownClass()
def test_qunit(self):
"""Load the QUnit tests and check for failures."""
self.browser.get(self.live_server_url + settings.STATIC_URL + 'index.html')
results = WebDriverWait(self.browser, 5).until(
expected_conditions.visibility_of_element_located((By.ID, 'qunit-testresult')))
total = int(results.find_element_by_class_name('total').text)
failed = int(results.find_element_by_class_name('failed').text)
self.assertTrue(total and not failed, results.text)
|
Add Django testcase to load QUnit test suite and assert there are no failures.import os
from django.conf import settings
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from django.test.utils import override_settings
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.ui import WebDriverWait
@override_settings(STATICFILES_DIRS=(os.path.join(os.path.dirname(__file__), 'static'), ))
class QunitTests(StaticLiveServerTestCase):
"""Iteractive tests with selenium."""
@classmethod
def setUpClass(cls):
cls.browser = webdriver.PhantomJS()
super().setUpClass()
@classmethod
def tearDownClass(cls):
cls.browser.quit()
super().tearDownClass()
def test_qunit(self):
"""Load the QUnit tests and check for failures."""
self.browser.get(self.live_server_url + settings.STATIC_URL + 'index.html')
results = WebDriverWait(self.browser, 5).until(
expected_conditions.visibility_of_element_located((By.ID, 'qunit-testresult')))
total = int(results.find_element_by_class_name('total').text)
failed = int(results.find_element_by_class_name('failed').text)
self.assertTrue(total and not failed, results.text)
|
<commit_before><commit_msg>Add Django testcase to load QUnit test suite and assert there are no failures.<commit_after>import os
from django.conf import settings
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from django.test.utils import override_settings
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.ui import WebDriverWait
@override_settings(STATICFILES_DIRS=(os.path.join(os.path.dirname(__file__), 'static'), ))
class QunitTests(StaticLiveServerTestCase):
"""Iteractive tests with selenium."""
@classmethod
def setUpClass(cls):
cls.browser = webdriver.PhantomJS()
super().setUpClass()
@classmethod
def tearDownClass(cls):
cls.browser.quit()
super().tearDownClass()
def test_qunit(self):
"""Load the QUnit tests and check for failures."""
self.browser.get(self.live_server_url + settings.STATIC_URL + 'index.html')
results = WebDriverWait(self.browser, 5).until(
expected_conditions.visibility_of_element_located((By.ID, 'qunit-testresult')))
total = int(results.find_element_by_class_name('total').text)
failed = int(results.find_element_by_class_name('failed').text)
self.assertTrue(total and not failed, results.text)
|
|
e406c876e0668b1b2e6a0531d68249b579831d9b
|
apps/pyjob_check_finished_jobs.py
|
apps/pyjob_check_finished_jobs.py
|
#!/usr/bin/env python3
import os
def find_rms_dirs(dirpath):
dirs = [x for x in os.walk(dirpath)];
for i in range(len(dirs)):
if any('rms' in x for x in dirs[i][1]):
par_dir = dirs[i][0]
rms_dirs = [os.path.join(par_dir, x) for x in dirs[i][1] if 'rms' in x]
return par_dir, rms_dirs
par_dir, rms_dirs = find_rms_dirs(os.getcwd())
ma = []
ex = []
xy = []
for d in rms_dirs:
files = os.listdir(d)
if 'dynap_ma_out.txt' in files:
ma.append(d.split(os.sep)[-1])
if 'dynap_ex_out.txt' in files:
ex.append(d.split(os.sep)[-1])
if 'dynap_xy_out.txt' in files:
xy.append(d.split(os.sep)[-1])
print("\nDynamic aperture results found in :", par_dir)
print("xy: ", sorted(xy))
print("ex: ", sorted(ex))
print("ma: ", sorted(ma), "\n")
|
Add script to check finished jobs
|
Add script to check finished jobs
|
Python
|
mit
|
lnls-fac/job_manager
|
Add script to check finished jobs
|
#!/usr/bin/env python3
import os
def find_rms_dirs(dirpath):
dirs = [x for x in os.walk(dirpath)];
for i in range(len(dirs)):
if any('rms' in x for x in dirs[i][1]):
par_dir = dirs[i][0]
rms_dirs = [os.path.join(par_dir, x) for x in dirs[i][1] if 'rms' in x]
return par_dir, rms_dirs
par_dir, rms_dirs = find_rms_dirs(os.getcwd())
ma = []
ex = []
xy = []
for d in rms_dirs:
files = os.listdir(d)
if 'dynap_ma_out.txt' in files:
ma.append(d.split(os.sep)[-1])
if 'dynap_ex_out.txt' in files:
ex.append(d.split(os.sep)[-1])
if 'dynap_xy_out.txt' in files:
xy.append(d.split(os.sep)[-1])
print("\nDynamic aperture results found in :", par_dir)
print("xy: ", sorted(xy))
print("ex: ", sorted(ex))
print("ma: ", sorted(ma), "\n")
|
<commit_before><commit_msg>Add script to check finished jobs<commit_after>
|
#!/usr/bin/env python3
import os
def find_rms_dirs(dirpath):
dirs = [x for x in os.walk(dirpath)];
for i in range(len(dirs)):
if any('rms' in x for x in dirs[i][1]):
par_dir = dirs[i][0]
rms_dirs = [os.path.join(par_dir, x) for x in dirs[i][1] if 'rms' in x]
return par_dir, rms_dirs
par_dir, rms_dirs = find_rms_dirs(os.getcwd())
ma = []
ex = []
xy = []
for d in rms_dirs:
files = os.listdir(d)
if 'dynap_ma_out.txt' in files:
ma.append(d.split(os.sep)[-1])
if 'dynap_ex_out.txt' in files:
ex.append(d.split(os.sep)[-1])
if 'dynap_xy_out.txt' in files:
xy.append(d.split(os.sep)[-1])
print("\nDynamic aperture results found in :", par_dir)
print("xy: ", sorted(xy))
print("ex: ", sorted(ex))
print("ma: ", sorted(ma), "\n")
|
Add script to check finished jobs#!/usr/bin/env python3
import os
def find_rms_dirs(dirpath):
dirs = [x for x in os.walk(dirpath)];
for i in range(len(dirs)):
if any('rms' in x for x in dirs[i][1]):
par_dir = dirs[i][0]
rms_dirs = [os.path.join(par_dir, x) for x in dirs[i][1] if 'rms' in x]
return par_dir, rms_dirs
par_dir, rms_dirs = find_rms_dirs(os.getcwd())
ma = []
ex = []
xy = []
for d in rms_dirs:
files = os.listdir(d)
if 'dynap_ma_out.txt' in files:
ma.append(d.split(os.sep)[-1])
if 'dynap_ex_out.txt' in files:
ex.append(d.split(os.sep)[-1])
if 'dynap_xy_out.txt' in files:
xy.append(d.split(os.sep)[-1])
print("\nDynamic aperture results found in :", par_dir)
print("xy: ", sorted(xy))
print("ex: ", sorted(ex))
print("ma: ", sorted(ma), "\n")
|
<commit_before><commit_msg>Add script to check finished jobs<commit_after>#!/usr/bin/env python3
import os
def find_rms_dirs(dirpath):
dirs = [x for x in os.walk(dirpath)];
for i in range(len(dirs)):
if any('rms' in x for x in dirs[i][1]):
par_dir = dirs[i][0]
rms_dirs = [os.path.join(par_dir, x) for x in dirs[i][1] if 'rms' in x]
return par_dir, rms_dirs
par_dir, rms_dirs = find_rms_dirs(os.getcwd())
ma = []
ex = []
xy = []
for d in rms_dirs:
files = os.listdir(d)
if 'dynap_ma_out.txt' in files:
ma.append(d.split(os.sep)[-1])
if 'dynap_ex_out.txt' in files:
ex.append(d.split(os.sep)[-1])
if 'dynap_xy_out.txt' in files:
xy.append(d.split(os.sep)[-1])
print("\nDynamic aperture results found in :", par_dir)
print("xy: ", sorted(xy))
print("ex: ", sorted(ex))
print("ma: ", sorted(ma), "\n")
|
|
ff1b5a3bbfb1deb92d2b34d0951db35a48c1d630
|
cifar.py
|
cifar.py
|
import cProfile
import data_loader
import data_manipulator
import data_saver
import neural_net
def main():
test_batch, train_batch = data_loader.load_data()
data_manipulator.categorize(train_batch, test_batch)
model = neural_net.get_trained_model(train_batches=train_batch,
test_batch=test_batch,
weights_in='weights/1024_1024_256_64_epochs_45',
weights_out='weights/1024_1024_256_64_epochs_50')
predictions = neural_net.get_predictions(model, test_batch)
data_saver.save_results("results/result.csv", predictions)
def profiling():
cProfile.run('main()', sort='tottime')
if __name__ == "__main__":
main()
|
Add main module for solving CIFAR-10 classification problem
|
Add main module for solving CIFAR-10 classification problem
|
Python
|
mit
|
maciewar/AGH-Deep-Learning-CIFAR10
|
Add main module for solving CIFAR-10 classification problem
|
import cProfile
import data_loader
import data_manipulator
import data_saver
import neural_net
def main():
test_batch, train_batch = data_loader.load_data()
data_manipulator.categorize(train_batch, test_batch)
model = neural_net.get_trained_model(train_batches=train_batch,
test_batch=test_batch,
weights_in='weights/1024_1024_256_64_epochs_45',
weights_out='weights/1024_1024_256_64_epochs_50')
predictions = neural_net.get_predictions(model, test_batch)
data_saver.save_results("results/result.csv", predictions)
def profiling():
cProfile.run('main()', sort='tottime')
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add main module for solving CIFAR-10 classification problem<commit_after>
|
import cProfile
import data_loader
import data_manipulator
import data_saver
import neural_net
def main():
test_batch, train_batch = data_loader.load_data()
data_manipulator.categorize(train_batch, test_batch)
model = neural_net.get_trained_model(train_batches=train_batch,
test_batch=test_batch,
weights_in='weights/1024_1024_256_64_epochs_45',
weights_out='weights/1024_1024_256_64_epochs_50')
predictions = neural_net.get_predictions(model, test_batch)
data_saver.save_results("results/result.csv", predictions)
def profiling():
cProfile.run('main()', sort='tottime')
if __name__ == "__main__":
main()
|
Add main module for solving CIFAR-10 classification problemimport cProfile
import data_loader
import data_manipulator
import data_saver
import neural_net
def main():
test_batch, train_batch = data_loader.load_data()
data_manipulator.categorize(train_batch, test_batch)
model = neural_net.get_trained_model(train_batches=train_batch,
test_batch=test_batch,
weights_in='weights/1024_1024_256_64_epochs_45',
weights_out='weights/1024_1024_256_64_epochs_50')
predictions = neural_net.get_predictions(model, test_batch)
data_saver.save_results("results/result.csv", predictions)
def profiling():
cProfile.run('main()', sort='tottime')
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add main module for solving CIFAR-10 classification problem<commit_after>import cProfile
import data_loader
import data_manipulator
import data_saver
import neural_net
def main():
test_batch, train_batch = data_loader.load_data()
data_manipulator.categorize(train_batch, test_batch)
model = neural_net.get_trained_model(train_batches=train_batch,
test_batch=test_batch,
weights_in='weights/1024_1024_256_64_epochs_45',
weights_out='weights/1024_1024_256_64_epochs_50')
predictions = neural_net.get_predictions(model, test_batch)
data_saver.save_results("results/result.csv", predictions)
def profiling():
cProfile.run('main()', sort='tottime')
if __name__ == "__main__":
main()
|
|
2df69f87e92a9795aaf6095448e6222db485430d
|
automation/KMeansDataGenerator.py
|
automation/KMeansDataGenerator.py
|
import numpy as np
import sys
import random
def get_next(x):
i = 0
new_x = np.copy(x)
while new_x[i] == 1:
i = i + 1
new_x[i] = 1
for j in range(i):
new_x[j] = 0
return new_x
D = int(sys.argv[1])
K = int(sys.argv[2])
num = int(sys.argv[3])
point_file = open(sys.argv[4], "w")
center_file = open(sys.argv[5], "w")
c = np.zeros((K, D))
for i in range(1, K):
c[i] = get_next(c[i-1])
point = np.zeros(D)
count = 0
for k in range(num):
i = np.random.randint(K)
count = count + 1
print >> point_file, count,
for j in range(D):
point[j] = c[i][j] * 100.0 + np.random.random() * 40 - 20
print >> point_file, point[j],
print >> point_file
for i in range(K):
print >> center_file, i + 1,
for j in range(D):
point[j] = c[i][j] * 100.0 + np.random.random() * 60 - 30
print >> center_file, point[j],
print >> center_file
|
Add Multi-Dimension KMeans data generator
|
Add Multi-Dimension KMeans data generator
|
Python
|
apache-2.0
|
mjsax/performance,dataArtisans/performance,mxm/flink-perf,project-flink/flink-perf,dataArtisans/performance,project-flink/flink-perf,mjsax/performance,mxm/flink-perf,mjsax/performance,dataArtisans/performance,mxm/flink-perf,project-flink/flink-perf
|
Add Multi-Dimension KMeans data generator
|
import numpy as np
import sys
import random
def get_next(x):
i = 0
new_x = np.copy(x)
while new_x[i] == 1:
i = i + 1
new_x[i] = 1
for j in range(i):
new_x[j] = 0
return new_x
D = int(sys.argv[1])
K = int(sys.argv[2])
num = int(sys.argv[3])
point_file = open(sys.argv[4], "w")
center_file = open(sys.argv[5], "w")
c = np.zeros((K, D))
for i in range(1, K):
c[i] = get_next(c[i-1])
point = np.zeros(D)
count = 0
for k in range(num):
i = np.random.randint(K)
count = count + 1
print >> point_file, count,
for j in range(D):
point[j] = c[i][j] * 100.0 + np.random.random() * 40 - 20
print >> point_file, point[j],
print >> point_file
for i in range(K):
print >> center_file, i + 1,
for j in range(D):
point[j] = c[i][j] * 100.0 + np.random.random() * 60 - 30
print >> center_file, point[j],
print >> center_file
|
<commit_before><commit_msg>Add Multi-Dimension KMeans data generator<commit_after>
|
import numpy as np
import sys
import random
def get_next(x):
i = 0
new_x = np.copy(x)
while new_x[i] == 1:
i = i + 1
new_x[i] = 1
for j in range(i):
new_x[j] = 0
return new_x
D = int(sys.argv[1])
K = int(sys.argv[2])
num = int(sys.argv[3])
point_file = open(sys.argv[4], "w")
center_file = open(sys.argv[5], "w")
c = np.zeros((K, D))
for i in range(1, K):
c[i] = get_next(c[i-1])
point = np.zeros(D)
count = 0
for k in range(num):
i = np.random.randint(K)
count = count + 1
print >> point_file, count,
for j in range(D):
point[j] = c[i][j] * 100.0 + np.random.random() * 40 - 20
print >> point_file, point[j],
print >> point_file
for i in range(K):
print >> center_file, i + 1,
for j in range(D):
point[j] = c[i][j] * 100.0 + np.random.random() * 60 - 30
print >> center_file, point[j],
print >> center_file
|
Add Multi-Dimension KMeans data generatorimport numpy as np
import sys
import random
def get_next(x):
i = 0
new_x = np.copy(x)
while new_x[i] == 1:
i = i + 1
new_x[i] = 1
for j in range(i):
new_x[j] = 0
return new_x
D = int(sys.argv[1])
K = int(sys.argv[2])
num = int(sys.argv[3])
point_file = open(sys.argv[4], "w")
center_file = open(sys.argv[5], "w")
c = np.zeros((K, D))
for i in range(1, K):
c[i] = get_next(c[i-1])
point = np.zeros(D)
count = 0
for k in range(num):
i = np.random.randint(K)
count = count + 1
print >> point_file, count,
for j in range(D):
point[j] = c[i][j] * 100.0 + np.random.random() * 40 - 20
print >> point_file, point[j],
print >> point_file
for i in range(K):
print >> center_file, i + 1,
for j in range(D):
point[j] = c[i][j] * 100.0 + np.random.random() * 60 - 30
print >> center_file, point[j],
print >> center_file
|
<commit_before><commit_msg>Add Multi-Dimension KMeans data generator<commit_after>import numpy as np
import sys
import random
def get_next(x):
i = 0
new_x = np.copy(x)
while new_x[i] == 1:
i = i + 1
new_x[i] = 1
for j in range(i):
new_x[j] = 0
return new_x
D = int(sys.argv[1])
K = int(sys.argv[2])
num = int(sys.argv[3])
point_file = open(sys.argv[4], "w")
center_file = open(sys.argv[5], "w")
c = np.zeros((K, D))
for i in range(1, K):
c[i] = get_next(c[i-1])
point = np.zeros(D)
count = 0
for k in range(num):
i = np.random.randint(K)
count = count + 1
print >> point_file, count,
for j in range(D):
point[j] = c[i][j] * 100.0 + np.random.random() * 40 - 20
print >> point_file, point[j],
print >> point_file
for i in range(K):
print >> center_file, i + 1,
for j in range(D):
point[j] = c[i][j] * 100.0 + np.random.random() * 60 - 30
print >> center_file, point[j],
print >> center_file
|
|
85eaf8ead07e91187b1f52c86dec14395e6cd974
|
tdclient/test/server_status_api_test.py
|
tdclient/test/server_status_api_test.py
|
#!/usr/bin/env python
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import with_statement
import functools
import os
from tdclient import api
from tdclient import version
def setup_function(function):
try:
del os.environ["TD_API_SERVER"]
except KeyError:
pass
try:
del os.environ["HTTP_PROXY"]
except KeyError:
pass
class Response(object):
def __init__(self, status, body, headers):
self.status = status
self.body = body.encode("utf-8")
self.headers = headers
self.request_method = None
self.request_path = None
self.request_headers = None
def get(response, url, params={}):
response.request_method = "GET"
response.request_path = url
response.request_headers = params
return (response.status, response.body, response)
def test_server_status_success():
client = api.API("apikey")
body = """
{
"status": "ok"
}
"""
response = Response(200, body, {})
client.get = functools.partial(get, response)
assert client.server_status() == "ok"
assert response.request_method == "GET"
assert response.request_path == "/v3/system/server_status"
def test_server_status_failure():
client = api.API("apikey")
response = Response(500, "", {})
client.get = functools.partial(get, response)
assert client.server_status() == "Server is down (500)"
assert response.request_method == "GET"
assert response.request_path == "/v3/system/server_status"
|
Add test for `GET /v3/system/server_status`
|
Add test for `GET /v3/system/server_status`
|
Python
|
apache-2.0
|
treasure-data/td-client-python
|
Add test for `GET /v3/system/server_status`
|
#!/usr/bin/env python
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import with_statement
import functools
import os
from tdclient import api
from tdclient import version
def setup_function(function):
try:
del os.environ["TD_API_SERVER"]
except KeyError:
pass
try:
del os.environ["HTTP_PROXY"]
except KeyError:
pass
class Response(object):
def __init__(self, status, body, headers):
self.status = status
self.body = body.encode("utf-8")
self.headers = headers
self.request_method = None
self.request_path = None
self.request_headers = None
def get(response, url, params={}):
response.request_method = "GET"
response.request_path = url
response.request_headers = params
return (response.status, response.body, response)
def test_server_status_success():
client = api.API("apikey")
body = """
{
"status": "ok"
}
"""
response = Response(200, body, {})
client.get = functools.partial(get, response)
assert client.server_status() == "ok"
assert response.request_method == "GET"
assert response.request_path == "/v3/system/server_status"
def test_server_status_failure():
client = api.API("apikey")
response = Response(500, "", {})
client.get = functools.partial(get, response)
assert client.server_status() == "Server is down (500)"
assert response.request_method == "GET"
assert response.request_path == "/v3/system/server_status"
|
<commit_before><commit_msg>Add test for `GET /v3/system/server_status`<commit_after>
|
#!/usr/bin/env python
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import with_statement
import functools
import os
from tdclient import api
from tdclient import version
def setup_function(function):
try:
del os.environ["TD_API_SERVER"]
except KeyError:
pass
try:
del os.environ["HTTP_PROXY"]
except KeyError:
pass
class Response(object):
def __init__(self, status, body, headers):
self.status = status
self.body = body.encode("utf-8")
self.headers = headers
self.request_method = None
self.request_path = None
self.request_headers = None
def get(response, url, params={}):
response.request_method = "GET"
response.request_path = url
response.request_headers = params
return (response.status, response.body, response)
def test_server_status_success():
client = api.API("apikey")
body = """
{
"status": "ok"
}
"""
response = Response(200, body, {})
client.get = functools.partial(get, response)
assert client.server_status() == "ok"
assert response.request_method == "GET"
assert response.request_path == "/v3/system/server_status"
def test_server_status_failure():
client = api.API("apikey")
response = Response(500, "", {})
client.get = functools.partial(get, response)
assert client.server_status() == "Server is down (500)"
assert response.request_method == "GET"
assert response.request_path == "/v3/system/server_status"
|
Add test for `GET /v3/system/server_status`#!/usr/bin/env python
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import with_statement
import functools
import os
from tdclient import api
from tdclient import version
def setup_function(function):
try:
del os.environ["TD_API_SERVER"]
except KeyError:
pass
try:
del os.environ["HTTP_PROXY"]
except KeyError:
pass
class Response(object):
def __init__(self, status, body, headers):
self.status = status
self.body = body.encode("utf-8")
self.headers = headers
self.request_method = None
self.request_path = None
self.request_headers = None
def get(response, url, params={}):
response.request_method = "GET"
response.request_path = url
response.request_headers = params
return (response.status, response.body, response)
def test_server_status_success():
client = api.API("apikey")
body = """
{
"status": "ok"
}
"""
response = Response(200, body, {})
client.get = functools.partial(get, response)
assert client.server_status() == "ok"
assert response.request_method == "GET"
assert response.request_path == "/v3/system/server_status"
def test_server_status_failure():
client = api.API("apikey")
response = Response(500, "", {})
client.get = functools.partial(get, response)
assert client.server_status() == "Server is down (500)"
assert response.request_method == "GET"
assert response.request_path == "/v3/system/server_status"
|
<commit_before><commit_msg>Add test for `GET /v3/system/server_status`<commit_after>#!/usr/bin/env python
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import with_statement
import functools
import os
from tdclient import api
from tdclient import version
def setup_function(function):
try:
del os.environ["TD_API_SERVER"]
except KeyError:
pass
try:
del os.environ["HTTP_PROXY"]
except KeyError:
pass
class Response(object):
def __init__(self, status, body, headers):
self.status = status
self.body = body.encode("utf-8")
self.headers = headers
self.request_method = None
self.request_path = None
self.request_headers = None
def get(response, url, params={}):
response.request_method = "GET"
response.request_path = url
response.request_headers = params
return (response.status, response.body, response)
def test_server_status_success():
client = api.API("apikey")
body = """
{
"status": "ok"
}
"""
response = Response(200, body, {})
client.get = functools.partial(get, response)
assert client.server_status() == "ok"
assert response.request_method == "GET"
assert response.request_path == "/v3/system/server_status"
def test_server_status_failure():
client = api.API("apikey")
response = Response(500, "", {})
client.get = functools.partial(get, response)
assert client.server_status() == "Server is down (500)"
assert response.request_method == "GET"
assert response.request_path == "/v3/system/server_status"
|
|
8fce09979271c721586ecd4de94fed3bad712ce8
|
girder/utility/resource.py
|
girder/utility/resource.py
|
import cherrypy
import six
from girder.api.rest import Resource
def _walk_tree(node, path=[]):
route_map = {}
for k, v in six.iteritems(vars(node)):
if isinstance(v, Resource):
full_path = list(path)
full_path.append(k)
route_map[v] = full_path
path = []
if hasattr(v, 'exposed'):
new_path = list(path)
new_path.append(k)
route_map.update(_walk_tree(v, new_path))
return route_map
def _api_route_map():
'''
Returns a map of girder.api.rest.Resource to paths.
The function walks the tree starting at /api and follows any branch attribute
    that has an 'exposed' attribute. When a Resource is found, the path to the
    resource is added to the map.
    This map can be used to look up where a resource has been mounted.
'''
api = cherrypy.tree.apps['/api']
return _walk_tree(api.root.v1)
|
Add utility function to walk cherrypy tree
|
Add utility function to walk cherrypy tree
This function generates a map of Resource to mounted path.
|
Python
|
apache-2.0
|
Kitware/girder,jbeezley/girder,manthey/girder,data-exp-lab/girder,girder/girder,manthey/girder,Kitware/girder,Xarthisius/girder,data-exp-lab/girder,kotfic/girder,Kitware/girder,data-exp-lab/girder,kotfic/girder,data-exp-lab/girder,Xarthisius/girder,kotfic/girder,RafaelPalomar/girder,girder/girder,kotfic/girder,RafaelPalomar/girder,RafaelPalomar/girder,Xarthisius/girder,manthey/girder,Xarthisius/girder,Xarthisius/girder,RafaelPalomar/girder,girder/girder,data-exp-lab/girder,jbeezley/girder,Kitware/girder,kotfic/girder,jbeezley/girder,jbeezley/girder,manthey/girder,girder/girder,RafaelPalomar/girder
|
Add utility function to walk cherrypy tree
This function generates a map of Resource to mounted path.
|
import cherrypy
import six
from girder.api.rest import Resource
def _walk_tree(node, path=[]):
route_map = {}
for k, v in six.iteritems(vars(node)):
if isinstance(v, Resource):
full_path = list(path)
full_path.append(k)
route_map[v] = full_path
path = []
if hasattr(v, 'exposed'):
new_path = list(path)
new_path.append(k)
route_map.update(_walk_tree(v, new_path))
return route_map
def _api_route_map():
'''
Returns a map of girder.api.rest.Resource to paths.
The function walks the tree starting at /api and follows any branch attribute
that has an 'exposed' attribute. When a Resource is found, the path to the
resource is added to the map.
This map can be used to look up where a resource has been mounted.
'''
api = cherrypy.tree.apps['/api']
return _walk_tree(api.root.v1)
|
<commit_before><commit_msg>Add utility function to walk cherrypy tree
This function generates a map of Resource to mounted path.<commit_after>
|
import cherrypy
import six
from girder.api.rest import Resource
def _walk_tree(node, path=[]):
route_map = {}
for k, v in six.iteritems(vars(node)):
if isinstance(v, Resource):
full_path = list(path)
full_path.append(k)
route_map[v] = full_path
path = []
if hasattr(v, 'exposed'):
new_path = list(path)
new_path.append(k)
route_map.update(_walk_tree(v, new_path))
return route_map
def _api_route_map():
'''
Returns a map of girder.api.rest.Resource to paths.
The function walks the tree starting at /api and follows any branch attribute
that has an 'exposed' attribute. When a Resource is found, the path to the
resource is added to the map.
This map can be used to look up where a resource has been mounted.
'''
api = cherrypy.tree.apps['/api']
return _walk_tree(api.root.v1)
|
Add utility function to walk cherrypy tree
This function generates a map of Resource to mounted path.import cherrypy
import six
from girder.api.rest import Resource
def _walk_tree(node, path=[]):
route_map = {}
for k, v in six.iteritems(vars(node)):
if isinstance(v, Resource):
full_path = list(path)
full_path.append(k)
route_map[v] = full_path
path = []
if hasattr(v, 'exposed'):
new_path = list(path)
new_path.append(k)
route_map.update(_walk_tree(v, new_path))
return route_map
def _api_route_map():
'''
Returns a map of girder.api.rest.Resource to paths.
The function walks the tree starting at /api and follows any branch attribute
that has an 'exposed' attribute. When a Resource is found, the path to the
resource is added to the map.
This map can be used to look up where a resource has been mounted.
'''
api = cherrypy.tree.apps['/api']
return _walk_tree(api.root.v1)
|
<commit_before><commit_msg>Add utility function to walk cherrypy tree
This function generates a map of Resource to mounted path.<commit_after>import cherrypy
import six
from girder.api.rest import Resource
def _walk_tree(node, path=[]):
route_map = {}
for k, v in six.iteritems(vars(node)):
if isinstance(v, Resource):
full_path = list(path)
full_path.append(k)
route_map[v] = full_path
path = []
if hasattr(v, 'exposed'):
new_path = list(path)
new_path.append(k)
route_map.update(_walk_tree(v, new_path))
return route_map
def _api_route_map():
'''
Returns a map of girder.api.rest.Resource to paths.
The function walks the tree starting at /api and follows any branch attribute
that has an 'exposed' attribute. When a Resource is found, the path to the
resource is added to the map.
This map can be used to look up where a resource has been mounted.
'''
api = cherrypy.tree.apps['/api']
return _walk_tree(api.root.v1)
|
|
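A minimal sketch of how the route map produced by a walker like the one above could be consumed; the stand-in classes and the path formatting are assumptions for illustration, not girder APIs.

# Illustrative sketch: turn a {Resource instance: [path, segments]} map into readable lines.
def describe_routes(route_map):
    """Return human-readable 'ClassName -> mounted/path' lines."""
    lines = []
    for resource, segments in route_map.items():
        lines.append('%s -> %s' % (type(resource).__name__, '/'.join(segments)))
    return sorted(lines)

# Plain stand-in objects take the place of real girder Resources here.
class FakeResource(object):
    pass

fake_map = {FakeResource(): ['item'], FakeResource(): ['user', 'authentication']}
for line in describe_routes(fake_map):
    print(line)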
09d559f8eaa4b65c480d48a4459c5a38c3dc7fd4
|
katalogss/utils.py
|
katalogss/utils.py
|
import numpy as np
def centroid(x, flux):
mu = np.sum(x*flux)/np.sum(flux)
sd = np.sqrt(np.sum(flux * (x-mu)**2)/np.sum(flux))
return mu,sd
def approx_stokes_i(Axx,Ayy):
try: a = np.sqrt((Axx**2 + Ayy**2)/2.)
except TypeError:
a = type(Axx)()
a.header = Axx.header
a.data = np.sqrt((Axx.data**2 + Ayy.data**2)/2.)
return a
def sigma_clip(A,n_sigma,err=None, return_inds=False):
A=np.array(A)
if err is not None:
w=1/err**2
V1= np.sum(w)
V2 = np.sum(w**2.)
mu = np.sum(A*w)/np.sum(w)
var = np.sum(w*(A-mu)**2)/V1
s2 = var / (1-V2/V1**2.)
sig = np.sqrt(s2)
else: mu,sig = np.mean(A),np.std(A)
wa=np.where(abs(A-mu)<n_sigma*sig)[0]
if return_inds: return [A,wa]
else: return A[wa]
def weighted_mean(A,sig):
w=1./sig**2
V1= np.sum(w)
V2 = np.sum(w**2.)
mu = np.sum(A*w)/np.sum(w)
sig_mu = np.sqrt(1./np.sum(w))
var = np.sum(w*(A-mu)**2)/V1
s2 = var / (1-V2/V1**2.)
sig = np.sqrt(s2)
return [mu,sig_mu,sig]
def header_keys(file,case='None'):
h = open(file).readline().strip('#').strip('\n').split()
if case=='lower': h=[hi.lower() for hi in h]
if case=='upper': h=[hi.upper() for hi in h]
return h
def getbinsize(A):
return 3.5*np.std(A)/len(A)**(1/3.)
def getbins(A,binsize=None):
if binsize is None: binsize=getbinsize(A)  # fall back to the default bin width when none is given
nbins=np.ceil((max(A)-min(A))/binsize)+1
diff=nbins*binsize - (max(A)-min(A))
bins=np.arange(min(A)-diff/2,max(A)+diff/2+binsize,binsize)
return bins
def minmax(a):
return min(a),max(a)
def span(a):
return max(a)-min(a)
|
Add module with utility functions.
|
Add module with utility functions.
|
Python
|
bsd-2-clause
|
EoRImaging/katalogss
|
Add module with utility functions.
|
import numpy as np
def centroid(x, flux):
mu = np.sum(x*flux)/np.sum(flux)
sd = np.sqrt(np.sum(flux * (x-mu)**2)/np.sum(flux))
return mu,sd
def approx_stokes_i(Axx,Ayy):
try: a = np.sqrt((Axx**2 + Ayy**2)/2.)
except TypeError:
a = type(Axx)()
a.header = Axx.header
a.data = np.sqrt((Axx.data**2 + Ayy.data**2)/2.)
return a
def sigma_clip(A,n_sigma,err=None, return_inds=False):
A=np.array(A)
if err is not None:
w=1/err**2
V1= np.sum(w)
V2 = np.sum(w**2.)
mu = np.sum(A*w)/np.sum(w)
var = np.sum(w*(A-mu)**2)/V1
s2 = var / (1-V2/V1**2.)
sig = np.sqrt(s2)
else: mu,sig = np.mean(A),np.std(A)
wa=np.where(abs(A-mu)<n_sigma*sig)[0]
if return_inds: return [A,wa]
else: return A[wa]
def weighted_mean(A,sig):
w=1./sig**2
V1= np.sum(w)
V2 = np.sum(w**2.)
mu = np.sum(A*w)/np.sum(w)
sig_mu = np.sqrt(1./np.sum(w))
var = np.sum(w*(A-mu)**2)/V1
s2 = var / (1-V2/V1**2.)
sig = np.sqrt(s2)
return [mu,sig_mu,sig]
def header_keys(file,case='None'):
h = open(file).readline().strip('#').strip('\n').split()
if case=='lower': h=[hi.lower() for hi in h]
if case=='upper': h=[hi.upper() for hi in h]
return h
def getbinsize(A):
return 3.5*np.std(A)/len(A)**(1/3.)
def getbins(A,binsize=None):
if binsize is None: binsize=getbinsize(A)  # fall back to the default bin width when none is given
nbins=np.ceil((max(A)-min(A))/binsize)+1
diff=nbins*binsize - (max(A)-min(A))
bins=np.arange(min(A)-diff/2,max(A)+diff/2+binsize,binsize)
return bins
def minmax(a):
return min(a),max(a)
def span(a):
return max(a)-min(a)
|
<commit_before><commit_msg>Add module with utility functions.<commit_after>
|
import numpy as np
def centroid(x, flux):
mu = np.sum(x*flux)/np.sum(flux)
sd = np.sqrt(np.sum(flux * (x-mu)**2)/np.sum(flux))
return mu,sd
def approx_stokes_i(Axx,Ayy):
try: a = np.sqrt((Axx**2 + Ayy**2)/2.)
except TypeError:
a = type(Axx)()
a.header = Axx.header
a.data = np.sqrt((Axx.data**2 + Ayy.data**2)/2.)
return a
def sigma_clip(A,n_sigma,err=None, return_inds=False):
A=np.array(A)
if err is not None:
w=1/err**2
V1= np.sum(w)
V2 = np.sum(w**2.)
mu = np.sum(A*w)/np.sum(w)
var = np.sum(w*(A-mu)**2)/V1
s2 = var / (1-V2/V1**2.)
sig = np.sqrt(s2)
else: mu,sig = np.mean(A),np.std(A)
wa=np.where(abs(A-mu)<n_sigma*sig)[0]
if return_inds: return [A,wa]
else: return A[wa]
def weighted_mean(A,sig):
w=1./sig**2
V1= np.sum(w)
V2 = np.sum(w**2.)
mu = np.sum(A*w)/np.sum(w)
sig_mu = np.sqrt(1./np.sum(w))
var = np.sum(w*(A-mu)**2)/V1
s2 = var / (1-V2/V1**2.)
sig = np.sqrt(s2)
return [mu,sig_mu,sig]
def header_keys(file,case='None'):
h = open(file).readline().strip('#').strip('\n').split()
if case=='lower': h=[hi.lower() for hi in h]
if case=='upper': h=[hi.upper() for hi in h]
return h
def getbinsize(A):
return 3.5*np.std(A)/len(A)**(1/3.)
def getbins(A,binsize=None):
if binsize is None: binsize=getbinsize(A)  # fall back to the default bin width when none is given
nbins=np.ceil((max(A)-min(A))/binsize)+1
diff=nbins*binsize - (max(A)-min(A))
bins=np.arange(min(A)-diff/2,max(A)+diff/2+binsize,binsize)
return bins
def minmax(a):
return min(a),max(a)
def span(a):
return max(a)-min(a)
|
Add module with utility functions.import numpy as np
def centroid(x, flux):
mu = np.sum(x*flux)/np.sum(flux)
sd = np.sqrt(np.sum(flux * (x-mu)**2)/np.sum(flux))
return mu,sd
def approx_stokes_i(Axx,Ayy):
try: a = np.sqrt((Axx**2 + Ayy**2)/2.)
except TypeError:
a = type(Axx)()
a.header = Axx.header
a.data = np.sqrt((Axx.data**2 + Ayy.data**2)/2.)
return a
def sigma_clip(A,n_sigma,err=None, return_inds=False):
A=np.array(A)
if err is not None:
w=1/err**2
V1= np.sum(w)
V2 = np.sum(w**2.)
mu = np.sum(A*w)/np.sum(w)
var = np.sum(w*(A-mu)**2)/V1
s2 = var / (1-V2/V1**2.)
sig = np.sqrt(s2)
else: mu,sig = np.mean(A),np.std(A)
wa=np.where(abs(A-mu)<n_sigma*sig)[0]
if return_inds: return [A,wa]
else: return A[wa]
def weighted_mean(A,sig):
w=1./sig**2
V1= np.sum(w)
V2 = np.sum(w**2.)
mu = np.sum(A*w)/np.sum(w)
sig_mu = np.sqrt(1./np.sum(w))
var = np.sum(w*(A-mu)**2)/V1
s2 = var / (1-V2/V1**2.)
sig = np.sqrt(s2)
return [mu,sig_mu,sig]
def header_keys(file,case='None'):
h = open(file).readline().strip('#').strip('\n').split()
if case=='lower': h=[hi.lower() for hi in h]
if case=='upper': h=[hi.upper() for hi in h]
return h
def getbinsize(A):
return 3.5*np.std(A)/len(A)**(1/3.)
def getbins(A,binsize=None):
if binsize is None: binsize=getbinsize(A)  # fall back to the default bin width when none is given
nbins=np.ceil((max(A)-min(A))/binsize)+1
diff=nbins*binsize - (max(A)-min(A))
bins=np.arange(min(A)-diff/2,max(A)+diff/2+binsize,binsize)
return bins
def minmax(a):
return min(a),max(a)
def span(a):
return max(a)-min(a)
|
<commit_before><commit_msg>Add module with utility functions.<commit_after>import numpy as np
def centroid(x, flux):
mu = np.sum(x*flux)/np.sum(flux)
sd = np.sqrt(np.sum(flux * (x-mu)**2)/np.sum(flux))
return mu,sd
def approx_stokes_i(Axx,Ayy):
try: a = np.sqrt((Axx**2 + Ayy**2)/2.)
except TypeError:
a = type(Axx)()
a.header = Axx.header
a.data = np.sqrt((Axx.data**2 + Ayy.data**2)/2.)
return a
def sigma_clip(A,n_sigma,err=None, return_inds=False):
A=np.array(A)
if err is not None:
w=1/err**2
V1= np.sum(w)
V2 = np.sum(w**2.)
mu = np.sum(A*w)/np.sum(w)
var = np.sum(w*(A-mu)**2)/V1
s2 = var / (1-V2/V1**2.)
sig = np.sqrt(s2)
else: mu,sig = np.mean(A),np.std(A)
wa=np.where(abs(A-mu)<n_sigma*sig)[0]
if return_inds: return [A,wa]
else: return A[wa]
def weighted_mean(A,sig):
w=1./sig**2
V1= np.sum(w)
V2 = np.sum(w**2.)
mu = np.sum(A*w)/np.sum(w)
sig_mu = np.sqrt(1./np.sum(w))
var = np.sum(w*(A-mu)**2)/V1
s2 = var / (1-V2/V1**2.)
sig = np.sqrt(s2)
return [mu,sig_mu,sig]
def header_keys(file,case='None'):
h = open(file).readline().strip('#').strip('\n').split()
if case=='lower': h=[hi.lower() for hi in h]
if case=='upper': h=[hi.upper() for hi in h]
return h
def getbinsize(A):
return 3.5*np.std(A)/len(A)**(1/3.)
def getbins(A,binsize=None):
if binsize is None: binsize=getbinsize(A)  # fall back to the default bin width when none is given
nbins=np.ceil((max(A)-min(A))/binsize)+1
diff=nbins*binsize - (max(A)-min(A))
bins=np.arange(min(A)-diff/2,max(A)+diff/2+binsize,binsize)
return bins
def minmax(a):
return min(a),max(a)
def span(a):
return max(a)-min(a)
|
|
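A self-contained sketch of the inverse-variance weighting that weighted_mean above implements; the sample values are invented and the katalogss module itself is not imported.

import numpy as np

# Hypothetical measurements with per-point uncertainties.
values = np.array([10.2, 9.8, 10.5, 30.0])   # last point is an outlier
errors = np.array([0.1, 0.1, 0.2, 5.0])      # large error -> small weight

weights = 1.0 / errors**2
mu = np.sum(values * weights) / np.sum(weights)   # inverse-variance weighted mean
sigma_mu = np.sqrt(1.0 / np.sum(weights))         # uncertainty of the weighted mean
print(mu, sigma_mu)  # the noisy outlier barely moves the result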
7c60724a93aa44e7afac3a59848f1abfa3598623
|
updater.py
|
updater.py
|
import sys
import argparse
from ceterach.api import MediaWiki
from difflib import Differ
from parse_equipment import AUTOGEN_HEADER, AUTOGEN_FOOTER
parser = argparse.ArgumentParser()
parser.add_argument('infile', nargs='?', type=argparse.FileType('r'), default=sys.stdin,
help='The file to read the new autogenerated contents from. Defaults to stdin.')
parser.add_argument('page', help='The page to update.')
parser.add_argument('-u', '--user', help='Your username on the wiki.')
parser.add_argument('-p', '--password', help='Your password on the wiki.')
parser.add_argument('-s', '--summary', default='', help='A summary of the edit.')
parser.add_argument('-m', '--minor', help='Mark this as a minor edit.', action='store_true')
parser.add_argument('--confirm', help='Confirm the page edit.', action='store_true')
args = parser.parse_args()
API_URL = 'http://dragonsdogma.wikia.com/api.php'
if args.confirm and not (args.user and args.password):
print('\nError: Must provide username and password in order to confirm an edit to a page.\n')
parser.print_help()
exit(1)
api = MediaWiki(API_URL)
page = api.page(args.page)
current_revision = page.content
if not (AUTOGEN_HEADER in current_revision and AUTOGEN_FOOTER in current_revision):
# It's safest to only update sections that have been previously autogenerated.
print('Error: This page does not have the autogen header or footer.')
exit(1)
header_index = current_revision.index(AUTOGEN_HEADER)
footer_index = current_revision.index(AUTOGEN_FOOTER) + len(AUTOGEN_FOOTER)
before = current_revision[:header_index]
after = current_revision[footer_index+1:]
updated = args.infile.read()
new_revision = before + updated + after
if current_revision == new_revision:
print('Error: No edits have been made.')
exit(1)
if not args.confirm:
diff = Differ()
result = diff.compare(current_revision.splitlines(keepends=True), new_revision.splitlines(keepends=True))
sys.stdout.writelines(result)
print('\n===========\nSummary:', args.summary)
print('Run this command with --confirm to confirm this page edit.')
exit(1)
else:
api.login(args.user, args.password)
page.edit(new_revision, args.summary or '', minor=args.minor)
print('%s successfully updated.' % args.page)
|
Add a script to automatically update an autogenerated table.
|
Add a script to automatically update an autogenerated table.
|
Python
|
mit
|
rcfox/DragonsDogmaWikiParser
|
Add a script to automatically update an autogenerated table.
|
import sys
import argparse
from ceterach.api import MediaWiki
from difflib import Differ
from parse_equipment import AUTOGEN_HEADER, AUTOGEN_FOOTER
parser = argparse.ArgumentParser()
parser.add_argument('infile', nargs='?', type=argparse.FileType('r'), default=sys.stdin,
help='The file to read the new autogenerated contents from. Defaults to stdin.')
parser.add_argument('page', help='The page to update.')
parser.add_argument('-u', '--user', help='Your username on the wiki.')
parser.add_argument('-p', '--password', help='Your password on the wiki.')
parser.add_argument('-s', '--summary', default='', help='A summary of the edit.')
parser.add_argument('-m', '--minor', help='Mark this as a minor edit.', action='store_true')
parser.add_argument('--confirm', help='Confirm the page edit.', action='store_true')
args = parser.parse_args()
API_URL = 'http://dragonsdogma.wikia.com/api.php'
if args.confirm and not (args.user and args.password):
print('\nError: Must provide username and password in order to confirm an edit to a page.\n')
parser.print_help()
exit(1)
api = MediaWiki(API_URL)
page = api.page(args.page)
current_revision = page.content
if not (AUTOGEN_HEADER in current_revision and AUTOGEN_FOOTER in current_revision):
# It's safest to only update sections that have been previously autogenerated.
print('Error: This page does not have the autogen header or footer.')
exit(1)
header_index = current_revision.index(AUTOGEN_HEADER)
footer_index = current_revision.index(AUTOGEN_FOOTER) + len(AUTOGEN_FOOTER)
before = current_revision[:header_index]
after = current_revision[footer_index+1:]
updated = args.infile.read()
new_revision = before + updated + after
if current_revision == new_revision:
print('Error: No edits have been made.')
exit(1)
if not args.confirm:
diff = Differ()
result = diff.compare(current_revision.splitlines(keepends=True), new_revision.splitlines(keepends=True))
sys.stdout.writelines(result)
print('\n===========\nSummary:', args.summary)
print('Run this command with --confirm to confirm this page edit.')
exit(1)
else:
api.login(args.user, args.password)
page.edit(new_revision, args.summary or '', minor=args.minor)
print('%s successfully updated.' % args.page)
|
<commit_before><commit_msg>Add a script to automatically update an autogenerated table.<commit_after>
|
import sys
import argparse
from ceterach.api import MediaWiki
from difflib import Differ
from parse_equipment import AUTOGEN_HEADER, AUTOGEN_FOOTER
parser = argparse.ArgumentParser()
parser.add_argument('infile', nargs='?', type=argparse.FileType('r'), default=sys.stdin,
help='The file to read the new autogenerated contents from. Defaults to stdin.')
parser.add_argument('page', help='The page to update.')
parser.add_argument('-u', '--user', help='Your username on the wiki.')
parser.add_argument('-p', '--password', help='Your password on the wiki.')
parser.add_argument('-s', '--summary', default='', help='A summary of the edit.')
parser.add_argument('-m', '--minor', help='Mark this as a minor edit.', action='store_true')
parser.add_argument('--confirm', help='Confirm the page edit.', action='store_true')
args = parser.parse_args()
API_URL = 'http://dragonsdogma.wikia.com/api.php'
if args.confirm and not (args.user and args.password):
print('\nError: Must provide username and password in order to confirm an edit to a page.\n')
parser.print_help()
exit(1)
api = MediaWiki(API_URL)
page = api.page(args.page)
current_revision = page.content
if not (AUTOGEN_HEADER in current_revision and AUTOGEN_FOOTER in current_revision):
# It's safest to only update sections that have been previously autogenerated.
print('Error: This page does not have the autogen header or footer.')
exit(1)
header_index = current_revision.index(AUTOGEN_HEADER)
footer_index = current_revision.index(AUTOGEN_FOOTER) + len(AUTOGEN_FOOTER)
before = current_revision[:header_index]
after = current_revision[footer_index+1:]
updated = args.infile.read()
new_revision = before + updated + after
if current_revision == new_revision:
print('Error: No edits have been made.')
exit(1)
if not args.confirm:
diff = Differ()
result = diff.compare(current_revision.splitlines(keepends=True), new_revision.splitlines(keepends=True))
sys.stdout.writelines(result)
print('\n===========\nSummary:', args.summary)
print('Run this command with --confirm to confirm this page edit.')
exit(1)
else:
api.login(args.user, args.password)
page.edit(new_revision, args.summary or '', minor=args.minor)
print('%s successfully updated.' % args.page)
|
Add a script to automatically update an autogenerated table.import sys
import argparse
from ceterach.api import MediaWiki
from difflib import Differ
from parse_equipment import AUTOGEN_HEADER, AUTOGEN_FOOTER
parser = argparse.ArgumentParser()
parser.add_argument('infile', nargs='?', type=argparse.FileType('r'), default=sys.stdin,
help='The file to read the new autogenerated contents from. Defaults to stdin.')
parser.add_argument('page', help='The page to update.')
parser.add_argument('-u', '--user', help='Your username on the wiki.')
parser.add_argument('-p', '--password', help='Your password on the wiki.')
parser.add_argument('-s', '--summary', default='', help='A summary of the edit.')
parser.add_argument('-m', '--minor', help='Mark this as a minor edit.', action='store_true')
parser.add_argument('--confirm', help='Confirm the page edit.', action='store_true')
args = parser.parse_args()
API_URL = 'http://dragonsdogma.wikia.com/api.php'
if args.confirm and not (args.user and args.password):
print('\nError: Must provide username and password in order to confirm an edit to a page.\n')
parser.print_help()
exit(1)
api = MediaWiki(API_URL)
page = api.page(args.page)
current_revision = page.content
if not (AUTOGEN_HEADER in current_revision and AUTOGEN_FOOTER in current_revision):
# It's safest to only update sections that have been previously autogenerated.
print('Error: This page does not have the autogen header or footer.')
exit(1)
header_index = current_revision.index(AUTOGEN_HEADER)
footer_index = current_revision.index(AUTOGEN_FOOTER) + len(AUTOGEN_FOOTER)
before = current_revision[:header_index]
after = current_revision[footer_index+1:]
updated = args.infile.read()
new_revision = before + updated + after
if current_revision == new_revision:
print('Error: No edits have been made.')
exit(1)
if not args.confirm:
diff = Differ()
result = diff.compare(current_revision.splitlines(keepends=True), new_revision.splitlines(keepends=True))
sys.stdout.writelines(result)
print('\n===========\nSummary:', args.summary)
print('Run this command with --confirm to confirm this page edit.')
exit(1)
else:
api.login(args.user, args.password)
page.edit(new_revision, args.summary or '', minor=args.minor)
print('%s successfully updated.' % args.page)
|
<commit_before><commit_msg>Add a script to automatically update an autogenerated table.<commit_after>import sys
import argparse
from ceterach.api import MediaWiki
from difflib import Differ
from parse_equipment import AUTOGEN_HEADER, AUTOGEN_FOOTER
parser = argparse.ArgumentParser()
parser.add_argument('infile', nargs='?', type=argparse.FileType('r'), default=sys.stdin,
help='The file to read the new autogenerated contents from. Defaults to stdin.')
parser.add_argument('page', help='The page to update.')
parser.add_argument('-u', '--user', help='Your username on the wiki.')
parser.add_argument('-p', '--password', help='Your password on the wiki.')
parser.add_argument('-s', '--summary', default='', help='A summary of the edit.')
parser.add_argument('-m', '--minor', help='Mark this as a minor edit.', action='store_true')
parser.add_argument('--confirm', help='Confirm the page edit.', action='store_true')
args = parser.parse_args()
API_URL = 'http://dragonsdogma.wikia.com/api.php'
if args.confirm and not (args.user and args.password):
print('\nError: Must provide username and password in order to confirm an edit to a page.\n')
parser.print_help()
exit(1)
api = MediaWiki(API_URL)
page = api.page(args.page)
current_revision = page.content
if not (AUTOGEN_HEADER in current_revision and AUTOGEN_FOOTER in current_revision):
# It's safest to only update sections that have been previously autogenerated.
print('Error: This page does not have the autogen header or footer.')
exit(1)
header_index = current_revision.index(AUTOGEN_HEADER)
footer_index = current_revision.index(AUTOGEN_FOOTER) + len(AUTOGEN_FOOTER)
before = current_revision[:header_index]
after = current_revision[footer_index+1:]
updated = args.infile.read()
new_revision = before + updated + after
if current_revision == new_revision:
print('Error: No edits have been made.')
exit(1)
if not args.confirm:
diff = Differ()
result = diff.compare(current_revision.splitlines(keepends=True), new_revision.splitlines(keepends=True))
sys.stdout.writelines(result)
print('\n===========\nSummary:', args.summary)
print('Run this command with --confirm to confirm this page edit.')
exit(1)
else:
api.login(args.user, args.password)
page.edit(new_revision, args.summary or '', minor=args.minor)
print('%s successfully updated.' % args.page)
|
|
d8d5ce4d1dd2228d70cc90025995a30dca7b075d
|
s2v3.py
|
s2v3.py
|
from s2v2 import *
def calculate_sum(data_sample):
total = 0
for row in data_sample[1:]: # slice to start at row two, but I think we should only skip row 1 if we're importing the full csv (data_from_csv), but if we use the data w/ the header (my_csv) we'll be skipping a row that we're not supposed to skip (the actual first row of non-header data).
price = float(row[2])
total += price
return total
print('the sum total of prices for all ties in the dataset = ' + str(calculate_sum(data_from_csv))) # ok we're using the right import, but having two imports is confusing.
|
Define function for determining the sum of price rows
|
Define function for determining the sum of price rows
|
Python
|
mit
|
alexmilesyounger/ds_basics
|
Define function for determining the sum of price rows
|
from s2v2 import *
def calculate_sum(data_sample):
total = 0
for row in data_sample[1:]: # slice to start at row two, but I think we should only skip row 1 if we're importing the full csv (data_from_csv), but if we use the data w/ the header (my_csv) we'll be skipping a row that we're not supposed to skip (the actual first row of non-header data).
price = float(row[2])
total += price
return total
print('the sum total of prices for all ties in the dataset = ' + str(calculate_sum(data_from_csv))) # ok we're using the right import, but having two imports is confusing.
|
<commit_before><commit_msg>Define function for determining the sum of price rows<commit_after>
|
from s2v2 import *
def calculate_sum(data_sample):
total = 0
for row in data_sample[1:]: # slice to start at row two, but I think we should only skip row 1 if we're importing the full csv (data_from_csv), but if we use the data w/ the header (my_csv) we'll be skipping a row that we're not supposed to skip (the actual first row of non-header data).
price = float(row[2])
total += price
return total
print('the sum total of prices for all ties in the dataset = ' + str(calculate_sum(data_from_csv))) # ok we're using the right import, but having two imports is confusing.
|
Define function for determining the sum of price rowsfrom s2v2 import *
def calculate_sum(data_sample):
total = 0
for row in data_sample[1:]: # slice to start at row two, but I think we should only skip row 1 if we're importing the full csv (data_from_csv), but if we use the data w/ the header (my_csv) we'll be skipping a row that we're not supposed to skip (the actual first row of non-header data).
price = float(row[2])
total += price
return total
print('the sum total of prices for all ties in the dataset = ' + str(calculate_sum(data_from_csv))) # ok we're using the right import, but having two imports is confusing.
|
<commit_before><commit_msg>Define function for determining the sum of price rows<commit_after>from s2v2 import *
def calculate_sum(data_sample):
total = 0
for row in data_sample[1:]: # slice to start at row two, but I think we should only skip row 1 if we're importing the full csv (data_from_csv), but if we use the data w/ the header (my_csv) we'll be skipping a row that we're not supposed to skip (the actual first row of non-header data).
price = float(row[2])
total += price
return total
print('the sum total of prices for all ties in the dataset = ' + str(calculate_sum(data_from_csv))) # ok we're using the right import, but having two imports is confusing.
|
|
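The comment above hedges about which row to skip; a csv.DictReader-based sketch sidesteps the question because the header row is consumed automatically. The column names here are assumptions, not the real ties dataset.

import csv
import io

# Hypothetical data with a header row, mirroring the shape of the ties dataset.
raw = "brand,style,price\nacme,striped,12.50\nacme,dotted,9.99\n"

total = 0.0
for row in csv.DictReader(io.StringIO(raw)):
    total += float(row["price"])   # header consumed by DictReader, no manual slicing needed
print(total)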
1d76b22bc0090580e4ccbfb43e2f5d88d86f2bc7
|
tests/app/main/test_placeholder_form.py
|
tests/app/main/test_placeholder_form.py
|
from app.main.forms import get_placeholder_form_instance
from wtforms import Label
def test_form_class_not_mutated(app_):
with app_.test_request_context(
method='POST',
data={'placeholder_value': ''}
) as req:
form1 = get_placeholder_form_instance('name', {}, optional_placeholder=False)
form2 = get_placeholder_form_instance('city', {}, optional_placeholder=True)
assert not form1.validate_on_submit()
assert form2.validate_on_submit()
assert str(form1.placeholder_value.label) == '<label for="placeholder_value">name</label>'
assert str(form2.placeholder_value.label) == '<label for="placeholder_value">city</label>'
|
Add extra tests to make sure that the form is safe
|
Add extra tests to make sure that the form is safe
Previous implementations of this functionality mutated the base form
class, which broke a bunch of stuff.
I want to make sure that getting this form for one placeholder doesn’t
change other forms that have already been instantiated for other
placeholders.
Mutation is scary.
|
Python
|
mit
|
gov-cjwaszczuk/notifications-admin,alphagov/notifications-admin,gov-cjwaszczuk/notifications-admin,alphagov/notifications-admin,alphagov/notifications-admin,gov-cjwaszczuk/notifications-admin,gov-cjwaszczuk/notifications-admin,alphagov/notifications-admin
|
Add extra tests to make sure that the form is safe
Previous implementations of this functionality mutated the base form
class, which broke a bunch of stuff.
I want to make sure that getting this form for one placeholder doesn’t
change other forms that have already been instantiated for other
placeholders.
Mutation is scary.
|
from app.main.forms import get_placeholder_form_instance
from wtforms import Label
def test_form_class_not_mutated(app_):
with app_.test_request_context(
method='POST',
data={'placeholder_value': ''}
) as req:
form1 = get_placeholder_form_instance('name', {}, optional_placeholder=False)
form2 = get_placeholder_form_instance('city', {}, optional_placeholder=True)
assert not form1.validate_on_submit()
assert form2.validate_on_submit()
assert str(form1.placeholder_value.label) == '<label for="placeholder_value">name</label>'
assert str(form2.placeholder_value.label) == '<label for="placeholder_value">city</label>'
|
<commit_before><commit_msg>Add extra tests to make sure that the form is safe
Previous implementations of this functionality mutated the base form
class, which broke a bunch of stuff.
I want to make sure that getting this form for one placeholder doesn’t
change other forms that have already been instantiated for other
placeholders.
Mutation is scary.<commit_after>
|
from app.main.forms import get_placeholder_form_instance
from wtforms import Label
def test_form_class_not_mutated(app_):
with app_.test_request_context(
method='POST',
data={'placeholder_value': ''}
) as req:
form1 = get_placeholder_form_instance('name', {}, optional_placeholder=False)
form2 = get_placeholder_form_instance('city', {}, optional_placeholder=True)
assert not form1.validate_on_submit()
assert form2.validate_on_submit()
assert str(form1.placeholder_value.label) == '<label for="placeholder_value">name</label>'
assert str(form2.placeholder_value.label) == '<label for="placeholder_value">city</label>'
|
Add extra tests to make sure that the form is safe
Previous implementations of this functionality mutated the base form
class, which broke a bunch of stuff.
I want to make sure that getting this form for one placeholder doesn’t
change other forms that have already been instantiated for other
placeholders.
Mutation is scary.from app.main.forms import get_placeholder_form_instance
from wtforms import Label
def test_form_class_not_mutated(app_):
with app_.test_request_context(
method='POST',
data={'placeholder_value': ''}
) as req:
form1 = get_placeholder_form_instance('name', {}, optional_placeholder=False)
form2 = get_placeholder_form_instance('city', {}, optional_placeholder=True)
assert not form1.validate_on_submit()
assert form2.validate_on_submit()
assert str(form1.placeholder_value.label) == '<label for="placeholder_value">name</label>'
assert str(form2.placeholder_value.label) == '<label for="placeholder_value">city</label>'
|
<commit_before><commit_msg>Add extra tests to make sure that the form is safe
Previous implementations of this functionality mutated the base form
class, which broke a bunch of stuff.
I want to make sure that getting this form for one placeholder doesn’t
change other forms that have already been instantiated for other
placeholders.
Mutation is scary.<commit_after>from app.main.forms import get_placeholder_form_instance
from wtforms import Label
def test_form_class_not_mutated(app_):
with app_.test_request_context(
method='POST',
data={'placeholder_value': ''}
) as req:
form1 = get_placeholder_form_instance('name', {}, optional_placeholder=False)
form2 = get_placeholder_form_instance('city', {}, optional_placeholder=True)
assert not form1.validate_on_submit()
assert form2.validate_on_submit()
assert str(form1.placeholder_value.label) == '<label for="placeholder_value">name</label>'
assert str(form2.placeholder_value.label) == '<label for="placeholder_value">city</label>'
|
|
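An illustrative sketch, using plain classes rather than WTForms, of the base-class mutation problem the commit message above describes, and why building a fresh form class per call avoids it.

class BaseForm(object):
    label = None  # shared class attribute

def bad_get_form(label):
    BaseForm.label = label        # mutates the shared class -- leaks between calls
    return BaseForm()

def good_get_form(label):
    # build a fresh subclass per call so instances stay independent
    return type(str('PlaceholderForm'), (BaseForm,), {'label': label})()

a = bad_get_form('name')
b = bad_get_form('city')
print(a.label, b.label)   # 'city', 'city' -- the first form was clobbered

c = good_get_form('name')
d = good_get_form('city')
print(c.label, d.label)   # 'name', 'city' -- no shared mutation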
f335f0032b9eb0847de4fd1261f063012bc4d2f5
|
functest/tests/unit/features/test_promise.py
|
functest/tests/unit/features/test_promise.py
|
#!/usr/bin/env python
# Copyright (c) 2017 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
# pylint: disable=missing-docstring
import logging
import unittest
from functest.opnfv_tests.features import promise
from functest.utils import constants
class PromiseTesting(unittest.TestCase):
logging.disable(logging.CRITICAL)
def setUp(self):
self.promise = promise.Promise()
def test_init(self):
self.assertEqual(self.promise.project_name, "promise")
self.assertEqual(self.promise.case_name, "promise")
self.assertEqual(
self.promise.repo,
constants.CONST.__getattribute__("dir_repo_promise"))
self.assertEqual(
self.promise.cmd,
'cd {}/promise/test/functest && python ./run_tests.py'.format(
self.promise.repo))
if __name__ == "__main__":
unittest.main(verbosity=2)
|
Add unit tests for promise
|
Add unit tests for promise
Change-Id: I538fcedbfbef46ae36b8eff5a20acaa28a8bfb85
Signed-off-by: Cédric Ollivier <d48310251a4a484d041bc5d09a9ac4d86d20f793@orange.com>
|
Python
|
apache-2.0
|
mywulin/functest,opnfv/functest,mywulin/functest,opnfv/functest
|
Add unit tests for promise
Change-Id: I538fcedbfbef46ae36b8eff5a20acaa28a8bfb85
Signed-off-by: Cédric Ollivier <d48310251a4a484d041bc5d09a9ac4d86d20f793@orange.com>
|
#!/usr/bin/env python
# Copyright (c) 2017 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
# pylint: disable=missing-docstring
import logging
import unittest
from functest.opnfv_tests.features import promise
from functest.utils import constants
class PromiseTesting(unittest.TestCase):
logging.disable(logging.CRITICAL)
def setUp(self):
self.promise = promise.Promise()
def test_init(self):
self.assertEqual(self.promise.project_name, "promise")
self.assertEqual(self.promise.case_name, "promise")
self.assertEqual(
self.promise.repo,
constants.CONST.__getattribute__("dir_repo_promise"))
self.assertEqual(
self.promise.cmd,
'cd {}/promise/test/functest && python ./run_tests.py'.format(
self.promise.repo))
if __name__ == "__main__":
unittest.main(verbosity=2)
|
<commit_before><commit_msg>Add unit tests for promise
Change-Id: I538fcedbfbef46ae36b8eff5a20acaa28a8bfb85
Signed-off-by: Cédric Ollivier <d48310251a4a484d041bc5d09a9ac4d86d20f793@orange.com><commit_after>
|
#!/usr/bin/env python
# Copyright (c) 2017 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
# pylint: disable=missing-docstring
import logging
import unittest
from functest.opnfv_tests.features import promise
from functest.utils import constants
class PromiseTesting(unittest.TestCase):
logging.disable(logging.CRITICAL)
def setUp(self):
self.promise = promise.Promise()
def test_init(self):
self.assertEqual(self.promise.project_name, "promise")
self.assertEqual(self.promise.case_name, "promise")
self.assertEqual(
self.promise.repo,
constants.CONST.__getattribute__("dir_repo_promise"))
self.assertEqual(
self.promise.cmd,
'cd {}/promise/test/functest && python ./run_tests.py'.format(
self.promise.repo))
if __name__ == "__main__":
unittest.main(verbosity=2)
|
Add unit tests for promise
Change-Id: I538fcedbfbef46ae36b8eff5a20acaa28a8bfb85
Signed-off-by: Cédric Ollivier <d48310251a4a484d041bc5d09a9ac4d86d20f793@orange.com>#!/usr/bin/env python
# Copyright (c) 2017 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
# pylint: disable=missing-docstring
import logging
import unittest
from functest.opnfv_tests.features import promise
from functest.utils import constants
class PromiseTesting(unittest.TestCase):
logging.disable(logging.CRITICAL)
def setUp(self):
self.promise = promise.Promise()
def test_init(self):
self.assertEqual(self.promise.project_name, "promise")
self.assertEqual(self.promise.case_name, "promise")
self.assertEqual(
self.promise.repo,
constants.CONST.__getattribute__("dir_repo_promise"))
self.assertEqual(
self.promise.cmd,
'cd {}/promise/test/functest && python ./run_tests.py'.format(
self.promise.repo))
if __name__ == "__main__":
unittest.main(verbosity=2)
|
<commit_before><commit_msg>Add unit tests for promise
Change-Id: I538fcedbfbef46ae36b8eff5a20acaa28a8bfb85
Signed-off-by: Cédric Ollivier <d48310251a4a484d041bc5d09a9ac4d86d20f793@orange.com><commit_after>#!/usr/bin/env python
# Copyright (c) 2017 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
# pylint: disable=missing-docstring
import logging
import unittest
from functest.opnfv_tests.features import promise
from functest.utils import constants
class PromiseTesting(unittest.TestCase):
logging.disable(logging.CRITICAL)
def setUp(self):
self.promise = promise.Promise()
def test_init(self):
self.assertEqual(self.promise.project_name, "promise")
self.assertEqual(self.promise.case_name, "promise")
self.assertEqual(
self.promise.repo,
constants.CONST.__getattribute__("dir_repo_promise"))
self.assertEqual(
self.promise.cmd,
'cd {}/promise/test/functest && python ./run_tests.py'.format(
self.promise.repo))
if __name__ == "__main__":
unittest.main(verbosity=2)
|
|
0c20abe4420b92b25acb28e67cd344f6b45d28ef
|
flake8diff/vcs/hg.py
|
flake8diff/vcs/hg.py
|
from __future__ import unicode_literals, print_function
import logging
import subprocess
from ..utils import _execute
from .base import VCSBase
logger = logging.getLogger(__name__)
class HgVCS(VCSBase):
"""
Mercurial support implementation
"""
name = 'hg'
def get_vcs(self):
"""
Get hg binary executable path
"""
vcs = _execute('which hg', strict=True).strip()
self._check_extdiff_extension(vcs)
return vcs
def is_used(self):
"""
Determines if this VCS should be used
TODO: implement
"""
return True
def _check_extdiff_extension(self, vcs):
try:
return _execute('{vcs} extdiff'.format(vcs=vcs), strict=True)
except subprocess.CalledProcessError:
message = (
"Mercurial 'extdiff' extension is disabled.\n"
"Please add the following lines to your ~/.hgrc\n\n"
"[extensions]\n"
"extdiff = \n")
print(message)
raise Exception("Please enable 'extdiff' extension")
|
Introduce HgVCS, check for extdiff extension enabled
|
Introduce HgVCS, check for extdiff extension enabled
|
Python
|
mit
|
dealertrack/flake8-diff,miki725/flake8-diff
|
Introduce HgVCS, check for extdiff extension enabled
|
from __future__ import unicode_literals, print_function
import logging
import subprocess
from ..utils import _execute
from .base import VCSBase
logger = logging.getLogger(__name__)
class HgVCS(VCSBase):
"""
Mercurial support implementation
"""
name = 'hg'
def get_vcs(self):
"""
Get hg binary executable path
"""
vcs = _execute('which hg', strict=True).strip()
self._check_extdiff_extension(vcs)
return vcs
def is_used(self):
"""
Determines if this VCS should be used
TODO: implement
"""
return True
def _check_extdiff_extension(self, vcs):
try:
return _execute('{vcs} extdiff'.format(vcs=vcs), strict=True)
except subprocess.CalledProcessError:
message = (
"Mercurial 'extdiff' extension is disabled.\n"
"Please add the following lines to your ~/.hgrc\n\n"
"[extensions]\n"
"extdiff = \n")
print(message)
raise Exception("Please enable 'extdiff' extension")
|
<commit_before><commit_msg>Introduce HgVCS, check for extdiff extension enabled<commit_after>
|
from __future__ import unicode_literals, print_function
import logging
import subprocess
from ..utils import _execute
from .base import VCSBase
logger = logging.getLogger(__name__)
class HgVCS(VCSBase):
"""
Mercurial support implementation
"""
name = 'hg'
def get_vcs(self):
"""
Get hg binary executable path
"""
vcs = _execute('which hg', strict=True).strip()
self._check_extdiff_extension(vcs)
return vcs
def is_used(self):
"""
Determines if this VCS should be used
TODO: implement
"""
return True
def _check_extdiff_extension(self, vcs):
try:
return _execute('{vcs} extdiff'.format(vcs=vcs), strict=True)
except subprocess.CalledProcessError:
message = (
"Mercurial 'extdiff' extension is disabled.\n"
"Please add the following lines to your ~/.hgrc\n\n"
"[extensions]\n"
"extdiff = \n")
print(message)
raise Exception("Please enable 'extdiff' extension")
|
Introduce HgVCS, check for extdiff extension enabledfrom __future__ import unicode_literals, print_function
import logging
import subprocess
from ..utils import _execute
from .base import VCSBase
logger = logging.getLogger(__name__)
class HgVCS(VCSBase):
"""
Mercurial support implementation
"""
name = 'hg'
def get_vcs(self):
"""
Get hg binary executable path
"""
vcs = _execute('which hg', strict=True).strip()
self._check_extdiff_extension(vcs)
return vcs
def is_used(self):
"""
Determines if this VCS should be used
TODO: implement
"""
return True
def _check_extdiff_extension(self, vcs):
try:
return _execute('{vcs} extdiff'.format(vcs=vcs), strict=True)
except subprocess.CalledProcessError:
message = (
"Mercurial 'extdiff' extension is disabled.\n"
"Please add the following lines to your ~/.hgrc\n\n"
"[extensions]\n"
"extdiff = \n")
print(message)
raise Exception("Please enable 'extdiff' extension")
|
<commit_before><commit_msg>Introduce HgVCS, check for extdiff extension enabled<commit_after>from __future__ import unicode_literals, print_function
import logging
import subprocess
from ..utils import _execute
from .base import VCSBase
logger = logging.getLogger(__name__)
class HgVCS(VCSBase):
"""
Mercurial support implementation
"""
name = 'hg'
def get_vcs(self):
"""
Get hg binary executable path
"""
vcs = _execute('which hg', strict=True).strip()
self._check_extdiff_extension(vcs)
return vcs
def is_used(self):
"""
Determines if this VCS should be used
TODO: implement
"""
return True
def _check_extdiff_extension(self, vcs):
try:
return _execute('{vcs} extdiff'.format(vcs=vcs), strict=True)
except subprocess.CalledProcessError:
message = (
"Mercurial 'extdiff' extension is disabled.\n"
"Please add the following lines to your ~/.hgrc\n\n"
"[extensions]\n"
"extdiff = \n")
print(message)
raise Exception("Please enable 'extdiff' extension")
|
|
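A generic sketch of the probe-and-raise pattern that _check_extdiff_extension above relies on; the probe command and message here are placeholders, not the project's real checks.

import subprocess

def ensure_capability(probe_cmd, hint):
    """Run a cheap probe command and raise a helpful error if it fails."""
    try:
        subprocess.check_output(probe_cmd, shell=True, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError:
        raise RuntimeError("probe %r failed -- %s" % (probe_cmd, hint))

# Harmless stand-in probe; a VCS wrapper would probe its own subcommand instead.
ensure_capability("echo ok", "enable the required extension in your config")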
1b3c0e108fed7eb33edc2ee3646819e75267de69
|
bot/action/standard/info/formatter/__init__.py
|
bot/action/standard/info/formatter/__init__.py
|
from typing import List
from bot.action.util.format import DateFormatter
from bot.action.util.textformat import FormattedText
from bot.api.api import Api
from bot.api.domain import ApiObject
class ApiObjectInfoFormatter:
def __init__(self, api: Api, api_object: ApiObject):
self.api = api
self.api_object = api_object
self.info_items = []
def format(self):
raise NotImplementedError()
def get_formatted(self):
return FormattedText().newline().join(self.info_items)
def _add_title(self, title: str):
self._add(
FormattedText().bold(title)
)
def _add_info(self, label: str, value, additional_text: str = ""):
info = FormattedText()\
.normal("{label}: {value}")\
.start_format()\
.normal(label=label)\
.bold(value=value)\
.end_format()
if additional_text:
info.normal(" ").normal(additional_text)
self._add(info)
def _add_empty(self):
self._add(FormattedText())
def _add(self, text: FormattedText):
self.info_items.append(text)
@staticmethod
def _yes_no(data, yes_emoji: str = "✅", no_emoji: str = "❌", unknown_emoji: str = "❔"):
if data:
return "Yes " + yes_emoji
elif data is not None:
return "No " + no_emoji
else:
return "Unknown " + unknown_emoji
@staticmethod
def _username(username: str):
return ("@" + username) if username is not None else ""
@staticmethod
def _invite_link(invite_link: str):
return invite_link if invite_link is not None else "<Inaccessible or not defined>"
@staticmethod
def _pinned_message(message: ApiObject):
return "<{id}>".format(id=message.message_id) if message is not None else "<No pinned message>"
@staticmethod
def _group_sticker_set(sticker_set_name: str):
return sticker_set_name if sticker_set_name is not None else "<No group sticker set defined>"
@staticmethod
def _date(date: int, default_text: str = "No date"):
return DateFormatter.format_full(date) if date is not None else "<{text}>".format(text=default_text)
@staticmethod
def _is_admin(user: ApiObject, admin_chat_member_list: List[ApiObject]):
for admin_chat_member in admin_chat_member_list:
if admin_chat_member.user.id == user.id:
return True
return False
|
Create info.formatter subpackage, and add ApiObjectInfoFormatter base class
|
Create info.formatter subpackage, and add ApiObjectInfoFormatter base class
|
Python
|
agpl-3.0
|
alvarogzp/telegram-bot,alvarogzp/telegram-bot
|
Create info.formatter subpackage, and add ApiObjectInfoFormatter base class
|
from typing import List
from bot.action.util.format import DateFormatter
from bot.action.util.textformat import FormattedText
from bot.api.api import Api
from bot.api.domain import ApiObject
class ApiObjectInfoFormatter:
def __init__(self, api: Api, api_object: ApiObject):
self.api = api
self.api_object = api_object
self.info_items = []
def format(self):
raise NotImplementedError()
def get_formatted(self):
return FormattedText().newline().join(self.info_items)
def _add_title(self, title: str):
self._add(
FormattedText().bold(title)
)
def _add_info(self, label: str, value, additional_text: str = ""):
info = FormattedText()\
.normal("{label}: {value}")\
.start_format()\
.normal(label=label)\
.bold(value=value)\
.end_format()
if additional_text:
info.normal(" ").normal(additional_text)
self._add(info)
def _add_empty(self):
self._add(FormattedText())
def _add(self, text: FormattedText):
self.info_items.append(text)
@staticmethod
def _yes_no(data, yes_emoji: str = "✅", no_emoji: str = "❌", unknown_emoji: str = "❔"):
if data:
return "Yes " + yes_emoji
elif data is not None:
return "No " + no_emoji
else:
return "Unknown " + unknown_emoji
@staticmethod
def _username(username: str):
return ("@" + username) if username is not None else ""
@staticmethod
def _invite_link(invite_link: str):
return invite_link if invite_link is not None else "<Inaccessible or not defined>"
@staticmethod
def _pinned_message(message: ApiObject):
return "<{id}>".format(id=message.message_id) if message is not None else "<No pinned message>"
@staticmethod
def _group_sticker_set(sticker_set_name: str):
return sticker_set_name if sticker_set_name is not None else "<No group sticker set defined>"
@staticmethod
def _date(date: int, default_text: str = "No date"):
return DateFormatter.format_full(date) if date is not None else "<{text}>".format(text=default_text)
@staticmethod
def _is_admin(user: ApiObject, admin_chat_member_list: List[ApiObject]):
for admin_chat_member in admin_chat_member_list:
if admin_chat_member.user.id == user.id:
return True
return False
|
<commit_before><commit_msg>Create info.formatter subpackage, and add ApiObjectInfoFormatter base class<commit_after>
|
from typing import List
from bot.action.util.format import DateFormatter
from bot.action.util.textformat import FormattedText
from bot.api.api import Api
from bot.api.domain import ApiObject
class ApiObjectInfoFormatter:
def __init__(self, api: Api, api_object: ApiObject):
self.api = api
self.api_object = api_object
self.info_items = []
def format(self):
raise NotImplementedError()
def get_formatted(self):
return FormattedText().newline().join(self.info_items)
def _add_title(self, title: str):
self._add(
FormattedText().bold(title)
)
def _add_info(self, label: str, value, additional_text: str = ""):
info = FormattedText()\
.normal("{label}: {value}")\
.start_format()\
.normal(label=label)\
.bold(value=value)\
.end_format()
if additional_text:
info.normal(" ").normal(additional_text)
self._add(info)
def _add_empty(self):
self._add(FormattedText())
def _add(self, text: FormattedText):
self.info_items.append(text)
@staticmethod
def _yes_no(data, yes_emoji: str = "✅", no_emoji: str = "❌", unknown_emoji: str = "❔"):
if data:
return "Yes " + yes_emoji
elif data is not None:
return "No " + no_emoji
else:
return "Unknown " + unknown_emoji
@staticmethod
def _username(username: str):
return ("@" + username) if username is not None else ""
@staticmethod
def _invite_link(invite_link: str):
return invite_link if invite_link is not None else "<Inaccessible or not defined>"
@staticmethod
def _pinned_message(message: ApiObject):
return "<{id}>".format(id=message.message_id) if message is not None else "<No pinned message>"
@staticmethod
def _group_sticker_set(sticker_set_name: str):
return sticker_set_name if sticker_set_name is not None else "<No group sticker set defined>"
@staticmethod
def _date(date: int, default_text: str = "No date"):
return DateFormatter.format_full(date) if date is not None else "<{text}>".format(text=default_text)
@staticmethod
def _is_admin(user: ApiObject, admin_chat_member_list: List[ApiObject]):
for admin_chat_member in admin_chat_member_list:
if admin_chat_member.user.id == user.id:
return True
return False
|
Create info.formatter subpackage, and add ApiObjectInfoFormatter base classfrom typing import List
from bot.action.util.format import DateFormatter
from bot.action.util.textformat import FormattedText
from bot.api.api import Api
from bot.api.domain import ApiObject
class ApiObjectInfoFormatter:
def __init__(self, api: Api, api_object: ApiObject):
self.api = api
self.api_object = api_object
self.info_items = []
def format(self):
raise NotImplementedError()
def get_formatted(self):
return FormattedText().newline().join(self.info_items)
def _add_title(self, title: str):
self._add(
FormattedText().bold(title)
)
def _add_info(self, label: str, value, additional_text: str = ""):
info = FormattedText()\
.normal("{label}: {value}")\
.start_format()\
.normal(label=label)\
.bold(value=value)\
.end_format()
if additional_text:
info.normal(" ").normal(additional_text)
self._add(info)
def _add_empty(self):
self._add(FormattedText())
def _add(self, text: FormattedText):
self.info_items.append(text)
@staticmethod
def _yes_no(data, yes_emoji: str = "✅", no_emoji: str = "❌", unknown_emoji: str = "❔"):
if data:
return "Yes " + yes_emoji
elif data is not None:
return "No " + no_emoji
else:
return "Unknown " + unknown_emoji
@staticmethod
def _username(username: str):
return ("@" + username) if username is not None else ""
@staticmethod
def _invite_link(invite_link: str):
return invite_link if invite_link is not None else "<Inaccessible or not defined>"
@staticmethod
def _pinned_message(message: ApiObject):
return "<{id}>".format(id=message.message_id) if message is not None else "<No pinned message>"
@staticmethod
def _group_sticker_set(sticker_set_name: str):
return sticker_set_name if sticker_set_name is not None else "<No group sticker set defined>"
@staticmethod
def _date(date: int, default_text: str = "No date"):
return DateFormatter.format_full(date) if date is not None else "<{text}>".format(text=default_text)
@staticmethod
def _is_admin(user: ApiObject, admin_chat_member_list: List[ApiObject]):
for admin_chat_member in admin_chat_member_list:
if admin_chat_member.user.id == user.id:
return True
return False
|
<commit_before><commit_msg>Create info.formatter subpackage, and add ApiObjectInfoFormatter base class<commit_after>from typing import List
from bot.action.util.format import DateFormatter
from bot.action.util.textformat import FormattedText
from bot.api.api import Api
from bot.api.domain import ApiObject
class ApiObjectInfoFormatter:
def __init__(self, api: Api, api_object: ApiObject):
self.api = api
self.api_object = api_object
self.info_items = []
def format(self):
raise NotImplementedError()
def get_formatted(self):
return FormattedText().newline().join(self.info_items)
def _add_title(self, title: str):
self._add(
FormattedText().bold(title)
)
def _add_info(self, label: str, value, additional_text: str = ""):
info = FormattedText()\
.normal("{label}: {value}")\
.start_format()\
.normal(label=label)\
.bold(value=value)\
.end_format()
if additional_text:
info.normal(" ").normal(additional_text)
self._add(info)
def _add_empty(self):
self._add(FormattedText())
def _add(self, text: FormattedText):
self.info_items.append(text)
@staticmethod
def _yes_no(data, yes_emoji: str = "✅", no_emoji: str = "❌", unknown_emoji: str = "❔"):
if data:
return "Yes " + yes_emoji
elif data is not None:
return "No " + no_emoji
else:
return "Unknown " + unknown_emoji
@staticmethod
def _username(username: str):
return ("@" + username) if username is not None else ""
@staticmethod
def _invite_link(invite_link: str):
return invite_link if invite_link is not None else "<Inaccessible or not defined>"
@staticmethod
def _pinned_message(message: ApiObject):
return "<{id}>".format(id=message.message_id) if message is not None else "<No pinned message>"
@staticmethod
def _group_sticker_set(sticker_set_name: str):
return sticker_set_name if sticker_set_name is not None else "<No group sticker set defined>"
@staticmethod
def _date(date: int, default_text: str = "No date"):
return DateFormatter.format_full(date) if date is not None else "<{text}>".format(text=default_text)
@staticmethod
def _is_admin(user: ApiObject, admin_chat_member_list: List[ApiObject]):
for admin_chat_member in admin_chat_member_list:
if admin_chat_member.user.id == user.id:
return True
return False
|
|
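A simplified, self-contained sketch of the template-method pattern the base class above relies on: format() is left to subclasses, which fill info_items via the helper methods. InfoFormatter and UserInfoFormatter are stand-ins, not classes from the bot package.

class InfoFormatter(object):
    """Minimal stand-in: subclasses implement format() and fill info_items."""
    def __init__(self, obj):
        self.obj = obj
        self.info_items = []

    def format(self):
        raise NotImplementedError()

    def get_formatted(self):
        return '\n'.join(self.info_items)

    def _add_info(self, label, value):
        self.info_items.append('%s: %s' % (label, value))

class UserInfoFormatter(InfoFormatter):
    def format(self):
        self._add_info('id', self.obj['id'])
        self._add_info('username', self.obj.get('username') or '<none>')

formatter = UserInfoFormatter({'id': 42, 'username': 'alice'})
formatter.format()
print(formatter.get_formatted())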
7ff3a40d5cdc9fe8d7a960377a7d2f4ea2fb411d
|
Ratings-Counter.py
|
Ratings-Counter.py
|
from pyspark import SparkConf, SparkContext
import collections
conf = SparkConf().setMaster("local").setAppName("RatingsHistogram")
sc = SparkContext(conf = conf)
lines = sc.textFile("ml-100k/u.data")
ratings = lines.map(lambda x: x.split()[2])
result = ratings.countByValue()
sortedResults = collections.OrderedDict(sorted(result.items()))
for key, value in sortedResults.iteritems():
print "%s %i" % (key, value)
|
Add a test for running spark
|
Add a test for running spark
We're using data from http://grouplens.org/datasets/movielens/
For this test we're using the MovieLens 100K Dataset
|
Python
|
mit
|
tonirilix/apache-spark-hands-on
|
Add a test for running spark
We're using data from http://grouplens.org/datasets/movielens/
For this test we're using the MovieLens 100K Dataset
|
from pyspark import SparkConf, SparkContext
import collections
conf = SparkConf().setMaster("local").setAppName("RatingsHistogram")
sc = SparkContext(conf = conf)
lines = sc.textFile("ml-100k/u.data")
ratings = lines.map(lambda x: x.split()[2])
result = ratings.countByValue()
sortedResults = collections.OrderedDict(sorted(result.items()))
for key, value in sortedResults.iteritems():
print "%s %i" % (key, value)
|
<commit_before><commit_msg>Add a test for running spark
We're using data from http://grouplens.org/datasets/movielens/
For this test we're using the MovieLens 100K Dataset<commit_after>
|
from pyspark import SparkConf, SparkContext
import collections
conf = SparkConf().setMaster("local").setAppName("RatingsHistogram")
sc = SparkContext(conf = conf)
lines = sc.textFile("ml-100k/u.data")
ratings = lines.map(lambda x: x.split()[2])
result = ratings.countByValue()
sortedResults = collections.OrderedDict(sorted(result.items()))
for key, value in sortedResults.iteritems():
print "%s %i" % (key, value)
|
Add a test for running spark
We're using data from http://grouplens.org/datasets/movielens/
For this test we're using the MovieLens 100K Datasetfrom pyspark import SparkConf, SparkContext
import collections
conf = SparkConf().setMaster("local").setAppName("RatingsHistogram")
sc = SparkContext(conf = conf)
lines = sc.textFile("ml-100k/u.data")
ratings = lines.map(lambda x: x.split()[2])
result = ratings.countByValue()
sortedResults = collections.OrderedDict(sorted(result.items()))
for key, value in sortedResults.iteritems():
print "%s %i" % (key, value)
|
<commit_before><commit_msg>Add a test for running spark
We're using data from http://grouplens.org/datasets/movielens/
For this test we're using the MovieLens 100K Dataset<commit_after>from pyspark import SparkConf, SparkContext
import collections
conf = SparkConf().setMaster("local").setAppName("RatingsHistogram")
sc = SparkContext(conf = conf)
lines = sc.textFile("ml-100k/u.data")
ratings = lines.map(lambda x: x.split()[2])
result = ratings.countByValue()
sortedResults = collections.OrderedDict(sorted(result.items()))
for key, value in sortedResults.iteritems():
print "%s %i" % (key, value)
|
|
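The script above is Python 2 (print statement, iteritems); a rough Python 3 equivalent of the same histogram, assuming the same ml-100k/u.data layout and a local Spark install, could look like this.

from collections import OrderedDict
from pyspark import SparkConf, SparkContext

conf = SparkConf().setMaster("local").setAppName("RatingsHistogram")
sc = SparkContext(conf=conf)

lines = sc.textFile("ml-100k/u.data")          # userID  movieID  rating  timestamp
ratings = lines.map(lambda x: x.split()[2])    # keep only the rating column
result = ratings.countByValue()

for key, value in OrderedDict(sorted(result.items())).items():
    print("%s %i" % (key, int(value)))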
717bafb43870b45d49bfc9d89408544feecb1a78
|
hooks/update-nrpe.py
|
hooks/update-nrpe.py
|
#!/usr/bin/env python
import sys
from charmhelpers.contrib.charmsupport import nrpe
def update_nrpe_checks():
nrpe_compat = nrpe.NRPE()
# The use of port 80 assumes the 'secure' charm configuration
# value is false, which is the scenario for our deployment on
# staging and production. If testing this functionality on a
# free-standing GUI charm deployment be sure to change the secure
# setting.
port = 80
ip_address = '127.0.0.1'
uri = '/static/gui/build/app/version.json'
success = 'version'
check_cmd = 'check_http -I {} -p {} -r {} -u {}'.format(
ip_address, port, success, uri)
nrpe_compat.add_check(
shortname='gui-is-accessible',
description='Check_the_GUI_responds',
check_cmd=check_cmd)
nrpe_compat.write()
def remove_nrpe_check():
nrpe_compat = nrpe.NRPE()
nrpe_compat.remove_checks()
if __name__ == '__main__':
hook_name = sys.argv[0]
if 'departed' in hook_name or 'broken' in hook_name:
remove_nrpe_check()
else:
update_nrpe_checks()
|
Update description of the nagios port for check_http.
|
Update description of the nagios port for check_http.
|
Python
|
agpl-3.0
|
juju/juju-gui-charm,juju/juju-gui-charm
|
Update description of the nagios port for check_http.
|
#!/usr/bin/env python
import sys
from charmhelpers.contrib.charmsupport import nrpe
def update_nrpe_checks():
nrpe_compat = nrpe.NRPE()
# The use of port 80 assumes the 'secure' charm configuration
# value is false, which is the scenario for our deployment on
# staging and production. If testing this functionality on a
# free-standing GUI charm deployment be sure to change the secure
# setting.
port = 80
ip_address = '127.0.0.1'
uri = '/static/gui/build/app/version.json'
success = 'version'
check_cmd = 'check_http -I {} -p {} -r {} -u {}'.format(
ip_address, port, success, uri)
nrpe_compat.add_check(
shortname='gui-is-accessible',
description='Check_the_GUI_responds',
check_cmd=check_cmd)
nrpe_compat.write()
def remove_nrpe_check():
nrpe_compat = nrpe.NRPE()
nrpe_compat.remove_checks()
if __name__ == '__main__':
hook_name = sys.argv[0]
if 'departed' in hook_name or 'broken' in hook_name:
remove_nrpe_check()
else:
update_nrpe_checks()
|
<commit_before><commit_msg>Update description of the nagios port for check_http.<commit_after>
|
#!/usr/bin/env python
import sys
from charmhelpers.contrib.charmsupport import nrpe
def update_nrpe_checks():
nrpe_compat = nrpe.NRPE()
# The use of port 80 assumes the 'secure' charm configuration
# value is false, which is the scenario for our deployment on
# staging and production. If testing this functionality on a
# free-standing GUI charm deployment be sure to change the secure
# setting.
port = 80
ip_address = '127.0.0.1'
uri = '/static/gui/build/app/version.json'
success = 'version'
check_cmd = 'check_http -I {} -p {} -r {} -u {}'.format(
ip_address, port, success, uri)
nrpe_compat.add_check(
shortname='gui-is-accessible',
description='Check_the_GUI_responds',
check_cmd=check_cmd)
nrpe_compat.write()
def remove_nrpe_check():
nrpe_compat = nrpe.NRPE()
nrpe_compat.remove_checks()
if __name__ == '__main__':
hook_name = sys.argv[0]
if 'departed' in hook_name or 'broken' in hook_name:
remove_nrpe_check()
else:
update_nrpe_checks()
|
Update description of the nagios port for check_http.#!/usr/bin/env python
import sys
from charmhelpers.contrib.charmsupport import nrpe
def update_nrpe_checks():
nrpe_compat = nrpe.NRPE()
# The use of port 80 assumes the 'secure' charm configuration
# value is false, which is the scenario for our deployment on
# staging and production. If testing this functionality on a
# free-standing GUI charm deployment be sure to change the secure
# setting.
port = 80
ip_address = '127.0.0.1'
uri = '/static/gui/build/app/version.json'
success = 'version'
check_cmd = 'check_http -I {} -p {} -r {} -u {}'.format(
ip_address, port, success, uri)
nrpe_compat.add_check(
shortname='gui-is-accessible',
description='Check_the_GUI_responds',
check_cmd=check_cmd)
nrpe_compat.write()
def remove_nrpe_check():
nrpe_compat = nrpe.NRPE()
nrpe_compat.remove_checks()
if __name__ == '__main__':
hook_name = sys.argv[0]
if 'departed' in hook_name or 'broken' in hook_name:
remove_nrpe_check()
else:
update_nrpe_checks()
|
<commit_before><commit_msg>Update description of the nagios port for check_http.<commit_after>#!/usr/bin/env python
import sys
from charmhelpers.contrib.charmsupport import nrpe
def update_nrpe_checks():
nrpe_compat = nrpe.NRPE()
# The use of port 80 assumes the 'secure' charm configuration
# value is false, which is the scenario for our deployment on
# staging and production. If testing this functionality on a
# free-standing GUI charm deployment be sure to change the secure
# setting.
port = 80
ip_address = '127.0.0.1'
uri = '/static/gui/build/app/version.json'
success = 'version'
check_cmd = 'check_http -I {} -p {} -r {} -u {}'.format(
ip_address, port, success, uri)
nrpe_compat.add_check(
shortname='gui-is-accessible',
description='Check_the_GUI_responds',
check_cmd=check_cmd)
nrpe_compat.write()
def remove_nrpe_check():
nrpe_compat = nrpe.NRPE()
nrpe_compat.remove_checks()
if __name__ == '__main__':
hook_name = sys.argv[0]
if 'departed' in hook_name or 'broken' in hook_name:
remove_nrpe_check()
else:
update_nrpe_checks()
|
|
8bdd32b41b1a89aac50eb25ed5bd32c3c15b49c7
|
leetcode/171-Excel-Sheet-Column-Number/ExcelSheetColNum_001.py
|
leetcode/171-Excel-Sheet-Column-Number/ExcelSheetColNum_001.py
|
class Solution(object):
def titleToNumber(self, s):
"""
:type s: str
:rtype: int
"""
res = 0
for i in range(len(s) - 1, -1, -1):
res += (ord(s[i]) - ord('A') + 1) * 26 ** (len(s) - 1 - i)
return res
|
Create Simplified ExcelSheetColNum for Leetcode
|
Create Simplified ExcelSheetColNum for Leetcode
|
Python
|
mit
|
cc13ny/Allin,cc13ny/Allin,Chasego/codirit,cc13ny/Allin,cc13ny/algo,Chasego/cod,cc13ny/Allin,Chasego/codi,cc13ny/algo,Chasego/codirit,cc13ny/Allin,Chasego/codi,Chasego/codi,Chasego/codi,cc13ny/algo,Chasego/cod,Chasego/cod,cc13ny/algo,Chasego/cod,Chasego/codirit,Chasego/codi,Chasego/cod,cc13ny/algo,Chasego/codirit,Chasego/codirit
|
Create Simplified ExcelSheetColNum for Leetcode
|
class Solution(object):
def titleToNumber(self, s):
"""
:type s: str
:rtype: int
"""
res = 0
for i in range(len(s) - 1, -1, -1):
res += (ord(s[i]) - ord('A') + 1) * 26 ** (len(s) - 1 - i)
return res
|
<commit_before><commit_msg>Create Simplified ExcelSheetColNum for Leetcode<commit_after>
|
class Solution(object):
def titleToNumber(self, s):
"""
:type s: str
:rtype: int
"""
res = 0
for i in range(len(s) - 1, -1, -1):
res += (ord(s[i]) - ord('A') + 1) * 26 ** (len(s) - 1 - i)
return res
|
Create Simplified ExcelSheetColNum for Leetcodeclass Solution(object):
def titleToNumber(self, s):
"""
:type s: str
:rtype: int
"""
res = 0
for i in range(len(s) - 1, -1, -1):
res += (ord(s[i]) - ord('A') + 1) * 26 ** (len(s) - 1 - i)
return res
|
<commit_before><commit_msg>Create Simplified ExcelSheetColNum for Leetcode<commit_after>class Solution(object):
def titleToNumber(self, s):
"""
:type s: str
:rtype: int
"""
res = 0
for i in range(len(s) - 1, -1, -1):
res += (ord(s[i]) - ord('A') + 1) * 26 ** (len(s) - 1 - i)
return res
|
|
7b5de280562f5984b04c63432de8f28e03b57cbd
|
firecares/firestation/migrations/0020_update_greeley_headquarters_location.py
|
firecares/firestation/migrations/0020_update_greeley_headquarters_location.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.gis.geos import Point
from django.db import models, migrations
from genericm2m.utils import monkey_patch
class Migration(migrations.Migration):
dependencies = [
('firestation', '0019_assign-station-number-2'),
('usgs', '0003_auto_20151105_2156')
]
def update_greeley_headquarters_location(apps, schema_editor):
FD = apps.get_model("firestation", "firedepartment")
IP = apps.get_model("usgs", "IncorporatedPlace")
# Have to patch this in since RelatedObjectsDescriptor won't be attached
monkey_patch(FD, 'government_unit')
greeley = IP.objects.filter(place_name='Greeley', state_name='Colorado').first()
fd = FD.objects.filter(id=97668).first()
if fd:
fd.headquarters_address.geom = Point(-104.694001, 40.426638)
fd.headquarters_address.save()
fd.geom = greeley.geom
fd.government_unit.connect(greeley)
fd.population = greeley.population
fd.save()
operations = [
migrations.RunPython(update_greeley_headquarters_location)
]
|
Move Union Colony Fire rescue authority to correct location
|
Move Union Colony Fire rescue authority to correct location
|
Python
|
mit
|
HunterConnelly/firecares,HunterConnelly/firecares,HunterConnelly/firecares,meilinger/firecares,FireCARES/firecares,FireCARES/firecares,HunterConnelly/firecares,FireCARES/firecares,FireCARES/firecares,meilinger/firecares,meilinger/firecares,meilinger/firecares,FireCARES/firecares
|
Move Union Colony Fire rescue authority to correct location
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.gis.geos import Point
from django.db import models, migrations
from genericm2m.utils import monkey_patch
class Migration(migrations.Migration):
dependencies = [
('firestation', '0019_assign-station-number-2'),
('usgs', '0003_auto_20151105_2156')
]
def update_greeley_headquarters_location(apps, schema_editor):
FD = apps.get_model("firestation", "firedepartment")
IP = apps.get_model("usgs", "IncorporatedPlace")
# Have to patch this in since RelatedObjectsDescriptor won't be attached
monkey_patch(FD, 'government_unit')
greeley = IP.objects.filter(place_name='Greeley', state_name='Colorado').first()
fd = FD.objects.filter(id=97668).first()
if fd:
fd.headquarters_address.geom = Point(-104.694001, 40.426638)
fd.headquarters_address.save()
fd.geom = greeley.geom
fd.government_unit.connect(greeley)
fd.population = greeley.population
fd.save()
operations = [
migrations.RunPython(update_greeley_headquarters_location)
]
|
<commit_before><commit_msg>Move Union Colony Fire rescue authority to correct location<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.gis.geos import Point
from django.db import models, migrations
from genericm2m.utils import monkey_patch
class Migration(migrations.Migration):
dependencies = [
('firestation', '0019_assign-station-number-2'),
('usgs', '0003_auto_20151105_2156')
]
def update_greeley_headquarters_location(apps, schema_editor):
FD = apps.get_model("firestation", "firedepartment")
IP = apps.get_model("usgs", "IncorporatedPlace")
# Have to patch this in since RelatedObjectsDescriptor won't be attached
monkey_patch(FD, 'government_unit')
greeley = IP.objects.filter(place_name='Greeley', state_name='Colorado').first()
fd = FD.objects.filter(id=97668).first()
if fd:
fd.headquarters_address.geom = Point(-104.694001, 40.426638)
fd.headquarters_address.save()
fd.geom = greeley.geom
fd.government_unit.connect(greeley)
fd.population = greeley.population
fd.save()
operations = [
migrations.RunPython(update_greeley_headquarters_location)
]
|
Move Union Colony Fire rescue authority to correct location# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.gis.geos import Point
from django.db import models, migrations
from genericm2m.utils import monkey_patch
class Migration(migrations.Migration):
dependencies = [
('firestation', '0019_assign-station-number-2'),
('usgs', '0003_auto_20151105_2156')
]
def update_greeley_headquarters_location(apps, schema_editor):
FD = apps.get_model("firestation", "firedepartment")
IP = apps.get_model("usgs", "IncorporatedPlace")
# Have to patch this in since RelatedObjectsDescriptor won't be attached
monkey_patch(FD, 'government_unit')
greeley = IP.objects.filter(place_name='Greeley', state_name='Colorado').first()
fd = FD.objects.filter(id=97668).first()
if fd:
fd.headquarters_address.geom = Point(-104.694001, 40.426638)
fd.headquarters_address.save()
fd.geom = greeley.geom
fd.government_unit.connect(greeley)
fd.population = greeley.population
fd.save()
operations = [
migrations.RunPython(update_greeley_headquarters_location)
]
|
<commit_before><commit_msg>Move Union Colony Fire rescue authority to correct location<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.gis.geos import Point
from django.db import models, migrations
from genericm2m.utils import monkey_patch
class Migration(migrations.Migration):
dependencies = [
('firestation', '0019_assign-station-number-2'),
('usgs', '0003_auto_20151105_2156')
]
def update_greeley_headquarters_location(apps, schema_editor):
FD = apps.get_model("firestation", "firedepartment")
IP = apps.get_model("usgs", "IncorporatedPlace")
# Have to patch this in since RelatedObjectsDescriptor won't be attached
monkey_patch(FD, 'government_unit')
greeley = IP.objects.filter(place_name='Greeley', state_name='Colorado').first()
fd = FD.objects.filter(id=97668).first()
if fd:
fd.headquarters_address.geom = Point(-104.694001, 40.426638)
fd.headquarters_address.save()
fd.geom = greeley.geom
fd.government_unit.connect(greeley)
fd.population = greeley.population
fd.save()
operations = [
migrations.RunPython(update_greeley_headquarters_location)
]
|
|
27b43a8e46ad7a47415942587ab23e391c72b269
|
tests/functional/core/test_brocker.py
|
tests/functional/core/test_brocker.py
|
from circus.client import CircusClient
from onitu.utils import get_circusctl_endpoint
from tests.utils.testdriver import TestDriver
from tests.utils.loop import BooleanLoop
def test_abort_if_no_source(setup, launcher):
A = TestDriver('A', speed_bump=True)
B = TestDriver('B', speed_bump=True)
setup.add(A)
setup.add(B)
try:
launcher()
loop = BooleanLoop()
launcher.on_transfer_started(loop.stop, d_to='B', filename='test')
A.generate(A.path('default', 'test'), 20)
loop.run(timeout=1)
A.unlink(A.path('default', 'test'), notify=False)
loop.restart()
launcher.on_transfer_aborted(loop.stop, d_to='B', filename='test')
loop.run(timeout=10)
finally:
launcher.close()
def test_work_if_secondary_source(setup, launcher):
A = TestDriver('A')
B = TestDriver('B', speed_bump=True)
C = TestDriver('C')
setup.add(A)
setup.add(B)
setup.add(C)
circus = CircusClient(endpoint=get_circusctl_endpoint(setup.name))
try:
launcher()
circus.call({
'command': "stop",
'properties': {
'name': 'B',
'waiting': True
}
})
launcher.copy_file('default', 'test', 20, A, C)
loop = BooleanLoop()
launcher.on_transfer_started(loop.stop, d_to='B', filename='test')
circus.call({
'command': "start",
'properties': {
'name': 'B',
'waiting': True
}
})
loop.run(timeout=1)
A.unlink(A.path('default', 'test'), notify=False)
loop.restart()
launcher.on_transfer_ended(loop.stop, d_to='B', filename='test')
loop.run(timeout=10)
finally:
launcher.close()
|
Add some tests for the brocker
|
Add some tests for the brocker
|
Python
|
mit
|
onitu/onitu,onitu/onitu,onitu/onitu
|
Add some tests for the brocker
|
from circus.client import CircusClient
from onitu.utils import get_circusctl_endpoint
from tests.utils.testdriver import TestDriver
from tests.utils.loop import BooleanLoop
def test_abort_if_no_source(setup, launcher):
A = TestDriver('A', speed_bump=True)
B = TestDriver('B', speed_bump=True)
setup.add(A)
setup.add(B)
try:
launcher()
loop = BooleanLoop()
launcher.on_transfer_started(loop.stop, d_to='B', filename='test')
A.generate(A.path('default', 'test'), 20)
loop.run(timeout=1)
A.unlink(A.path('default', 'test'), notify=False)
loop.restart()
launcher.on_transfer_aborted(loop.stop, d_to='B', filename='test')
loop.run(timeout=10)
finally:
launcher.close()
def test_work_if_secondary_source(setup, launcher):
A = TestDriver('A')
B = TestDriver('B', speed_bump=True)
C = TestDriver('C')
setup.add(A)
setup.add(B)
setup.add(C)
circus = CircusClient(endpoint=get_circusctl_endpoint(setup.name))
try:
launcher()
circus.call({
'command': "stop",
'properties': {
'name': 'B',
'waiting': True
}
})
launcher.copy_file('default', 'test', 20, A, C)
loop = BooleanLoop()
launcher.on_transfer_started(loop.stop, d_to='B', filename='test')
circus.call({
'command': "start",
'properties': {
'name': 'B',
'waiting': True
}
})
loop.run(timeout=1)
A.unlink(A.path('default', 'test'), notify=False)
loop.restart()
launcher.on_transfer_ended(loop.stop, d_to='B', filename='test')
loop.run(timeout=10)
finally:
launcher.close()
|
<commit_before><commit_msg>Add some tests for the brocker<commit_after>
|
from circus.client import CircusClient
from onitu.utils import get_circusctl_endpoint
from tests.utils.testdriver import TestDriver
from tests.utils.loop import BooleanLoop
def test_abort_if_no_source(setup, launcher):
A = TestDriver('A', speed_bump=True)
B = TestDriver('B', speed_bump=True)
setup.add(A)
setup.add(B)
try:
launcher()
loop = BooleanLoop()
launcher.on_transfer_started(loop.stop, d_to='B', filename='test')
A.generate(A.path('default', 'test'), 20)
loop.run(timeout=1)
A.unlink(A.path('default', 'test'), notify=False)
loop.restart()
launcher.on_transfer_aborted(loop.stop, d_to='B', filename='test')
loop.run(timeout=10)
finally:
launcher.close()
def test_work_if_secondary_source(setup, launcher):
A = TestDriver('A')
B = TestDriver('B', speed_bump=True)
C = TestDriver('C')
setup.add(A)
setup.add(B)
setup.add(C)
circus = CircusClient(endpoint=get_circusctl_endpoint(setup.name))
try:
launcher()
circus.call({
'command': "stop",
'properties': {
'name': 'B',
'waiting': True
}
})
launcher.copy_file('default', 'test', 20, A, C)
loop = BooleanLoop()
launcher.on_transfer_started(loop.stop, d_to='B', filename='test')
circus.call({
'command': "start",
'properties': {
'name': 'B',
'waiting': True
}
})
loop.run(timeout=1)
A.unlink(A.path('default', 'test'), notify=False)
loop.restart()
launcher.on_transfer_ended(loop.stop, d_to='B', filename='test')
loop.run(timeout=10)
finally:
launcher.close()
|
Add some tests for the brockerfrom circus.client import CircusClient
from onitu.utils import get_circusctl_endpoint
from tests.utils.testdriver import TestDriver
from tests.utils.loop import BooleanLoop
def test_abort_if_no_source(setup, launcher):
A = TestDriver('A', speed_bump=True)
B = TestDriver('B', speed_bump=True)
setup.add(A)
setup.add(B)
try:
launcher()
loop = BooleanLoop()
launcher.on_transfer_started(loop.stop, d_to='B', filename='test')
A.generate(A.path('default', 'test'), 20)
loop.run(timeout=1)
A.unlink(A.path('default', 'test'), notify=False)
loop.restart()
launcher.on_transfer_aborted(loop.stop, d_to='B', filename='test')
loop.run(timeout=10)
finally:
launcher.close()
def test_work_if_secondary_source(setup, launcher):
A = TestDriver('A')
B = TestDriver('B', speed_bump=True)
C = TestDriver('C')
setup.add(A)
setup.add(B)
setup.add(C)
circus = CircusClient(endpoint=get_circusctl_endpoint(setup.name))
try:
launcher()
circus.call({
'command': "stop",
'properties': {
'name': 'B',
'waiting': True
}
})
launcher.copy_file('default', 'test', 20, A, C)
loop = BooleanLoop()
launcher.on_transfer_started(loop.stop, d_to='B', filename='test')
circus.call({
'command': "start",
'properties': {
'name': 'B',
'waiting': True
}
})
loop.run(timeout=1)
A.unlink(A.path('default', 'test'), notify=False)
loop.restart()
launcher.on_transfer_ended(loop.stop, d_to='B', filename='test')
loop.run(timeout=10)
finally:
launcher.close()
|
<commit_before><commit_msg>Add some tests for the brocker<commit_after>from circus.client import CircusClient
from onitu.utils import get_circusctl_endpoint
from tests.utils.testdriver import TestDriver
from tests.utils.loop import BooleanLoop
def test_abort_if_no_source(setup, launcher):
A = TestDriver('A', speed_bump=True)
B = TestDriver('B', speed_bump=True)
setup.add(A)
setup.add(B)
try:
launcher()
loop = BooleanLoop()
launcher.on_transfer_started(loop.stop, d_to='B', filename='test')
A.generate(A.path('default', 'test'), 20)
loop.run(timeout=1)
A.unlink(A.path('default', 'test'), notify=False)
loop.restart()
launcher.on_transfer_aborted(loop.stop, d_to='B', filename='test')
loop.run(timeout=10)
finally:
launcher.close()
def test_work_if_secondary_source(setup, launcher):
A = TestDriver('A')
B = TestDriver('B', speed_bump=True)
C = TestDriver('C')
setup.add(A)
setup.add(B)
setup.add(C)
circus = CircusClient(endpoint=get_circusctl_endpoint(setup.name))
try:
launcher()
circus.call({
'command': "stop",
'properties': {
'name': 'B',
'waiting': True
}
})
launcher.copy_file('default', 'test', 20, A, C)
loop = BooleanLoop()
launcher.on_transfer_started(loop.stop, d_to='B', filename='test')
circus.call({
'command': "start",
'properties': {
'name': 'B',
'waiting': True
}
})
loop.run(timeout=1)
A.unlink(A.path('default', 'test'), notify=False)
loop.restart()
launcher.on_transfer_ended(loop.stop, d_to='B', filename='test')
loop.run(timeout=10)
finally:
launcher.close()
|
|
72ea0dc2c55ac139e006eebc0fed29a20d3900ad
|
tests/test_django_default_settings.py
|
tests/test_django_default_settings.py
|
import unittest
from django.conf import global_settings as default
import cbs
class MySettings:
@property
def INSTALLED_APPS(self):
# Customize an empty global setting.
return list(default.INSTALLED_APPS) + ['test']
@property
def CACHES(self):
# Customize a non-empty global setting.
caches = default.CACHES
caches['custom'] = {'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'}
return caches
class GlobalSettingsTest(unittest.TestCase):
def test_precedence_empty_global_setting(self):
g = {}
cbs.apply(MySettings, g)
self.assertEqual(['test'], g['INSTALLED_APPS'])
def test_precedence_non_empty_global_setting(self):
g = {}
cbs.apply(MySettings, g)
self.assertIn('default', g['CACHES'])
self.assertIn('custom', g['CACHES'])
|
Add a test demonstrating how to use the django global settings.
|
Add a test demonstrating how to use the django global settings.
|
Python
|
bsd-2-clause
|
funkybob/django-classy-settings
|
Add a test demonstrating how to use the django global settings.
|
import unittest
from django.conf import global_settings as default
import cbs
class MySettings:
@property
def INSTALLED_APPS(self):
# Customize an empty global setting.
return list(default.INSTALLED_APPS) + ['test']
@property
def CACHES(self):
# Customize a non-empty global setting.
caches = default.CACHES
caches['custom'] = {'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'}
return caches
class GlobalSettingsTest(unittest.TestCase):
def test_precedence_empty_global_setting(self):
g = {}
cbs.apply(MySettings, g)
self.assertEqual(['test'], g['INSTALLED_APPS'])
def test_precedence_non_empty_global_setting(self):
g = {}
cbs.apply(MySettings, g)
self.assertIn('default', g['CACHES'])
self.assertIn('custom', g['CACHES'])
|
<commit_before><commit_msg>Add a test demonstrating how to use the django global settings.<commit_after>
|
import unittest
from django.conf import global_settings as default
import cbs
class MySettings:
@property
def INSTALLED_APPS(self):
# Customize an empty global setting.
return list(default.INSTALLED_APPS) + ['test']
@property
def CACHES(self):
# Customize a non-empty global setting.
caches = default.CACHES
caches['custom'] = {'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'}
return caches
class GlobalSettingsTest(unittest.TestCase):
def test_precedence_empty_global_setting(self):
g = {}
cbs.apply(MySettings, g)
self.assertEqual(['test'], g['INSTALLED_APPS'])
def test_precedence_non_empty_global_setting(self):
g = {}
cbs.apply(MySettings, g)
self.assertIn('default', g['CACHES'])
self.assertIn('custom', g['CACHES'])
|
Add a test demonstrating how to use the django global settings.import unittest
from django.conf import global_settings as default
import cbs
class MySettings:
@property
def INSTALLED_APPS(self):
# Customize an empty global setting.
return list(default.INSTALLED_APPS) + ['test']
@property
def CACHES(self):
# Customize a non-empty global setting.
caches = default.CACHES
caches['custom'] = {'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'}
return caches
class GlobalSettingsTest(unittest.TestCase):
def test_precedence_empty_global_setting(self):
g = {}
cbs.apply(MySettings, g)
self.assertEqual(['test'], g['INSTALLED_APPS'])
def test_precedence_non_empty_global_setting(self):
g = {}
cbs.apply(MySettings, g)
self.assertIn('default', g['CACHES'])
self.assertIn('custom', g['CACHES'])
|
<commit_before><commit_msg>Add a test demonstrating how to use the django global settings.<commit_after>import unittest
from django.conf import global_settings as default
import cbs
class MySettings:
@property
def INSTALLED_APPS(self):
# Customize an empty global setting.
return list(default.INSTALLED_APPS) + ['test']
@property
def CACHES(self):
# Customize a non-empty global setting.
caches = default.CACHES
caches['custom'] = {'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'}
return caches
class GlobalSettingsTest(unittest.TestCase):
def test_precedence_empty_global_setting(self):
g = {}
cbs.apply(MySettings, g)
self.assertEqual(['test'], g['INSTALLED_APPS'])
def test_precedence_non_empty_global_setting(self):
g = {}
cbs.apply(MySettings, g)
self.assertIn('default', g['CACHES'])
self.assertIn('custom', g['CACHES'])
|
|
a142dfd0ec94785df58d766abc97df837106a736
|
tests/test_50_xarray_to_grib_regular_ll.py
|
tests/test_50_xarray_to_grib_regular_ll.py
|
import numpy as np
import pytest
import xarray as xr
from cfgrib import xarray_store
@pytest.fixture()
def canonic_dataarray():
da = xr.DataArray(
np.arange(20.).reshape((4, 5)),
coords=[np.linspace(90., -90., 4), np.linspace(0., 360., 5, endpoint=False)],
dims=['latitude', 'longitude'],
)
return da
def test_canonical_dataarray_to_grib_with_grik_keys(canonic_dataarray, tmpdir):
out_path = tmpdir.join('res.grib')
grib_keys = {
'gridType': 'regular_ll',
'typeOfLevel': 'surface',
}
with open(str(out_path), 'wb') as file:
xarray_store.canonical_dataarray_to_grib(file, canonic_dataarray, grib_keys=grib_keys)
def test_canonical_dataarray_to_grib_detect_grik_keys(canonic_dataarray, tmpdir):
out_path = tmpdir.join('res.grib')
with open(str(out_path), 'wb') as file:
xarray_store.canonical_dataarray_to_grib(file, canonic_dataarray)
def test_canonical_dataarray_to_grib_conflicting_detect_grik_keys(canonic_dataarray, tmpdir):
out_path = tmpdir.join('res.grib')
grib_keys = {
'gridType': 'reduced_ll',
}
with open(str(out_path), 'wb') as file:
with pytest.raises(ValueError):
xarray_store.canonical_dataarray_to_grib(file, canonic_dataarray, grib_keys=grib_keys)
|
Add some simple tests for grib_keys auto-detection and user definition.
|
Add some simple tests for grib_keys auto-detection and user definition.
|
Python
|
apache-2.0
|
ecmwf/cfgrib
|
Add some simple tests for grib_keys auto-detection and user definition.
|
import numpy as np
import pytest
import xarray as xr
from cfgrib import xarray_store
@pytest.fixture()
def canonic_dataarray():
da = xr.DataArray(
np.arange(20.).reshape((4, 5)),
coords=[np.linspace(90., -90., 4), np.linspace(0., 360., 5, endpoint=False)],
dims=['latitude', 'longitude'],
)
return da
def test_canonical_dataarray_to_grib_with_grik_keys(canonic_dataarray, tmpdir):
out_path = tmpdir.join('res.grib')
grib_keys = {
'gridType': 'regular_ll',
'typeOfLevel': 'surface',
}
with open(str(out_path), 'wb') as file:
xarray_store.canonical_dataarray_to_grib(file, canonic_dataarray, grib_keys=grib_keys)
def test_canonical_dataarray_to_grib_detect_grik_keys(canonic_dataarray, tmpdir):
out_path = tmpdir.join('res.grib')
with open(str(out_path), 'wb') as file:
xarray_store.canonical_dataarray_to_grib(file, canonic_dataarray)
def test_canonical_dataarray_to_grib_conflicting_detect_grik_keys(canonic_dataarray, tmpdir):
out_path = tmpdir.join('res.grib')
grib_keys = {
'gridType': 'reduced_ll',
}
with open(str(out_path), 'wb') as file:
with pytest.raises(ValueError):
xarray_store.canonical_dataarray_to_grib(file, canonic_dataarray, grib_keys=grib_keys)
|
<commit_before><commit_msg>Add some simple tests for grib_keys auto-detection and user definition.<commit_after>
|
import numpy as np
import pytest
import xarray as xr
from cfgrib import xarray_store
@pytest.fixture()
def canonic_dataarray():
da = xr.DataArray(
np.arange(20.).reshape((4, 5)),
coords=[np.linspace(90., -90., 4), np.linspace(0., 360., 5, endpoint=False)],
dims=['latitude', 'longitude'],
)
return da
def test_canonical_dataarray_to_grib_with_grik_keys(canonic_dataarray, tmpdir):
out_path = tmpdir.join('res.grib')
grib_keys = {
'gridType': 'regular_ll',
'typeOfLevel': 'surface',
}
with open(str(out_path), 'wb') as file:
xarray_store.canonical_dataarray_to_grib(file, canonic_dataarray, grib_keys=grib_keys)
def test_canonical_dataarray_to_grib_detect_grik_keys(canonic_dataarray, tmpdir):
out_path = tmpdir.join('res.grib')
with open(str(out_path), 'wb') as file:
xarray_store.canonical_dataarray_to_grib(file, canonic_dataarray)
def test_canonical_dataarray_to_grib_conflicting_detect_grik_keys(canonic_dataarray, tmpdir):
out_path = tmpdir.join('res.grib')
grib_keys = {
'gridType': 'reduced_ll',
}
with open(str(out_path), 'wb') as file:
with pytest.raises(ValueError):
xarray_store.canonical_dataarray_to_grib(file, canonic_dataarray, grib_keys=grib_keys)
|
Add some simple tests for grib_keys auto-detection and user definition.
import numpy as np
import pytest
import xarray as xr
from cfgrib import xarray_store
@pytest.fixture()
def canonic_dataarray():
da = xr.DataArray(
np.arange(20.).reshape((4, 5)),
coords=[np.linspace(90., -90., 4), np.linspace(0., 360., 5, endpoint=False)],
dims=['latitude', 'longitude'],
)
return da
def test_canonical_dataarray_to_grib_with_grik_keys(canonic_dataarray, tmpdir):
out_path = tmpdir.join('res.grib')
grib_keys = {
'gridType': 'regular_ll',
'typeOfLevel': 'surface',
}
with open(str(out_path), 'wb') as file:
xarray_store.canonical_dataarray_to_grib(file, canonic_dataarray, grib_keys=grib_keys)
def test_canonical_dataarray_to_grib_detect_grik_keys(canonic_dataarray, tmpdir):
out_path = tmpdir.join('res.grib')
with open(str(out_path), 'wb') as file:
xarray_store.canonical_dataarray_to_grib(file, canonic_dataarray)
def test_canonical_dataarray_to_grib_conflicting_detect_grik_keys(canonic_dataarray, tmpdir):
out_path = tmpdir.join('res.grib')
grib_keys = {
'gridType': 'reduced_ll',
}
with open(str(out_path), 'wb') as file:
with pytest.raises(ValueError):
xarray_store.canonical_dataarray_to_grib(file, canonic_dataarray, grib_keys=grib_keys)
|
<commit_before><commit_msg>Add some simple tests for grib_keys auto-detection and user definition.<commit_after>
import numpy as np
import pytest
import xarray as xr
from cfgrib import xarray_store
@pytest.fixture()
def canonic_dataarray():
da = xr.DataArray(
np.arange(20.).reshape((4, 5)),
coords=[np.linspace(90., -90., 4), np.linspace(0., 360., 5, endpoint=False)],
dims=['latitude', 'longitude'],
)
return da
def test_canonical_dataarray_to_grib_with_grik_keys(canonic_dataarray, tmpdir):
out_path = tmpdir.join('res.grib')
grib_keys = {
'gridType': 'regular_ll',
'typeOfLevel': 'surface',
}
with open(str(out_path), 'wb') as file:
xarray_store.canonical_dataarray_to_grib(file, canonic_dataarray, grib_keys=grib_keys)
def test_canonical_dataarray_to_grib_detect_grik_keys(canonic_dataarray, tmpdir):
out_path = tmpdir.join('res.grib')
with open(str(out_path), 'wb') as file:
xarray_store.canonical_dataarray_to_grib(file, canonic_dataarray)
def test_canonical_dataarray_to_grib_conflicting_detect_grik_keys(canonic_dataarray, tmpdir):
out_path = tmpdir.join('res.grib')
grib_keys = {
'gridType': 'reduced_ll',
}
with open(str(out_path), 'wb') as file:
with pytest.raises(ValueError):
xarray_store.canonical_dataarray_to_grib(file, canonic_dataarray, grib_keys=grib_keys)
|