Dataset schema (column, feature type, and value-length range or number of distinct classes):

| Column | Type | Length / classes |
|---|---|---|
| commit | string | 40 characters |
| old_file | string | 4–118 characters |
| new_file | string | 4–118 characters |
| old_contents | string | 0–2.94k characters |
| new_contents | string | 1–4.43k characters |
| subject | string | 15–444 characters |
| message | string | 16–3.45k characters |
| lang | class label | 1 value |
| license | class label | 13 values |
| repos | string | 5–43.2k characters |
| prompt | string | 17–4.58k characters |
| response | string | 1–4.43k characters |
| prompt_tagged | string | 58–4.62k characters |
| response_tagged | string | 1–4.43k characters |
| text | string | 132–7.29k characters |
| text_tagged | string | 173–7.33k characters |
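The rows below are sample records from this corpus, rendered one field per block and delimited by `|` lines. A minimal sketch of loading and inspecting such a split with the Hugging Face `datasets` library follows; the dataset path used here is a hypothetical placeholder, not the actual repository name.

```python
# Minimal sketch: load the commit-pair corpus and inspect one record.
# Assumes the data is published as a standard Hugging Face dataset;
# "user/python-commit-pairs" is a hypothetical placeholder path.
from datasets import load_dataset

ds = load_dataset("user/python-commit-pairs", split="train")

row = ds[0]
print(row["commit"])              # 40-character commit hash
print(row["new_file"])            # path of the file changed by the commit
print(row["subject"])             # first line of the commit message
print(row["text_tagged"][:200])   # tagged training text, truncated for display
```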
4e4d07254dfdbc86a2a9ed4211aacb0165cf8411
|
domain_api/migrations/0016_auto_20170407_0815.py
|
domain_api/migrations/0016_auto_20170407_0815.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-04-07 08:15
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations
class Migration(migrations.Migration):
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('domain_api', '0015_auto_20170406_0841'),
    ]
    operations = [
        migrations.AlterUniqueTogether(
            name='accountdetail',
            unique_together=set([('project_id', 'default_registrant')]),
        ),
    ]
|
Migrate db to make default_registrant unique for project
|
Migrate db to make default_registrant unique for project
|
Python
|
mit
|
heytrav/drs-project
|
Migrate db to make default_registrant unique for project
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-04-07 08:15
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('domain_api', '0015_auto_20170406_0841'),
]
operations = [
migrations.AlterUniqueTogether(
name='accountdetail',
unique_together=set([('project_id', 'default_registrant')]),
),
]
|
<commit_before><commit_msg>Migrate db to make default_registrant unique for project<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-04-07 08:15
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('domain_api', '0015_auto_20170406_0841'),
]
operations = [
migrations.AlterUniqueTogether(
name='accountdetail',
unique_together=set([('project_id', 'default_registrant')]),
),
]
|
Migrate db to make default_registrant unique for project# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-04-07 08:15
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('domain_api', '0015_auto_20170406_0841'),
]
operations = [
migrations.AlterUniqueTogether(
name='accountdetail',
unique_together=set([('project_id', 'default_registrant')]),
),
]
|
<commit_before><commit_msg>Migrate db to make default_registrant unique for project<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-04-07 08:15
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('domain_api', '0015_auto_20170406_0841'),
]
operations = [
migrations.AlterUniqueTogether(
name='accountdetail',
unique_together=set([('project_id', 'default_registrant')]),
),
]
|
|
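Judging from the record above, the `text_tagged` field appears to be a plain concatenation of the other columns wrapped in sentinel tokens, and `text` the same concatenation without the tokens. A rough reconstruction, assuming that reading is correct:

```python
# Rough reconstruction of how the tagged training string seems to be built
# from a row's fields. This is inferred from the sample records, not from
# documented preprocessing code.
def build_text_tagged(row: dict) -> str:
    return (
        "<commit_before>" + row["old_contents"]
        + "<commit_msg>" + row["message"]
        + "<commit_after>" + row["new_contents"]
    )
```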
1e87a9803c76128eec0c4a8f895f163682c8591e
|
examples/application/app_with_kv_in_template1.py
|
examples/application/app_with_kv_in_template1.py
|
'''
Application from a .kv
======================
The root application is created from the corresponding .kv. Check the test.kv
file to see what will be the root widget.
This example show how you can change the directory where the .kv live.
'''
import kivy
kivy.require('1.0.7')
from kivy.app import App
class TestApp(App):
    kv_directory = 'template1'
if __name__ == '__main__':
    TestApp().run()
|
'''
Application from a .kv
======================
The root application is created from the corresponding .kv. Check the test.kv
file to see what will be the root widget.
This example shows how you can change the directory where the .kv lives.
'''
import kivy
kivy.require('1.0.7')
from kivy.app import App
class TestApp(App):
    kv_directory = 'template1'
if __name__ == '__main__':
    TestApp().run()
|
Correct description comment, add a line break
|
Correct description comment, add a line break
|
Python
|
mit
|
cbenhagen/kivy,niavlys/kivy,JohnHowland/kivy,ernstp/kivy,edubrunaldi/kivy,akshayaurora/kivy,manthansharma/kivy,andnovar/kivy,andnovar/kivy,inclement/kivy,arlowhite/kivy,CuriousLearner/kivy,Farkal/kivy,tony/kivy,yoelk/kivy,angryrancor/kivy,darkopevec/kivy,MiyamotoAkira/kivy,CuriousLearner/kivy,janssen/kivy,CuriousLearner/kivy,bob-the-hamster/kivy,manthansharma/kivy,inclement/kivy,kivy/kivy,ernstp/kivy,dirkjot/kivy,Shyam10/kivy,thezawad/kivy,bliz937/kivy,xpndlabs/kivy,LogicalDash/kivy,cbenhagen/kivy,mSenyor/kivy,ehealthafrica-ci/kivy,denys-duchier/kivy,arcticshores/kivy,Shyam10/kivy,arcticshores/kivy,Farkal/kivy,Ramalus/kivy,aron-bordin/kivy,yoelk/kivy,aron-bordin/kivy,VinGarcia/kivy,youprofit/kivy,vitorio/kivy,yoelk/kivy,matham/kivy,edubrunaldi/kivy,adamkh/kivy,el-ethan/kivy,iamutkarshtiwari/kivy,vipulroxx/kivy,angryrancor/kivy,tony/kivy,bionoid/kivy,bionoid/kivy,MiyamotoAkira/kivy,kived/kivy,youprofit/kivy,el-ethan/kivy,bob-the-hamster/kivy,viralpandey/kivy,adamkh/kivy,Ramalus/kivy,xiaoyanit/kivy,Shyam10/kivy,adamkh/kivy,ehealthafrica-ci/kivy,kivy/kivy,jehutting/kivy,jkankiewicz/kivy,janssen/kivy,KeyWeeUsr/kivy,vipulroxx/kivy,janssen/kivy,niavlys/kivy,jegger/kivy,vipulroxx/kivy,rafalo1333/kivy,ehealthafrica-ci/kivy,angryrancor/kivy,bhargav2408/kivy,manthansharma/kivy,manashmndl/kivy,rafalo1333/kivy,jffernandez/kivy,jkankiewicz/kivy,xiaoyanit/kivy,jkankiewicz/kivy,eHealthAfrica/kivy,jehutting/kivy,bhargav2408/kivy,matham/kivy,gonzafirewall/kivy,Cheaterman/kivy,Shyam10/kivy,autosportlabs/kivy,manashmndl/kivy,ernstp/kivy,angryrancor/kivy,habibmasuro/kivy,KeyWeeUsr/kivy,vitorio/kivy,gonzafirewall/kivy,adamkh/kivy,gonzafirewall/kivy,Cheaterman/kivy,kived/kivy,thezawad/kivy,ernstp/kivy,mSenyor/kivy,darkopevec/kivy,gonzafirewall/kivy,Cheaterman/kivy,MiyamotoAkira/kivy,inclement/kivy,iamutkarshtiwari/kivy,xpndlabs/kivy,manashmndl/kivy,bionoid/kivy,JohnHowland/kivy,arcticshores/kivy,xiaoyanit/kivy,eHealthAfrica/kivy,habibmasuro/kivy,autosportlabs/kivy,el-ethan/kivy,denys-duchier/kivy,akshayaurora/kivy,eHealthAfrica/kivy,akshayaurora/kivy,jegger/kivy,kived/kivy,LogicalDash/kivy,bob-the-hamster/kivy,niavlys/kivy,arlowhite/kivy,vipulroxx/kivy,edubrunaldi/kivy,dirkjot/kivy,eHealthAfrica/kivy,jffernandez/kivy,JohnHowland/kivy,jffernandez/kivy,jegger/kivy,autosportlabs/kivy,VinGarcia/kivy,janssen/kivy,rnixx/kivy,jkankiewicz/kivy,Farkal/kivy,rnixx/kivy,denys-duchier/kivy,dirkjot/kivy,aron-bordin/kivy,jehutting/kivy,thezawad/kivy,kivy/kivy,habibmasuro/kivy,LogicalDash/kivy,aron-bordin/kivy,bob-the-hamster/kivy,ehealthafrica-ci/kivy,viralpandey/kivy,arcticshores/kivy,denys-duchier/kivy,tony/kivy,VinGarcia/kivy,cbenhagen/kivy,viralpandey/kivy,xpndlabs/kivy,niavlys/kivy,matham/kivy,Cheaterman/kivy,KeyWeeUsr/kivy,bionoid/kivy,jegger/kivy,youprofit/kivy,KeyWeeUsr/kivy,manthansharma/kivy,matham/kivy,arlowhite/kivy,vitorio/kivy,dirkjot/kivy,jffernandez/kivy,mSenyor/kivy,andnovar/kivy,JohnHowland/kivy,rafalo1333/kivy,bliz937/kivy,bliz937/kivy,Ramalus/kivy,Farkal/kivy,yoelk/kivy,rnixx/kivy,LogicalDash/kivy,MiyamotoAkira/kivy,bhargav2408/kivy,darkopevec/kivy,darkopevec/kivy,iamutkarshtiwari/kivy
|
'''
Application from a .kv
======================
The root application is created from the corresponding .kv. Check the test.kv
file to see what will be the root widget.
This example show how you can change the directory where the .kv live.
'''
import kivy
kivy.require('1.0.7')
from kivy.app import App
class TestApp(App):
kv_directory = 'template1'
if __name__ == '__main__':
TestApp().run()
Correct description comment, add a line break
|
'''
Application from a .kv
======================
The root application is created from the corresponding .kv. Check the test.kv
file to see what will be the root widget.
This example shows how you can change the directory where the .kv lives.
'''
import kivy
kivy.require('1.0.7')
from kivy.app import App
class TestApp(App):
kv_directory = 'template1'
if __name__ == '__main__':
TestApp().run()
|
<commit_before>'''
Application from a .kv
======================
The root application is created from the corresponding .kv. Check the test.kv
file to see what will be the root widget.
This example show how you can change the directory where the .kv live.
'''
import kivy
kivy.require('1.0.7')
from kivy.app import App
class TestApp(App):
kv_directory = 'template1'
if __name__ == '__main__':
TestApp().run()
<commit_msg>Correct description comment, add a line break<commit_after>
|
'''
Application from a .kv
======================
The root application is created from the corresponding .kv. Check the test.kv
file to see what will be the root widget.
This example shows how you can change the directory where the .kv lives.
'''
import kivy
kivy.require('1.0.7')
from kivy.app import App
class TestApp(App):
kv_directory = 'template1'
if __name__ == '__main__':
TestApp().run()
|
'''
Application from a .kv
======================
The root application is created from the corresponding .kv. Check the test.kv
file to see what will be the root widget.
This example show how you can change the directory where the .kv live.
'''
import kivy
kivy.require('1.0.7')
from kivy.app import App
class TestApp(App):
kv_directory = 'template1'
if __name__ == '__main__':
TestApp().run()
Correct description comment, add a line break'''
Application from a .kv
======================
The root application is created from the corresponding .kv. Check the test.kv
file to see what will be the root widget.
This example shows how you can change the directory where the .kv lives.
'''
import kivy
kivy.require('1.0.7')
from kivy.app import App
class TestApp(App):
kv_directory = 'template1'
if __name__ == '__main__':
TestApp().run()
|
<commit_before>'''
Application from a .kv
======================
The root application is created from the corresponding .kv. Check the test.kv
file to see what will be the root widget.
This example show how you can change the directory where the .kv live.
'''
import kivy
kivy.require('1.0.7')
from kivy.app import App
class TestApp(App):
kv_directory = 'template1'
if __name__ == '__main__':
TestApp().run()
<commit_msg>Correct description comment, add a line break<commit_after>'''
Application from a .kv
======================
The root application is created from the corresponding .kv. Check the test.kv
file to see what will be the root widget.
This example shows how you can change the directory where the .kv lives.
'''
import kivy
kivy.require('1.0.7')
from kivy.app import App
class TestApp(App):
kv_directory = 'template1'
if __name__ == '__main__':
TestApp().run()
|
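Records like the Kivy example above carry both `old_contents` and `new_contents`, so the underlying patch can be recovered with the standard library. A small sketch, assuming both fields are plain strings:

```python
import difflib

# Recover a unified diff for one record from its before/after file bodies.
def row_to_diff(row: dict) -> str:
    return "".join(difflib.unified_diff(
        row["old_contents"].splitlines(keepends=True),
        row["new_contents"].splitlines(keepends=True),
        fromfile=row["old_file"],
        tofile=row["new_file"],
    ))
```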
23ebe6036ae0590ee62c8eb738a9e88a81989204
|
tests/test_path.py
|
tests/test_path.py
|
from django.test import TestCase
from binder.views import split_path, join_path
class PathTest(TestCase):
    def _test_path(self, path_str, path_keys):
        with self.subTest('str to keys'):
            self.assertEqual(tuple(split_path(path_str)), path_keys)
        with self.subTest('keys to str'):
            self.assertEqual(join_path(path_keys), path_str)
    def test_single_key(self):
        self._test_path('foo', ('foo',))
    def test_multiple_keys(self):
        self._test_path('foo.bar.baz', ('foo', 'bar', 'baz'))
    def test_escape_dot(self):
        self._test_path('foo.bar\\.baz', ('foo', 'bar.baz'))
    def test_escape_backslash(self):
        self._test_path('foo.bar\\\\baz', ('foo', 'bar\\baz'))
|
Add tests for split_path/join_path functionality
|
Add tests for split_path/join_path functionality
|
Python
|
mit
|
CodeYellowBV/django-binder
|
Add tests for split_path/join_path functionality
|
from django.test import TestCase
from binder.views import split_path, join_path
class PathTest(TestCase):
def _test_path(self, path_str, path_keys):
with self.subTest('str to keys'):
self.assertEqual(tuple(split_path(path_str)), path_keys)
with self.subTest('keys to str'):
self.assertEqual(join_path(path_keys), path_str)
def test_single_key(self):
self._test_path('foo', ('foo',))
def test_multiple_keys(self):
self._test_path('foo.bar.baz', ('foo', 'bar', 'baz'))
def test_escape_dot(self):
self._test_path('foo.bar\\.baz', ('foo', 'bar.baz'))
def test_escape_backslash(self):
self._test_path('foo.bar\\\\baz', ('foo', 'bar\\baz'))
|
<commit_before><commit_msg>Add tests for split_path/join_path functionality<commit_after>
|
from django.test import TestCase
from binder.views import split_path, join_path
class PathTest(TestCase):
def _test_path(self, path_str, path_keys):
with self.subTest('str to keys'):
self.assertEqual(tuple(split_path(path_str)), path_keys)
with self.subTest('keys to str'):
self.assertEqual(join_path(path_keys), path_str)
def test_single_key(self):
self._test_path('foo', ('foo',))
def test_multiple_keys(self):
self._test_path('foo.bar.baz', ('foo', 'bar', 'baz'))
def test_escape_dot(self):
self._test_path('foo.bar\\.baz', ('foo', 'bar.baz'))
def test_escape_backslash(self):
self._test_path('foo.bar\\\\baz', ('foo', 'bar\\baz'))
|
Add tests for split_path/join_path functionalityfrom django.test import TestCase
from binder.views import split_path, join_path
class PathTest(TestCase):
def _test_path(self, path_str, path_keys):
with self.subTest('str to keys'):
self.assertEqual(tuple(split_path(path_str)), path_keys)
with self.subTest('keys to str'):
self.assertEqual(join_path(path_keys), path_str)
def test_single_key(self):
self._test_path('foo', ('foo',))
def test_multiple_keys(self):
self._test_path('foo.bar.baz', ('foo', 'bar', 'baz'))
def test_escape_dot(self):
self._test_path('foo.bar\\.baz', ('foo', 'bar.baz'))
def test_escape_backslash(self):
self._test_path('foo.bar\\\\baz', ('foo', 'bar\\baz'))
|
<commit_before><commit_msg>Add tests for split_path/join_path functionality<commit_after>from django.test import TestCase
from binder.views import split_path, join_path
class PathTest(TestCase):
def _test_path(self, path_str, path_keys):
with self.subTest('str to keys'):
self.assertEqual(tuple(split_path(path_str)), path_keys)
with self.subTest('keys to str'):
self.assertEqual(join_path(path_keys), path_str)
def test_single_key(self):
self._test_path('foo', ('foo',))
def test_multiple_keys(self):
self._test_path('foo.bar.baz', ('foo', 'bar', 'baz'))
def test_escape_dot(self):
self._test_path('foo.bar\\.baz', ('foo', 'bar.baz'))
def test_escape_backslash(self):
self._test_path('foo.bar\\\\baz', ('foo', 'bar\\baz'))
|
|
c93da26c35607518f286dbdf9023034288074fab
|
tests/test_unix.py
|
tests/test_unix.py
|
import asyncio
import os
import socket
import tempfile
import uvloop
from uvloop import _testbase as tb
class _TestUnix:
    def test_create_server_1(self):
        CNT = 0 # number of clients that were successful
        TOTAL_CNT = 100 # total number of clients that test will create
        TIMEOUT = 5.0 # timeout for this test
        async def handle_client(reader, writer):
            nonlocal CNT
            data = await reader.readexactly(4)
            self.assertEqual(data, b'AAAA')
            writer.write(b'OK')
            data = await reader.readexactly(4)
            self.assertEqual(data, b'BBBB')
            writer.write(b'SPAM')
            await writer.drain()
            writer.close()
            CNT += 1
        async def test_client(addr):
            sock = socket.socket(socket.AF_UNIX)
            with sock:
                sock.setblocking(False)
                await self.loop.sock_connect(sock, addr)
                await self.loop.sock_sendall(sock, b'AAAA')
                data = await self.loop.sock_recv(sock, 2)
                self.assertEqual(data, b'OK')
                await self.loop.sock_sendall(sock, b'BBBB')
                data = await self.loop.sock_recv(sock, 4)
                self.assertEqual(data, b'SPAM')
        async def start_server():
            with tempfile.TemporaryDirectory() as td:
                sock_name = os.path.join(td, 'sock')
                try:
                    srv = await asyncio.start_unix_server(
                        handle_client,
                        sock_name,
                        loop=self.loop)
                    try:
                        srv_socks = srv.sockets
                        self.assertTrue(srv_socks)
                        tasks = []
                        for _ in range(TOTAL_CNT):
                            tasks.append(test_client(sock_name))
                        try:
                            await asyncio.wait_for(
                                asyncio.gather(*tasks, loop=self.loop),
                                TIMEOUT, loop=self.loop)
                        finally:
                            self.loop.stop()
                    finally:
                        srv.close()
                        # Check that the server cleaned-up proxy-sockets
                        for srv_sock in srv_socks:
                            self.assertEqual(srv_sock.fileno(), -1)
                except:
                    self.loop.stop() # We don't want this test to stuck when
                                     # it fails.
                    raise
        self.loop.create_task(start_server())
        self.loop.run_forever()
        self.assertEqual(CNT, TOTAL_CNT)
class Test_UV_Unix(_TestUnix, tb.UVTestCase):
    pass
class Test_AIO_Unix(_TestUnix, tb.AIOTestCase):
    pass
|
Add a test for loop.create_unix_server
|
tests: Add a test for loop.create_unix_server
|
Python
|
apache-2.0
|
MagicStack/uvloop,1st1/uvloop,MagicStack/uvloop
|
tests: Add a test for loop.create_unix_server
|
import asyncio
import os
import socket
import tempfile
import uvloop
from uvloop import _testbase as tb
class _TestUnix:
def test_create_server_1(self):
CNT = 0 # number of clients that were successful
TOTAL_CNT = 100 # total number of clients that test will create
TIMEOUT = 5.0 # timeout for this test
async def handle_client(reader, writer):
nonlocal CNT
data = await reader.readexactly(4)
self.assertEqual(data, b'AAAA')
writer.write(b'OK')
data = await reader.readexactly(4)
self.assertEqual(data, b'BBBB')
writer.write(b'SPAM')
await writer.drain()
writer.close()
CNT += 1
async def test_client(addr):
sock = socket.socket(socket.AF_UNIX)
with sock:
sock.setblocking(False)
await self.loop.sock_connect(sock, addr)
await self.loop.sock_sendall(sock, b'AAAA')
data = await self.loop.sock_recv(sock, 2)
self.assertEqual(data, b'OK')
await self.loop.sock_sendall(sock, b'BBBB')
data = await self.loop.sock_recv(sock, 4)
self.assertEqual(data, b'SPAM')
async def start_server():
with tempfile.TemporaryDirectory() as td:
sock_name = os.path.join(td, 'sock')
try:
srv = await asyncio.start_unix_server(
handle_client,
sock_name,
loop=self.loop)
try:
srv_socks = srv.sockets
self.assertTrue(srv_socks)
tasks = []
for _ in range(TOTAL_CNT):
tasks.append(test_client(sock_name))
try:
await asyncio.wait_for(
asyncio.gather(*tasks, loop=self.loop),
TIMEOUT, loop=self.loop)
finally:
self.loop.stop()
finally:
srv.close()
# Check that the server cleaned-up proxy-sockets
for srv_sock in srv_socks:
self.assertEqual(srv_sock.fileno(), -1)
except:
self.loop.stop() # We don't want this test to stuck when
# it fails.
raise
self.loop.create_task(start_server())
self.loop.run_forever()
self.assertEqual(CNT, TOTAL_CNT)
class Test_UV_Unix(_TestUnix, tb.UVTestCase):
pass
class Test_AIO_Unix(_TestUnix, tb.AIOTestCase):
pass
|
<commit_before><commit_msg>tests: Add a test for loop.create_unix_server<commit_after>
|
import asyncio
import os
import socket
import tempfile
import uvloop
from uvloop import _testbase as tb
class _TestUnix:
def test_create_server_1(self):
CNT = 0 # number of clients that were successful
TOTAL_CNT = 100 # total number of clients that test will create
TIMEOUT = 5.0 # timeout for this test
async def handle_client(reader, writer):
nonlocal CNT
data = await reader.readexactly(4)
self.assertEqual(data, b'AAAA')
writer.write(b'OK')
data = await reader.readexactly(4)
self.assertEqual(data, b'BBBB')
writer.write(b'SPAM')
await writer.drain()
writer.close()
CNT += 1
async def test_client(addr):
sock = socket.socket(socket.AF_UNIX)
with sock:
sock.setblocking(False)
await self.loop.sock_connect(sock, addr)
await self.loop.sock_sendall(sock, b'AAAA')
data = await self.loop.sock_recv(sock, 2)
self.assertEqual(data, b'OK')
await self.loop.sock_sendall(sock, b'BBBB')
data = await self.loop.sock_recv(sock, 4)
self.assertEqual(data, b'SPAM')
async def start_server():
with tempfile.TemporaryDirectory() as td:
sock_name = os.path.join(td, 'sock')
try:
srv = await asyncio.start_unix_server(
handle_client,
sock_name,
loop=self.loop)
try:
srv_socks = srv.sockets
self.assertTrue(srv_socks)
tasks = []
for _ in range(TOTAL_CNT):
tasks.append(test_client(sock_name))
try:
await asyncio.wait_for(
asyncio.gather(*tasks, loop=self.loop),
TIMEOUT, loop=self.loop)
finally:
self.loop.stop()
finally:
srv.close()
# Check that the server cleaned-up proxy-sockets
for srv_sock in srv_socks:
self.assertEqual(srv_sock.fileno(), -1)
except:
self.loop.stop() # We don't want this test to stuck when
# it fails.
raise
self.loop.create_task(start_server())
self.loop.run_forever()
self.assertEqual(CNT, TOTAL_CNT)
class Test_UV_Unix(_TestUnix, tb.UVTestCase):
pass
class Test_AIO_Unix(_TestUnix, tb.AIOTestCase):
pass
|
tests: Add a test for loop.create_unix_serverimport asyncio
import os
import socket
import tempfile
import uvloop
from uvloop import _testbase as tb
class _TestUnix:
def test_create_server_1(self):
CNT = 0 # number of clients that were successful
TOTAL_CNT = 100 # total number of clients that test will create
TIMEOUT = 5.0 # timeout for this test
async def handle_client(reader, writer):
nonlocal CNT
data = await reader.readexactly(4)
self.assertEqual(data, b'AAAA')
writer.write(b'OK')
data = await reader.readexactly(4)
self.assertEqual(data, b'BBBB')
writer.write(b'SPAM')
await writer.drain()
writer.close()
CNT += 1
async def test_client(addr):
sock = socket.socket(socket.AF_UNIX)
with sock:
sock.setblocking(False)
await self.loop.sock_connect(sock, addr)
await self.loop.sock_sendall(sock, b'AAAA')
data = await self.loop.sock_recv(sock, 2)
self.assertEqual(data, b'OK')
await self.loop.sock_sendall(sock, b'BBBB')
data = await self.loop.sock_recv(sock, 4)
self.assertEqual(data, b'SPAM')
async def start_server():
with tempfile.TemporaryDirectory() as td:
sock_name = os.path.join(td, 'sock')
try:
srv = await asyncio.start_unix_server(
handle_client,
sock_name,
loop=self.loop)
try:
srv_socks = srv.sockets
self.assertTrue(srv_socks)
tasks = []
for _ in range(TOTAL_CNT):
tasks.append(test_client(sock_name))
try:
await asyncio.wait_for(
asyncio.gather(*tasks, loop=self.loop),
TIMEOUT, loop=self.loop)
finally:
self.loop.stop()
finally:
srv.close()
# Check that the server cleaned-up proxy-sockets
for srv_sock in srv_socks:
self.assertEqual(srv_sock.fileno(), -1)
except:
self.loop.stop() # We don't want this test to stuck when
# it fails.
raise
self.loop.create_task(start_server())
self.loop.run_forever()
self.assertEqual(CNT, TOTAL_CNT)
class Test_UV_Unix(_TestUnix, tb.UVTestCase):
pass
class Test_AIO_Unix(_TestUnix, tb.AIOTestCase):
pass
|
<commit_before><commit_msg>tests: Add a test for loop.create_unix_server<commit_after>import asyncio
import os
import socket
import tempfile
import uvloop
from uvloop import _testbase as tb
class _TestUnix:
def test_create_server_1(self):
CNT = 0 # number of clients that were successful
TOTAL_CNT = 100 # total number of clients that test will create
TIMEOUT = 5.0 # timeout for this test
async def handle_client(reader, writer):
nonlocal CNT
data = await reader.readexactly(4)
self.assertEqual(data, b'AAAA')
writer.write(b'OK')
data = await reader.readexactly(4)
self.assertEqual(data, b'BBBB')
writer.write(b'SPAM')
await writer.drain()
writer.close()
CNT += 1
async def test_client(addr):
sock = socket.socket(socket.AF_UNIX)
with sock:
sock.setblocking(False)
await self.loop.sock_connect(sock, addr)
await self.loop.sock_sendall(sock, b'AAAA')
data = await self.loop.sock_recv(sock, 2)
self.assertEqual(data, b'OK')
await self.loop.sock_sendall(sock, b'BBBB')
data = await self.loop.sock_recv(sock, 4)
self.assertEqual(data, b'SPAM')
async def start_server():
with tempfile.TemporaryDirectory() as td:
sock_name = os.path.join(td, 'sock')
try:
srv = await asyncio.start_unix_server(
handle_client,
sock_name,
loop=self.loop)
try:
srv_socks = srv.sockets
self.assertTrue(srv_socks)
tasks = []
for _ in range(TOTAL_CNT):
tasks.append(test_client(sock_name))
try:
await asyncio.wait_for(
asyncio.gather(*tasks, loop=self.loop),
TIMEOUT, loop=self.loop)
finally:
self.loop.stop()
finally:
srv.close()
# Check that the server cleaned-up proxy-sockets
for srv_sock in srv_socks:
self.assertEqual(srv_sock.fileno(), -1)
except:
self.loop.stop() # We don't want this test to stuck when
# it fails.
raise
self.loop.create_task(start_server())
self.loop.run_forever()
self.assertEqual(CNT, TOTAL_CNT)
class Test_UV_Unix(_TestUnix, tb.UVTestCase):
pass
class Test_AIO_Unix(_TestUnix, tb.AIOTestCase):
pass
|
|
0e4a6550984d2a244cd0a816101697d174eb3df2
|
tests/aggregate/test_search_vectors.py
|
tests/aggregate/test_search_vectors.py
|
import sqlalchemy as sa
from sqlalchemy_utils import aggregated, TSVectorType
from tests import TestCase
def tsvector_reduce_concat(vectors):
    return sa.sql.expression.cast(
        sa.func.coalesce(
            sa.func.array_to_string(sa.func.array_agg(vectors), ' ')
        ),
        TSVectorType
    )
class TestSearchVectorAggregates(TestCase):
    dns = 'postgres://postgres@localhost/sqlalchemy_utils_test'
    def create_models(self):
        class Catalog(self.Base):
            __tablename__ = 'catalog'
            id = sa.Column(sa.Integer, primary_key=True)
            name = sa.Column(sa.Unicode(255))
            @aggregated('products', sa.Column(TSVectorType))
            def product_search_vector(self):
                return tsvector_reduce_concat(
                    sa.func.to_tsvector(Product.name)
                )
            products = sa.orm.relationship('Product', backref='catalog')
        class Product(self.Base):
            __tablename__ = 'product'
            id = sa.Column(sa.Integer, primary_key=True)
            name = sa.Column(sa.Unicode(255))
            price = sa.Column(sa.Numeric)
            catalog_id = sa.Column(sa.Integer, sa.ForeignKey('catalog.id'))
        self.Catalog = Catalog
        self.Product = Product
    def test_assigns_aggregates_on_insert(self):
        catalog = self.Catalog(
            name=u'Some catalog'
        )
        self.session.add(catalog)
        self.session.commit()
        product = self.Product(
            name=u'Product XYZ',
            catalog=catalog
        )
        self.session.add(product)
        self.session.commit()
        self.session.refresh(catalog)
        assert catalog.product_search_vector == "'product':1 'xyz':2"
|
Add search vector agg test case
|
Add search vector agg test case
|
Python
|
bsd-3-clause
|
tonyseek/sqlalchemy-utils,rmoorman/sqlalchemy-utils,spoqa/sqlalchemy-utils,cheungpat/sqlalchemy-utils,joshfriend/sqlalchemy-utils,tonyseek/sqlalchemy-utils,konstantinoskostis/sqlalchemy-utils,JackWink/sqlalchemy-utils,marrybird/sqlalchemy-utils,joshfriend/sqlalchemy-utils
|
Add search vector agg test case
|
import sqlalchemy as sa
from sqlalchemy_utils import aggregated, TSVectorType
from tests import TestCase
def tsvector_reduce_concat(vectors):
return sa.sql.expression.cast(
sa.func.coalesce(
sa.func.array_to_string(sa.func.array_agg(vectors), ' ')
),
TSVectorType
)
class TestSearchVectorAggregates(TestCase):
dns = 'postgres://postgres@localhost/sqlalchemy_utils_test'
def create_models(self):
class Catalog(self.Base):
__tablename__ = 'catalog'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
@aggregated('products', sa.Column(TSVectorType))
def product_search_vector(self):
return tsvector_reduce_concat(
sa.func.to_tsvector(Product.name)
)
products = sa.orm.relationship('Product', backref='catalog')
class Product(self.Base):
__tablename__ = 'product'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
price = sa.Column(sa.Numeric)
catalog_id = sa.Column(sa.Integer, sa.ForeignKey('catalog.id'))
self.Catalog = Catalog
self.Product = Product
def test_assigns_aggregates_on_insert(self):
catalog = self.Catalog(
name=u'Some catalog'
)
self.session.add(catalog)
self.session.commit()
product = self.Product(
name=u'Product XYZ',
catalog=catalog
)
self.session.add(product)
self.session.commit()
self.session.refresh(catalog)
assert catalog.product_search_vector == "'product':1 'xyz':2"
|
<commit_before><commit_msg>Add search vector agg test case<commit_after>
|
import sqlalchemy as sa
from sqlalchemy_utils import aggregated, TSVectorType
from tests import TestCase
def tsvector_reduce_concat(vectors):
return sa.sql.expression.cast(
sa.func.coalesce(
sa.func.array_to_string(sa.func.array_agg(vectors), ' ')
),
TSVectorType
)
class TestSearchVectorAggregates(TestCase):
dns = 'postgres://postgres@localhost/sqlalchemy_utils_test'
def create_models(self):
class Catalog(self.Base):
__tablename__ = 'catalog'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
@aggregated('products', sa.Column(TSVectorType))
def product_search_vector(self):
return tsvector_reduce_concat(
sa.func.to_tsvector(Product.name)
)
products = sa.orm.relationship('Product', backref='catalog')
class Product(self.Base):
__tablename__ = 'product'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
price = sa.Column(sa.Numeric)
catalog_id = sa.Column(sa.Integer, sa.ForeignKey('catalog.id'))
self.Catalog = Catalog
self.Product = Product
def test_assigns_aggregates_on_insert(self):
catalog = self.Catalog(
name=u'Some catalog'
)
self.session.add(catalog)
self.session.commit()
product = self.Product(
name=u'Product XYZ',
catalog=catalog
)
self.session.add(product)
self.session.commit()
self.session.refresh(catalog)
assert catalog.product_search_vector == "'product':1 'xyz':2"
|
Add search vector agg test caseimport sqlalchemy as sa
from sqlalchemy_utils import aggregated, TSVectorType
from tests import TestCase
def tsvector_reduce_concat(vectors):
return sa.sql.expression.cast(
sa.func.coalesce(
sa.func.array_to_string(sa.func.array_agg(vectors), ' ')
),
TSVectorType
)
class TestSearchVectorAggregates(TestCase):
dns = 'postgres://postgres@localhost/sqlalchemy_utils_test'
def create_models(self):
class Catalog(self.Base):
__tablename__ = 'catalog'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
@aggregated('products', sa.Column(TSVectorType))
def product_search_vector(self):
return tsvector_reduce_concat(
sa.func.to_tsvector(Product.name)
)
products = sa.orm.relationship('Product', backref='catalog')
class Product(self.Base):
__tablename__ = 'product'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
price = sa.Column(sa.Numeric)
catalog_id = sa.Column(sa.Integer, sa.ForeignKey('catalog.id'))
self.Catalog = Catalog
self.Product = Product
def test_assigns_aggregates_on_insert(self):
catalog = self.Catalog(
name=u'Some catalog'
)
self.session.add(catalog)
self.session.commit()
product = self.Product(
name=u'Product XYZ',
catalog=catalog
)
self.session.add(product)
self.session.commit()
self.session.refresh(catalog)
assert catalog.product_search_vector == "'product':1 'xyz':2"
|
<commit_before><commit_msg>Add search vector agg test case<commit_after>import sqlalchemy as sa
from sqlalchemy_utils import aggregated, TSVectorType
from tests import TestCase
def tsvector_reduce_concat(vectors):
return sa.sql.expression.cast(
sa.func.coalesce(
sa.func.array_to_string(sa.func.array_agg(vectors), ' ')
),
TSVectorType
)
class TestSearchVectorAggregates(TestCase):
dns = 'postgres://postgres@localhost/sqlalchemy_utils_test'
def create_models(self):
class Catalog(self.Base):
__tablename__ = 'catalog'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
@aggregated('products', sa.Column(TSVectorType))
def product_search_vector(self):
return tsvector_reduce_concat(
sa.func.to_tsvector(Product.name)
)
products = sa.orm.relationship('Product', backref='catalog')
class Product(self.Base):
__tablename__ = 'product'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
price = sa.Column(sa.Numeric)
catalog_id = sa.Column(sa.Integer, sa.ForeignKey('catalog.id'))
self.Catalog = Catalog
self.Product = Product
def test_assigns_aggregates_on_insert(self):
catalog = self.Catalog(
name=u'Some catalog'
)
self.session.add(catalog)
self.session.commit()
product = self.Product(
name=u'Product XYZ',
catalog=catalog
)
self.session.add(product)
self.session.commit()
self.session.refresh(catalog)
assert catalog.product_search_vector == "'product':1 'xyz':2"
|
|
b1949e4c0984c1f254c5877da1b977c01567bf4d
|
tests/unicode/unicode_index.py
|
tests/unicode/unicode_index.py
|
print("Привет".find("т"))
print("Привет".find("П"))
print("Привет".rfind("т"))
print("Привет".rfind("П"))
print("Привет".index("т"))
print("Привет".index("П"))
|
Add tests for unicode find()/rfind()/index().
|
tests: Add tests for unicode find()/rfind()/index().
|
Python
|
mit
|
turbinenreiter/micropython,deshipu/micropython,jlillest/micropython,xuxiaoxin/micropython,dxxb/micropython,ChuckM/micropython,EcmaXp/micropython,henriknelson/micropython,alex-robbins/micropython,HenrikSolver/micropython,tdautc19841202/micropython,blmorris/micropython,emfcamp/micropython,selste/micropython,aethaniel/micropython,blazewicz/micropython,feilongfl/micropython,cnoviello/micropython,danicampora/micropython,blmorris/micropython,infinnovation/micropython,mpalomer/micropython,toolmacher/micropython,blazewicz/micropython,feilongfl/micropython,HenrikSolver/micropython,aethaniel/micropython,Peetz0r/micropython-esp32,martinribelotta/micropython,ernesto-g/micropython,noahwilliamsson/micropython,chrisdearman/micropython,slzatz/micropython,pozetroninc/micropython,adafruit/circuitpython,dxxb/micropython,henriknelson/micropython,kerneltask/micropython,henriknelson/micropython,MrSurly/micropython,dinau/micropython,deshipu/micropython,turbinenreiter/micropython,oopy/micropython,PappaPeppar/micropython,jimkmc/micropython,Peetz0r/micropython-esp32,hosaka/micropython,noahwilliamsson/micropython,firstval/micropython,noahchense/micropython,mgyenik/micropython,Timmenem/micropython,adafruit/micropython,neilh10/micropython,mianos/micropython,xyb/micropython,utopiaprince/micropython,dmazzella/micropython,ahotam/micropython,oopy/micropython,blmorris/micropython,drrk/micropython,AriZuu/micropython,warner83/micropython,pfalcon/micropython,bvernoux/micropython,puuu/micropython,misterdanb/micropython,AriZuu/micropython,tuc-osg/micropython,mpalomer/micropython,alex-robbins/micropython,hosaka/micropython,ericsnowcurrently/micropython,vitiral/micropython,hiway/micropython,kostyll/micropython,matthewelse/micropython,suda/micropython,oopy/micropython,KISSMonX/micropython,ganshun666/micropython,slzatz/micropython,pramasoul/micropython,ryannathans/micropython,drrk/micropython,lbattraw/micropython,mianos/micropython,ceramos/micropython,orionrobots/micropython,MrSurly/micropython-esp32,alex-robbins/micropython,lowRISC/micropython,paul-xxx/micropython,galenhz/micropython,MrSurly/micropython,ericsnowcurrently/micropython,xuxiaoxin/micropython,cwyark/micropython,blmorris/micropython,ernesto-g/micropython,xyb/micropython,emfcamp/micropython,cnoviello/micropython,torwag/micropython,AriZuu/micropython,kostyll/micropython,tralamazza/micropython,jimkmc/micropython,pfalcon/micropython,dinau/micropython,bvernoux/micropython,torwag/micropython,mianos/micropython,micropython/micropython-esp32,jimkmc/micropython,xhat/micropython,heisewangluo/micropython,infinnovation/micropython,noahwilliamsson/micropython,skybird6672/micropython,MrSurly/micropython-esp32,EcmaXp/micropython,hiway/micropython,drrk/micropython,danicampora/micropython,omtinez/micropython,MrSurly/micropython-esp32,Timmenem/micropython,lbattraw/micropython,TDAbboud/micropython,deshipu/micropython,xyb/micropython,feilongfl/micropython,rubencabrera/micropython,noahchense/micropython,vriera/micropython,orionrobots/micropython,jlillest/micropython,vriera/micropython,warner83/micropython,pramasoul/micropython,MrSurly/micropython,ernesto-g/micropython,KISSMonX/micropython,martinribelotta/micropython,SHA2017-badge/micropython-esp32,redbear/micropython,EcmaXp/micropython,rubencabrera/micropython,pozetroninc/micropython,orionrobots/micropython,Vogtinator/micropython,redbear/micropython,toolmacher/micropython,trezor/micropython,pozetroninc/micropython,adamkh/micropython,danicampora/micropython,Timmenem/micropython,firstval/micropython,ceramos/micropython,jlillest/micropython,noa
hchense/micropython,lbattraw/micropython,jmarcelino/pycom-micropython,feilongfl/micropython,PappaPeppar/micropython,TDAbboud/micropython,vriera/micropython,SungEun-Steve-Kim/test-mp,praemdonck/micropython,KISSMonX/micropython,lbattraw/micropython,supergis/micropython,infinnovation/micropython,ernesto-g/micropython,HenrikSolver/micropython,puuu/micropython,alex-robbins/micropython,adafruit/circuitpython,KISSMonX/micropython,cloudformdesign/micropython,vitiral/micropython,mhoffma/micropython,pramasoul/micropython,SHA2017-badge/micropython-esp32,kerneltask/micropython,Vogtinator/micropython,noahwilliamsson/micropython,jmarcelino/pycom-micropython,ahotam/micropython,jlillest/micropython,mpalomer/micropython,PappaPeppar/micropython,omtinez/micropython,tdautc19841202/micropython,ceramos/micropython,xuxiaoxin/micropython,ruffy91/micropython,micropython/micropython-esp32,galenhz/micropython,ChuckM/micropython,jmarcelino/pycom-micropython,ahotam/micropython,toolmacher/micropython,matthewelse/micropython,torwag/micropython,chrisdearman/micropython,SungEun-Steve-Kim/test-mp,trezor/micropython,swegener/micropython,hiway/micropython,puuu/micropython,feilongfl/micropython,skybird6672/micropython,mpalomer/micropython,matthewelse/micropython,HenrikSolver/micropython,redbear/micropython,martinribelotta/micropython,rubencabrera/micropython,tralamazza/micropython,matthewelse/micropython,xuxiaoxin/micropython,blmorris/micropython,praemdonck/micropython,mpalomer/micropython,vitiral/micropython,lowRISC/micropython,KISSMonX/micropython,drrk/micropython,adamkh/micropython,henriknelson/micropython,selste/micropython,misterdanb/micropython,vriera/micropython,hiway/micropython,ceramos/micropython,swegener/micropython,misterdanb/micropython,misterdanb/micropython,cwyark/micropython,dxxb/micropython,tdautc19841202/micropython,pramasoul/micropython,SungEun-Steve-Kim/test-mp,supergis/micropython,turbinenreiter/micropython,kerneltask/micropython,pozetroninc/micropython,ganshun666/micropython,swegener/micropython,trezor/micropython,SHA2017-badge/micropython-esp32,matthewelse/micropython,firstval/micropython,cnoviello/micropython,EcmaXp/micropython,vriera/micropython,cnoviello/micropython,paul-xxx/micropython,orionrobots/micropython,pfalcon/micropython,dhylands/micropython,xuxiaoxin/micropython,cloudformdesign/micropython,neilh10/micropython,vitiral/micropython,hosaka/micropython,Peetz0r/micropython-esp32,SHA2017-badge/micropython-esp32,jmarcelino/pycom-micropython,stonegithubs/micropython,blazewicz/micropython,adafruit/circuitpython,hosaka/micropython,TDAbboud/micropython,xyb/micropython,danicampora/micropython,ruffy91/micropython,misterdanb/micropython,galenhz/micropython,cloudformdesign/micropython,stonegithubs/micropython,mhoffma/micropython,stonegithubs/micropython,aethaniel/micropython,ganshun666/micropython,mgyenik/micropython,hosaka/micropython,omtinez/micropython,paul-xxx/micropython,aethaniel/micropython,SungEun-Steve-Kim/test-mp,tralamazza/micropython,trezor/micropython,deshipu/micropython,noahchense/micropython,tuc-osg/micropython,infinnovation/micropython,oopy/micropython,ChuckM/micropython,puuu/micropython,jmarcelino/pycom-micropython,EcmaXp/micropython,ernesto-g/micropython,tuc-osg/micropython,skybird6672/micropython,dxxb/micropython,omtinez/micropython,adafruit/micropython,Timmenem/micropython,redbear/micropython,xhat/micropython,tobbad/micropython,jimkmc/micropython,lowRISC/micropython,adafruit/circuitpython,tdautc19841202/micropython,adamkh/micropython,emfcamp/micropython,redbear/micropython,swegener/micro
python,alex-march/micropython,supergis/micropython,ericsnowcurrently/micropython,tuc-osg/micropython,dmazzella/micropython,tobbad/micropython,emfcamp/micropython,swegener/micropython,pramasoul/micropython,praemdonck/micropython,ryannathans/micropython,mgyenik/micropython,lowRISC/micropython,MrSurly/micropython,oopy/micropython,TDAbboud/micropython,dhylands/micropython,Peetz0r/micropython-esp32,utopiaprince/micropython,omtinez/micropython,adafruit/micropython,kostyll/micropython,martinribelotta/micropython,methoxid/micropystat,slzatz/micropython,cloudformdesign/micropython,micropython/micropython-esp32,jimkmc/micropython,ahotam/micropython,aethaniel/micropython,stonegithubs/micropython,supergis/micropython,vitiral/micropython,deshipu/micropython,methoxid/micropystat,alex-robbins/micropython,paul-xxx/micropython,ericsnowcurrently/micropython,adafruit/micropython,cwyark/micropython,drrk/micropython,cloudformdesign/micropython,ChuckM/micropython,heisewangluo/micropython,Vogtinator/micropython,bvernoux/micropython,toolmacher/micropython,heisewangluo/micropython,ryannathans/micropython,matthewelse/micropython,ChuckM/micropython,adamkh/micropython,xhat/micropython,Peetz0r/micropython-esp32,praemdonck/micropython,tobbad/micropython,dhylands/micropython,turbinenreiter/micropython,mianos/micropython,galenhz/micropython,pfalcon/micropython,mhoffma/micropython,dhylands/micropython,slzatz/micropython,heisewangluo/micropython,galenhz/micropython,utopiaprince/micropython,chrisdearman/micropython,suda/micropython,pozetroninc/micropython,mhoffma/micropython,chrisdearman/micropython,mgyenik/micropython,ryannathans/micropython,orionrobots/micropython,PappaPeppar/micropython,warner83/micropython,xyb/micropython,praemdonck/micropython,methoxid/micropystat,mhoffma/micropython,kostyll/micropython,micropython/micropython-esp32,lowRISC/micropython,AriZuu/micropython,henriknelson/micropython,selste/micropython,neilh10/micropython,suda/micropython,bvernoux/micropython,Vogtinator/micropython,paul-xxx/micropython,neilh10/micropython,emfcamp/micropython,kerneltask/micropython,selste/micropython,neilh10/micropython,jlillest/micropython,selste/micropython,cwyark/micropython,tralamazza/micropython,SHA2017-badge/micropython-esp32,MrSurly/micropython-esp32,noahchense/micropython,alex-march/micropython,PappaPeppar/micropython,chrisdearman/micropython,puuu/micropython,infinnovation/micropython,micropython/micropython-esp32,blazewicz/micropython,dmazzella/micropython,ganshun666/micropython,utopiaprince/micropython,firstval/micropython,bvernoux/micropython,utopiaprince/micropython,adafruit/circuitpython,alex-march/micropython,ryannathans/micropython,ruffy91/micropython,xhat/micropython,kerneltask/micropython,alex-march/micropython,tobbad/micropython,dinau/micropython,adamkh/micropython,firstval/micropython,mianos/micropython,dmazzella/micropython,toolmacher/micropython,ganshun666/micropython,ceramos/micropython,noahwilliamsson/micropython,TDAbboud/micropython,danicampora/micropython,blazewicz/micropython,Vogtinator/micropython,SungEun-Steve-Kim/test-mp,adafruit/micropython,lbattraw/micropython,HenrikSolver/micropython,alex-march/micropython,martinribelotta/micropython,ahotam/micropython,warner83/micropython,cnoviello/micropython,rubencabrera/micropython,MrSurly/micropython-esp32,pfalcon/micropython,warner83/micropython,rubencabrera/micropython,torwag/micropython,methoxid/micropystat,dinau/micropython,cwyark/micropython,dhylands/micropython,ruffy91/micropython,skybird6672/micropython,suda/micropython,xhat/micropython,torwag/micro
python,dinau/micropython,ruffy91/micropython,heisewangluo/micropython,AriZuu/micropython,tuc-osg/micropython,supergis/micropython,ericsnowcurrently/micropython,hiway/micropython,slzatz/micropython,MrSurly/micropython,stonegithubs/micropython,suda/micropython,skybird6672/micropython,tobbad/micropython,adafruit/circuitpython,turbinenreiter/micropython,kostyll/micropython,trezor/micropython,mgyenik/micropython,dxxb/micropython,tdautc19841202/micropython,methoxid/micropystat,Timmenem/micropython
|
tests: Add tests for unicode find()/rfind()/index().
|
print("Привет".find("т"))
print("Привет".find("П"))
print("Привет".rfind("т"))
print("Привет".rfind("П"))
print("Привет".index("т"))
print("Привет".index("П"))
|
<commit_before><commit_msg>tests: Add tests for unicode find()/rfind()/index().<commit_after>
|
print("Привет".find("т"))
print("Привет".find("П"))
print("Привет".rfind("т"))
print("Привет".rfind("П"))
print("Привет".index("т"))
print("Привет".index("П"))
|
tests: Add tests for unicode find()/rfind()/index().print("Привет".find("т"))
print("Привет".find("П"))
print("Привет".rfind("т"))
print("Привет".rfind("П"))
print("Привет".index("т"))
print("Привет".index("П"))
|
<commit_before><commit_msg>tests: Add tests for unicode find()/rfind()/index().<commit_after>print("Привет".find("т"))
print("Привет".find("П"))
print("Привет".rfind("т"))
print("Привет".rfind("П"))
print("Привет".index("т"))
print("Привет".index("П"))
|
|
f8988e956577bf838663346412d223d4cd1351d5
|
django_orm/postgresql/expressions.py
|
django_orm/postgresql/expressions.py
|
# -*- coding: utf-8 -*-
from django.db.models.expressions import F as BaseF
class F(BaseF):
    def __invert__(self):
        self._invert = True
        return self
    def evaluate(self, evaluator, qn, connection):
        result = evaluator.evaluate_leaf(self, qn, connection)
        if self._invert and len(result) == 2 and connection.vendor == 'postgresql':
            result = list(result)
            result[0] = "NOT %s" % result[0]
            result = tuple(result)
        return result
|
Add new modifier of F expression class (~F("boolean field"))
|
Add new modifier of F expression class (~F("boolean field"))
|
Python
|
bsd-3-clause
|
EnTeQuAk/django-orm,EnTeQuAk/django-orm
|
Add new modifier of F expression class (~F("boolean field"))
|
# -*- coding: utf-8 -*-
from django.db.models.expressions import F as BaseF
class F(BaseF):
def __invert__(self):
self._invert = True
return self
def evaluate(self, evaluator, qn, connection):
result = evaluator.evaluate_leaf(self, qn, connection)
if self._invert and len(result) == 2 and connection.vendor == 'postgresql':
result = list(result)
result[0] = "NOT %s" % result[0]
result = tuple(result)
return result
|
<commit_before><commit_msg>Add new modifier of F expression class (~F("boolean field"))<commit_after>
|
# -*- coding: utf-8 -*-
from django.db.models.expressions import F as BaseF
class F(BaseF):
def __invert__(self):
self._invert = True
return self
def evaluate(self, evaluator, qn, connection):
result = evaluator.evaluate_leaf(self, qn, connection)
if self._invert and len(result) == 2 and connection.vendor == 'postgresql':
result = list(result)
result[0] = "NOT %s" % result[0]
result = tuple(result)
return result
|
Add new modifier of F expression class (~F("boolean field"))# -*- coding: utf-8 -*-
from django.db.models.expressions import F as BaseF
class F(BaseF):
def __invert__(self):
self._invert = True
return self
def evaluate(self, evaluator, qn, connection):
result = evaluator.evaluate_leaf(self, qn, connection)
if self._invert and len(result) == 2 and connection.vendor == 'postgresql':
result = list(result)
result[0] = "NOT %s" % result[0]
result = tuple(result)
return result
|
<commit_before><commit_msg>Add new modifier of F expression class (~F("boolean field"))<commit_after># -*- coding: utf-8 -*-
from django.db.models.expressions import F as BaseF
class F(BaseF):
def __invert__(self):
self._invert = True
return self
def evaluate(self, evaluator, qn, connection):
result = evaluator.evaluate_leaf(self, qn, connection)
if self._invert and len(result) == 2 and connection.vendor == 'postgresql':
result = list(result)
result[0] = "NOT %s" % result[0]
result = tuple(result)
return result
|
|
2471742baac1a452efb4ab532e0aa1bb873a2913
|
rail-system-optimizer.py
|
rail-system-optimizer.py
|
"""
Train station Optimizer!
This program optimizes the efficiency of a train station by simulating normal
operations and varying train schedules and escalator ediquette.
"""
from CreateRailSystem import *
from StationPopWithoutTrain import *
from StationPopWithTrain import *
from MonteCarloSim import *
runs = eval(input("Enter the number of simulations you want to run: "))
failure_threshold = (eval(input("Enter acceptable system failure percentage: ")) //
100) * runs
def main():
station_1 = station()
station_2 = station_1
train_1 = train()
escalator_1 = escalator()
escalator_2 = escalator()
system_state = log(0,0,0,0)
rail_system(station_1, train_1, escalator_1, escalator_2)
failures = 0
for i in range(runs):
old_wait_time = station_1.train_wait
failures = failures + im_no_train(station_1, train_1, escalator_1)[0]
if failures > failure_threshold:
failures = 0
station_1.train_wait = station_1.train_wait - (old_wait_time / 2)
sans_train_overflow = True
else:
failures = failures + sim_with_train(station_1, train_1, escalator_1)
if failures > failure_threshold:
station_1.train_wait = 2 * station_1.train_wait
if sans_train_overflow:
time_dfference = old_wait_time - station_1.train_wait
for j in range(time_difference):
failure = sim_with_train(station_1, train_1, escalator_1)[0]
if failures > failure_threshold:
station_1.train_wait = station_1.train_wait + j
|
Add main function of the rail system optimizer
|
Add main function of the rail system optimizer
def issue#17
|
Python
|
mit
|
ForestPride/rail-problem
|
Add main function of the rail system optimizer
def issue#17
|
"""
Train station Optimizer!
This program optimizes the efficiency of a train station by simulating normal
operations and varying train schedules and escalator ediquette.
"""
from CreateRailSystem import *
from StationPopWithoutTrain import *
from StationPopWithTrain import *
from MonteCarloSim import *
runs = eval(input("Enter the number of simulations you want to run: "))
failure_threshold = (eval(input("Enter acceptable system failure percentage: ")) //
100) * runs
def main():
station_1 = station()
station_2 = station_1
train_1 = train()
escalator_1 = escalator()
escalator_2 = escalator()
system_state = log(0,0,0,0)
rail_system(station_1, train_1, escalator_1, escalator_2)
failures = 0
for i in range(runs):
old_wait_time = station_1.train_wait
failures = failures + im_no_train(station_1, train_1, escalator_1)[0]
if failures > failure_threshold:
failures = 0
station_1.train_wait = station_1.train_wait - (old_wait_time / 2)
sans_train_overflow = True
else:
failures = failures + sim_with_train(station_1, train_1, escalator_1)
if failures > failure_threshold:
station_1.train_wait = 2 * station_1.train_wait
if sans_train_overflow:
time_dfference = old_wait_time - station_1.train_wait
for j in range(time_difference):
failure = sim_with_train(station_1, train_1, escalator_1)[0]
if failures > failure_threshold:
station_1.train_wait = station_1.train_wait + j
|
<commit_before><commit_msg>Add main function of the rail system optimizer
def issue#17<commit_after>
|
"""
Train station Optimizer!
This program optimizes the efficiency of a train station by simulating normal
operations and varying train schedules and escalator ediquette.
"""
from CreateRailSystem import *
from StationPopWithoutTrain import *
from StationPopWithTrain import *
from MonteCarloSim import *
runs = eval(input("Enter the number of simulations you want to run: "))
failure_threshold = (eval(input("Enter acceptable system failure percentage: ")) //
100) * runs
def main():
station_1 = station()
station_2 = station_1
train_1 = train()
escalator_1 = escalator()
escalator_2 = escalator()
system_state = log(0,0,0,0)
rail_system(station_1, train_1, escalator_1, escalator_2)
failures = 0
for i in range(runs):
old_wait_time = station_1.train_wait
failures = failures + im_no_train(station_1, train_1, escalator_1)[0]
if failures > failure_threshold:
failures = 0
station_1.train_wait = station_1.train_wait - (old_wait_time / 2)
sans_train_overflow = True
else:
failures = failures + sim_with_train(station_1, train_1, escalator_1)
if failures > failure_threshold:
station_1.train_wait = 2 * station_1.train_wait
if sans_train_overflow:
time_dfference = old_wait_time - station_1.train_wait
for j in range(time_difference):
failure = sim_with_train(station_1, train_1, escalator_1)[0]
if failures > failure_threshold:
station_1.train_wait = station_1.train_wait + j
|
Add main function of the rail system optimizer
def issue#17"""
Train station Optimizer!
This program optimizes the efficiency of a train station by simulating normal
operations and varying train schedules and escalator ediquette.
"""
from CreateRailSystem import *
from StationPopWithoutTrain import *
from StationPopWithTrain import *
from MonteCarloSim import *
runs = eval(input("Enter the number of simulations you want to run: "))
failure_threshold = (eval(input("Enter acceptable system failure percentage: ")) //
100) * runs
def main():
station_1 = station()
station_2 = station_1
train_1 = train()
escalator_1 = escalator()
escalator_2 = escalator()
system_state = log(0,0,0,0)
rail_system(station_1, train_1, escalator_1, escalator_2)
failures = 0
for i in range(runs):
old_wait_time = station_1.train_wait
failures = failures + im_no_train(station_1, train_1, escalator_1)[0]
if failures > failure_threshold:
failures = 0
station_1.train_wait = station_1.train_wait - (old_wait_time / 2)
sans_train_overflow = True
else:
failures = failures + sim_with_train(station_1, train_1, escalator_1)
if failures > failure_threshold:
station_1.train_wait = 2 * station_1.train_wait
if sans_train_overflow:
time_dfference = old_wait_time - station_1.train_wait
for j in range(time_difference):
failure = sim_with_train(station_1, train_1, escalator_1)[0]
if failures > failure_threshold:
station_1.train_wait = station_1.train_wait + j
|
<commit_before><commit_msg>Add main function of the rail system optimizer
def issue#17<commit_after>"""
Train station Optimizer!
This program optimizes the efficiency of a train station by simulating normal
operations and varying train schedules and escalator ediquette.
"""
from CreateRailSystem import *
from StationPopWithoutTrain import *
from StationPopWithTrain import *
from MonteCarloSim import *
runs = eval(input("Enter the number of simulations you want to run: "))
failure_threshold = (eval(input("Enter acceptable system failure percentage: ")) //
100) * runs
def main():
station_1 = station()
station_2 = station_1
train_1 = train()
escalator_1 = escalator()
escalator_2 = escalator()
system_state = log(0,0,0,0)
rail_system(station_1, train_1, escalator_1, escalator_2)
failures = 0
for i in range(runs):
old_wait_time = station_1.train_wait
failures = failures + im_no_train(station_1, train_1, escalator_1)[0]
if failures > failure_threshold:
failures = 0
station_1.train_wait = station_1.train_wait - (old_wait_time / 2)
sans_train_overflow = True
else:
failures = failures + sim_with_train(station_1, train_1, escalator_1)
if failures > failure_threshold:
station_1.train_wait = 2 * station_1.train_wait
if sans_train_overflow:
time_dfference = old_wait_time - station_1.train_wait
for j in range(time_difference):
failure = sim_with_train(station_1, train_1, escalator_1)[0]
if failures > failure_threshold:
station_1.train_wait = station_1.train_wait + j
|
|
60889b58596e81fd84d9b4fab5f8c04f0382f99c
|
examples/summary.py
|
examples/summary.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Use IMAP CLI to gt a summary of IMAP account state."""
import argparse
import getpass
import logging
import os
import sys
from imap_cli import config
from imap_cli.imap import connection
from imap_cli.imap import search
from imap_cli import list_mail
from imap_cli import status
app_name = os.path.splitext(os.path.basename(__file__))[0]
log = logging.getLogger(app_name)
def truncate_string(string, length):
    minus_than_position = string.find('<')
    if minus_than_position > 0 and string.find('>') > minus_than_position:
        string = string[0:minus_than_position]
    return string if len(string) < length else u'{0}…'.format(string[0:length])
def main():
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('imap_server', help="IMAP Server hostname")
    parser.add_argument('-l', '--login', help="Login for IMAP account")
    parser.add_argument('--no-ssl', action='store_true', help="Don't use SSL")
    parser.add_argument('-v', '--verbose', action='store_true', help='increase output verbosity')
    args = parser.parse_args()
    password = getpass.getpass()
    ctx = config.new_context({
        'hostname': args.imap_server,
        'username': args.login,
        'password': password,
        'ssl': not args.no_ssl,
    })
    connection.connect(ctx)
    for directory_status in status.status(ctx):
        if int(directory_status['unseen']) > 0:
            sys.stdout.write(directory_status['directory'])
            sys.stdout.write('\n')
            ctx.mail_account.select(directory_status['directory'], True)
            mail_set = search.search(ctx, search_criterion=[search.create_search_criteria_by_tag(['unseen'])])
            for mail_info in list_mail.list_mail(ctx, mail_set=mail_set):
                sys.stdout.write(u' From : {:<35} To : {:<35} Subject : {}\n'.format(
                    truncate_string(mail_info['mail_from'], 35),
                    truncate_string(mail_info['to'], 35),
                    mail_info['subject'],
                ))
    connection.disconnect(ctx)
    return 0
if __name__ == "__main__":
    sys.exit(main())
|
Add an example script showing how to use this library
|
Add an example script showing how to use this library
|
Python
|
mit
|
Gentux/imap-cli,Gentux/imap-cli
|
Add an example script showing how to use this library
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Use IMAP CLI to gt a summary of IMAP account state."""
import argparse
import getpass
import logging
import os
import sys
from imap_cli import config
from imap_cli.imap import connection
from imap_cli.imap import search
from imap_cli import list_mail
from imap_cli import status
app_name = os.path.splitext(os.path.basename(__file__))[0]
log = logging.getLogger(app_name)
def truncate_string(string, length):
minus_than_position = string.find('<')
if minus_than_position > 0 and string.find('>') > minus_than_position:
string = string[0:minus_than_position]
return string if len(string) < length else u'{0}…'.format(string[0:length])
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('imap_server', help="IMAP Server hostname")
parser.add_argument('-l', '--login', help="Login for IMAP account")
parser.add_argument('--no-ssl', action='store_true', help="Don't use SSL")
parser.add_argument('-v', '--verbose', action='store_true', help='increase output verbosity')
args = parser.parse_args()
password = getpass.getpass()
ctx = config.new_context({
'hostname': args.imap_server,
'username': args.login,
'password': password,
'ssl': not args.no_ssl,
})
connection.connect(ctx)
for directory_status in status.status(ctx):
if int(directory_status['unseen']) > 0:
sys.stdout.write(directory_status['directory'])
sys.stdout.write('\n')
ctx.mail_account.select(directory_status['directory'], True)
mail_set = search.search(ctx, search_criterion=[search.create_search_criteria_by_tag(['unseen'])])
for mail_info in list_mail.list_mail(ctx, mail_set=mail_set):
sys.stdout.write(u' From : {:<35} To : {:<35} Subject : {}\n'.format(
truncate_string(mail_info['mail_from'], 35),
truncate_string(mail_info['to'], 35),
mail_info['subject'],
))
connection.disconnect(ctx)
return 0
if __name__ == "__main__":
sys.exit(main())
|
<commit_before><commit_msg>Add an example script showing how to use this library<commit_after>
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Use IMAP CLI to gt a summary of IMAP account state."""
import argparse
import getpass
import logging
import os
import sys
from imap_cli import config
from imap_cli.imap import connection
from imap_cli.imap import search
from imap_cli import list_mail
from imap_cli import status
app_name = os.path.splitext(os.path.basename(__file__))[0]
log = logging.getLogger(app_name)
def truncate_string(string, length):
minus_than_position = string.find('<')
if minus_than_position > 0 and string.find('>') > minus_than_position:
string = string[0:minus_than_position]
return string if len(string) < length else u'{0}…'.format(string[0:length])
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('imap_server', help="IMAP Server hostname")
parser.add_argument('-l', '--login', help="Login for IMAP account")
parser.add_argument('--no-ssl', action='store_true', help="Don't use SSL")
parser.add_argument('-v', '--verbose', action='store_true', help='increase output verbosity')
args = parser.parse_args()
password = getpass.getpass()
ctx = config.new_context({
'hostname': args.imap_server,
'username': args.login,
'password': password,
'ssl': not args.no_ssl,
})
connection.connect(ctx)
for directory_status in status.status(ctx):
if int(directory_status['unseen']) > 0:
sys.stdout.write(directory_status['directory'])
sys.stdout.write('\n')
ctx.mail_account.select(directory_status['directory'], True)
mail_set = search.search(ctx, search_criterion=[search.create_search_criteria_by_tag(['unseen'])])
for mail_info in list_mail.list_mail(ctx, mail_set=mail_set):
sys.stdout.write(u' From : {:<35} To : {:<35} Subject : {}\n'.format(
truncate_string(mail_info['mail_from'], 35),
truncate_string(mail_info['to'], 35),
mail_info['subject'],
))
connection.disconnect(ctx)
return 0
if __name__ == "__main__":
sys.exit(main())
|
Add an example script showing how to use this library#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Use IMAP CLI to gt a summary of IMAP account state."""
import argparse
import getpass
import logging
import os
import sys
from imap_cli import config
from imap_cli.imap import connection
from imap_cli.imap import search
from imap_cli import list_mail
from imap_cli import status
app_name = os.path.splitext(os.path.basename(__file__))[0]
log = logging.getLogger(app_name)
def truncate_string(string, length):
minus_than_position = string.find('<')
if minus_than_position > 0 and string.find('>') > minus_than_position:
string = string[0:minus_than_position]
return string if len(string) < length else u'{0}…'.format(string[0:length])
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('imap_server', help="IMAP Server hostname")
parser.add_argument('-l', '--login', help="Login for IMAP account")
parser.add_argument('--no-ssl', action='store_true', help="Don't use SSL")
parser.add_argument('-v', '--verbose', action='store_true', help='increase output verbosity')
args = parser.parse_args()
password = getpass.getpass()
ctx = config.new_context({
'hostname': args.imap_server,
'username': args.login,
'password': password,
'ssl': not args.no_ssl,
})
connection.connect(ctx)
for directory_status in status.status(ctx):
if int(directory_status['unseen']) > 0:
sys.stdout.write(directory_status['directory'])
sys.stdout.write('\n')
ctx.mail_account.select(directory_status['directory'], True)
mail_set = search.search(ctx, search_criterion=[search.create_search_criteria_by_tag(['unseen'])])
for mail_info in list_mail.list_mail(ctx, mail_set=mail_set):
sys.stdout.write(u' From : {:<35} To : {:<35} Subject : {}\n'.format(
truncate_string(mail_info['mail_from'], 35),
truncate_string(mail_info['to'], 35),
mail_info['subject'],
))
connection.disconnect(ctx)
return 0
if __name__ == "__main__":
sys.exit(main())
|
<commit_before><commit_msg>Add an example script showing how to use this library<commit_after>#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Use IMAP CLI to gt a summary of IMAP account state."""
import argparse
import getpass
import logging
import os
import sys
from imap_cli import config
from imap_cli.imap import connection
from imap_cli.imap import search
from imap_cli import list_mail
from imap_cli import status
app_name = os.path.splitext(os.path.basename(__file__))[0]
log = logging.getLogger(app_name)
def truncate_string(string, length):
minus_than_position = string.find('<')
if minus_than_position > 0 and string.find('>') > minus_than_position:
string = string[0:minus_than_position]
return string if len(string) < length else u'{0}…'.format(string[0:length])
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('imap_server', help="IMAP Server hostname")
parser.add_argument('-l', '--login', help="Login for IMAP account")
parser.add_argument('--no-ssl', action='store_true', help="Don't use SSL")
parser.add_argument('-v', '--verbose', action='store_true', help='increase output verbosity')
args = parser.parse_args()
password = getpass.getpass()
ctx = config.new_context({
'hostname': args.imap_server,
'username': args.login,
'password': password,
'ssl': not args.no_ssl,
})
connection.connect(ctx)
for directory_status in status.status(ctx):
if int(directory_status['unseen']) > 0:
sys.stdout.write(directory_status['directory'])
sys.stdout.write('\n')
ctx.mail_account.select(directory_status['directory'], True)
mail_set = search.search(ctx, search_criterion=[search.create_search_criteria_by_tag(['unseen'])])
for mail_info in list_mail.list_mail(ctx, mail_set=mail_set):
sys.stdout.write(u' From : {:<35} To : {:<35} Subject : {}\n'.format(
truncate_string(mail_info['mail_from'], 35),
truncate_string(mail_info['to'], 35),
mail_info['subject'],
))
connection.disconnect(ctx)
return 0
if __name__ == "__main__":
sys.exit(main())
|
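The truncate_string helper in the script above drops the '<address>' portion of a From/To header and appends an ellipsis when the remaining text is still too long. A small self-contained demonstration of that behaviour; the sample inputs are made up for illustration:
# Demonstration of the truncate_string helper defined in summary.py above.
def truncate_string(string, length):
    minus_than_position = string.find('<')
    if minus_than_position > 0 and string.find('>') > minus_than_position:
        string = string[0:minus_than_position]
    return string if len(string) < length else u'{0}…'.format(string[0:length])

print(truncate_string('Alice Example <alice@example.org>', 35))  # 'Alice Example '
print(truncate_string('x' * 50, 10))                             # 'xxxxxxxxxx…'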
|
41047fa4f4e83d457b68b69ae04d7a5373f3aa06
|
malaria-3view-normalise.py
|
malaria-3view-normalise.py
|
# IPython log file
import toolz as tz
import numpy as np
from skimage import io
import os
filenames = sorted(os.listdir())
from toolz import curried as c
os.makedirs('8bit')
# find the maximum value over all images
tz.pipe(filenames, c.map(io.imread), c.map(np.max), max)
import sys
sys.path.append('/Users/nuneziglesiasj/projects/microscopium')
from microscopium import preprocess as pre
# get an image of representative intensity over all input images
sampled = tz.pipe(filenames, c.map(io.imread), pre._reservoir_sampled_image)
from skimage import exposure
hist = exposure.histogram(sampled)
from matplotlib import pyplot as plt
plt.plot(hist[1], hist[0])
in_range = tuple(np.percentile(sampled.ravel(), [1, 99]))
for filename in filenames:
image = io.imread(filename)
out = exposure.rescale_intensity(image, in_range=in_range,
out_range=(0, 255))
out = out.astype(np.uint8)
io.imsave('8bit/' + filename[:-3] + 'tif', out,
plugin='tifffile', compress=1)
filenames8 = sorted(os.listdir('8bit/'))[1:] # ignore .DS_Store
filenames8 = [os.path.join('8bit', fn) for fn in filenames8]
sampled8 = tz.pipe(filenames8, c.map(io.imread), pre._reservoir_sampled_image)
freq, bin = exposure.histogram(sampled8)
plt.figure()
plt.plot(bin, freq)
|
Add session normalising malaria 3view stack
|
Add session normalising malaria 3view stack
|
Python
|
bsd-3-clause
|
jni/useful-histories
|
Add session normalising malaria 3view stack
|
# IPython log file
import toolz as tz
import numpy as np
from skimage import io
import os
filenames = sorted(os.listdir())
from toolz import curried as c
os.makedirs('8bit')
# find the maximum value over all images
tz.pipe(filenames, c.map(io.imread), c.map(np.max), max)
import sys
sys.path.append('/Users/nuneziglesiasj/projects/microscopium')
from microscopium import preprocess as pre
# get an image of representative intensity over all input images
sampled = tz.pipe(filenames, c.map(io.imread), pre._reservoir_sampled_image)
from skimage import exposure
hist = exposure.histogram(sampled)
from matplotlib import pyplot as plt
plt.plot(hist[1], hist[0])
in_range = tuple(np.percentile(sampled.ravel(), [1, 99]))
for filename in filenames:
image = io.imread(filename)
out = exposure.rescale_intensity(image, in_range=in_range,
out_range=(0, 255))
out = out.astype(np.uint8)
io.imsave('8bit/' + filename[:-3] + 'tif', out,
plugin='tifffile', compress=1)
filenames8 = sorted(os.listdir('8bit/'))[1:] # ignore .DS_Store
filenames8 = [os.path.join('8bit', fn) for fn in filenames8]
sampled8 = tz.pipe(filenames8, c.map(io.imread), pre._reservoir_sampled_image)
freq, bin = exposure.histogram(sampled8)
plt.figure()
plt.plot(bin, freq)
|
<commit_before><commit_msg>Add session normalising malaria 3view stack<commit_after>
|
# IPython log file
import toolz as tz
import numpy as np
from skimage import io
import os
filenames = sorted(os.listdir())
from toolz import curried as c
os.makedirs('8bit')
# find the maximum value over all images
tz.pipe(filenames, c.map(io.imread), c.map(np.max), max)
import sys
sys.path.append('/Users/nuneziglesiasj/projects/microscopium')
from microscopium import preprocess as pre
# get an image of representative intensity over all input images
sampled = tz.pipe(filenames, c.map(io.imread), pre._reservoir_sampled_image)
from skimage import exposure
hist = exposure.histogram(sampled)
from matplotlib import pyplot as plt
plt.plot(hist[1], hist[0])
in_range = tuple(np.percentile(sampled.ravel(), [1, 99]))
for filename in filenames:
image = io.imread(filename)
out = exposure.rescale_intensity(image, in_range=in_range,
out_range=(0, 255))
out = out.astype(np.uint8)
io.imsave('8bit/' + filename[:-3] + 'tif', out,
plugin='tifffile', compress=1)
filenames8 = sorted(os.listdir('8bit/'))[1:] # ignore .DS_Store
filenames8 = [os.path.join('8bit', fn) for fn in filenames8]
sampled8 = tz.pipe(filenames8, c.map(io.imread), pre._reservoir_sampled_image)
freq, bin = exposure.histogram(sampled8)
plt.figure()
plt.plot(bin, freq)
|
Add session normalising malaria 3view stack# IPython log file
import toolz as tz
import numpy as np
from skimage import io
import os
filenames = sorted(os.listdir())
from toolz import curried as c
os.makedirs('8bit')
# find the maximum value over all images
tz.pipe(filenames, c.map(io.imread), c.map(np.max), max)
import sys
sys.path.append('/Users/nuneziglesiasj/projects/microscopium')
from microscopium import preprocess as pre
# get an image of representative intensity over all input images
sampled = tz.pipe(filenames, c.map(io.imread), pre._reservoir_sampled_image)
from skimage import exposure
hist = exposure.histogram(sampled)
from matplotlib import pyplot as plt
plt.plot(hist[1], hist[0])
in_range = tuple(np.percentile(sampled.ravel(), [1, 99]))
for filename in filenames:
image = io.imread(filename)
out = exposure.rescale_intensity(image, in_range=in_range,
out_range=(0, 255))
out = out.astype(np.uint8)
io.imsave('8bit/' + filename[:-3] + 'tif', out,
plugin='tifffile', compress=1)
filenames8 = sorted(os.listdir('8bit/'))[1:] # ignore .DS_Store
filenames8 = [os.path.join('8bit', fn) for fn in filenames8]
sampled8 = tz.pipe(filenames8, c.map(io.imread), pre._reservoir_sampled_image)
freq, bin = exposure.histogram(sampled8)
plt.figure()
plt.plot(bin, freq)
|
<commit_before><commit_msg>Add session normalising malaria 3view stack<commit_after># IPython log file
import toolz as tz
import numpy as np
from skimage import io
import os
filenames = sorted(os.listdir())
from toolz import curried as c
os.makedirs('8bit')
# find the maximum value over all images
tz.pipe(filenames, c.map(io.imread), c.map(np.max), max)
import sys
sys.path.append('/Users/nuneziglesiasj/projects/microscopium')
from microscopium import preprocess as pre
# get an image of representative intensity over all input images
sampled = tz.pipe(filenames, c.map(io.imread), pre._reservoir_sampled_image)
from skimage import exposure
hist = exposure.histogram(sampled)
from matplotlib import pyplot as plt
plt.plot(hist[1], hist[0])
in_range = tuple(np.percentile(sampled.ravel(), [1, 99]))
for filename in filenames:
image = io.imread(filename)
out = exposure.rescale_intensity(image, in_range=in_range,
out_range=(0, 255))
out = out.astype(np.uint8)
io.imsave('8bit/' + filename[:-3] + 'tif', out,
plugin='tifffile', compress=1)
filenames8 = sorted(os.listdir('8bit/'))[1:] # ignore .DS_Store
filenames8 = [os.path.join('8bit', fn) for fn in filenames8]
sampled8 = tz.pipe(filenames8, c.map(io.imread), pre._reservoir_sampled_image)
freq, bin = exposure.histogram(sampled8)
plt.figure()
plt.plot(bin, freq)
|
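The core of the session above is a percentile-based contrast stretch applied before the 8-bit conversion. A self-contained sketch of just that step on synthetic data; the array below is random and stands in for one slice of the real 3view stack:
# Percentile-based contrast stretch, as in the session above (synthetic data).
import numpy as np
from skimage import exposure

image = np.random.randint(0, 4096, size=(64, 64)).astype(np.uint16)  # fake 12-bit slice
in_range = tuple(np.percentile(image.ravel(), [1, 99]))
out = exposure.rescale_intensity(image, in_range=in_range, out_range=(0, 255))
out = out.astype(np.uint8)  # intensities outside the 1st-99th percentile are clipped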
|
76d1930367418ffc01c9629b686557d0bd979f03
|
CodeFights/rockPaperScissors.py
|
CodeFights/rockPaperScissors.py
|
#!/usr/local/bin/python
# Code Fights Rock Paper Scissors Problem
from itertools import combinations
def rockPaperScissors(players):
return sorted([[b, a] for a, b in combinations(players, 2)] +
[[a, b] for a, b in combinations(players, 2)])
def main():
tests = [
[
["trainee", "warrior", "ninja"],
[["ninja", "trainee"],
["ninja", "warrior"],
["trainee", "ninja"],
["trainee", "warrior"],
["warrior", "ninja"],
["warrior", "trainee"]]
],
[
["macho", "hero"],
[["hero", "macho"],
["macho", "hero"]]
]
]
for t in tests:
res = rockPaperScissors(t[0])
if t[1] == res:
print("PASSED: rockPaperScissors({}) returned {}"
.format(t[0], res))
else:
print(("FAILED: rockPaperScissors({}) returned {}, "
"answer: {}").format(t[0], res, t[1]))
if __name__ == '__main__':
main()
|
Solve Code Fights rock paper scissors problem
|
Solve Code Fights rock paper scissors problem
|
Python
|
mit
|
HKuz/Test_Code
|
Solve Code Fights rock paper scissors problem
|
#!/usr/local/bin/python
# Code Fights Rock Paper Scissors Problem
from itertools import combinations
def rockPaperScissors(players):
return sorted([[b, a] for a, b in combinations(players, 2)] +
[[a, b] for a, b in combinations(players, 2)])
def main():
tests = [
[
["trainee", "warrior", "ninja"],
[["ninja", "trainee"],
["ninja", "warrior"],
["trainee", "ninja"],
["trainee", "warrior"],
["warrior", "ninja"],
["warrior", "trainee"]]
],
[
["macho", "hero"],
[["hero", "macho"],
["macho", "hero"]]
]
]
for t in tests:
res = rockPaperScissors(t[0])
if t[1] == res:
print("PASSED: rockPaperScissors({}) returned {}"
.format(t[0], res))
else:
print(("FAILED: rockPaperScissors({}) returned {}, "
"answer: {}").format(t[0], res, t[1]))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights rock paper scissors problem<commit_after>
|
#!/usr/local/bin/python
# Code Fights Rock Paper Scissors Problem
from itertools import combinations
def rockPaperScissors(players):
return sorted([[b, a] for a, b in combinations(players, 2)] +
[[a, b] for a, b in combinations(players, 2)])
def main():
tests = [
[
["trainee", "warrior", "ninja"],
[["ninja", "trainee"],
["ninja", "warrior"],
["trainee", "ninja"],
["trainee", "warrior"],
["warrior", "ninja"],
["warrior", "trainee"]]
],
[
["macho", "hero"],
[["hero", "macho"],
["macho", "hero"]]
]
]
for t in tests:
res = rockPaperScissors(t[0])
if t[1] == res:
print("PASSED: rockPaperScissors({}) returned {}"
.format(t[0], res))
else:
print(("FAILED: rockPaperScissors({}) returned {}, "
"answer: {}").format(t[0], res, t[1]))
if __name__ == '__main__':
main()
|
Solve Code Fights rock paper scissors problem#!/usr/local/bin/python
# Code Fights Rock Paper Scissors Problem
from itertools import combinations
def rockPaperScissors(players):
return sorted([[b, a] for a, b in combinations(players, 2)] +
[[a, b] for a, b in combinations(players, 2)])
def main():
tests = [
[
["trainee", "warrior", "ninja"],
[["ninja", "trainee"],
["ninja", "warrior"],
["trainee", "ninja"],
["trainee", "warrior"],
["warrior", "ninja"],
["warrior", "trainee"]]
],
[
["macho", "hero"],
[["hero", "macho"],
["macho", "hero"]]
]
]
for t in tests:
res = rockPaperScissors(t[0])
if t[1] == res:
print("PASSED: rockPaperScissors({}) returned {}"
.format(t[0], res))
else:
print(("FAILED: rockPaperScissors({}) returned {}, "
"answer: {}").format(t[0], res, t[1]))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights rock paper scissors problem<commit_after>#!/usr/local/bin/python
# Code Fights Rock Paper Scissors Problem
from itertools import combinations
def rockPaperScissors(players):
return sorted([[b, a] for a, b in combinations(players, 2)] +
[[a, b] for a, b in combinations(players, 2)])
def main():
tests = [
[
["trainee", "warrior", "ninja"],
[["ninja", "trainee"],
["ninja", "warrior"],
["trainee", "ninja"],
["trainee", "warrior"],
["warrior", "ninja"],
["warrior", "trainee"]]
],
[
["macho", "hero"],
[["hero", "macho"],
["macho", "hero"]]
]
]
for t in tests:
res = rockPaperScissors(t[0])
if t[1] == res:
print("PASSED: rockPaperScissors({}) returned {}"
.format(t[0], res))
else:
print(("FAILED: rockPaperScissors({}) returned {}, "
"answer: {}").format(t[0], res, t[1]))
if __name__ == '__main__':
main()
|
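For comparison, the same result can be expressed directly with itertools.permutations, since the task is just every ordered pair of distinct players in sorted order. This is an equivalent reformulation, not the solution as committed:
# Equivalent formulation using permutations (ordered pairs of distinct players).
from itertools import permutations

def rock_paper_scissors(players):
    return sorted(list(pair) for pair in permutations(players, 2))

print(rock_paper_scissors(["macho", "hero"]))
# [['hero', 'macho'], ['macho', 'hero']]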
|
748e39de86b35d5288cef55d3eb246dc0fd84e48
|
enasearch/__main__.py
|
enasearch/__main__.py
|
#!/usr/bin/env python
import click
import ebisearch
from pprint import pprint
@click.group()
def main():
pass
@click.command('get_results', short_help='Get list of results')
def get_results():
"""Return the list of domains in EBI"""
ebisearch.get_results(verbose=True)
@click.command('get_filter_fields', short_help='Get filter fields')
@click.option(
'--result',
help='Id of a result (accessible with get_results)')
def get_filter_fields(result):
"""Get the filter fields of a result to build a query"""
ebisearch.get_filter_fields(verbose=True)
main.add_command(get_results)
main.add_command(get_filter_fields)
if __name__ == "__main__":
main()
|
Add first version of main
|
Add first version of main
|
Python
|
mit
|
bebatut/enasearch
|
Add first version of main
|
#!/usr/bin/env python
import click
import ebisearch
from pprint import pprint
@click.group()
def main():
pass
@click.command('get_results', short_help='Get list of results')
def get_results():
"""Return the list of domains in EBI"""
ebisearch.get_results(verbose=True)
@click.command('get_filter_fields', short_help='Get filter fields')
@click.option(
'--result',
help='Id of a result (accessible with get_results)')
def get_filter_fields(result):
"""Get the filter fields of a result to build a query"""
ebisearch.get_filter_fields(verbose=True)
main.add_command(get_results)
main.add_command(get_filter_fields)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add first version of main<commit_after>
|
#!/usr/bin/env python
import click
import ebisearch
from pprint import pprint
@click.group()
def main():
pass
@click.command('get_results', short_help='Get list of results')
def get_results():
"""Return the list of domains in EBI"""
ebisearch.get_results(verbose=True)
@click.command('get_filter_fields', short_help='Get filter fields')
@click.option(
'--result',
help='Id of a result (accessible with get_results)')
def get_filter_fields(result):
"""Get the filter fields of a result to build a query"""
ebisearch.get_filter_fields(verbose=True)
main.add_command(get_results)
main.add_command(get_filter_fields)
if __name__ == "__main__":
main()
|
Add first version of main#!/usr/bin/env python
import click
import ebisearch
from pprint import pprint
@click.group()
def main():
pass
@click.command('get_results', short_help='Get list of results')
def get_results():
"""Return the list of domains in EBI"""
ebisearch.get_results(verbose=True)
@click.command('get_filter_fields', short_help='Get filter fields')
@click.option(
'--result',
help='Id of a result (accessible with get_results)')
def get_filter_fields(result):
"""Get the filter fields of a result to build a query"""
ebisearch.get_filter_fields(verbose=True)
main.add_command(get_results)
main.add_command(get_filter_fields)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add first version of main<commit_after>#!/usr/bin/env python
import click
import ebisearch
from pprint import pprint
@click.group()
def main():
pass
@click.command('get_results', short_help='Get list of results')
def get_results():
"""Return the list of domains in EBI"""
ebisearch.get_results(verbose=True)
@click.command('get_filter_fields', short_help='Get filter fields')
@click.option(
'--result',
help='Id of a result (accessible with get_results)')
def get_filter_fields(result):
"""Get the filter fields of a result to build a query"""
ebisearch.get_filter_fields(verbose=True)
main.add_command(get_results)
main.add_command(get_filter_fields)
if __name__ == "__main__":
main()
|
|
99e5badeb1e2e6aeffd3b3d56902d4257e168326
|
examples/hwapi/soft_pwm_uasyncio.py
|
examples/hwapi/soft_pwm_uasyncio.py
|
# See original soft_pwm.py for detailed comments.
import uasyncio
from hwconfig import LED
async def pwm_cycle(led, duty, cycles):
duty_off = 20 - duty
for i in range(cycles):
if duty:
led.value(1)
await uasyncio.sleep_ms(duty)
if duty_off:
led.value(0)
await uasyncio.sleep_ms(duty_off)
async def fade_in_out(LED):
while True:
# Fade in
for i in range(1, 21):
await pwm_cycle(LED, i, 2)
# Fade out
for i in range(20, 0, -1):
await pwm_cycle(LED, i, 2)
loop = uasyncio.get_event_loop()
loop.run_until_complete(fade_in_out(LED))
|
Add soft_pwm example converted to uasyncio.
|
examples/hwapi: Add soft_pwm example converted to uasyncio.
|
Python
|
mit
|
oopy/micropython,swegener/micropython,puuu/micropython,adafruit/micropython,Timmenem/micropython,pozetroninc/micropython,henriknelson/micropython,torwag/micropython,alex-robbins/micropython,infinnovation/micropython,bvernoux/micropython,pozetroninc/micropython,micropython/micropython-esp32,tralamazza/micropython,swegener/micropython,PappaPeppar/micropython,TDAbboud/micropython,ryannathans/micropython,selste/micropython,bvernoux/micropython,toolmacher/micropython,toolmacher/micropython,blazewicz/micropython,AriZuu/micropython,pfalcon/micropython,pramasoul/micropython,selste/micropython,puuu/micropython,micropython/micropython-esp32,HenrikSolver/micropython,Peetz0r/micropython-esp32,MrSurly/micropython,deshipu/micropython,oopy/micropython,MrSurly/micropython-esp32,pramasoul/micropython,bvernoux/micropython,TDAbboud/micropython,PappaPeppar/micropython,torwag/micropython,mhoffma/micropython,chrisdearman/micropython,AriZuu/micropython,bvernoux/micropython,cwyark/micropython,trezor/micropython,oopy/micropython,pozetroninc/micropython,tobbad/micropython,infinnovation/micropython,swegener/micropython,AriZuu/micropython,tuc-osg/micropython,alex-robbins/micropython,chrisdearman/micropython,MrSurly/micropython,MrSurly/micropython-esp32,trezor/micropython,HenrikSolver/micropython,AriZuu/micropython,tuc-osg/micropython,pfalcon/micropython,Timmenem/micropython,dmazzella/micropython,HenrikSolver/micropython,ryannathans/micropython,HenrikSolver/micropython,tobbad/micropython,mhoffma/micropython,adafruit/circuitpython,SHA2017-badge/micropython-esp32,henriknelson/micropython,selste/micropython,Timmenem/micropython,selste/micropython,PappaPeppar/micropython,mhoffma/micropython,hiway/micropython,adafruit/micropython,tobbad/micropython,trezor/micropython,chrisdearman/micropython,tralamazza/micropython,adafruit/circuitpython,tuc-osg/micropython,swegener/micropython,trezor/micropython,adafruit/micropython,dmazzella/micropython,torwag/micropython,kerneltask/micropython,MrSurly/micropython,deshipu/micropython,puuu/micropython,alex-robbins/micropython,dmazzella/micropython,micropython/micropython-esp32,tobbad/micropython,AriZuu/micropython,SHA2017-badge/micropython-esp32,oopy/micropython,alex-robbins/micropython,toolmacher/micropython,mhoffma/micropython,blazewicz/micropython,infinnovation/micropython,adafruit/circuitpython,deshipu/micropython,lowRISC/micropython,pozetroninc/micropython,SHA2017-badge/micropython-esp32,adafruit/circuitpython,kerneltask/micropython,toolmacher/micropython,adafruit/micropython,Peetz0r/micropython-esp32,blazewicz/micropython,pramasoul/micropython,pramasoul/micropython,selste/micropython,infinnovation/micropython,tobbad/micropython,cwyark/micropython,lowRISC/micropython,Peetz0r/micropython-esp32,cwyark/micropython,pfalcon/micropython,puuu/micropython,dmazzella/micropython,HenrikSolver/micropython,TDAbboud/micropython,hiway/micropython,henriknelson/micropython,MrSurly/micropython-esp32,puuu/micropython,pozetroninc/micropython,pfalcon/micropython,tuc-osg/micropython,lowRISC/micropython,kerneltask/micropython,oopy/micropython,TDAbboud/micropython,ryannathans/micropython,adafruit/circuitpython,hiway/micropython,ryannathans/micropython,cwyark/micropython,ryannathans/micropython,PappaPeppar/micropython,Peetz0r/micropython-esp32,bvernoux/micropython,hiway/micropython,pramasoul/micropython,deshipu/micropython,chrisdearman/micropython,blazewicz/micropython,MrSurly/micropython-esp32,alex-robbins/micropython,swegener/micropython,Peetz0r/micropython-esp32,kerneltask/micropython,blazewicz/micropython,
henriknelson/micropython,tralamazza/micropython,tuc-osg/micropython,pfalcon/micropython,Timmenem/micropython,Timmenem/micropython,adafruit/circuitpython,deshipu/micropython,MrSurly/micropython,SHA2017-badge/micropython-esp32,lowRISC/micropython,mhoffma/micropython,chrisdearman/micropython,SHA2017-badge/micropython-esp32,micropython/micropython-esp32,PappaPeppar/micropython,torwag/micropython,lowRISC/micropython,trezor/micropython,TDAbboud/micropython,henriknelson/micropython,infinnovation/micropython,cwyark/micropython,toolmacher/micropython,MrSurly/micropython-esp32,adafruit/micropython,torwag/micropython,MrSurly/micropython,micropython/micropython-esp32,kerneltask/micropython,hiway/micropython,tralamazza/micropython
|
examples/hwapi: Add soft_pwm example converted to uasyncio.
|
# See original soft_pwm.py for detailed comments.
import uasyncio
from hwconfig import LED
async def pwm_cycle(led, duty, cycles):
duty_off = 20 - duty
for i in range(cycles):
if duty:
led.value(1)
await uasyncio.sleep_ms(duty)
if duty_off:
led.value(0)
await uasyncio.sleep_ms(duty_off)
async def fade_in_out(LED):
while True:
# Fade in
for i in range(1, 21):
await pwm_cycle(LED, i, 2)
# Fade out
for i in range(20, 0, -1):
await pwm_cycle(LED, i, 2)
loop = uasyncio.get_event_loop()
loop.run_until_complete(fade_in_out(LED))
|
<commit_before><commit_msg>examples/hwapi: Add soft_pwm example converted to uasyncio.<commit_after>
|
# See original soft_pwm.py for detailed comments.
import uasyncio
from hwconfig import LED
async def pwm_cycle(led, duty, cycles):
duty_off = 20 - duty
for i in range(cycles):
if duty:
led.value(1)
await uasyncio.sleep_ms(duty)
if duty_off:
led.value(0)
await uasyncio.sleep_ms(duty_off)
async def fade_in_out(LED):
while True:
# Fade in
for i in range(1, 21):
await pwm_cycle(LED, i, 2)
# Fade out
for i in range(20, 0, -1):
await pwm_cycle(LED, i, 2)
loop = uasyncio.get_event_loop()
loop.run_until_complete(fade_in_out(LED))
|
examples/hwapi: Add soft_pwm example converted to uasyncio.# See original soft_pwm.py for detailed comments.
import uasyncio
from hwconfig import LED
async def pwm_cycle(led, duty, cycles):
duty_off = 20 - duty
for i in range(cycles):
if duty:
led.value(1)
await uasyncio.sleep_ms(duty)
if duty_off:
led.value(0)
await uasyncio.sleep_ms(duty_off)
async def fade_in_out(LED):
while True:
# Fade in
for i in range(1, 21):
await pwm_cycle(LED, i, 2)
# Fade out
for i in range(20, 0, -1):
await pwm_cycle(LED, i, 2)
loop = uasyncio.get_event_loop()
loop.run_until_complete(fade_in_out(LED))
|
<commit_before><commit_msg>examples/hwapi: Add soft_pwm example converted to uasyncio.<commit_after># See original soft_pwm.py for detailed comments.
import uasyncio
from hwconfig import LED
async def pwm_cycle(led, duty, cycles):
duty_off = 20 - duty
for i in range(cycles):
if duty:
led.value(1)
await uasyncio.sleep_ms(duty)
if duty_off:
led.value(0)
await uasyncio.sleep_ms(duty_off)
async def fade_in_out(LED):
while True:
# Fade in
for i in range(1, 21):
await pwm_cycle(LED, i, 2)
# Fade out
for i in range(20, 0, -1):
await pwm_cycle(LED, i, 2)
loop = uasyncio.get_event_loop()
loop.run_until_complete(fade_in_out(LED))
|
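A rough timing check for the fade above: duty and duty_off always sum to 20 ms, each brightness level is held for two of those cycles, and each fade direction covers 20 levels. The arithmetic, assuming sleep_ms is reasonably accurate:
# Back-of-envelope timing for the fade loops above (pure arithmetic).
period_ms = 20            # duty + duty_off inside pwm_cycle
step_ms = 2 * period_ms   # each brightness level runs 2 cycles
fade_ms = 20 * step_ms    # 20 levels per fade direction
print(step_ms, fade_ms)   # 40 800 -> roughly 0.8 s to fade in or out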
|
36f8bcbb5df95c10deea461ee5afddbfbf746f16
|
oidc_provider/tests/test_creatersakey_command.py
|
oidc_provider/tests/test_creatersakey_command.py
|
from django.core.management import call_command
from django.test import TestCase, override_settings
from django.utils.six import StringIO
class CreateRSAKeyTest(TestCase):
@override_settings(BASE_DIR='/tmp')
def test_command_output(self):
out = StringIO()
call_command('creatersakey', stdout=out)
self.assertIn('RSA key successfully created', out.getvalue())
|
Add a basic test for the creatersakey management command
|
Add a basic test for the creatersakey management command
|
Python
|
mit
|
wayward710/django-oidc-provider,wayward710/django-oidc-provider,torreco/django-oidc-provider,wojtek-fliposports/django-oidc-provider,juanifioren/django-oidc-provider,bunnyinc/django-oidc-provider,juanifioren/django-oidc-provider,ByteInternet/django-oidc-provider,bunnyinc/django-oidc-provider,ByteInternet/django-oidc-provider,torreco/django-oidc-provider,wojtek-fliposports/django-oidc-provider
|
Add a basic test for the creatersakey management command
|
from django.core.management import call_command
from django.test import TestCase, override_settings
from django.utils.six import StringIO
class CreateRSAKeyTest(TestCase):
@override_settings(BASE_DIR='/tmp')
def test_command_output(self):
out = StringIO()
call_command('creatersakey', stdout=out)
self.assertIn('RSA key successfully created', out.getvalue())
|
<commit_before><commit_msg>Add a basic test for the creatersakey management command<commit_after>
|
from django.core.management import call_command
from django.test import TestCase, override_settings
from django.utils.six import StringIO
class CreateRSAKeyTest(TestCase):
@override_settings(BASE_DIR='/tmp')
def test_command_output(self):
out = StringIO()
call_command('creatersakey', stdout=out)
self.assertIn('RSA key successfully created', out.getvalue())
|
Add a basic test for the creatersakey management commandfrom django.core.management import call_command
from django.test import TestCase, override_settings
from django.utils.six import StringIO
class CreateRSAKeyTest(TestCase):
@override_settings(BASE_DIR='/tmp')
def test_command_output(self):
out = StringIO()
call_command('creatersakey', stdout=out)
self.assertIn('RSA key successfully created', out.getvalue())
|
<commit_before><commit_msg>Add a basic test for the creatersakey management command<commit_after>from django.core.management import call_command
from django.test import TestCase, override_settings
from django.utils.six import StringIO
class CreateRSAKeyTest(TestCase):
@override_settings(BASE_DIR='/tmp')
def test_command_output(self):
out = StringIO()
call_command('creatersakey', stdout=out)
self.assertIn('RSA key successfully created', out.getvalue())
|
|
238a063b821525bc24f059225cfe8fe1ecbdb586
|
thinc/tests/unit/test_difference.py
|
thinc/tests/unit/test_difference.py
|
'''Tests for distance-based objectives, layers, etc.'''
import pytest
import numpy
import numpy.linalg
from numpy.testing import assert_allclose
from ...neural._classes.difference import word_movers_similarity
@pytest.fixture
def N1():
return 5
@pytest.fixture
def N2():
return 3
@pytest.fixture
def ndim():
return 2
@pytest.fixture
def mat1(N1, ndim):
mat = numpy.ones((N1, ndim))
for i in range(N1):
mat[i] /= numpy.linalg.norm(mat[i])
return mat
@pytest.fixture
def mat2(N2, ndim):
mat = numpy.ones((N2, ndim))
for i in range(N2):
mat[i] /= numpy.linalg.norm(mat[i])
return mat
def cosine_similarity(vec1_vec2):
# Assume vectors are normalized
vec1, vec2 = vec1_vec2
def backward(d_sim, sgd=None):
if d_sim.ndim == 1:
d_sim = d_sim.reshape((d_sim.shape[0], 1))
print(vec1.shape, d_sim.shape)
print(vec1.shape, d_sim.shape)
return (vec2 * d_sim, vec1 * d_sim)
dotted = (vec1 * vec2).sum(axis=1)
return dotted, backward
def test_word_movers_similarity_unit_matrices(mat1, mat2):
sim, backward = word_movers_similarity(mat1, mat2)
assert_allclose(sim, 1.0)
d_mat1, d_mat2 = backward(0.0, None)
assert d_mat1.shape == mat1.shape
assert d_mat2.shape == mat2.shape
def test_gradient(mat1, mat2):
mat1[0] = 10.0
mat2[-1] = 10.0
mat1[1] = -2.
mat2[0] = -2
sim, backward = word_movers_similarity(mat1, mat2)
d_mat1, d_mat2 = backward(-1.0)
assert d_mat1[0, -1] != 0.
assert d_mat1[0, 0] == (-1./(mat1.shape[0]+mat2.shape[0])) * 10.
|
Add tests for new difference module
|
Add tests for new difference module
|
Python
|
mit
|
spacy-io/thinc,explosion/thinc,explosion/thinc,explosion/thinc,spacy-io/thinc,explosion/thinc,spacy-io/thinc
|
Add tests for new difference module
|
'''Tests for distance-based objectives, layers, etc.'''
import pytest
import numpy
import numpy.linalg
from numpy.testing import assert_allclose
from ...neural._classes.difference import word_movers_similarity
@pytest.fixture
def N1():
return 5
@pytest.fixture
def N2():
return 3
@pytest.fixture
def ndim():
return 2
@pytest.fixture
def mat1(N1, ndim):
mat = numpy.ones((N1, ndim))
for i in range(N1):
mat[i] /= numpy.linalg.norm(mat[i])
return mat
@pytest.fixture
def mat2(N2, ndim):
mat = numpy.ones((N2, ndim))
for i in range(N2):
mat[i] /= numpy.linalg.norm(mat[i])
return mat
def cosine_similarity(vec1_vec2):
# Assume vectors are normalized
vec1, vec2 = vec1_vec2
def backward(d_sim, sgd=None):
if d_sim.ndim == 1:
d_sim = d_sim.reshape((d_sim.shape[0], 1))
print(vec1.shape, d_sim.shape)
print(vec1.shape, d_sim.shape)
return (vec2 * d_sim, vec1 * d_sim)
dotted = (vec1 * vec2).sum(axis=1)
return dotted, backward
def test_word_movers_similarity_unit_matrices(mat1, mat2):
sim, backward = word_movers_similarity(mat1, mat2)
assert_allclose(sim, 1.0)
d_mat1, d_mat2 = backward(0.0, None)
assert d_mat1.shape == mat1.shape
assert d_mat2.shape == mat2.shape
def test_gradient(mat1, mat2):
mat1[0] = 10.0
mat2[-1] = 10.0
mat1[1] = -2.
mat2[0] = -2
sim, backward = word_movers_similarity(mat1, mat2)
d_mat1, d_mat2 = backward(-1.0)
assert d_mat1[0, -1] != 0.
assert d_mat1[0, 0] == (-1./(mat1.shape[0]+mat2.shape[0])) * 10.
|
<commit_before><commit_msg>Add tests for new difference module<commit_after>
|
'''Tests for distance-based objectives, layers, etc.'''
import pytest
import numpy
import numpy.linalg
from numpy.testing import assert_allclose
from ...neural._classes.difference import word_movers_similarity
@pytest.fixture
def N1():
return 5
@pytest.fixture
def N2():
return 3
@pytest.fixture
def ndim():
return 2
@pytest.fixture
def mat1(N1, ndim):
mat = numpy.ones((N1, ndim))
for i in range(N1):
mat[i] /= numpy.linalg.norm(mat[i])
return mat
@pytest.fixture
def mat2(N2, ndim):
mat = numpy.ones((N2, ndim))
for i in range(N2):
mat[i] /= numpy.linalg.norm(mat[i])
return mat
def cosine_similarity(vec1_vec2):
# Assume vectors are normalized
vec1, vec2 = vec1_vec2
def backward(d_sim, sgd=None):
if d_sim.ndim == 1:
d_sim = d_sim.reshape((d_sim.shape[0], 1))
print(vec1.shape, d_sim.shape)
print(vec1.shape, d_sim.shape)
return (vec2 * d_sim, vec1 * d_sim)
dotted = (vec1 * vec2).sum(axis=1)
return dotted, backward
def test_word_movers_similarity_unit_matrices(mat1, mat2):
sim, backward = word_movers_similarity(mat1, mat2)
assert_allclose(sim, 1.0)
d_mat1, d_mat2 = backward(0.0, None)
assert d_mat1.shape == mat1.shape
assert d_mat2.shape == mat2.shape
def test_gradient(mat1, mat2):
mat1[0] = 10.0
mat2[-1] = 10.0
mat1[1] = -2.
mat2[0] = -2
sim, backward = word_movers_similarity(mat1, mat2)
d_mat1, d_mat2 = backward(-1.0)
assert d_mat1[0, -1] != 0.
assert d_mat1[0, 0] == (-1./(mat1.shape[0]+mat2.shape[0])) * 10.
|
Add tests for new difference module'''Tests for distance-based objectives, layers, etc.'''
import pytest
import numpy
import numpy.linalg
from numpy.testing import assert_allclose
from ...neural._classes.difference import word_movers_similarity
@pytest.fixture
def N1():
return 5
@pytest.fixture
def N2():
return 3
@pytest.fixture
def ndim():
return 2
@pytest.fixture
def mat1(N1, ndim):
mat = numpy.ones((N1, ndim))
for i in range(N1):
mat[i] /= numpy.linalg.norm(mat[i])
return mat
@pytest.fixture
def mat2(N2, ndim):
mat = numpy.ones((N2, ndim))
for i in range(N2):
mat[i] /= numpy.linalg.norm(mat[i])
return mat
def cosine_similarity(vec1_vec2):
# Assume vectors are normalized
vec1, vec2 = vec1_vec2
def backward(d_sim, sgd=None):
if d_sim.ndim == 1:
d_sim = d_sim.reshape((d_sim.shape[0], 1))
print(vec1.shape, d_sim.shape)
print(vec1.shape, d_sim.shape)
return (vec2 * d_sim, vec1 * d_sim)
dotted = (vec1 * vec2).sum(axis=1)
return dotted, backward
def test_word_movers_similarity_unit_matrices(mat1, mat2):
sim, backward = word_movers_similarity(mat1, mat2)
assert_allclose(sim, 1.0)
d_mat1, d_mat2 = backward(0.0, None)
assert d_mat1.shape == mat1.shape
assert d_mat2.shape == mat2.shape
def test_gradient(mat1, mat2):
mat1[0] = 10.0
mat2[-1] = 10.0
mat1[1] = -2.
mat2[0] = -2
sim, backward = word_movers_similarity(mat1, mat2)
d_mat1, d_mat2 = backward(-1.0)
assert d_mat1[0, -1] != 0.
assert d_mat1[0, 0] == (-1./(mat1.shape[0]+mat2.shape[0])) * 10.
|
<commit_before><commit_msg>Add tests for new difference module<commit_after>'''Tests for distance-based objectives, layers, etc.'''
import pytest
import numpy
import numpy.linalg
from numpy.testing import assert_allclose
from ...neural._classes.difference import word_movers_similarity
@pytest.fixture
def N1():
return 5
@pytest.fixture
def N2():
return 3
@pytest.fixture
def ndim():
return 2
@pytest.fixture
def mat1(N1, ndim):
mat = numpy.ones((N1, ndim))
for i in range(N1):
mat[i] /= numpy.linalg.norm(mat[i])
return mat
@pytest.fixture
def mat2(N2, ndim):
mat = numpy.ones((N2, ndim))
for i in range(N2):
mat[i] /= numpy.linalg.norm(mat[i])
return mat
def cosine_similarity(vec1_vec2):
# Assume vectors are normalized
vec1, vec2 = vec1_vec2
def backward(d_sim, sgd=None):
if d_sim.ndim == 1:
d_sim = d_sim.reshape((d_sim.shape[0], 1))
print(vec1.shape, d_sim.shape)
print(vec1.shape, d_sim.shape)
return (vec2 * d_sim, vec1 * d_sim)
dotted = (vec1 * vec2).sum(axis=1)
return dotted, backward
def test_word_movers_similarity_unit_matrices(mat1, mat2):
sim, backward = word_movers_similarity(mat1, mat2)
assert_allclose(sim, 1.0)
d_mat1, d_mat2 = backward(0.0, None)
assert d_mat1.shape == mat1.shape
assert d_mat2.shape == mat2.shape
def test_gradient(mat1, mat2):
mat1[0] = 10.0
mat2[-1] = 10.0
mat1[1] = -2.
mat2[0] = -2
sim, backward = word_movers_similarity(mat1, mat2)
d_mat1, d_mat2 = backward(-1.0)
assert d_mat1[0, -1] != 0.
assert d_mat1[0, 0] == (-1./(mat1.shape[0]+mat2.shape[0])) * 10.
|
|
dc7ba5b850c268c3a572e109e8bcaf9dff3ff589
|
amcat/management/commands/install_r.py
|
amcat/management/commands/install_r.py
|
###########################################################################
# (C) Vrije Universiteit, Amsterdam (the Netherlands) #
# #
# This file is part of AmCAT - The Amsterdam Content Analysis Toolkit #
# #
# AmCAT is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Affero General Public License as published by the #
# Free Software Foundation, either version 3 of the License, or (at your #
# option) any later version. #
# #
# AmCAT is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public #
# License for more details. #
# #
# You should have received a copy of the GNU Affero General Public #
# License along with AmCAT. If not, see <http://www.gnu.org/licenses/>. #
###########################################################################
from django.core.management import BaseCommand
from rpy2.rinterface._rinterface import RRuntimeError
from amcat.scripts.query import get_r_queryactions
class Command(BaseCommand):
def handle(self, *args, **options):
packages = set()
for qa in get_r_queryactions():
packages |= set(qa.get_dependencies())
from rpy2.robjects import r
available = r("available.packages()[,'Version']")
available = dict(zip(r.names(available), available))
printrow = lambda p, i, a: print("{:30s}{:10s}{:10s}".format(p, i, a))
printrow("Package", "Installed", "Available")
todo = []
for package in packages:
try:
installed = r("as.character")(r.packageVersion(package))[0]
except RRuntimeError as e:
installed = '-'
todo.append(package)
printrow(package, installed, available.get(package))
print("\nInstalling required packages: ", todo)
for package in todo:
print("... ", package)
r("install.packages")(package)
print("\nDone!")
|
Add manage command for installing R dependencies
|
Add manage command for installing R dependencies
|
Python
|
agpl-3.0
|
amcat/amcat,amcat/amcat,amcat/amcat,amcat/amcat,amcat/amcat,amcat/amcat
|
Add manage command for installing R dependencies
|
###########################################################################
# (C) Vrije Universiteit, Amsterdam (the Netherlands) #
# #
# This file is part of AmCAT - The Amsterdam Content Analysis Toolkit #
# #
# AmCAT is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Affero General Public License as published by the #
# Free Software Foundation, either version 3 of the License, or (at your #
# option) any later version. #
# #
# AmCAT is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public #
# License for more details. #
# #
# You should have received a copy of the GNU Affero General Public #
# License along with AmCAT. If not, see <http://www.gnu.org/licenses/>. #
###########################################################################
from django.core.management import BaseCommand
from rpy2.rinterface._rinterface import RRuntimeError
from amcat.scripts.query import get_r_queryactions
class Command(BaseCommand):
def handle(self, *args, **options):
packages = set()
for qa in get_r_queryactions():
packages |= set(qa.get_dependencies())
from rpy2.robjects import r
available = r("available.packages()[,'Version']")
available = dict(zip(r.names(available), available))
printrow = lambda p, i, a: print("{:30s}{:10s}{:10s}".format(p, i, a))
printrow("Package", "Installed", "Available")
todo = []
for package in packages:
try:
installed = r("as.character")(r.packageVersion(package))[0]
except RRuntimeError as e:
installed = '-'
todo.append(package)
printrow(package, installed, available.get(package))
print("\nInstalling required packages: ", todo)
for package in todo:
print("... ", package)
r("install.packages")(package)
print("\nDone!")
|
<commit_before><commit_msg>Add manage command for installing R dependencies<commit_after>
|
###########################################################################
# (C) Vrije Universiteit, Amsterdam (the Netherlands) #
# #
# This file is part of AmCAT - The Amsterdam Content Analysis Toolkit #
# #
# AmCAT is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Affero General Public License as published by the #
# Free Software Foundation, either version 3 of the License, or (at your #
# option) any later version. #
# #
# AmCAT is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public #
# License for more details. #
# #
# You should have received a copy of the GNU Affero General Public #
# License along with AmCAT. If not, see <http://www.gnu.org/licenses/>. #
###########################################################################
from django.core.management import BaseCommand
from rpy2.rinterface._rinterface import RRuntimeError
from amcat.scripts.query import get_r_queryactions
class Command(BaseCommand):
def handle(self, *args, **options):
packages = set()
for qa in get_r_queryactions():
packages |= set(qa.get_dependencies())
from rpy2.robjects import r
available = r("available.packages()[,'Version']")
available = dict(zip(r.names(available), available))
printrow = lambda p, i, a: print("{:30s}{:10s}{:10s}".format(p, i, a))
printrow("Package", "Installed", "Available")
todo = []
for package in packages:
try:
installed = r("as.character")(r.packageVersion(package))[0]
except RRuntimeError as e:
installed = '-'
todo.append(package)
printrow(package, installed, available.get(package))
print("\nInstalling required packages: ", todo)
for package in todo:
print("... ", package)
r("install.packages")(package)
print("\nDone!")
|
Add manage command for installing R dependencies###########################################################################
# (C) Vrije Universiteit, Amsterdam (the Netherlands) #
# #
# This file is part of AmCAT - The Amsterdam Content Analysis Toolkit #
# #
# AmCAT is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Affero General Public License as published by the #
# Free Software Foundation, either version 3 of the License, or (at your #
# option) any later version. #
# #
# AmCAT is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public #
# License for more details. #
# #
# You should have received a copy of the GNU Affero General Public #
# License along with AmCAT. If not, see <http://www.gnu.org/licenses/>. #
###########################################################################
from django.core.management import BaseCommand
from rpy2.rinterface._rinterface import RRuntimeError
from amcat.scripts.query import get_r_queryactions
class Command(BaseCommand):
def handle(self, *args, **options):
packages = set()
for qa in get_r_queryactions():
packages |= set(qa.get_dependencies())
from rpy2.robjects import r
available = r("available.packages()[,'Version']")
available = dict(zip(r.names(available), available))
printrow = lambda p, i, a: print("{:30s}{:10s}{:10s}".format(p, i, a))
printrow("Package", "Installed", "Available")
todo = []
for package in packages:
try:
installed = r("as.character")(r.packageVersion(package))[0]
except RRuntimeError as e:
installed = '-'
todo.append(package)
printrow(package, installed, available.get(package))
print("\nInstalling required packages: ", todo)
for package in todo:
print("... ", package)
r("install.packages")(package)
print("\nDone!")
|
<commit_before><commit_msg>Add manage command for installing R dependencies<commit_after>###########################################################################
# (C) Vrije Universiteit, Amsterdam (the Netherlands) #
# #
# This file is part of AmCAT - The Amsterdam Content Analysis Toolkit #
# #
# AmCAT is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Affero General Public License as published by the #
# Free Software Foundation, either version 3 of the License, or (at your #
# option) any later version. #
# #
# AmCAT is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public #
# License for more details. #
# #
# You should have received a copy of the GNU Affero General Public #
# License along with AmCAT. If not, see <http://www.gnu.org/licenses/>. #
###########################################################################
from django.core.management import BaseCommand
from rpy2.rinterface._rinterface import RRuntimeError
from amcat.scripts.query import get_r_queryactions
class Command(BaseCommand):
def handle(self, *args, **options):
packages = set()
for qa in get_r_queryactions():
packages |= set(qa.get_dependencies())
from rpy2.robjects import r
available = r("available.packages()[,'Version']")
available = dict(zip(r.names(available), available))
printrow = lambda p, i, a: print("{:30s}{:10s}{:10s}".format(p, i, a))
printrow("Package", "Installed", "Available")
todo = []
for package in packages:
try:
installed = r("as.character")(r.packageVersion(package))[0]
except RRuntimeError as e:
installed = '-'
todo.append(package)
printrow(package, installed, available.get(package))
print("\nInstalling required packages: ", todo)
for package in todo:
print("... ", package)
r("install.packages")(package)
print("\nDone!")
|
|
d0f144e6e7ca587e42556e19958c53ea131a68e2
|
tests/data_checks/test_pvalue_filtering.py
|
tests/data_checks/test_pvalue_filtering.py
|
# ------------------------------------------------
# built-ins
import unittest
# local
from utils.base import TestPostgapBase
# ------------------------------------------------
class TestPValueFiltering(TestPostgapBase):
def test_one_pvalue_per_gwas_pmid_AND_efo_id(self):
self.skipTest('ONE PVALUE PER GWAS PMID AND EFO ID')
def test_largest_pvalue_of_set_per_gwas_pmid_AND_efo_id(self):
self.skipTest('LARGEST PVALUE OF SET PER GWAS PMID AND EFO ID')
if __name__ == '__main__':
unittest.main()
|
Add placeholder for gwas pvalue
|
Add placeholder for gwas pvalue
|
Python
|
apache-2.0
|
Ensembl/cttv024,Ensembl/cttv024
|
Add placeholder for gwas pvalue
|
# ------------------------------------------------
# built-ins
import unittest
# local
from utils.base import TestPostgapBase
# ------------------------------------------------
class TestPValueFiltering(TestPostgapBase):
def test_one_pvalue_per_gwas_pmid_AND_efo_id(self):
self.skipTest('ONE PVALUE PER GWAS PMID AND EFO ID')
def test_largest_pvalue_of_set_per_gwas_pmid_AND_efo_id(self):
self.skipTest('LARGEST PVALUE OF SET PER GWAS PMID AND EFO ID')
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add placeholder for gwas pvalue<commit_after>
|
# ------------------------------------------------
# built-ins
import unittest
# local
from utils.base import TestPostgapBase
# ------------------------------------------------
class TestPValueFiltering(TestPostgapBase):
def test_one_pvalue_per_gwas_pmid_AND_efo_id(self):
self.skipTest('ONE PVALUE PER GWAS PMID AND EFO ID')
def test_largest_pvalue_of_set_per_gwas_pmid_AND_efo_id(self):
self.skipTest('LARGEST PVALUE OF SET PER GWAS PMID AND EFO ID')
if __name__ == '__main__':
unittest.main()
|
Add placeholder for gwas pvalue# ------------------------------------------------
# built-ins
import unittest
# local
from utils.base import TestPostgapBase
# ------------------------------------------------
class TestPValueFiltering(TestPostgapBase):
def test_one_pvalue_per_gwas_pmid_AND_efo_id(self):
self.skipTest('ONE PVALUE PER GWAS PMID AND EFO ID')
def test_largest_pvalue_of_set_per_gwas_pmid_AND_efo_id(self):
self.skipTest('LARGEST PVALUE OF SET PER GWAS PMID AND EFO ID')
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add placeholder for gwas pvalue<commit_after># ------------------------------------------------
# built-ins
import unittest
# local
from utils.base import TestPostgapBase
# ------------------------------------------------
class TestPValueFiltering(TestPostgapBase):
def test_one_pvalue_per_gwas_pmid_AND_efo_id(self):
self.skipTest('ONE PVALUE PER GWAS PMID AND EFO ID')
def test_largest_pvalue_of_set_per_gwas_pmid_AND_efo_id(self):
self.skipTest('LARGEST PVALUE OF SET PER GWAS PMID AND EFO ID')
if __name__ == '__main__':
unittest.main()
|
|
b69a3381381aabd97873903eea863a85fc4c932d
|
pokedex/tests/test_docs.py
|
pokedex/tests/test_docs.py
|
import os
import re
from pokedex.db.tables import mapped_classes
def test_main_tables():
"""Check that tables.py and main-tables.rst are in sync: every table should
be documented, and every documented table should exist."""
main_tables_path = os.path.join(os.path.dirname(__file__), '../../doc/main-tables.rst')
with open(main_tables_path) as f:
doc_class_names = set(
re.findall(r'^\.\. dex-table:: (\w+)$', f.read(), re.MULTILINE)
)
mapped_class_names = set(cls.__name__ for cls in mapped_classes)
# EXTRA ITEMS IN THE LEFT SET: tables defined but not documented
# EXTRA ITEMS IN THE RIGHT SET: tables documented but not defined
assert mapped_class_names == doc_class_names
|
Add a test for main-tables.rst
|
Add a test for main-tables.rst
|
Python
|
mit
|
mschex1/pokedex,DaMouse404/pokedex,RK905/pokedex-1,xfix/pokedex,veekun/pokedex,veekun/pokedex
|
Add a test for main-tables.rst
|
import os
import re
from pokedex.db.tables import mapped_classes
def test_main_tables():
"""Check that tables.py and main-tables.rst are in sync: every table should
be documented, and every documented table should exist."""
main_tables_path = os.path.join(os.path.dirname(__file__), '../../doc/main-tables.rst')
with open(main_tables_path) as f:
doc_class_names = set(
re.findall(r'^\.\. dex-table:: (\w+)$', f.read(), re.MULTILINE)
)
mapped_class_names = set(cls.__name__ for cls in mapped_classes)
# EXTRA ITEMS IN THE LEFT SET: tables defined but not documented
# EXTRA ITEMS IN THE RIGHT SET: tables documented but not defined
assert mapped_class_names == doc_class_names
|
<commit_before><commit_msg>Add a test for main-tables.rst<commit_after>
|
import os
import re
from pokedex.db.tables import mapped_classes
def test_main_tables():
"""Check that tables.py and main-tables.rst are in sync: every table should
be documented, and every documented table should exist."""
main_tables_path = os.path.join(os.path.dirname(__file__), '../../doc/main-tables.rst')
with open(main_tables_path) as f:
doc_class_names = set(
re.findall(r'^\.\. dex-table:: (\w+)$', f.read(), re.MULTILINE)
)
mapped_class_names = set(cls.__name__ for cls in mapped_classes)
# EXTRA ITEMS IN THE LEFT SET: tables defined but not documented
# EXTRA ITEMS IN THE RIGHT SET: tables documented but not defined
assert mapped_class_names == doc_class_names
|
Add a test for main-tables.rstimport os
import re
from pokedex.db.tables import mapped_classes
def test_main_tables():
"""Check that tables.py and main-tables.rst are in sync: every table should
be documented, and every documented table should exist."""
main_tables_path = os.path.join(os.path.dirname(__file__), '../../doc/main-tables.rst')
with open(main_tables_path) as f:
doc_class_names = set(
re.findall(r'^\.\. dex-table:: (\w+)$', f.read(), re.MULTILINE)
)
mapped_class_names = set(cls.__name__ for cls in mapped_classes)
# EXTRA ITEMS IN THE LEFT SET: tables defined but not documented
# EXTRA ITEMS IN THE RIGHT SET: tables documented but not defined
assert mapped_class_names == doc_class_names
|
<commit_before><commit_msg>Add a test for main-tables.rst<commit_after>import os
import re
from pokedex.db.tables import mapped_classes
def test_main_tables():
"""Check that tables.py and main-tables.rst are in sync: every table should
be documented, and every documented table should exist."""
main_tables_path = os.path.join(os.path.dirname(__file__), '../../doc/main-tables.rst')
with open(main_tables_path) as f:
doc_class_names = set(
re.findall(r'^\.\. dex-table:: (\w+)$', f.read(), re.MULTILINE)
)
mapped_class_names = set(cls.__name__ for cls in mapped_classes)
# EXTRA ITEMS IN THE LEFT SET: tables defined but not documented
# EXTRA ITEMS IN THE RIGHT SET: tables documented but not defined
assert mapped_class_names == doc_class_names
|
|
6454bcae6c519c47fc4bd05610e8f16dfe355d66
|
TopologyEventTest/host_lib.py
|
TopologyEventTest/host_lib.py
|
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller import event
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ether_types
class EventHostTimeout(event.EventBase):
def __init__(self, host):
super(EventHostTimeout, self).__init__()
self.host = host
class HostLib(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
_EVENTS = [EventHostTimeout]
def __init__(self, *args, **kwargs):
super(HostLib, self).__init__(*args, **kwargs)
self.port_infos = {}
hub.spawn(self.port_request_loop)
@set_ev_cls(ofp_event.EventOFPPortStatsReply, MAIN_DISPATCHER)
def port_stats_event_handler(self, ev):
pass
def port_request_loop(self):
time.sleep(5)
while True:
hosts = topo_api.get_all_host(self)
ports = [host.port for host in hosts]
for port in ports:
dpid = port.dpid
switch = topo_api.get_switch(self, dpid)
dp = switch.dp
parser = dp.ofproto_parser
ofproto = dp.ofproto
msg = parser.OFPPortStatsRequest(dp, 0, port.port_no)
dp.send_msg(msg)
time.sleep(1)
|
Add host library to topology event test
|
Add host library to topology event test
|
Python
|
mit
|
TakeshiTseng/SDN-Work,TakeshiTseng/SDN-Work,TakeshiTseng/SDN-Work,TakeshiTseng/SDN-Work
|
Add host library to topology event test
|
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller import event
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ether_types
class EventHostTimeout(event.EventBase):
def __init__(self, host):
super(EventHostTimeout, self).__init__()
self.host = host
class HostLib(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
_EVENTS = [EventHostTimeout]
def __init__(self, *args, **kwargs):
super(HostLib, self).__init__(*args, **kwargs)
self.port_infos = {}
hub.spawn(self.port_request_loop)
@set_ev_cls(ofp_event.EventOFPPortStatsReply, MAIN_DISPATCHER)
def port_stats_event_handler(self, ev):
pass
def port_request_loop(self):
time.sleep(5)
while True:
hosts = topo_api.get_all_host(self)
ports = [host.port for host in hosts]
for port in ports:
dpid = port.dpid
switch = topo_api.get_switch(self, dpid)
dp = switch.dp
parser = dp.ofproto_parser
ofproto = dp.ofproto
msg = parser.OFPPortStatsRequest(dp, 0, port.port_no)
dp.send_msg(msg)
time.sleep(1)
|
<commit_before><commit_msg>Add host library to topology event test<commit_after>
|
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller import event
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ether_types
class EventHostTimeout(event.EventBase):
def __init__(self, host):
super(EventHostTimeout, self).__init__()
self.host = host
class HostLib(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
_EVENTS = [EventHostTimeout]
def __init__(self, *args, **kwargs):
super(HostLib, self).__init__(*args, **kwargs)
self.port_infos = {}
hub.spawn(self.port_request_loop)
@set_ev_cls(ofp_event.EventOFPPortStatsReply, MAIN_DISPATCHER)
def port_stats_event_handler(self, ev):
pass
def port_request_loop(self):
time.sleep(5)
while True:
hosts = topo_api.get_all_host(self)
ports = [host.port for host in hosts]
for port in ports:
dpid = port.dpid
switch = topo_api.get_switch(self, dpid)
dp = switch.dp
parser = dp.ofproto_parser
ofproto = dp.ofproto
msg = parser.OFPPortStatsRequest(dp, 0, port.port_no)
dp.send_msg(msg)
time.sleep(1)
|
Add host library to topology event testfrom ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller import event
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ether_types
class EventHostTimeout(event.EventBase):
def __init__(self, host):
super(EventHostTimeout, self).__init__()
self.host = host
class HostLib(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
_EVENTS = [EventHostTimeout]
def __init__(self, *args, **kwargs):
super(HostLib, self).__init__(*args, **kwargs)
self.port_infos = {}
hub.spawn(self.port_request_loop)
@set_ev_cls(ofp_event.EventOFPPortStatsReply, MAIN_DISPATCHER)
def port_stats_event_handler(self, ev):
pass
def port_request_loop(self):
time.sleep(5)
while True:
hosts = topo_api.get_all_host(self)
ports = [host.port for host in hosts]
for port in ports:
dpid = port.dpid
switch = topo_api.get_switch(self, dpid)
dp = switch.dp
parser = dp.ofproto_parser
ofproto = dp.ofproto
msg = parser.OFPPortStatsRequest(dp, 0, port.port_no)
dp.send_msg(msg)
time.sleep(1)
|
<commit_before><commit_msg>Add host library to topology event test<commit_after>from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller import event
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ether_types
class EventHostTimeout(event.EventBase):
def __init__(self, host):
super(EventHostTimeout, self).__init__()
self.host = host
class HostLib(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
_EVENTS = [EventHostTimeout]
def __init__(self, *args, **kwargs):
super(HostLib, self).__init__(*args, **kwargs)
self.port_infos = {}
hub.spawn(self.port_request_loop)
@set_ev_cls(ofp_event.EventOFPPortStatsReply, MAIN_DISPATCHER)
def port_stats_event_handler(self, ev):
pass
def port_request_loop(self):
time.sleep(5)
while True:
hosts = topo_api.get_all_host(self)
ports = [host.port for host in hosts]
for port in ports:
dpid = port.dpid
switch = topo_api.get_switch(self, dpid)
dp = switch.dp
parser = dp.ofproto_parser
ofproto = dp.ofproto
msg = parser.OFPPortStatsRequest(dp, 0, port.port_no)
dp.send_msg(msg)
time.sleep(1)
|
|
37e8ab6ca1ac8853df9b856c03263f4d0d7bf8ac
|
pdf_to_text.py
|
pdf_to_text.py
|
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from cStringIO import StringIO
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfpage import PDFPage
def pdf_to_text(s):
infile = StringIO(s)
output = StringIO()
manager = PDFResourceManager()
converter = TextConverter(manager, output, laparams=LAParams())
interpreter = PDFPageInterpreter(manager, converter)
pagenums = set()
for page in PDFPage.get_pages(infile, pagenums):
interpreter.process_page(page)
infile.close()
converter.close()
text = output.getvalue()
output.close()
return text
if __name__ == '__main__':
pdf = open('test.pdf').read()
print pdf_to_text(pdf)
|
Convert PDF to Text As a Function
|
Convert PDF to Text As a Function
|
Python
|
mit
|
young-geng/ResumeAnalytics
|
Convert PDF to Text As a Function
|
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from cStringIO import StringIO
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfpage import PDFPage
def pdf_to_text(s):
infile = StringIO(s)
output = StringIO()
manager = PDFResourceManager()
converter = TextConverter(manager, output, laparams=LAParams())
interpreter = PDFPageInterpreter(manager, converter)
pagenums = set()
for page in PDFPage.get_pages(infile, pagenums):
interpreter.process_page(page)
infile.close()
converter.close()
text = output.getvalue()
output.close()
return text
if __name__ == '__main__':
pdf = open('test.pdf').read()
print pdf_to_text(pdf)
|
<commit_before><commit_msg>Convert PDF to Text As a Function<commit_after>
|
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from cStringIO import StringIO
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfpage import PDFPage
def pdf_to_text(s):
infile = StringIO(s)
output = StringIO()
manager = PDFResourceManager()
converter = TextConverter(manager, output, laparams=LAParams())
interpreter = PDFPageInterpreter(manager, converter)
pagenums = set()
for page in PDFPage.get_pages(infile, pagenums):
interpreter.process_page(page)
infile.close()
converter.close()
text = output.getvalue()
output.close()
return text
if __name__ == '__main__':
pdf = open('test.pdf').read()
print pdf_to_text(pdf)
|
Convert PDF to Text As a Functionfrom pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from cStringIO import StringIO
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfpage import PDFPage
def pdf_to_text(s):
infile = StringIO(s)
output = StringIO()
manager = PDFResourceManager()
converter = TextConverter(manager, output, laparams=LAParams())
interpreter = PDFPageInterpreter(manager, converter)
pagenums = set()
for page in PDFPage.get_pages(infile, pagenums):
interpreter.process_page(page)
infile.close()
converter.close()
text = output.getvalue()
output.close()
return text
if __name__ == '__main__':
pdf = open('test.pdf').read()
print pdf_to_text(pdf)
|
<commit_before><commit_msg>Convert PDF to Text As a Function<commit_after>from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from cStringIO import StringIO
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfpage import PDFPage
def pdf_to_text(s):
infile = StringIO(s)
output = StringIO()
manager = PDFResourceManager()
converter = TextConverter(manager, output, laparams=LAParams())
interpreter = PDFPageInterpreter(manager, converter)
pagenums = set()
for page in PDFPage.get_pages(infile, pagenums):
interpreter.process_page(page)
infile.close()
converter.close()
text = output.getvalue()
output.close()
return text
if __name__ == '__main__':
pdf = open('test.pdf').read()
print pdf_to_text(pdf)
|
|
143af64f9435b3964ee618cccb89e7ad211e030a
|
db/__init__.py
|
db/__init__.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .common import session_scope
def commit_db_item(db_item):
with session_scope() as session:
session.merge(db_item)
session.commit()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .common import session_scope
def commit_db_item(db_item):
with session_scope() as session:
session.merge(db_item)
session.commit()
def create_or_update_db_item(db_item, new_item):
"""
Updates an existing or creates a new database item.
"""
with session_scope() as session:
# if database item exists
if db_item is not None:
# returning if database item is unchanged
if db_item == new_item:
return
# updating database item otherwise
else:
db_item.update(new_item)
session.merge(db_item)
# creating database item otherwise
else:
session.add(new_item)
session.commit()
|
Add utility function to create or update database items
|
Add utility function to create or update database items
|
Python
|
mit
|
leaffan/pynhldb
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .common import session_scope
def commit_db_item(db_item):
with session_scope() as session:
session.merge(db_item)
session.commit()
Add utility function to create or update database items
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .common import session_scope
def commit_db_item(db_item):
with session_scope() as session:
session.merge(db_item)
session.commit()
def create_or_update_db_item(db_item, new_item):
"""
Updates an existing or creates a new database item.
"""
with session_scope() as session:
# if database item exists
if db_item is not None:
# returning if database item is unchanged
if db_item == new_item:
return
# updating database item otherwise
else:
db_item.update(new_item)
session.merge(db_item)
# creating database item otherwise
else:
session.add(new_item)
session.commit()
|
<commit_before>#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .common import session_scope
def commit_db_item(db_item):
with session_scope() as session:
session.merge(db_item)
session.commit()
<commit_msg>Add utility function to create or update database items<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .common import session_scope
def commit_db_item(db_item):
with session_scope() as session:
session.merge(db_item)
session.commit()
def create_or_update_db_item(db_item, new_item):
"""
Updates an existing or creates a new database item.
"""
with session_scope() as session:
# if database item exists
if db_item is not None:
# returning if database item is unchanged
if db_item == new_item:
return
# updating database item otherwise
else:
db_item.update(new_item)
session.merge(db_item)
# creating database item otherwise
else:
session.add(new_item)
session.commit()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .common import session_scope
def commit_db_item(db_item):
with session_scope() as session:
session.merge(db_item)
session.commit()
Add utility function to create or update database items#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .common import session_scope
def commit_db_item(db_item):
with session_scope() as session:
session.merge(db_item)
session.commit()
def create_or_update_db_item(db_item, new_item):
"""
Updates an existing or creates a new database item.
"""
with session_scope() as session:
# if database item exists
if db_item is not None:
# returning if database item is unchanged
if db_item == new_item:
return
# updating database item otherwise
else:
db_item.update(new_item)
session.merge(db_item)
# creating database item otherwise
else:
session.add(new_item)
session.commit()
|
<commit_before>#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .common import session_scope
def commit_db_item(db_item):
with session_scope() as session:
session.merge(db_item)
session.commit()
<commit_msg>Add utility function to create or update database items<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .common import session_scope
def commit_db_item(db_item):
with session_scope() as session:
session.merge(db_item)
session.commit()
def create_or_update_db_item(db_item, new_item):
"""
Updates an existing or creates a new database item.
"""
with session_scope() as session:
# if database item exists
if db_item is not None:
# returning if database item is unchanged
if db_item == new_item:
return
# updating database item otherwise
else:
db_item.update(new_item)
session.merge(db_item)
# creating database item otherwise
else:
session.add(new_item)
session.commit()
|
ea7aea1950e8794d9cc838b4c2311e806058e129
|
tests/test1.py
|
tests/test1.py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 02 12:23:00 2013
@author: jeff
"""
import OpenPNM #This fails
|
Test script that demonstrates the failure to import OpenPNM in the test folder.
|
Test script that demonstrates the failure to import OpenPNM in the test folder.
Former-commit-id: 38d931cb07cfb102415dc2ac2a68142e3ca3f22d
Former-commit-id: 83bb6579e4bd1d15fe39ef9dcb894519feaff707
|
Python
|
mit
|
amdouglas/OpenPNM,TomTranter/OpenPNM,stadelmanma/OpenPNM,PMEAL/OpenPNM,amdouglas/OpenPNM
|
Test script that demonstrates the failure to import OpenPNM in the test folder.
Former-commit-id: 38d931cb07cfb102415dc2ac2a68142e3ca3f22d
Former-commit-id: 83bb6579e4bd1d15fe39ef9dcb894519feaff707
|
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 02 12:23:00 2013
@author: jeff
"""
import OpenPNM #This fails
|
<commit_before><commit_msg>Test script that demonstrates the failure to import OpenPNM in the test folder.
Former-commit-id: 38d931cb07cfb102415dc2ac2a68142e3ca3f22d
Former-commit-id: 83bb6579e4bd1d15fe39ef9dcb894519feaff707<commit_after>
|
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 02 12:23:00 2013
@author: jeff
"""
import OpenPNM #This fails
|
Test script that demonstrates the failure to import OpenPNM in the test folder.
Former-commit-id: 38d931cb07cfb102415dc2ac2a68142e3ca3f22d
Former-commit-id: 83bb6579e4bd1d15fe39ef9dcb894519feaff707# -*- coding: utf-8 -*-
"""
Created on Wed Oct 02 12:23:00 2013
@author: jeff
"""
import OpenPNM #This fails
|
<commit_before><commit_msg>Test script that demonstrates the failure to import OpenPNM in the test folder.
Former-commit-id: 38d931cb07cfb102415dc2ac2a68142e3ca3f22d
Former-commit-id: 83bb6579e4bd1d15fe39ef9dcb894519feaff707<commit_after># -*- coding: utf-8 -*-
"""
Created on Wed Oct 02 12:23:00 2013
@author: jeff
"""
import OpenPNM #This fails
|
|
002bd742a2dfb01e3c69ce97987af21cb5104292
|
readthedocs/projects/migrations/0038_update-doctype-helptext.py
|
readthedocs/projects/migrations/0038_update-doctype-helptext.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-02-02 19:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('projects', '0037_add_htmlfile'),
]
operations = [
migrations.AlterField(
model_name='project',
name='documentation_type',
field=models.CharField(choices=[('sphinx', 'Sphinx Html'), ('mkdocs', 'Mkdocs (Markdown)'), ('sphinx_htmldir', 'Sphinx HtmlDir'), ('sphinx_singlehtml', 'Sphinx Single Page HTML')], default='sphinx', help_text='Type of documentation you are building. <a href="http://www.sphinx-doc.org/en/stable/builders.html#sphinx.builders.html.DirectoryHTMLBuilder">More info on sphinx builders</a>.', max_length=20, verbose_name='Documentation type'),
),
]
|
Create migration for the change to the more info link
|
Create migration for the change to the more info link
|
Python
|
mit
|
rtfd/readthedocs.org,rtfd/readthedocs.org,rtfd/readthedocs.org,rtfd/readthedocs.org
|
Create migration for the change to the more info link
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-02-02 19:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('projects', '0037_add_htmlfile'),
]
operations = [
migrations.AlterField(
model_name='project',
name='documentation_type',
field=models.CharField(choices=[('sphinx', 'Sphinx Html'), ('mkdocs', 'Mkdocs (Markdown)'), ('sphinx_htmldir', 'Sphinx HtmlDir'), ('sphinx_singlehtml', 'Sphinx Single Page HTML')], default='sphinx', help_text='Type of documentation you are building. <a href="http://www.sphinx-doc.org/en/stable/builders.html#sphinx.builders.html.DirectoryHTMLBuilder">More info on sphinx builders</a>.', max_length=20, verbose_name='Documentation type'),
),
]
|
<commit_before><commit_msg>Create migration for the change to the more info link<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-02-02 19:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('projects', '0037_add_htmlfile'),
]
operations = [
migrations.AlterField(
model_name='project',
name='documentation_type',
field=models.CharField(choices=[('sphinx', 'Sphinx Html'), ('mkdocs', 'Mkdocs (Markdown)'), ('sphinx_htmldir', 'Sphinx HtmlDir'), ('sphinx_singlehtml', 'Sphinx Single Page HTML')], default='sphinx', help_text='Type of documentation you are building. <a href="http://www.sphinx-doc.org/en/stable/builders.html#sphinx.builders.html.DirectoryHTMLBuilder">More info on sphinx builders</a>.', max_length=20, verbose_name='Documentation type'),
),
]
|
Create migration for the change to the more info link# -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-02-02 19:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('projects', '0037_add_htmlfile'),
]
operations = [
migrations.AlterField(
model_name='project',
name='documentation_type',
field=models.CharField(choices=[('sphinx', 'Sphinx Html'), ('mkdocs', 'Mkdocs (Markdown)'), ('sphinx_htmldir', 'Sphinx HtmlDir'), ('sphinx_singlehtml', 'Sphinx Single Page HTML')], default='sphinx', help_text='Type of documentation you are building. <a href="http://www.sphinx-doc.org/en/stable/builders.html#sphinx.builders.html.DirectoryHTMLBuilder">More info on sphinx builders</a>.', max_length=20, verbose_name='Documentation type'),
),
]
|
<commit_before><commit_msg>Create migration for the change to the more info link<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-02-02 19:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('projects', '0037_add_htmlfile'),
]
operations = [
migrations.AlterField(
model_name='project',
name='documentation_type',
field=models.CharField(choices=[('sphinx', 'Sphinx Html'), ('mkdocs', 'Mkdocs (Markdown)'), ('sphinx_htmldir', 'Sphinx HtmlDir'), ('sphinx_singlehtml', 'Sphinx Single Page HTML')], default='sphinx', help_text='Type of documentation you are building. <a href="http://www.sphinx-doc.org/en/stable/builders.html#sphinx.builders.html.DirectoryHTMLBuilder">More info on sphinx builders</a>.', max_length=20, verbose_name='Documentation type'),
),
]
|
|
a86264c3d238d04cd38be2d995a7421dbaa49cd1
|
examples/resources_with_negotiation.py
|
examples/resources_with_negotiation.py
|
"""Example using `routing.ResourceRouter` and `negotiation` (content negotation).
Start the app with
::
$ pip install gunicorn http
$ gunicorn -k aiohttp.worker.GunicornWebWorker examples.resources_with_negotiation:app --reload
Try it out:
::
$ pip install httpie
$ http :8000/ message==Hello
$ http POST :8000/ message=Greetings
"""
from aiohttp import web
from aiohttp_utils import Response, routing, negotiation
class EchoResource:
async def get(self, request):
return Response({
'GET': dict(request.GET)
})
async def post(self, request):
data = await request.json()
return Response({
'POST': dict(data)
})
def create_app():
app = web.Application(router=routing.ResourceRouter())
app.router.add_resource('/', EchoResource())
negotiation.setup(app)
return app
app = create_app()
|
Add example with ResourceRouter and content negotiation
|
Add example with ResourceRouter and content negotiation
|
Python
|
mit
|
sloria/aiohttp_utils
|
Add example with ResourceRouter and content negotiation
|
"""Example using `routing.ResourceRouter` and `negotiation` (content negotation).
Start the app with
::
$ pip install gunicorn http
$ gunicorn -k aiohttp.worker.GunicornWebWorker examples.resources_with_negotiation:app --reload
Try it out:
::
$ pip install httpie
$ http :8000/ message==Hello
$ http POST :8000/ message=Greetings
"""
from aiohttp import web
from aiohttp_utils import Response, routing, negotiation
class EchoResource:
async def get(self, request):
return Response({
'GET': dict(request.GET)
})
async def post(self, request):
data = await request.json()
return Response({
'POST': dict(data)
})
def create_app():
app = web.Application(router=routing.ResourceRouter())
app.router.add_resource('/', EchoResource())
negotiation.setup(app)
return app
app = create_app()
|
<commit_before><commit_msg>Add example with ResourceRouter and content negotiation<commit_after>
|
"""Example using `routing.ResourceRouter` and `negotiation` (content negotation).
Start the app with
::
$ pip install gunicorn http
$ gunicorn -k aiohttp.worker.GunicornWebWorker examples.resources_with_negotiation:app --reload
Try it out:
::
$ pip install httpie
$ http :8000/ message==Hello
$ http POST :8000/ message=Greetings
"""
from aiohttp import web
from aiohttp_utils import Response, routing, negotiation
class EchoResource:
async def get(self, request):
return Response({
'GET': dict(request.GET)
})
async def post(self, request):
data = await request.json()
return Response({
'POST': dict(data)
})
def create_app():
app = web.Application(router=routing.ResourceRouter())
app.router.add_resource('/', EchoResource())
negotiation.setup(app)
return app
app = create_app()
|
Add example with ResourceRouter and content negotiation"""Example using `routing.ResourceRouter` and `negotiation` (content negotiation).
Start the app with
::
$ pip install gunicorn http
$ gunicorn -k aiohttp.worker.GunicornWebWorker examples.resources_with_negotiation:app --reload
Try it out:
::
$ pip install httpie
$ http :8000/ message==Hello
$ http POST :8000/ message=Greetings
"""
from aiohttp import web
from aiohttp_utils import Response, routing, negotiation
class EchoResource:
async def get(self, request):
return Response({
'GET': dict(request.GET)
})
async def post(self, request):
data = await request.json()
return Response({
'POST': dict(data)
})
def create_app():
app = web.Application(router=routing.ResourceRouter())
app.router.add_resource('/', EchoResource())
negotiation.setup(app)
return app
app = create_app()
|
<commit_before><commit_msg>Add example with ResourceRouter and content negotiation<commit_after>"""Example using `routing.ResourceRouter` and `negotiation` (content negotiation).
Start the app with
::
$ pip install gunicorn http
$ gunicorn -k aiohttp.worker.GunicornWebWorker examples.resources_with_negotiation:app --reload
Try it out:
::
$ pip install httpie
$ http :8000/ message==Hello
$ http POST :8000/ message=Greetings
"""
from aiohttp import web
from aiohttp_utils import Response, routing, negotiation
class EchoResource:
async def get(self, request):
return Response({
'GET': dict(request.GET)
})
async def post(self, request):
data = await request.json()
return Response({
'POST': dict(data)
})
def create_app():
app = web.Application(router=routing.ResourceRouter())
app.router.add_resource('/', EchoResource())
negotiation.setup(app)
return app
app = create_app()
|
|
28c0046329681ceb0f0d7163738a60314837339e
|
website/tests/views/test_gene.py
|
website/tests/views/test_gene.py
|
from view_testing import ViewTest
from models import Protein
from models import Gene
from database import db
test_gene_data = {
'name': 'BRCA1',
'isoforms': [
Protein(
refseq='NM_000123',
sequence='TRAN',
),
]
}
class TestPGeneView(ViewTest):
def test_show(self):
g = Gene(**test_gene_data)
db.session.add(g)
response = self.client.get('/gene/show/BRCA1')
assert response.status_code == 200
assert b'BRCA1' in response.data
assert b'NM_000123' in response.data
|
Add basic test for GeneView:show
|
Add basic test for GeneView:show
|
Python
|
lgpl-2.1
|
reimandlab/Visualistion-Framework-for-Genome-Mutations,reimandlab/ActiveDriverDB,reimandlab/Visualistion-Framework-for-Genome-Mutations,reimandlab/ActiveDriverDB,reimandlab/Visualistion-Framework-for-Genome-Mutations,reimandlab/Visualisation-Framework-for-Genome-Mutations,reimandlab/Visualisation-Framework-for-Genome-Mutations,reimandlab/Visualisation-Framework-for-Genome-Mutations,reimandlab/ActiveDriverDB,reimandlab/Visualistion-Framework-for-Genome-Mutations,reimandlab/ActiveDriverDB,reimandlab/Visualisation-Framework-for-Genome-Mutations,reimandlab/Visualisation-Framework-for-Genome-Mutations
|
Add basic test for GeneView:show
|
from view_testing import ViewTest
from models import Protein
from models import Gene
from database import db
test_gene_data = {
'name': 'BRCA1',
'isoforms': [
Protein(
refseq='NM_000123',
sequence='TRAN',
),
]
}
class TestPGeneView(ViewTest):
def test_show(self):
g = Gene(**test_gene_data)
db.session.add(g)
response = self.client.get('/gene/show/BRCA1')
assert response.status_code == 200
assert b'BRCA1' in response.data
assert b'NM_000123' in response.data
|
<commit_before><commit_msg>Add basic test for GeneView:show<commit_after>
|
from view_testing import ViewTest
from models import Protein
from models import Gene
from database import db
test_gene_data = {
'name': 'BRCA1',
'isoforms': [
Protein(
refseq='NM_000123',
sequence='TRAN',
),
]
}
class TestPGeneView(ViewTest):
def test_show(self):
g = Gene(**test_gene_data)
db.session.add(g)
response = self.client.get('/gene/show/BRCA1')
assert response.status_code == 200
assert b'BRCA1' in response.data
assert b'NM_000123' in response.data
|
Add basic test for GeneView:showfrom view_testing import ViewTest
from models import Protein
from models import Gene
from database import db
test_gene_data = {
'name': 'BRCA1',
'isoforms': [
Protein(
refseq='NM_000123',
sequence='TRAN',
),
]
}
class TestPGeneView(ViewTest):
def test_show(self):
g = Gene(**test_gene_data)
db.session.add(g)
response = self.client.get('/gene/show/BRCA1')
assert response.status_code == 200
assert b'BRCA1' in response.data
assert b'NM_000123' in response.data
|
<commit_before><commit_msg>Add basic test for GeneView:show<commit_after>from view_testing import ViewTest
from models import Protein
from models import Gene
from database import db
test_gene_data = {
'name': 'BRCA1',
'isoforms': [
Protein(
refseq='NM_000123',
sequence='TRAN',
),
]
}
class TestPGeneView(ViewTest):
def test_show(self):
g = Gene(**test_gene_data)
db.session.add(g)
response = self.client.get('/gene/show/BRCA1')
assert response.status_code == 200
assert b'BRCA1' in response.data
assert b'NM_000123' in response.data
|
|
e445e62dac1e08ab36fee5d37a51c32228707ce8
|
scrolls/configure.py
|
scrolls/configure.py
|
from copy import copy
def run():
## UDP /etc/rsyslog.conf
uncomment('/etc/rsyslog.conf', ['$ModLoad imudp', '$UDPServerRun 514'])
## nginx
## sudo service rsyslog restart
def uncomment(fpath, statements):
found = {s:False for s in statements}
with open(fpath, 'r') as fh:
oldlines = fh.readlines()
newlines = copy(oldlines)
for l, line in enumerate(oldlines):
for statement in statements:
if statement in line:
found[statement] = True
if '#' in line:
print('uncommenting')
newlines[l] = line.replace('#', '')
else:
print('already uncommented')
if list(found.values()).count(False):
print('Not all stmts found')
if newlines == oldlines:
print('no changes necessary')
else:
print('saving edited file')
with open(fpath, 'w') as fh:
fh.writelines(newlines)
if __name__ == '__main__':
run()
|
Configure will uncomment UDP config
|
Configure will uncomment UDP config
|
Python
|
mit
|
ilogue/scrolls
|
Configure will uncomment UDP config
|
from copy import copy
def run():
## UDP /etc/rsyslog.conf
uncomment('/etc/rsyslog.conf', ['$ModLoad imudp', '$UDPServerRun 514'])
## nginx
## sudo service rsyslog restart
def uncomment(fpath, statements):
found = {s:False for s in statements}
with open(fpath, 'r') as fh:
oldlines = fh.readlines()
newlines = copy(oldlines)
for l, line in enumerate(oldlines):
for statement in statements:
if statement in line:
found[statement] = True
if '#' in line:
print('uncommenting')
newlines[l] = line.replace('#', '')
else:
print('already uncommented')
if list(found.values()).count(False):
print('Not all stmts found')
if newlines == oldlines:
print('no changes necessary')
else:
print('saving edited file')
with open(fpath, 'w') as fh:
fh.writelines(newlines)
if __name__ == '__main__':
run()
|
<commit_before><commit_msg>Configure will uncomment UDP config<commit_after>
|
from copy import copy
def run():
## UDP /etc/rsyslog.conf
uncomment('/etc/rsyslog.conf', ['$ModLoad imudp', '$UDPServerRun 514'])
## nginx
## sudo service rsyslog restart
def uncomment(fpath, statements):
found = {s:False for s in statements}
with open(fpath, 'r') as fh:
oldlines = fh.readlines()
newlines = copy(oldlines)
for l, line in enumerate(oldlines):
for statement in statements:
if statement in line:
found[statement] = True
if '#' in line:
print('uncommenting')
newlines[l] = line.replace('#', '')
else:
print('already uncommented')
if list(found.values()).count(False):
print('Not all stmts found')
if newlines == oldlines:
print('no changes necessary')
else:
print('saving edited file')
with open(fpath, 'w') as fh:
fh.writelines(newlines)
if __name__ == '__main__':
run()
|
Configure will uncomment UDP configfrom copy import copy
def run():
## UDP /etc/rsyslog.conf
uncomment('/etc/rsyslog.conf', ['$ModLoad imudp', '$UDPServerRun 514'])
## nginx
## sudo service rsyslog restart
def uncomment(fpath, statements):
found = {s:False for s in statements}
with open(fpath, 'r') as fh:
oldlines = fh.readlines()
newlines = copy(oldlines)
for l, line in enumerate(oldlines):
for statement in statements:
if statement in line:
found[statement] = True
if '#' in line:
print('uncommenting')
newlines[l] = line.replace('#', '')
else:
print('already uncommented')
if list(found.values()).count(False):
print('Not all stmts found')
if newlines == oldlines:
print('no changes necessary')
else:
print('saving edited file')
with open(fpath, 'w') as fh:
fh.writelines(newlines)
if __name__ == '__main__':
run()
|
<commit_before><commit_msg>Configure will uncomment UDP config<commit_after>from copy import copy
def run():
## UDP /etc/rsyslog.conf
uncomment('/etc/rsyslog.conf', ['$ModLoad imudp', '$UDPServerRun 514'])
## nginx
## sudo service rsyslog restart
def uncomment(fpath, statements):
found = {s:False for s in statements}
with open(fpath, 'r') as fh:
oldlines = fh.readlines()
newlines = copy(oldlines)
for l, line in enumerate(oldlines):
for statement in statements:
if statement in line:
found[statement] = True
if '#' in line:
print('uncommenting')
newlines[l] = line.replace('#', '')
else:
print('already uncommented')
if list(found.values()).count(False):
print('Not all stmts found')
if newlines == oldlines:
print('no changes necessary')
else:
print('saving edited file')
with open(fpath, 'w') as fh:
fh.writelines(newlines)
if __name__ == '__main__':
run()
|
|
1cbd0c19501fc4605a65e89cb8fb72271751777d
|
txircd/modules/server/push.py
|
txircd/modules/server/push.py
|
from twisted.plugin import IPlugin
from txircd.module_interface import Command, ICommand, IModuleData, ModuleData
from zope.interface import implements
class ServerPush(ModuleData, Command):
implements(IPlugin, IModuleData, ICommand)
name = "ServerPush"
core = True
def hookIRCd(self, ircd):
self.ircd = ircd
def serverCommands(self):
return [ ("PUSH", 1, self) ]
def parseParams(self, server, params, prefix, tags):
if len(params) != 2:
return None
if params[0] not in self.ircd.users:
return None
return {
"user": self.ircd.users[params[0]],
"line": params[1],
"source": prefix
}
def execute(self, server, data):
user = data["user"]
if user.uuid[:3] == self.ircd.serverID:
user.sendLine(data["line"])
return True
toServer = self.ircd.servers[user.uuid[:3]]
toServer.sendMessage("PUSH", user.uuid, "{}".format(data["line"]), prefix=data["source"])
return True
serverPush = ServerPush()
|
Implement the server PUSH command
|
Implement the server PUSH command
|
Python
|
bsd-3-clause
|
ElementalAlchemist/txircd,Heufneutje/txircd
|
Implement the server PUSH command
|
from twisted.plugin import IPlugin
from txircd.module_interface import Command, ICommand, IModuleData, ModuleData
from zope.interface import implements
class ServerPush(ModuleData, Command):
implements(IPlugin, IModuleData, ICommand)
name = "ServerPush"
core = True
def hookIRCd(self, ircd):
self.ircd = ircd
def serverCommands(self):
return [ ("PUSH", 1, self) ]
def parseParams(self, server, params, prefix, tags):
if len(params) != 2:
return None
if params[0] not in self.ircd.users:
return None
return {
"user": self.ircd.users[params[0]],
"line": params[1],
"source": prefix
}
def execute(self, server, data):
user = data["user"]
if user.uuid[:3] == self.ircd.serverID:
user.sendLine(data["line"])
return True
toServer = self.ircd.servers[user.uuid[:3]]
toServer.sendMessage("PUSH", user.uuid, "{}".format(data["line"]), prefix=data["source"])
return True
serverPush = ServerPush()
|
<commit_before><commit_msg>Implement the server PUSH command<commit_after>
|
from twisted.plugin import IPlugin
from txircd.module_interface import Command, ICommand, IModuleData, ModuleData
from zope.interface import implements
class ServerPush(ModuleData, Command):
implements(IPlugin, IModuleData, ICommand)
name = "ServerPush"
core = True
def hookIRCd(self, ircd):
self.ircd = ircd
def serverCommands(self):
return [ ("PUSH", 1, self) ]
def parseParams(self, server, params, prefix, tags):
if len(params) != 2:
return None
if params[0] not in self.ircd.users:
return None
return {
"user": self.ircd.users[params[0]],
"line": params[1],
"source": prefix
}
def execute(self, server, data):
user = data["user"]
if user.uuid[:3] == self.ircd.serverID:
user.sendLine(data["line"])
return True
toServer = self.ircd.servers[user.uuid[:3]]
toServer.sendMessage("PUSH", user.uuid, "{}".format(data["line"]), prefix=data["source"])
return True
serverPush = ServerPush()
|
Implement the server PUSH commandfrom twisted.plugin import IPlugin
from txircd.module_interface import Command, ICommand, IModuleData, ModuleData
from zope.interface import implements
class ServerPush(ModuleData, Command):
implements(IPlugin, IModuleData, ICommand)
name = "ServerPush"
core = True
def hookIRCd(self, ircd):
self.ircd = ircd
def serverCommands(self):
return [ ("PUSH", 1, self) ]
def parseParams(self, server, params, prefix, tags):
if len(params) != 2:
return None
if params[0] not in self.ircd.users:
return None
return {
"user": self.ircd.users[params[0]],
"line": params[1],
"source": prefix
}
def execute(self, server, data):
user = data["user"]
if user.uuid[:3] == self.ircd.serverID:
user.sendLine(data["line"])
return True
toServer = self.ircd.servers[user.uuid[:3]]
toServer.sendMessage("PUSH", user.uuid, "{}".format(data["line"]), prefix=data["source"])
return True
serverPush = ServerPush()
|
<commit_before><commit_msg>Implement the server PUSH command<commit_after>from twisted.plugin import IPlugin
from txircd.module_interface import Command, ICommand, IModuleData, ModuleData
from zope.interface import implements
class ServerPush(ModuleData, Command):
implements(IPlugin, IModuleData, ICommand)
name = "ServerPush"
core = True
def hookIRCd(self, ircd):
self.ircd = ircd
def serverCommands(self):
return [ ("PUSH", 1, self) ]
def parseParams(self, server, params, prefix, tags):
if len(params) != 2:
return None
if params[0] not in self.ircd.users:
return None
return {
"user": self.ircd.users[params[0]],
"line": params[1],
"source": prefix
}
def execute(self, server, data):
user = data["user"]
if user.uuid[:3] == self.ircd.serverID:
user.sendLine(data["line"])
return True
toServer = self.ircd.servers[user.uuid[:3]]
toServer.sendMessage("PUSH", user.uuid, "{}".format(data["line"]), prefix=data["source"])
return True
serverPush = ServerPush()
|
|
acda62ab8af2499c04ff15012039cb5fb5fa8968
|
process-lines.py
|
process-lines.py
|
#!/usr/bin/env python
# coding: utf-8
import re
import envoy
def parse_state(line):
match = re.search('\[(?P<state>[^\]]+?)\] at time', line)
if match:
return match.group('state')
def parse_time(line):
time = line.rsplit(' ', 1)[-1]
time = map(float, time.split(':'))
return time[0] * 60 * 60 + time[1] * 60 + time[2]
def format_duration(seconds):
if seconds > 60:
minutes = int(seconds) / 60
seconds = seconds - minutes * 60
return '{0} minutes and {1} seconds'.format(
minutes, seconds)
else:
return '{0} seconds'.format(seconds)
def process(lines):
starts = {}
for line in lines:
line = line.strip()
if 'Running state' in line:
state = parse_state(line)
starts[state] = parse_time(line)
elif 'Completed state' in line:
state = parse_state(line)
end = parse_time(line)
duration = end - starts[state]
if duration > 30:
print 'State {0} took {1}'.format(state,
format_duration(duration))
else:
print ''
print line
if __name__ == '__main__':
command = "sudo grep -e 'Running state' -e 'Completed state' -e 'Executing command state.highstate' /var/log/salt/minion"
response = envoy.run(command)
if response.std_err:
print response.std_err
else:
lines = response.std_out.split('\n')
process(lines)
|
Add a utility to measure salt performance.
|
Add a utility to measure salt performance.
|
Python
|
bsd-2-clause
|
AllMyChanges/allmychanges.com,AllMyChanges/allmychanges.com,AllMyChanges/allmychanges.com,AllMyChanges/allmychanges.com
|
Add a utility to measure salt performance.
|
#!/usr/bin/env python
# coding: utf-8
import re
import envoy
def parse_state(line):
match = re.search('\[(?P<state>[^\]]+?)\] at time', line)
if match:
return match.group('state')
def parse_time(line):
time = line.rsplit(' ', 1)[-1]
time = map(float, time.split(':'))
return time[0] * 60 * 60 + time[1] * 60 + time[2]
def format_duration(seconds):
if seconds > 60:
minutes = int(seconds) / 60
seconds = seconds - minutes * 60
return '{0} minutes and {1} seconds'.format(
minutes, seconds)
else:
return '{0} seconds'.format(seconds)
def process(lines):
starts = {}
for line in lines:
line = line.strip()
if 'Running state' in line:
state = parse_state(line)
starts[state] = parse_time(line)
elif 'Completed state' in line:
state = parse_state(line)
end = parse_time(line)
duration = end - starts[state]
if duration > 30:
print 'State {0} took {1}'.format(state,
format_duration(duration))
else:
print ''
print line
if __name__ == '__main__':
command = "sudo grep -e 'Running state' -e 'Completed state' -e 'Executing command state.highstate' /var/log/salt/minion"
response = envoy.run(command)
if response.std_err:
print response.std_err
else:
lines = response.std_out.split('\n')
process(lines)
|
<commit_before><commit_msg>Add a utility to measure salt performance.<commit_after>
|
#!/usr/bin/env python
# coding: utf-8
import re
import envoy
def parse_state(line):
match = re.search('\[(?P<state>[^\]]+?)\] at time', line)
if match:
return match.group('state')
def parse_time(line):
time = line.rsplit(' ', 1)[-1]
time = map(float, time.split(':'))
return time[0] * 60 * 60 + time[1] * 60 + time[2]
def format_duration(seconds):
if seconds > 60:
minutes = int(seconds) / 60
seconds = seconds - minutes * 60
return '{0} minutes and {1} seconds'.format(
minutes, seconds)
else:
return '{0} seconds'.format(seconds)
def process(lines):
starts = {}
for line in lines:
line = line.strip()
if 'Running state' in line:
state = parse_state(line)
starts[state] = parse_time(line)
elif 'Completed state' in line:
state = parse_state(line)
end = parse_time(line)
duration = end - starts[state]
if duration > 30:
print 'State {0} took {1}'.format(state,
format_duration(duration))
else:
print ''
print line
if __name__ == '__main__':
command = "sudo grep -e 'Running state' -e 'Completed state' -e 'Executing command state.highstate' /var/log/salt/minion"
response = envoy.run(command)
if response.std_err:
print response.std_err
else:
lines = response.std_out.split('\n')
process(lines)
|
Add a utility to measure salt performance.#!/usr/bin/env python
# coding: utf-8
import re
import envoy
def parse_state(line):
match = re.search('\[(?P<state>[^\]]+?)\] at time', line)
if match:
return match.group('state')
def parse_time(line):
time = line.rsplit(' ', 1)[-1]
time = map(float, time.split(':'))
return time[0] * 60 * 60 + time[1] * 60 + time[2]
def format_duration(seconds):
if seconds > 60:
minutes = int(seconds) / 60
seconds = seconds - minutes * 60
return '{0} minutes and {1} seconds'.format(
minutes, seconds)
else:
return '{0} seconds'.format(seconds)
def process(lines):
starts = {}
for line in lines:
line = line.strip()
if 'Running state' in line:
state = parse_state(line)
starts[state] = parse_time(line)
elif 'Completed state' in line:
state = parse_state(line)
end = parse_time(line)
duration = end - starts[state]
if duration > 30:
print 'State {0} took {1}'.format(state,
format_duration(duration))
else:
print ''
print line
if __name__ == '__main__':
command = "sudo grep -e 'Running state' -e 'Completed state' -e 'Executing command state.highstate' /var/log/salt/minion"
response = envoy.run(command)
if response.std_err:
print response.std_err
else:
lines = response.std_out.split('\n')
process(lines)
|
<commit_before><commit_msg>Add a utility to measure salt performance.<commit_after>#!/usr/bin/env python
# coding: utf-8
import re
import envoy
def parse_state(line):
match = re.search('\[(?P<state>[^\]]+?)\] at time', line)
if match:
return match.group('state')
def parse_time(line):
time = line.rsplit(' ', 1)[-1]
time = map(float, time.split(':'))
return time[0] * 60 * 60 + time[1] * 60 + time[2]
def format_duration(seconds):
if seconds > 60:
minutes = int(seconds) / 60
seconds = seconds - minutes * 60
return '{0} minutes and {1} seconds'.format(
minutes, seconds)
else:
return '{0} seconds'.format(seconds)
def process(lines):
starts = {}
for line in lines:
line = line.strip()
if 'Running state' in line:
state = parse_state(line)
starts[state] = parse_time(line)
elif 'Completed state' in line:
state = parse_state(line)
end = parse_time(line)
duration = end - starts[state]
if duration > 30:
print 'State {0} took {1}'.format(state,
format_duration(duration))
else:
print ''
print line
if __name__ == '__main__':
command = "sudo grep -e 'Running state' -e 'Completed state' -e 'Executing command state.highstate' /var/log/salt/minion"
response = envoy.run(command)
if response.std_err:
print response.std_err
else:
lines = response.std_out.split('\n')
process(lines)
|
|
1107b5435d7564cf3dcadfa6515d397eab203c6f
|
bs.py
|
bs.py
|
import urllib
import re
import math
from bs4 import BeautifulSoup
## This is my small project on Sentimental Analysis
## Please NOTE: You cannot use this code without prior permission
poststext=[]
htmlfile = urllib.urlopen("http://manraj.collegespace.in/sentimentanalysisPY/")
htmltext = htmlfile.read()
htmlfile.close()
soup = BeautifulSoup(htmltext)
post = soup.find_all("post")
for text in post:
poststext.append(''.join(text.find_all(text=True)))
i=0
patternsplit = re.compile(r"\W+")
list=["love","hate"]
while i<len(poststext):
words = patternsplit.split(poststext[i].lower())
sentilist = map(lambda word: word in list,words)
t = sentilist.count(True)
f = sentilist.count(False)
factor = t*100/math.sqrt(len(sentilist))
print str(factor) + ' %'
i+=1
|
Add list of words and combine it with twitter.
|
Add list of words and combine it with twitter.
|
Python
|
mit
|
ManrajGrover/Sentimental-Analysis
|
Add list of words and combine it with twitter.
|
import urllib
import re
import math
from bs4 import BeautifulSoup
## This is my small project on Sentimental Analysis
## Please NOTE: You cannot use this code without prior permission
poststext=[]
htmlfile = urllib.urlopen("http://manraj.collegespace.in/sentimentanalysisPY/")
htmltext = htmlfile.read()
htmlfile.close()
soup = BeautifulSoup(htmltext)
post = soup.find_all("post")
for text in post:
poststext.append(''.join(text.find_all(text=True)))
i=0
patternsplit = re.compile(r"\W+")
list=["love","hate"]
while i<len(poststext):
words = patternsplit.split(poststext[i].lower())
sentilist = map(lambda word: word in list,words)
t = sentilist.count(True)
f = sentilist.count(False)
factor = t*100/math.sqrt(len(sentilist))
print str(factor) + ' %'
i+=1
|
<commit_before><commit_msg>Add list of words and combine it with twitter.<commit_after>
|
import urllib
import re
import math
from bs4 import BeautifulSoup
## This is my small project on Sentimental Analysis
## Please NOTE: You cannot use this code without prior permission
poststext=[]
htmlfile = urllib.urlopen("http://manraj.collegespace.in/sentimentanalysisPY/")
htmltext = htmlfile.read()
htmlfile.close()
soup = BeautifulSoup(htmltext)
post = soup.find_all("post")
for text in post:
poststext.append(''.join(text.find_all(text=True)))
i=0
patternsplit = re.compile(r"\W+")
list=["love","hate"]
while i<len(poststext):
words = patternsplit.split(poststext[i].lower())
sentilist = map(lambda word: word in list,words)
t = sentilist.count(True)
f = sentilist.count(False)
factor = t*100/math.sqrt(len(sentilist))
print str(factor) + ' %'
i+=1
|
Add list of words and combine it with twitter.import urllib
import re
import math
from bs4 import BeautifulSoup
## This is my small project on Sentimental Analysis
## Please NOTE: You cannot use this code without prior permission
poststext=[]
htmlfile = urllib.urlopen("http://manraj.collegespace.in/sentimentanalysisPY/")
htmltext = htmlfile.read()
htmlfile.close()
soup = BeautifulSoup(htmltext)
post = soup.find_all("post")
for text in post:
poststext.append(''.join(text.find_all(text=True)))
i=0
patternsplit = re.compile(r"\W+")
list=["love","hate"]
while i<len(poststext):
words = patternsplit.split(poststext[i].lower())
sentilist = map(lambda word: word in list,words)
t = sentilist.count(True)
f = sentilist.count(False)
factor = t*100/math.sqrt(len(sentilist))
print str(factor) + ' %'
i+=1
|
<commit_before><commit_msg>Add list of words and combine it with twitter.<commit_after>import urllib
import re
import math
from bs4 import BeautifulSoup
## This is my small project on Sentimental Analysis
## Please NOTE: You cannot use this code without prior permission
poststext=[]
htmlfile = urllib.urlopen("http://manraj.collegespace.in/sentimentanalysisPY/")
htmltext = htmlfile.read()
htmlfile.close()
soup = BeautifulSoup(htmltext)
post = soup.find_all("post")
for text in post:
poststext.append(''.join(text.find_all(text=True)))
i=0
patternsplit = re.compile(r"\W+")
list=["love","hate"]
while i<len(poststext):
words = patternsplit.split(poststext[i].lower())
sentilist = map(lambda word: word in list,words)
t = sentilist.count(True)
f = sentilist.count(False)
factor = t*100/math.sqrt(len(sentilist))
print str(factor) + ' %'
i+=1
|
|
d1e241e0b9ce264227e151df7a302471e0df7e6d
|
python/vpc_flow_logs_enabled.py
|
python/vpc_flow_logs_enabled.py
|
#
# This file made available under CC0 1.0 Universal (https://creativecommons.org/publicdomain/zero/1.0/legalcode)
#
import boto3
import json
def evaluate_compliance(config_item, is_flow_logs_enabled):
if (config_item['resourceType'] != 'AWS::EC2::VPC'):
return 'NOT_APPLICABLE'
elif is_flow_logs_enabled:
return 'COMPLIANT'
else:
return 'NON_COMPLIANT'
def is_flow_logs_enabled(vpc_id):
ec2 = boto3.client('ec2')
response = ec2.describe_flow_logs(
Filter=[
{
'Name': 'resource-id',
'Values': [
vpc_id,
]
},
],
)
if len(response[u'FlowLogs']) != 0: return True
def lambda_handler(event, context):
invoking_event = json.loads(event['invokingEvent'])
compliance_value = 'NOT_APPLICABLE'
vpc_id = invoking_event['configurationItem']['resourceId']
compliance_value = evaluate_compliance(
invoking_event['configurationItem'], is_flow_logs_enabled(vpc_id))
config = boto3.client('config')
response = config.put_evaluations(
Evaluations=[
{
'ComplianceResourceType': invoking_event['configurationItem']['resourceType'],
'ComplianceResourceId': vpc_id,
'ComplianceType': compliance_value,
'OrderingTimestamp': invoking_event['configurationItem']['configurationItemCaptureTime']
},
],
ResultToken=event['resultToken'])
|
Add rule that vpc flow logs are enabled
|
Add rule that vpc flow logs are enabled
|
Python
|
cc0-1.0
|
awslabs/aws-config-rules,awslabs/aws-config-rules,awslabs/aws-config-rules,ravvav/aws-config-rules,aaronkhoo/aws-config-rules,ravvav/aws-config-rules,anthroprose/aws-config-rules,ravvav/aws-config-rules,anthroprose/aws-config-rules,aaronkhoo/aws-config-rules,aaronkhoo/aws-config-rules
|
Add rule that vpc flow logs are enabled
|
#
# This file made available under CC0 1.0 Universal (https://creativecommons.org/publicdomain/zero/1.0/legalcode)
#
import boto3
import json
def evaluate_compliance(config_item, is_flow_logs_enabled):
if (config_item['resourceType'] != 'AWS::EC2::VPC'):
return 'NOT_APPLICABLE'
elif is_flow_logs_enabled:
return 'COMPLIANT'
else:
return 'NON_COMPLIANT'
def is_flow_logs_enabled(vpc_id):
ec2 = boto3.client('ec2')
response = ec2.describe_flow_logs(
Filter=[
{
'Name': 'resource-id',
'Values': [
vpc_id,
]
},
],
)
if len(response[u'FlowLogs']) != 0: return True
def lambda_handler(event, context):
invoking_event = json.loads(event['invokingEvent'])
compliance_value = 'NOT_APPLICABLE'
vpc_id = invoking_event['configurationItem']['resourceId']
compliance_value = evaluate_compliance(
invoking_event['configurationItem'], is_flow_logs_enabled(vpc_id))
config = boto3.client('config')
response = config.put_evaluations(
Evaluations=[
{
'ComplianceResourceType': invoking_event['configurationItem']['resourceType'],
'ComplianceResourceId': vpc_id,
'ComplianceType': compliance_value,
'OrderingTimestamp': invoking_event['configurationItem']['configurationItemCaptureTime']
},
],
ResultToken=event['resultToken'])
|
<commit_before><commit_msg>Add rule that vpc flow logs are enabled<commit_after>
|
#
# This file made available under CC0 1.0 Universal (https://creativecommons.org/publicdomain/zero/1.0/legalcode)
#
import boto3
import json
def evaluate_compliance(config_item, is_flow_logs_enabled):
if (config_item['resourceType'] != 'AWS::EC2::VPC'):
return 'NOT_APPLICABLE'
elif is_flow_logs_enabled:
return 'COMPLIANT'
else:
return 'NON_COMPLIANT'
def is_flow_logs_enabled(vpc_id):
ec2 = boto3.client('ec2')
response = ec2.describe_flow_logs(
Filter=[
{
'Name': 'resource-id',
'Values': [
vpc_id,
]
},
],
)
if len(response[u'FlowLogs']) != 0: return True
def lambda_handler(event, context):
invoking_event = json.loads(event['invokingEvent'])
compliance_value = 'NOT_APPLICABLE'
vpc_id = invoking_event['configurationItem']['resourceId']
compliance_value = evaluate_compliance(
invoking_event['configurationItem'], is_flow_logs_enabled(vpc_id))
config = boto3.client('config')
response = config.put_evaluations(
Evaluations=[
{
'ComplianceResourceType': invoking_event['configurationItem']['resourceType'],
'ComplianceResourceId': vpc_id,
'ComplianceType': compliance_value,
'OrderingTimestamp': invoking_event['configurationItem']['configurationItemCaptureTime']
},
],
ResultToken=event['resultToken'])
|
Add rule that vpc flow logs are enabled#
# This file made available under CC0 1.0 Universal (https://creativecommons.org/publicdomain/zero/1.0/legalcode)
#
import boto3
import json
def evaluate_compliance(config_item, is_flow_logs_enabled):
if (config_item['resourceType'] != 'AWS::EC2::VPC'):
return 'NOT_APPLICABLE'
elif is_flow_logs_enabled:
return 'COMPLIANT'
else:
return 'NON_COMPLIANT'
def is_flow_logs_enabled(vpc_id):
ec2 = boto3.client('ec2')
response = ec2.describe_flow_logs(
Filter=[
{
'Name': 'resource-id',
'Values': [
vpc_id,
]
},
],
)
if len(response[u'FlowLogs']) != 0: return True
def lambda_handler(event, context):
invoking_event = json.loads(event['invokingEvent'])
compliance_value = 'NOT_APPLICABLE'
vpc_id = invoking_event['configurationItem']['resourceId']
compliance_value = evaluate_compliance(
invoking_event['configurationItem'], is_flow_logs_enabled(vpc_id))
config = boto3.client('config')
response = config.put_evaluations(
Evaluations=[
{
'ComplianceResourceType': invoking_event['configurationItem']['resourceType'],
'ComplianceResourceId': vpc_id,
'ComplianceType': compliance_value,
'OrderingTimestamp': invoking_event['configurationItem']['configurationItemCaptureTime']
},
],
ResultToken=event['resultToken'])
|
<commit_before><commit_msg>Add rule that vpc flow logs are enabled<commit_after>#
# This file made available under CC0 1.0 Universal (https://creativecommons.org/publicdomain/zero/1.0/legalcode)
#
import boto3
import json
def evaluate_compliance(config_item, is_flow_logs_enabled):
if (config_item['resourceType'] != 'AWS::EC2::VPC'):
return 'NOT_APPLICABLE'
elif is_flow_logs_enabled:
return 'COMPLIANT'
else:
return 'NON_COMPLIANT'
def is_flow_logs_enabled(vpc_id):
ec2 = boto3.client('ec2')
response = ec2.describe_flow_logs(
Filter=[
{
'Name': 'resource-id',
'Values': [
vpc_id,
]
},
],
)
if len(response[u'FlowLogs']) != 0: return True
def lambda_handler(event, context):
invoking_event = json.loads(event['invokingEvent'])
compliance_value = 'NOT_APPLICABLE'
vpc_id = invoking_event['configurationItem']['resourceId']
compliance_value = evaluate_compliance(
invoking_event['configurationItem'], is_flow_logs_enabled(vpc_id))
config = boto3.client('config')
response = config.put_evaluations(
Evaluations=[
{
'ComplianceResourceType': invoking_event['configurationItem']['resourceType'],
'ComplianceResourceId': vpc_id,
'ComplianceType': compliance_value,
'OrderingTimestamp': invoking_event['configurationItem']['configurationItemCaptureTime']
},
],
ResultToken=event['resultToken'])
|
|
b48cbb3a41372e785bb840a14880ebade4be4e67
|
py/maximum-product-of-three-numbers.py
|
py/maximum-product-of-three-numbers.py
|
from operator import mul
import heapq
class Solution(object):
def maximumProduct(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
largest3 = heapq.nlargest(3, nums)
smallest3 = heapq.nsmallest(3, nums)
if largest3[0] <= 0:
if largest3[0] == 0:
return 0
else:
return reduce(mul, largest3)
elif largest3[1] <= 0:
if smallest3[1] < 0:
return smallest3[0] * smallest3[1] * largest3[0]
else:
return 0
elif largest3[2] <= 0:
if smallest3[1] < 0:
return smallest3[0] * smallest3[1] * largest3[0]
else:
return reduce(mul, largest3)
else:
if smallest3[1] < 0:
return max(smallest3[0] * smallest3[1] * largest3[0], reduce(mul, largest3))
else:
return reduce(mul, largest3)
|
Add py solution for 628. Maximum Product of Three Numbers
|
Add py solution for 628. Maximum Product of Three Numbers
628. Maximum Product of Three Numbers: https://leetcode.com/problems/maximum-product-of-three-numbers/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 628. Maximum Product of Three Numbers
628. Maximum Product of Three Numbers: https://leetcode.com/problems/maximum-product-of-three-numbers/
|
from operator import mul
import heapq
class Solution(object):
def maximumProduct(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
largest3 = heapq.nlargest(3, nums)
smallest3 = heapq.nsmallest(3, nums)
if largest3[0] <= 0:
if largest3[0] == 0:
return 0
else:
return reduce(mul, largest3)
elif largest3[1] <= 0:
if smallest3[1] < 0:
return smallest3[0] * smallest3[1] * largest3[0]
else:
return 0
elif largest3[2] <= 0:
if smallest3[1] < 0:
return smallest3[0] * smallest3[1] * largest3[0]
else:
return reduce(mul, largest3)
else:
if smallest3[1] < 0:
return max(smallest3[0] * smallest3[1] * largest3[0], reduce(mul, largest3))
else:
return reduce(mul, largest3)
|
<commit_before><commit_msg>Add py solution for 628. Maximum Product of Three Numbers
628. Maximum Product of Three Numbers: https://leetcode.com/problems/maximum-product-of-three-numbers/<commit_after>
|
from operator import mul
import heapq
class Solution(object):
def maximumProduct(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
largest3 = heapq.nlargest(3, nums)
smallest3 = heapq.nsmallest(3, nums)
if largest3[0] <= 0:
if largest3[0] == 0:
return 0
else:
return reduce(mul, largest3)
elif largest3[1] <= 0:
if smallest3[1] < 0:
return smallest3[0] * smallest3[1] * largest3[0]
else:
return 0
elif largest3[2] <= 0:
if smallest3[1] < 0:
return smallest3[0] * smallest3[1] * largest3[0]
else:
return reduce(mul, largest3)
else:
if smallest3[1] < 0:
return max(smallest3[0] * smallest3[1] * largest3[0], reduce(mul, largest3))
else:
return reduce(mul, largest3)
|
Add py solution for 628. Maximum Product of Three Numbers
628. Maximum Product of Three Numbers: https://leetcode.com/problems/maximum-product-of-three-numbers/from operator import mul
import heapq
class Solution(object):
def maximumProduct(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
largest3 = heapq.nlargest(3, nums)
smallest3 = heapq.nsmallest(3, nums)
if largest3[0] <= 0:
if largest3[0] == 0:
return 0
else:
return reduce(mul, largest3)
elif largest3[1] <= 0:
if smallest3[1] < 0:
return smallest3[0] * smallest3[1] * largest3[0]
else:
return 0
elif largest3[2] <= 0:
if smallest3[1] < 0:
return smallest3[0] * smallest3[1] * largest3[0]
else:
return reduce(mul, largest3)
else:
if smallest3[1] < 0:
return max(smallest3[0] * smallest3[1] * largest3[0], reduce(mul, largest3))
else:
return reduce(mul, largest3)
|
<commit_before><commit_msg>Add py solution for 628. Maximum Product of Three Numbers
628. Maximum Product of Three Numbers: https://leetcode.com/problems/maximum-product-of-three-numbers/<commit_after>from operator import mul
import heapq
class Solution(object):
def maximumProduct(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
largest3 = heapq.nlargest(3, nums)
smallest3 = heapq.nsmallest(3, nums)
if largest3[0] <= 0:
if largest3[0] == 0:
return 0
else:
return reduce(mul, largest3)
elif largest3[1] <= 0:
if smallest3[1] < 0:
return smallest3[0] * smallest3[1] * largest3[0]
else:
return 0
elif largest3[2] <= 0:
if smallest3[1] < 0:
return smallest3[0] * smallest3[1] * largest3[0]
else:
return reduce(mul, largest3)
else:
if smallest3[1] < 0:
return max(smallest3[0] * smallest3[1] * largest3[0], reduce(mul, largest3))
else:
return reduce(mul, largest3)
|
|
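A hypothetical usage sketch for the solution above; the inputs and expected products are illustrative. The bare reduce builtin implies Python 2; on Python 3 the class would also need from functools import reduce.
s = Solution()
assert s.maximumProduct([1, 2, 3, 4]) == 24            # product of the three largest positives
assert s.maximumProduct([-10, -10, 1, 3, 2]) == 300    # two negatives times the maximum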
a5173a0a153f40b62a7f3c086de1d97b97279bd4
|
test/helper.py
|
test/helper.py
|
import os, os.path
import nose.tools as nose
import biobox_cli.container as ctn
def project_root():
return os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
def is_ci_server():
return "CI" in os.environ.keys()
def remove_container(container):
if not is_ci_server():
ctn.remove(container)
def assert_file_not_empty(file_):
file_size = os.stat(file_).st_size
nose.assert_not_equal(file_size, 0,
"File should not be empty but is: {}".format(file_))
def assert_file_contents_equal(file_, contents):
with open(file_, 'r') as f:
nose.assert_equal(f.read(), contents)
|
import os, os.path
import nose.tools as nose
import biobox_cli.container as ctn
def project_root():
return os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
def is_ci_server():
return "CIRCLECI" in os.environ.keys()
def remove_container(container):
if not is_ci_server():
ctn.remove(container)
def assert_file_not_empty(file_):
file_size = os.stat(file_).st_size
nose.assert_not_equal(file_size, 0,
"File should not be empty but is: {}".format(file_))
def assert_file_contents_equal(file_, contents):
with open(file_, 'r') as f:
nose.assert_equal(f.read(), contents)
|
Fix environment variable checks for removing container
|
Fix environment variable checks for removing container
|
Python
|
mit
|
michaelbarton/command-line-interface,michaelbarton/command-line-interface,bioboxes/command-line-interface,bioboxes/command-line-interface
|
import os, os.path
import nose.tools as nose
import biobox_cli.container as ctn
def project_root():
return os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
def is_ci_server():
return "CI" in os.environ.keys()
def remove_container(container):
if not is_ci_server():
ctn.remove(container)
def assert_file_not_empty(file_):
file_size = os.stat(file_).st_size
nose.assert_not_equal(file_size, 0,
"File should not be empty but is: {}".format(file_))
def assert_file_contents_equal(file_, contents):
with open(file_, 'r') as f:
nose.assert_equal(f.read(), contents)
Fix environment variable checks for removing container
|
import os, os.path
import nose.tools as nose
import biobox_cli.container as ctn
def project_root():
return os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
def is_ci_server():
return "CIRCLECI" in os.environ.keys()
def remove_container(container):
if not is_ci_server():
ctn.remove(container)
def assert_file_not_empty(file_):
file_size = os.stat(file_).st_size
nose.assert_not_equal(file_size, 0,
"File should not be empty but is: {}".format(file_))
def assert_file_contents_equal(file_, contents):
with open(file_, 'r') as f:
nose.assert_equal(f.read(), contents)
|
<commit_before>import os, os.path
import nose.tools as nose
import biobox_cli.container as ctn
def project_root():
return os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
def is_ci_server():
return "CI" in os.environ.keys()
def remove_container(container):
if not is_ci_server():
ctn.remove(container)
def assert_file_not_empty(file_):
file_size = os.stat(file_).st_size
nose.assert_not_equal(file_size, 0,
"File should not be empty but is: {}".format(file_))
def assert_file_contents_equal(file_, contents):
with open(file_, 'r') as f:
nose.assert_equal(f.read(), contents)
<commit_msg>Fix environment variable checks for removing container<commit_after>
|
import os, os.path
import nose.tools as nose
import biobox_cli.container as ctn
def project_root():
return os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
def is_ci_server():
return "CIRCLECI" in os.environ.keys()
def remove_container(container):
if not is_ci_server():
ctn.remove(container)
def assert_file_not_empty(file_):
file_size = os.stat(file_).st_size
nose.assert_not_equal(file_size, 0,
"File should not be empty but is: {}".format(file_))
def assert_file_contents_equal(file_, contents):
with open(file_, 'r') as f:
nose.assert_equal(f.read(), contents)
|
import os, os.path
import nose.tools as nose
import biobox_cli.container as ctn
def project_root():
return os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
def is_ci_server():
return "CI" in os.environ.keys()
def remove_container(container):
if not is_ci_server():
ctn.remove(container)
def assert_file_not_empty(file_):
file_size = os.stat(file_).st_size
nose.assert_not_equal(file_size, 0,
"File should not be empty but is: {}".format(file_))
def assert_file_contents_equal(file_, contents):
with open(file_, 'r') as f:
nose.assert_equal(f.read(), contents)
Fix environment variable checks for removing containerimport os, os.path
import nose.tools as nose
import biobox_cli.container as ctn
def project_root():
return os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
def is_ci_server():
return "CIRCLECI" in os.environ.keys()
def remove_container(container):
if not is_ci_server():
ctn.remove(container)
def assert_file_not_empty(file_):
file_size = os.stat(file_).st_size
nose.assert_not_equal(file_size, 0,
"File should not be empty but is: {}".format(file_))
def assert_file_contents_equal(file_, contents):
with open(file_, 'r') as f:
nose.assert_equal(f.read(), contents)
|
<commit_before>import os, os.path
import nose.tools as nose
import biobox_cli.container as ctn
def project_root():
return os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
def is_ci_server():
return "CI" in os.environ.keys()
def remove_container(container):
if not is_ci_server():
ctn.remove(container)
def assert_file_not_empty(file_):
file_size = os.stat(file_).st_size
nose.assert_not_equal(file_size, 0,
"File should not be empty but is: {}".format(file_))
def assert_file_contents_equal(file_, contents):
with open(file_, 'r') as f:
nose.assert_equal(f.read(), contents)
<commit_msg>Fix environment variable checks for removing container<commit_after>import os, os.path
import nose.tools as nose
import biobox_cli.container as ctn
def project_root():
return os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
def is_ci_server():
return "CIRCLECI" in os.environ.keys()
def remove_container(container):
if not is_ci_server():
ctn.remove(container)
def assert_file_not_empty(file_):
file_size = os.stat(file_).st_size
nose.assert_not_equal(file_size, 0,
"File should not be empty but is: {}".format(file_))
def assert_file_contents_equal(file_, contents):
with open(file_, 'r') as f:
nose.assert_equal(f.read(), contents)
|
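For context on the one-line change above: CircleCI exports CIRCLECI=true alongside the generic CI flag that many other environments also set, so the narrower check should only skip container cleanup on CircleCI itself. The lines below are a hypothetical demonstration, not part of the helper.
os.environ['CIRCLECI'] = 'true'   # simulate a CircleCI environment
assert is_ci_server()
del os.environ['CIRCLECI']
assert not is_ci_server()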
2654bb9ee85a9c4ba4798c1a1ce40df20417380a
|
src/owncloud_rename.py
|
src/owncloud_rename.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Parse tree and find files matching owncloud forbidden characters.
Rename in place or move into a specific folder
"""
import KmdCmd
import KmdFiles
import os, re
import logging
class KmdOwncloudRename(KmdCmd.KmdCommand):
regexp = r'[\*:"?><|]+'
def extendParser(self):
super(KmdOwncloudRename, self).extendParser()
#Extend parser
self.parser.add_argument('folders', metavar='</path/to/tree>', nargs='+', help='The source tree')
self.parser.add_argument('--moveto', metavar='</path/to/folder>', nargs=1, default=None, help='Path to move bad named files')
def run(self):
owncloudre = re.compile(self.regexp)
for folder in self.args.folders :
logging.info("Running in %s", folder)
for root, dirs, files in os.walk(folder):
for i in files:
m = owncloudre.search("%s" % i)
if m is not None :
newname = owncloudre.sub('_', i)
logging.info("Renaming %s into %s", i, newname)
if self.args.doit :
origpath = os.path.join(root, i)
newpath = os.path.join(root, newname)
KmdFiles.fileMoveRename(origpath, newpath, self.args.doit)
logging.debug("Done : %s -> %s", origpath, newpath)
if __name__ == "__main__":
cmd = KmdOwncloudRename(__doc__)
cmd.run()
|
Rename files according to owncloud forbidden characters
|
Rename files according to owncloud forbidden characters
|
Python
|
mit
|
pzia/keepmydatas
|
Rename files according to owncloud forbidden characters
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Parse tree and find files matching owncloud forbidden characters.
Rename in place or move into a specific folder
"""
import KmdCmd
import KmdFiles
import os, re
import logging
class KmdOwncloudRename(KmdCmd.KmdCommand):
regexp = r'[\*:"?><|]+'
def extendParser(self):
super(KmdOwncloudRename, self).extendParser()
#Extend parser
self.parser.add_argument('folders', metavar='</path/to/tree>', nargs='+', help='The source tree')
self.parser.add_argument('--moveto', metavar='</path/to/folder>', nargs=1, default=None, help='Path to move bad named files')
def run(self):
owncloudre = re.compile(self.regexp)
for folder in self.args.folders :
logging.info("Running in %s", folder)
for root, dirs, files in os.walk(folder):
for i in files:
m = owncloudre.search("%s" % i)
if m is not None :
newname = owncloudre.sub('_', i)
logging.info("Renaming %s into %s", i, newname)
if self.args.doit :
origpath = os.path.join(root, i)
newpath = os.path.join(root, newname)
KmdFiles.fileMoveRename(origpath, newpath, self.args.doit)
logging.debug("Done : %s -> %s", origpath, newpath)
if __name__ == "__main__":
cmd = KmdOwncloudRename(__doc__)
cmd.run()
|
<commit_before><commit_msg>Rename files according to owncloud forbidden characters<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Parse tree and find files matching owncloud forbidden characters.
Rename in place or move into a specific folder
"""
import KmdCmd
import KmdFiles
import os, re
import logging
class KmdOwncloudRename(KmdCmd.KmdCommand):
regexp = r'[\*:"?><|]+'
def extendParser(self):
super(KmdOwncloudRename, self).extendParser()
#Extend parser
self.parser.add_argument('folders', metavar='</path/to/tree>', nargs='+', help='The source tree')
self.parser.add_argument('--moveto', metavar='</path/to/folder>', nargs=1, default=None, help='Path to move bad named files')
def run(self):
owncloudre = re.compile(self.regexp)
for folder in self.args.folders :
logging.info("Running in %s", folder)
for root, dirs, files in os.walk(folder):
for i in files:
m = owncloudre.search("%s" % i)
if m is not None :
newname = owncloudre.sub('_', i)
logging.info("Renaming %s into %s", i, newname)
if self.args.doit :
origpath = os.path.join(root, i)
newpath = os.path.join(root, newname)
KmdFiles.fileMoveRename(origpath, newpath, self.args.doit)
logging.debug("Done : %s -> %s", origpath, newpath)
if __name__ == "__main__":
cmd = KmdOwncloudRename(__doc__)
cmd.run()
|
Rename files according to owncloud forbidden characters#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Parse tree and find files matching owncloud forbidden characters.
Rename in place or move into a specific folder
"""
import KmdCmd
import KmdFiles
import os, re
import logging
class KmdOwncloudRename(KmdCmd.KmdCommand):
regexp = r'[\*:"?><|]+'
def extendParser(self):
super(KmdOwncloudRename, self).extendParser()
#Extend parser
self.parser.add_argument('folders', metavar='</path/to/tree>', nargs='+', help='The source tree')
self.parser.add_argument('--moveto', metavar='</path/to/folder>', nargs=1, default=None, help='Path to move bad named files')
def run(self):
owncloudre = re.compile(self.regexp)
for folder in self.args.folders :
logging.info("Running in %s", folder)
for root, dirs, files in os.walk(folder):
for i in files:
m = owncloudre.search("%s" % i)
if m is not None :
newname = owncloudre.sub('_', i)
logging.info("Renaming %s into %s", i, newname)
if self.args.doit :
origpath = os.path.join(root, i)
newpath = os.path.join(root, newname)
KmdFiles.fileMoveRename(origpath, newpath, self.args.doit)
logging.debug("Done : %s -> %s", origpath, newpath)
if __name__ == "__main__":
cmd = KmdOwncloudRename(__doc__)
cmd.run()
|
<commit_before><commit_msg>Rename files according to owncloud forbidden characters<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Parse tree and find files matching owncloud forbidden characters.
Rename in place or move into a specific folder
"""
import KmdCmd
import KmdFiles
import os, re
import logging
class KmdOwncloudRename(KmdCmd.KmdCommand):
regexp = r'[\*:"?><|]+'
def extendParser(self):
super(KmdOwncloudRename, self).extendParser()
#Extend parser
self.parser.add_argument('folders', metavar='</path/to/tree>', nargs='+', help='The source tree')
self.parser.add_argument('--moveto', metavar='</path/to/folder>', nargs=1, default=None, help='Path to move bad named files')
def run(self):
owncloudre = re.compile(self.regexp)
for folder in self.args.folders :
logging.info("Running in %s", folder)
for root, dirs, files in os.walk(folder):
for i in files:
m = owncloudre.search("%s" % i)
if m is not None :
newname = owncloudre.sub('_', i)
logging.info("Renaming %s into %s", i, newname)
if self.args.doit :
origpath = os.path.join(root, i)
newpath = os.path.join(root, newname)
KmdFiles.fileMoveRename(origpath, newpath, self.args.doit)
logging.debug("Done : %s -> %s", origpath, newpath)
if __name__ == "__main__":
cmd = KmdOwncloudRename(__doc__)
cmd.run()
|
|
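A quick standalone illustration (not part of the script) of what the forbidden-character regexp above does to a filename; the sample name is invented.
import re
owncloudre = re.compile(r'[\*:"?><|]+')
print(owncloudre.sub('_', 'report *final*: v2?.pdf'))   # prints: report _final_ v2_.pdf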
cfc1d8c604ab4d472d7b8a73374dcead40c88073
|
indico/modules/core/captcha_test.py
|
indico/modules/core/captcha_test.py
|
# This file is part of Indico.
# Copyright (C) 2002 - 2022 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import pytest
from flask import session
from .captcha import _verify_captcha, generate_captcha_challenge
class _MockRandom:
@staticmethod
def choices(pop, k):
return list('6970')
@pytest.mark.filterwarnings('ignore:.*removed in Pillow 10.*:DeprecationWarning')
def test_generate_captcha_challenge(monkeypatch):
# this test exists mainly to fail in case the captcha lib isn't updated
# to stop using deprecated Pillow features by the time they remove them
    # for good in 2023
monkeypatch.setattr('indico.modules.core.captcha.random', _MockRandom)
data, answer = generate_captcha_challenge()
assert set(data) == {'image', 'audio'}
assert answer == '6970'
@pytest.mark.usefixtures('request_context')
def test_verify_captcha():
assert not _verify_captcha('')
session['captcha_state'] = '7234'
assert not _verify_captcha('')
assert not _verify_captcha('0000')
assert not _verify_captcha('4327')
assert _verify_captcha('1234')
assert _verify_captcha('7234')
|
Add test for captcha generation/validation
|
Add test for captcha generation/validation
|
Python
|
mit
|
indico/indico,indico/indico,indico/indico,indico/indico
|
Add test for captcha generation/validation
|
# This file is part of Indico.
# Copyright (C) 2002 - 2022 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import pytest
from flask import session
from .captcha import _verify_captcha, generate_captcha_challenge
class _MockRandom:
@staticmethod
def choices(pop, k):
return list('6970')
@pytest.mark.filterwarnings('ignore:.*removed in Pillow 10.*:DeprecationWarning')
def test_generate_captcha_challenge(monkeypatch):
# this test exists mainly to fail in case the captcha lib isn't updated
# to stop using deprecated Pillow features by the time they remove them
    # for good in 2023
monkeypatch.setattr('indico.modules.core.captcha.random', _MockRandom)
data, answer = generate_captcha_challenge()
assert set(data) == {'image', 'audio'}
assert answer == '6970'
@pytest.mark.usefixtures('request_context')
def test_verify_captcha():
assert not _verify_captcha('')
session['captcha_state'] = '7234'
assert not _verify_captcha('')
assert not _verify_captcha('0000')
assert not _verify_captcha('4327')
assert _verify_captcha('1234')
assert _verify_captcha('7234')
|
<commit_before><commit_msg>Add test for captcha generation/validation<commit_after>
|
# This file is part of Indico.
# Copyright (C) 2002 - 2022 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import pytest
from flask import session
from .captcha import _verify_captcha, generate_captcha_challenge
class _MockRandom:
@staticmethod
def choices(pop, k):
return list('6970')
@pytest.mark.filterwarnings('ignore:.*removed in Pillow 10.*:DeprecationWarning')
def test_generate_captcha_challenge(monkeypatch):
# this test exists mainly to fail in case the captcha lib isn't updated
# to stop using deprecated Pillow features by the time they remove them
    # for good in 2023
monkeypatch.setattr('indico.modules.core.captcha.random', _MockRandom)
data, answer = generate_captcha_challenge()
assert set(data) == {'image', 'audio'}
assert answer == '6970'
@pytest.mark.usefixtures('request_context')
def test_verify_captcha():
assert not _verify_captcha('')
session['captcha_state'] = '7234'
assert not _verify_captcha('')
assert not _verify_captcha('0000')
assert not _verify_captcha('4327')
assert _verify_captcha('1234')
assert _verify_captcha('7234')
|
Add test for captcha generation/validation# This file is part of Indico.
# Copyright (C) 2002 - 2022 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import pytest
from flask import session
from .captcha import _verify_captcha, generate_captcha_challenge
class _MockRandom:
@staticmethod
def choices(pop, k):
return list('6970')
@pytest.mark.filterwarnings('ignore:.*removed in Pillow 10.*:DeprecationWarning')
def test_generate_captcha_challenge(monkeypatch):
# this test exists mainly to fail in case the captcha lib isn't updated
# to stop using deprecated Pillow features by the time they remove them
    # for good in 2023
monkeypatch.setattr('indico.modules.core.captcha.random', _MockRandom)
data, answer = generate_captcha_challenge()
assert set(data) == {'image', 'audio'}
assert answer == '6970'
@pytest.mark.usefixtures('request_context')
def test_verify_captcha():
assert not _verify_captcha('')
session['captcha_state'] = '7234'
assert not _verify_captcha('')
assert not _verify_captcha('0000')
assert not _verify_captcha('4327')
assert _verify_captcha('1234')
assert _verify_captcha('7234')
|
<commit_before><commit_msg>Add test for captcha generation/validation<commit_after># This file is part of Indico.
# Copyright (C) 2002 - 2022 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import pytest
from flask import session
from .captcha import _verify_captcha, generate_captcha_challenge
class _MockRandom:
@staticmethod
def choices(pop, k):
return list('6970')
@pytest.mark.filterwarnings('ignore:.*removed in Pillow 10.*:DeprecationWarning')
def test_generate_captcha_challenge(monkeypatch):
# this test exists mainly to fail in case the captcha lib isn't updated
# to stop using deprecated Pillow features by the time they remove them
    # for good in 2023
monkeypatch.setattr('indico.modules.core.captcha.random', _MockRandom)
data, answer = generate_captcha_challenge()
assert set(data) == {'image', 'audio'}
assert answer == '6970'
@pytest.mark.usefixtures('request_context')
def test_verify_captcha():
assert not _verify_captcha('')
session['captcha_state'] = '7234'
assert not _verify_captcha('')
assert not _verify_captcha('0000')
assert not _verify_captcha('4327')
assert _verify_captcha('1234')
assert _verify_captcha('7234')
|
|
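A rough sketch of the kind of digit-challenge generation that the _MockRandom stub above stands in for; the digit population, the length of 4, and the call shape are assumptions inferred from the test rather than Indico's actual implementation.
import random
import string
answer = ''.join(random.choices(string.digits, k=4))   # the mock pins this to '6970'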
b5f61c33c4e6cb73df5a00532191fbc38fbf3010
|
utils/verify_alerts.py
|
utils/verify_alerts.py
|
#!/usr/bin/env python
import os
import sys
from os.path import dirname, join, realpath
from optparse import OptionParser
# Get the current working directory of this file.
# http://stackoverflow.com/a/4060259/120999
__location__ = realpath(join(os.getcwd(), dirname(__file__)))
# Add the shared settings file to namespace.
sys.path.insert(0, join(__location__, '..', 'src'))
import settings
# Add the analyzer file to namespace.
sys.path.insert(0, join(__location__, '..', 'src', 'analyzer'))
from alerters import trigger_alert
parser = OptionParser()
parser.add_option("-t", "--trigger", dest="trigger", default=False,
help="Actually trigger the appropriate alerts (default is False)")
parser.add_option("-m", "--metric", dest="metric", default='skyline.horizon.queue_size',
help="Pass the metric to test (default is skyline.horizon.queue_size)")
(options, args) = parser.parse_args()
try:
alerts_enabled = settings.ENABLE_ALERTS
alerts = settings.ALERTS
except:
print "Exception: Check your settings file for the existence of ENABLE_ALERTS and ALERTS"
sys.exit()
print 'Verifying alerts for metric: "' + options.metric + '"'
# Send alerts
if alerts_enabled:
print 'Alerts Enabled'
for alert in alerts:
print 'Checking metric name contains: "' + alert[0] + '" to send via ' + alert[1]
if alert[0] in options.metric:
print '...Trigger alert via ' + alert[1]
if options.trigger:
trigger_alert(alert, options.metric)
else:
print 'Alerts are Disabled'
|
Add script to test/verify alert configuration
|
Add script to test/verify alert configuration
We're looking to build out another alerter or two, and wanted a way to test our current alert configurations as well as be able to trigger alerts as we develop new alerters.
|
Python
|
mit
|
triplekill/skyline,CDKGlobal/skyline,PaytmLabs/skyline,loggly/skyline,hcxiong/skyline,sdgdsffdsfff/skyline,CDKGlobal/skyline,hcxiong/skyline,sdgdsffdsfff/skyline,100star/skyline,etsy/skyline,triplekill/skyline,etsy/skyline,sdgdsffdsfff/skyline,PaytmLabs/skyline,MyNameIsMeerkat/skyline,100star/skyline,etsy/skyline,PaytmLabs/skyline,CDKGlobal/skyline,pombredanne/skyline,PaytmLabs/skyline,aelialper/skyline,loggly/skyline,triplekill/skyline,sdgdsffdsfff/skyline,pombredanne/skyline,loggly/skyline,aelialper/skyline,pombredanne/skyline,aelialper/skyline,pombredanne/skyline,hcxiong/skyline,triplekill/skyline,klynch/skyline,MyNameIsMeerkat/skyline,etsy/skyline,klynch/skyline,aelialper/skyline,loggly/skyline,hcxiong/skyline,CDKGlobal/skyline,klynch/skyline
|
Add script to test/verify alert configuration
We're looking to build out another alerter or two, and wanted a way to test our current alert configurations as well as be able to trigger alerts as we develop new alerters.
|
#!/usr/bin/env python
import os
import sys
from os.path import dirname, join, realpath
from optparse import OptionParser
# Get the current working directory of this file.
# http://stackoverflow.com/a/4060259/120999
__location__ = realpath(join(os.getcwd(), dirname(__file__)))
# Add the shared settings file to namespace.
sys.path.insert(0, join(__location__, '..', 'src'))
import settings
# Add the analyzer file to namespace.
sys.path.insert(0, join(__location__, '..', 'src', 'analyzer'))
from alerters import trigger_alert
parser = OptionParser()
parser.add_option("-t", "--trigger", dest="trigger", default=False,
help="Actually trigger the appropriate alerts (default is False)")
parser.add_option("-m", "--metric", dest="metric", default='skyline.horizon.queue_size',
help="Pass the metric to test (default is skyline.horizon.queue_size)")
(options, args) = parser.parse_args()
try:
alerts_enabled = settings.ENABLE_ALERTS
alerts = settings.ALERTS
except:
print "Exception: Check your settings file for the existence of ENABLE_ALERTS and ALERTS"
sys.exit()
print 'Verifying alerts for metric: "' + options.metric + '"'
# Send alerts
if alerts_enabled:
print 'Alerts Enabled'
for alert in alerts:
print 'Checking metric name contains: "' + alert[0] + '" to send via ' + alert[1]
if alert[0] in options.metric:
print '...Trigger alert via ' + alert[1]
if options.trigger:
trigger_alert(alert, options.metric)
else:
print 'Alerts are Disabled'
|
<commit_before><commit_msg>Add script to test/verify alert configuration
We're looking to build out another alerter or two, and wanted a way to test our current alert configurations as well as be able to trigger alerts as we develop new alerters.<commit_after>
|
#!/usr/bin/env python
import os
import sys
from os.path import dirname, join, realpath
from optparse import OptionParser
# Get the current working directory of this file.
# http://stackoverflow.com/a/4060259/120999
__location__ = realpath(join(os.getcwd(), dirname(__file__)))
# Add the shared settings file to namespace.
sys.path.insert(0, join(__location__, '..', 'src'))
import settings
# Add the analyzer file to namespace.
sys.path.insert(0, join(__location__, '..', 'src', 'analyzer'))
from alerters import trigger_alert
parser = OptionParser()
parser.add_option("-t", "--trigger", dest="trigger", default=False,
help="Actually trigger the appropriate alerts (default is False)")
parser.add_option("-m", "--metric", dest="metric", default='skyline.horizon.queue_size',
help="Pass the metric to test (default is skyline.horizon.queue_size)")
(options, args) = parser.parse_args()
try:
alerts_enabled = settings.ENABLE_ALERTS
alerts = settings.ALERTS
except:
print "Exception: Check your settings file for the existence of ENABLE_ALERTS and ALERTS"
sys.exit()
print 'Verifying alerts for metric: "' + options.metric + '"'
# Send alerts
if alerts_enabled:
print 'Alerts Enabled'
for alert in alerts:
print 'Checking metric name contains: "' + alert[0] + '" to send via ' + alert[1]
if alert[0] in options.metric:
print '...Trigger alert via ' + alert[1]
if options.trigger:
trigger_alert(alert, options.metric)
else:
print 'Alerts are Disabled'
|
Add script to test/verify alert configuration
We're looking to build out another alerter or two, and wanted a way to test our current alert configurations as well as be able to trigger alerts as we develop new alerters.#!/usr/bin/env python
import os
import sys
from os.path import dirname, join, realpath
from optparse import OptionParser
# Get the current working directory of this file.
# http://stackoverflow.com/a/4060259/120999
__location__ = realpath(join(os.getcwd(), dirname(__file__)))
# Add the shared settings file to namespace.
sys.path.insert(0, join(__location__, '..', 'src'))
import settings
# Add the analyzer file to namespace.
sys.path.insert(0, join(__location__, '..', 'src', 'analyzer'))
from alerters import trigger_alert
parser = OptionParser()
parser.add_option("-t", "--trigger", dest="trigger", default=False,
help="Actually trigger the appropriate alerts (default is False)")
parser.add_option("-m", "--metric", dest="metric", default='skyline.horizon.queue_size',
help="Pass the metric to test (default is skyline.horizon.queue_size)")
(options, args) = parser.parse_args()
try:
alerts_enabled = settings.ENABLE_ALERTS
alerts = settings.ALERTS
except:
print "Exception: Check your settings file for the existence of ENABLE_ALERTS and ALERTS"
sys.exit()
print 'Verifying alerts for metric: "' + options.metric + '"'
# Send alerts
if alerts_enabled:
print 'Alerts Enabled'
for alert in alerts:
print 'Checking metric name contains: "' + alert[0] + '" to send via ' + alert[1]
if alert[0] in options.metric:
print '...Trigger alert via ' + alert[1]
if options.trigger:
trigger_alert(alert, options.metric)
else:
print 'Alerts are Disabled'
|
<commit_before><commit_msg>Add script to test/verify alert configuration
We're looking to build out another alerter or two, and wanted a way to test our current alert configurations as well as be able to trigger alerts as we develop new alerters.<commit_after>#!/usr/bin/env python
import os
import sys
from os.path import dirname, join, realpath
from optparse import OptionParser
# Get the current working directory of this file.
# http://stackoverflow.com/a/4060259/120999
__location__ = realpath(join(os.getcwd(), dirname(__file__)))
# Add the shared settings file to namespace.
sys.path.insert(0, join(__location__, '..', 'src'))
import settings
# Add the analyzer file to namespace.
sys.path.insert(0, join(__location__, '..', 'src', 'analyzer'))
from alerters import trigger_alert
parser = OptionParser()
parser.add_option("-t", "--trigger", dest="trigger", default=False,
help="Actually trigger the appropriate alerts (default is False)")
parser.add_option("-m", "--metric", dest="metric", default='skyline.horizon.queue_size',
help="Pass the metric to test (default is skyline.horizon.queue_size)")
(options, args) = parser.parse_args()
try:
alerts_enabled = settings.ENABLE_ALERTS
alerts = settings.ALERTS
except:
print "Exception: Check your settings file for the existence of ENABLE_ALERTS and ALERTS"
sys.exit()
print 'Verifying alerts for metric: "' + options.metric + '"'
# Send alerts
if alerts_enabled:
print 'Alerts Enabled'
for alert in alerts:
print 'Checking metric name contains: "' + alert[0] + '" to send via ' + alert[1]
if alert[0] in options.metric:
print '...Trigger alert via ' + alert[1]
if options.trigger:
trigger_alert(alert, options.metric)
else:
print 'Alerts are Disabled'
|
|
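A hypothetical settings.py fragment consistent with how the script above indexes each alert (alert[0] is the metric substring to match, alert[1] the alerter name); the third field and the concrete values are assumptions, so the real shape should be taken from your own Skyline settings.
ENABLE_ALERTS = True
ALERTS = (
    ('skyline', 'smtp', 1800),
    ('horizon.queue_size', 'pagerduty', 3600),
)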
f6b28b3f256388057ef54f72bbc39d89d61ad5a0
|
ironic/tests/unit/api/v1/test_expose.py
|
ironic/tests/unit/api/v1/test_expose.py
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import imp
import inspect
import os
import sys
import mock
from oslo_utils import uuidutils
from ironic.tests import base as test_base
class TestExposedAPIMethodsCheckPolicy(test_base.TestCase):
"""Ensure that all exposed HTTP endpoints call authorize."""
def setUp(self):
super(TestExposedAPIMethodsCheckPolicy, self).setUp()
self.original_method = sys.modules['ironic.api.expose'].expose
self.exposed_methods = set()
def expose_and_track(*args, **kwargs):
def wrap(f):
if f not in self.exposed_methods:
self.exposed_methods.add(f)
e = self.original_method(*args, **kwargs)
return e(f)
return wrap
p = mock.patch('ironic.api.expose.expose', expose_and_track)
p.start()
self.addCleanup(p.stop)
def _test(self, module):
module_path = os.path.abspath(sys.modules[module].__file__)
# NOTE(vdrok): coverage runs on compiled .pyc files, which breaks
# load_source. Strip c and o letters from the end of the module path,
# just in case someone tries to use .pyo or .pyc for whatever reason
imp.load_source(uuidutils.generate_uuid(), module_path.rstrip('co'))
for func in self.exposed_methods:
src = inspect.getsource(func)
self.assertTrue('policy.authorize' in src,
'policy.authorize call not found in exposed '
'method %s' % func)
self.assertTrue('context.to_policy_values' in src,
'context.to_policy_values call not found in '
'exposed method %s' % func)
def test_chasis_api_policy(self):
self._test('ironic.api.controllers.v1.chassis')
def test_driver_api_policy(self):
self._test('ironic.api.controllers.v1.driver')
def test_node_api_policy(self):
self._test('ironic.api.controllers.v1.node')
def test_port_api_policy(self):
self._test('ironic.api.controllers.v1.port')
def test_portgroup_api_policy(self):
self._test('ironic.api.controllers.v1.portgroup')
def test_ramdisk_api_policy(self):
self._test('ironic.api.controllers.v1.ramdisk')
|
Add test to ensure policy is always authorized
|
Add test to ensure policy is always authorized
This adds new unit tests to ensure that all API methods decorated
with @expose.expose are also calling policy.authorize() and
context.to_policy_values() within the method body.
This is done by patching sys.modules to replace the @expose decorator
with a wrapper that records a reference to the exposed method. Then,
all API modules' (chassis, node, port, etc.) sources are reloaded
within the unit test, causing the test class's own decorator to be
called, and references to every @expose'd method to be recorded in the
test class instance. The test case then iterates over that list of
function references, and inspects the python source of each one to
determine if it contains a call to the policy.authorize and
context.to_policy_values methods. An error is raised if it does not.
This approach is rather brutish, but without being able to invoke
@authorize as a decorator (*) I have not found a better way than
inspecting the source.
(*) we can't invoke @authorize as a decorator because decorators are
evaluated at module compile time, but @authorize requires run-time
context available only from the Pecan run-time magic accessor
pecan.request.context.
Co-Authored-By: Vladyslav Drok <488302dd246f7368b4b820d34501759efce48f70@mirantis.com>
Change-Id: Iebfd9183dcbd49dbef78398c07327a347a41976e
|
Python
|
apache-2.0
|
openstack/ironic,pshchelo/ironic,NaohiroTamura/ironic,SauloAislan/ironic,NaohiroTamura/ironic,pshchelo/ironic,openstack/ironic,SauloAislan/ironic
|
Add test to ensure policy is always authorized
This adds new unit tests to ensure that all API methods decorated
with @expose.expose are also calling policy.authorize() and
context.to_policy_values() within the method body.
This is done by patching sys.modules to replace the @expose decorator
with a wrapper that records a reference to the exposed method. Then,
all API modules' (chassis, node, port, etc.) sources are reloaded
within the unit test, causing the test class's own decorator to be
called, and references to every @expose'd method to be recorded in the
test class instance. The test case then iterates over that list of
function references, and inspects the python source of each one to
determine if it contains a call to the policy.authorize and
context.to_policy_values methods. An error is raised if it does not.
This approach is rather brutish, but without being able to invoke
@authorize as a decorator (*) I have not found a better way than
inspecting the source.
(*) we can't invoke @authorize as a decorator because decorators are
evaluated at module compile time, but @authorize requires run-time
context available only from the Pecan run-time magic accessor
pecan.request.context.
Co-Authored-By: Vladyslav Drok <488302dd246f7368b4b820d34501759efce48f70@mirantis.com>
Change-Id: Iebfd9183dcbd49dbef78398c07327a347a41976e
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import imp
import inspect
import os
import sys
import mock
from oslo_utils import uuidutils
from ironic.tests import base as test_base
class TestExposedAPIMethodsCheckPolicy(test_base.TestCase):
"""Ensure that all exposed HTTP endpoints call authorize."""
def setUp(self):
super(TestExposedAPIMethodsCheckPolicy, self).setUp()
self.original_method = sys.modules['ironic.api.expose'].expose
self.exposed_methods = set()
def expose_and_track(*args, **kwargs):
def wrap(f):
if f not in self.exposed_methods:
self.exposed_methods.add(f)
e = self.original_method(*args, **kwargs)
return e(f)
return wrap
p = mock.patch('ironic.api.expose.expose', expose_and_track)
p.start()
self.addCleanup(p.stop)
def _test(self, module):
module_path = os.path.abspath(sys.modules[module].__file__)
# NOTE(vdrok): coverage runs on compiled .pyc files, which breaks
# load_source. Strip c and o letters from the end of the module path,
# just in case someone tries to use .pyo or .pyc for whatever reason
imp.load_source(uuidutils.generate_uuid(), module_path.rstrip('co'))
for func in self.exposed_methods:
src = inspect.getsource(func)
self.assertTrue('policy.authorize' in src,
'policy.authorize call not found in exposed '
'method %s' % func)
self.assertTrue('context.to_policy_values' in src,
'context.to_policy_values call not found in '
'exposed method %s' % func)
def test_chasis_api_policy(self):
self._test('ironic.api.controllers.v1.chassis')
def test_driver_api_policy(self):
self._test('ironic.api.controllers.v1.driver')
def test_node_api_policy(self):
self._test('ironic.api.controllers.v1.node')
def test_port_api_policy(self):
self._test('ironic.api.controllers.v1.port')
def test_portgroup_api_policy(self):
self._test('ironic.api.controllers.v1.portgroup')
def test_ramdisk_api_policy(self):
self._test('ironic.api.controllers.v1.ramdisk')
|
<commit_before><commit_msg>Add test to ensure policy is always authorized
This adds new unit tests to ensure that all API methods decorated
with @expose.expose are also calling policy.authorize() and
context.to_policy_values() within the method body.
This is done by patching sys.modules to replace the @expose decorator
with a wrapper that records a reference to the exposed method. Then,
all API modules' (chassis, node, port, etc.) sources are reloaded
within the unit test, causing the test class's own decorator to be
called, and references to every @expose'd method to be recorded in the
test class instance. The test case then iterates over that list of
function references, and inspects the python source of each one to
determine if it contains a call to the policy.authorize and
context.to_policy_values methods. An error is raised if it does not.
This approach is rather brutish, but without being able to invoke
@authorize as a decorator (*) I have not found a better way than
inspecting the source.
(*) we can't invoke @authorize as a decorator because decorators are
evaluated at module compile time, but @authorize requires run-time
context available only from the Pecan run-time magic accessor
pecan.request.context.
Co-Authored-By: Vladyslav Drok <488302dd246f7368b4b820d34501759efce48f70@mirantis.com>
Change-Id: Iebfd9183dcbd49dbef78398c07327a347a41976e<commit_after>
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import imp
import inspect
import os
import sys
import mock
from oslo_utils import uuidutils
from ironic.tests import base as test_base
class TestExposedAPIMethodsCheckPolicy(test_base.TestCase):
"""Ensure that all exposed HTTP endpoints call authorize."""
def setUp(self):
super(TestExposedAPIMethodsCheckPolicy, self).setUp()
self.original_method = sys.modules['ironic.api.expose'].expose
self.exposed_methods = set()
def expose_and_track(*args, **kwargs):
def wrap(f):
if f not in self.exposed_methods:
self.exposed_methods.add(f)
e = self.original_method(*args, **kwargs)
return e(f)
return wrap
p = mock.patch('ironic.api.expose.expose', expose_and_track)
p.start()
self.addCleanup(p.stop)
def _test(self, module):
module_path = os.path.abspath(sys.modules[module].__file__)
# NOTE(vdrok): coverage runs on compiled .pyc files, which breaks
# load_source. Strip c and o letters from the end of the module path,
# just in case someone tries to use .pyo or .pyc for whatever reason
imp.load_source(uuidutils.generate_uuid(), module_path.rstrip('co'))
for func in self.exposed_methods:
src = inspect.getsource(func)
self.assertTrue('policy.authorize' in src,
'policy.authorize call not found in exposed '
'method %s' % func)
self.assertTrue('context.to_policy_values' in src,
'context.to_policy_values call not found in '
'exposed method %s' % func)
def test_chasis_api_policy(self):
self._test('ironic.api.controllers.v1.chassis')
def test_driver_api_policy(self):
self._test('ironic.api.controllers.v1.driver')
def test_node_api_policy(self):
self._test('ironic.api.controllers.v1.node')
def test_port_api_policy(self):
self._test('ironic.api.controllers.v1.port')
def test_portgroup_api_policy(self):
self._test('ironic.api.controllers.v1.portgroup')
def test_ramdisk_api_policy(self):
self._test('ironic.api.controllers.v1.ramdisk')
|
Add test to ensure policy is always authorized
This adds new unit tests to ensure that all API methods decorated
with @expose.expose are also calling policy.authorize() and
context.to_policy_values() within the method body.
This is done by patching sys.modules to replace the @expose decorator
with a wrapper that records a reference to the exposed method. Then,
all API modules' (chassis, node, port, etc.) sources are reloaded
within the unit test, causing the test class's own decorator to be
called, and references to every @expose'd method to be recorded in the
test class instance. The test case then iterates over that list of
function references, and inspects the python source of each one to
determine if it contains a call to the policy.authorize and
context.to_policy_values methods. An error is raised if it does not.
This approach is rather brutish, but without being able to invoke
@authorize as a decorator (*) I have not found a better way than
inspecting the source.
(*) we can't invoke @authorize as a decorator because decorators are
evaluated at module compile time, but @authorize requires run-time
context available only from the Pecan run-time magic accessor
pecan.request.context.
Co-Authored-By: Vladyslav Drok <488302dd246f7368b4b820d34501759efce48f70@mirantis.com>
Change-Id: Iebfd9183dcbd49dbef78398c07327a347a41976e# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import imp
import inspect
import os
import sys
import mock
from oslo_utils import uuidutils
from ironic.tests import base as test_base
class TestExposedAPIMethodsCheckPolicy(test_base.TestCase):
"""Ensure that all exposed HTTP endpoints call authorize."""
def setUp(self):
super(TestExposedAPIMethodsCheckPolicy, self).setUp()
self.original_method = sys.modules['ironic.api.expose'].expose
self.exposed_methods = set()
def expose_and_track(*args, **kwargs):
def wrap(f):
if f not in self.exposed_methods:
self.exposed_methods.add(f)
e = self.original_method(*args, **kwargs)
return e(f)
return wrap
p = mock.patch('ironic.api.expose.expose', expose_and_track)
p.start()
self.addCleanup(p.stop)
def _test(self, module):
module_path = os.path.abspath(sys.modules[module].__file__)
# NOTE(vdrok): coverage runs on compiled .pyc files, which breaks
# load_source. Strip c and o letters from the end of the module path,
# just in case someone tries to use .pyo or .pyc for whatever reason
imp.load_source(uuidutils.generate_uuid(), module_path.rstrip('co'))
for func in self.exposed_methods:
src = inspect.getsource(func)
self.assertTrue('policy.authorize' in src,
'policy.authorize call not found in exposed '
'method %s' % func)
self.assertTrue('context.to_policy_values' in src,
'context.to_policy_values call not found in '
'exposed method %s' % func)
def test_chasis_api_policy(self):
self._test('ironic.api.controllers.v1.chassis')
def test_driver_api_policy(self):
self._test('ironic.api.controllers.v1.driver')
def test_node_api_policy(self):
self._test('ironic.api.controllers.v1.node')
def test_port_api_policy(self):
self._test('ironic.api.controllers.v1.port')
def test_portgroup_api_policy(self):
self._test('ironic.api.controllers.v1.portgroup')
def test_ramdisk_api_policy(self):
self._test('ironic.api.controllers.v1.ramdisk')
|
<commit_before><commit_msg>Add test to ensure policy is always authorized
This adds new unit tests to ensure that all API methods decorated
with @expose.expose are also calling policy.authorize() and
context.to_policy_values() within the method body.
This is done by patching sys.modules to replace the @expose decorator
with a wrapper that records a reference to the exposed method. Then,
all API modules' (chassis, node, port, etc.) sources are reloaded
within the unit test, causing the test class's own decorator to be
called, and references to every @expose'd method to be recorded in the
test class instance. The test case then iterates over that list of
function references, and inspects the python source of each one to
determine if it contains a call to the policy.authorize and
context.to_policy_values methods. An error is raised if it does not.
This approach is rather brutish, but without being able to invoke
@authorize as a decorator (*) I have not found a better way than
inspecting the source.
(*) we can't invoke @authorize as a decorator because decorators are
evaluated at module compile time, but @authorize requires run-time
context available only from the Pecan run-time magic accessor
pecan.request.context.
Co-Authored-By: Vladyslav Drok <488302dd246f7368b4b820d34501759efce48f70@mirantis.com>
Change-Id: Iebfd9183dcbd49dbef78398c07327a347a41976e<commit_after># All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import imp
import inspect
import os
import sys
import mock
from oslo_utils import uuidutils
from ironic.tests import base as test_base
class TestExposedAPIMethodsCheckPolicy(test_base.TestCase):
"""Ensure that all exposed HTTP endpoints call authorize."""
def setUp(self):
super(TestExposedAPIMethodsCheckPolicy, self).setUp()
self.original_method = sys.modules['ironic.api.expose'].expose
self.exposed_methods = set()
def expose_and_track(*args, **kwargs):
def wrap(f):
if f not in self.exposed_methods:
self.exposed_methods.add(f)
e = self.original_method(*args, **kwargs)
return e(f)
return wrap
p = mock.patch('ironic.api.expose.expose', expose_and_track)
p.start()
self.addCleanup(p.stop)
def _test(self, module):
module_path = os.path.abspath(sys.modules[module].__file__)
# NOTE(vdrok): coverage runs on compiled .pyc files, which breaks
# load_source. Strip c and o letters from the end of the module path,
# just in case someone tries to use .pyo or .pyc for whatever reason
imp.load_source(uuidutils.generate_uuid(), module_path.rstrip('co'))
for func in self.exposed_methods:
src = inspect.getsource(func)
self.assertTrue('policy.authorize' in src,
'policy.authorize call not found in exposed '
'method %s' % func)
self.assertTrue('context.to_policy_values' in src,
'context.to_policy_values call not found in '
'exposed method %s' % func)
def test_chasis_api_policy(self):
self._test('ironic.api.controllers.v1.chassis')
def test_driver_api_policy(self):
self._test('ironic.api.controllers.v1.driver')
def test_node_api_policy(self):
self._test('ironic.api.controllers.v1.node')
def test_port_api_policy(self):
self._test('ironic.api.controllers.v1.port')
def test_portgroup_api_policy(self):
self._test('ironic.api.controllers.v1.portgroup')
def test_ramdisk_api_policy(self):
self._test('ironic.api.controllers.v1.ramdisk')
|
|
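A minimal self-contained sketch (all names invented) of the pattern the commit message above describes: wrap a decorator so it records every function it exposes, then inspect each recorded function's source for a required call.
import inspect
exposed_methods = set()
def tracking_expose(*args, **kwargs):
    def wrap(func):
        exposed_methods.add(func)  # record the exposed function; the real test also delegates to the original decorator
        return func
    return wrap
@tracking_expose()
def list_nodes(context):
    context.to_policy_values()  # the source inspection below looks for this call
    return []
for func in exposed_methods:
    assert 'to_policy_values' in inspect.getsource(func)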
127087eef72df5f052638bf8a770728800c15fb1
|
lib/bridgedb/test/test_Tests.py
|
lib/bridgedb/test/test_Tests.py
|
# -*- coding: utf-8 -*-
#
# This file is part of BridgeDB, a Tor bridge distribution system.
#
# :authors: Isis Lovecruft 0xA3ADB67A2CDB8B35 <isis@torproject.org>
# please also see AUTHORS file
# :copyright: (c) 2013, Isis Lovecruft
# (c) 2007-2013, The Tor Project, Inc.
# (c) 2007-2013, all entities within the AUTHORS file
# :license: 3-Clause BSD, see LICENSE for licensing information
"""Class wrappers to adapt BridgeDB old unittests in :mod:`bridgedb.Tests` to
be compatible with the newer :mod:`twisted.trial` unittests in this directory.
"""
from __future__ import print_function
import logging
import warnings
from twisted.trial import unittest
from bridgedb import Tests
from bridgedb.Tests import EmailBridgeDistTests
from bridgedb.Tests import IPBridgeDistTests
from bridgedb.Tests import DictStorageTests
from bridgedb.Tests import SQLStorageTests
from bridgedb.Tests import ParseDescFileTests
from bridgedb.Tests import BridgeStabilityTests
logging.warnings.filterwarnings('ignore', module="Tests")
pyunit = __import__('unittest')
class TrialAdaptedOldUnittests(unittest.TestCase):
"""A wrapper around :mod:`bridgedb.Tests` to produce :mod:`~twisted.trial`
compatible output.
"""
def test_allOldUnittests(self):
testSuite = Tests.testSuite()
testResult = pyunit.TestResult()
testSuite.run(testResult, debug=True)
return unittest.PyUnitResultAdapter(testResult)
|
Add trial runner/report class adapter for old unittests.
|
Add trial runner/report class adapter for old unittests.
This adds a new unittest file, `lib/bridgedb/test/test_Tests.py`, which
runs the old unittests in `lib/bridgedb/Tests.py` with a unittest report
adapter that is compatible with twisted.trial. It uses the underlying
SynchronousTestCase wrapper for the stdlib `unittest.TestCase` (which
the tests in `lib/bridgedb/Tests.py` use).
* ADDS a trial runner for BridgeDB's old unittests.
* FIXES #9873
|
Python
|
bsd-3-clause
|
wfn/bridgedb,mmaker/bridgedb,pagea/bridgedb,pagea/bridgedb,wfn/bridgedb,mmaker/bridgedb
|
Add trial runner/report class adapter for old unittests.
This adds a new unittest file, `lib/bridgedb/test/test_Tests.py`, which
runs the old unittests in `lib/bridgedb/Tests.py` with a unittest report
adapter that is compatible with twisted.trial. It uses the underlying
SynchronousTestCase wrapper for the stdlib `unittest.TestCase` (which
the tests in `lib/bridgedb/Tests.py` use).
* ADDS a trial runner for BridgeDB's old unittests.
* FIXES #9873
|
# -*- coding: utf-8 -*-
#
# This file is part of BridgeDB, a Tor bridge distribution system.
#
# :authors: Isis Lovecruft 0xA3ADB67A2CDB8B35 <isis@torproject.org>
# please also see AUTHORS file
# :copyright: (c) 2013, Isis Lovecruft
# (c) 2007-2013, The Tor Project, Inc.
# (c) 2007-2013, all entities within the AUTHORS file
# :license: 3-Clause BSD, see LICENSE for licensing information
"""Class wrappers to adapt BridgeDB old unittests in :mod:`bridgedb.Tests` to
be compatible with the newer :mod:`twisted.trial` unittests in this directory.
"""
from __future__ import print_function
import logging
import warnings
from twisted.trial import unittest
from bridgedb import Tests
from bridgedb.Tests import EmailBridgeDistTests
from bridgedb.Tests import IPBridgeDistTests
from bridgedb.Tests import DictStorageTests
from bridgedb.Tests import SQLStorageTests
from bridgedb.Tests import ParseDescFileTests
from bridgedb.Tests import BridgeStabilityTests
logging.warnings.filterwarnings('ignore', module="Tests")
pyunit = __import__('unittest')
class TrialAdaptedOldUnittests(unittest.TestCase):
"""A wrapper around :mod:`bridgedb.Tests` to produce :mod:`~twisted.trial`
compatible output.
"""
def test_allOldUnittests(self):
testSuite = Tests.testSuite()
testResult = pyunit.TestResult()
testSuite.run(testResult, debug=True)
return unittest.PyUnitResultAdapter(testResult)
|
<commit_before><commit_msg>Add trial runner/report class adapter for old unittests.
This adds a new unittest file, `lib/bridgedb/test/test_Tests.py`, which
runs the old unittests in `lib/bridgedb/Tests.py` with a unittest report
adapter that is compatible with twisted.trial. It uses the underlying
SynchronousTestCase wrapper for the stdlib `unittest.TestCase` (which
the tests in `lib/bridgedb/Tests.py` use).
* ADDS a trial runner for BridgeDB's old unittests.
* FIXES #9873<commit_after>
|
# -*- coding: utf-8 -*-
#
# This file is part of BridgeDB, a Tor bridge distribution system.
#
# :authors: Isis Lovecruft 0xA3ADB67A2CDB8B35 <isis@torproject.org>
# please also see AUTHORS file
# :copyright: (c) 2013, Isis Lovecruft
# (c) 2007-2013, The Tor Project, Inc.
# (c) 2007-2013, all entities within the AUTHORS file
# :license: 3-Clause BSD, see LICENSE for licensing information
"""Class wrappers to adapt BridgeDB old unittests in :mod:`bridgedb.Tests` to
be compatible with the newer :mod:`twisted.trial` unittests in this directory.
"""
from __future__ import print_function
import logging
import warnings
from twisted.trial import unittest
from bridgedb import Tests
from bridgedb.Tests import EmailBridgeDistTests
from bridgedb.Tests import IPBridgeDistTests
from bridgedb.Tests import DictStorageTests
from bridgedb.Tests import SQLStorageTests
from bridgedb.Tests import ParseDescFileTests
from bridgedb.Tests import BridgeStabilityTests
logging.warnings.filterwarnings('ignore', module="Tests")
pyunit = __import__('unittest')
class TrialAdaptedOldUnittests(unittest.TestCase):
"""A wrapper around :mod:`bridgedb.Tests` to produce :mod:`~twisted.trial`
compatible output.
"""
def test_allOldUnittests(self):
testSuite = Tests.testSuite()
testResult = pyunit.TestResult()
testSuite.run(testResult, debug=True)
return unittest.PyUnitResultAdapter(testResult)
|
Add trial runner/report class adapter for old unittests.
This adds a new unittest file, `lib/bridgedb/test/test_Tests.py`, which
runs the old unittests in `lib/bridgedb/Tests.py` with a unittest report
adapter that is compatible with twisted.trial. It uses the underlying
SynchronousTestCase wrapper for the stdlib `unittest.TestCase` (which
the tests in `lib/bridgedb/Tests.py` use).
* ADDS a trial runner for BridgeDB's old unittests.
* FIXES #9873# -*- coding: utf-8 -*-
#
# This file is part of BridgeDB, a Tor bridge distribution system.
#
# :authors: Isis Lovecruft 0xA3ADB67A2CDB8B35 <isis@torproject.org>
# please also see AUTHORS file
# :copyright: (c) 2013, Isis Lovecruft
# (c) 2007-2013, The Tor Project, Inc.
# (c) 2007-2013, all entities within the AUTHORS file
# :license: 3-Clause BSD, see LICENSE for licensing information
"""Class wrappers to adapt BridgeDB old unittests in :mod:`bridgedb.Tests` to
be compatible with the newer :mod:`twisted.trial` unittests in this directory.
"""
from __future__ import print_function
import logging
import warnings
from twisted.trial import unittest
from bridgedb import Tests
from bridgedb.Tests import EmailBridgeDistTests
from bridgedb.Tests import IPBridgeDistTests
from bridgedb.Tests import DictStorageTests
from bridgedb.Tests import SQLStorageTests
from bridgedb.Tests import ParseDescFileTests
from bridgedb.Tests import BridgeStabilityTests
logging.warnings.filterwarnings('ignore', module="Tests")
pyunit = __import__('unittest')
class TrialAdaptedOldUnittests(unittest.TestCase):
"""A wrapper around :mod:`bridgedb.Tests` to produce :mod:`~twisted.trial`
compatible output.
"""
def test_allOldUnittests(self):
testSuite = Tests.testSuite()
testResult = pyunit.TestResult()
testSuite.run(testResult, debug=True)
return unittest.PyUnitResultAdapter(testResult)
|
<commit_before><commit_msg>Add trial runner/report class adapter for old unittests.
This adds a new unittest file, `lib/bridgedb/test/test_Tests.py`, which
runs the old unittests in `lib/bridgedb/Tests.py` with a unittest report
adapter that is compatible with twisted.trial. It uses the underlying
SynchronousTestCase wrapper for the stdlib `unittest.TestCase` (which
the tests in `lib/bridgedb/Tests.py` use).
* ADDS a trial runner for BridgeDB's old unittests.
* FIXES #9873<commit_after># -*- coding: utf-8 -*-
#
# This file is part of BridgeDB, a Tor bridge distribution system.
#
# :authors: Isis Lovecruft 0xA3ADB67A2CDB8B35 <isis@torproject.org>
# please also see AUTHORS file
# :copyright: (c) 2013, Isis Lovecruft
# (c) 2007-2013, The Tor Project, Inc.
# (c) 2007-2013, all entities within the AUTHORS file
# :license: 3-Clause BSD, see LICENSE for licensing information
"""Class wrappers to adapt BridgeDB old unittests in :mod:`bridgedb.Tests` to
be compatible with the newer :mod:`twisted.trial` unittests in this directory.
"""
from __future__ import print_function
import logging
import warnings
from twisted.trial import unittest
from bridgedb import Tests
from bridgedb.Tests import EmailBridgeDistTests
from bridgedb.Tests import IPBridgeDistTests
from bridgedb.Tests import DictStorageTests
from bridgedb.Tests import SQLStorageTests
from bridgedb.Tests import ParseDescFileTests
from bridgedb.Tests import BridgeStabilityTests
logging.warnings.filterwarnings('ignore', module="Tests")
pyunit = __import__('unittest')
class TrialAdaptedOldUnittests(unittest.TestCase):
"""A wrapper around :mod:`bridgedb.Tests` to produce :mod:`~twisted.trial`
compatible output.
"""
def test_allOldUnittests(self):
testSuite = Tests.testSuite()
testResult = pyunit.TestResult()
testSuite.run(testResult, debug=True)
return unittest.PyUnitResultAdapter(testResult)
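
A small defensive variant worth considering (a sketch only, not part of the BridgeDB change): test_allOldUnittests collects the old suite's results into a plain unittest.TestResult and returns an adapter, so it is worth verifying that failures recorded there actually mark the trial test as failed. One way to make that explicit, reusing only names already defined in the module above:

    def test_allOldUnittests(self):
        testSuite = Tests.testSuite()
        testResult = pyunit.TestResult()
        testSuite.run(testResult)
        # Explicitly surface any failures or errors from the wrapped
        # old-style suite so trial reports this wrapper test as failed.
        if not testResult.wasSuccessful():
            self.fail("old unittests failed: %d failures, %d errors"
                      % (len(testResult.failures), len(testResult.errors)))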
|
|
546df2580036206eaa9a9bbc3d45c8004ad0825e
|
run.py
|
run.py
|
#!/usr/bin/env python
import sys
import os
from app.main import app
if __name__ == '__main__':
if len(sys.argv) > 1 and sys.argv[1] == 'debug':
app.debug = True
port = int(os.environ.get('PORT', 5000))
app.run(host='0.0.0.0', port=port, threaded=True)
|
Add separate script for starting app
|
Add separate script for starting app
|
Python
|
mit
|
mpolden/jarvis2,mpolden/jarvis2,Foxboron/Frank,martinp/jarvis2,martinp/jarvis2,mpolden/jarvis2,Foxboron/Frank,Foxboron/Frank,martinp/jarvis2
|
Add separate script for starting app
|
#!/usr/bin/env python
import sys
import os
from app.main import app
if __name__ == '__main__':
if len(sys.argv) > 1 and sys.argv[1] == 'debug':
app.debug = True
port = int(os.environ.get('PORT', 5000))
app.run(host='0.0.0.0', port=port, threaded=True)
|
<commit_before><commit_msg>Add separate script for starting app<commit_after>
|
#!/usr/bin/env python
import sys
import os
from app.main import app
if __name__ == '__main__':
if len(sys.argv) > 1 and sys.argv[1] == 'debug':
app.debug = True
port = int(os.environ.get('PORT', 5000))
app.run(host='0.0.0.0', port=port, threaded=True)
|
Add separate script for starting app#!/usr/bin/env python
import sys
import os
from app.main import app
if __name__ == '__main__':
if len(sys.argv) > 1 and sys.argv[1] == 'debug':
app.debug = True
port = int(os.environ.get('PORT', 5000))
app.run(host='0.0.0.0', port=port, threaded=True)
|
<commit_before><commit_msg>Add separate script for starting app<commit_after>#!/usr/bin/env python
import sys
import os
from app.main import app
if __name__ == '__main__':
if len(sys.argv) > 1 and sys.argv[1] == 'debug':
app.debug = True
port = int(os.environ.get('PORT', 5000))
app.run(host='0.0.0.0', port=port, threaded=True)
|
|
63a70d847073accf679c215e3bf3296c1b107dca
|
cmsplugin_filer_utils/__init__.py
|
cmsplugin_filer_utils/__init__.py
|
# -*- coding: utf-8 -*-
from django.db import models
class FilerPluginManager(models.Manager):
def __init__(self, select_related=None):
self._select_related = select_related
super(FilerPluginManager, self).__init__()
def get_query_set(self):
qs = super(FilerPluginManager, self).get_query_set()
if self._select_related:
qs = qs.select_related(*self._select_related)
return qs
|
# -*- coding: utf-8 -*-
from django.db import models
class FilerPluginManager(models.Manager):
def __init__(self, select_related=None):
self._select_related = select_related
super(FilerPluginManager, self).__init__()
def get_query_set(self):
qs = super(FilerPluginManager, self).get_query_set()
if self._select_related:
qs = qs.prefetch_related(*self._select_related)
return qs
|
Use prefetch related to trigger polymorphic downcast.
|
Use prefetch related to trigger polymorphic downcast.
|
Python
|
bsd-3-clause
|
divio/cmsplugin-filer,creimers/cmsplugin-filer,NB-Dev/cmsplugin-filer,brightinteractive/cmsplugin-filer,yvess/cmsplugin-filer,nephila/cmsplugin-filer,stefanfoulis/cmsplugin-filer,centralniak/cmsplugin-filer,skirsdeda/cmsplugin-filer,brightinteractive/cmsplugin-filer,jrutila/cmsplugin-filer,alsoicode/cmsplugin-filer,jrutila/cmsplugin-filer,jschneier/cmsplugin-filer,NB-Dev/cmsplugin-filer,alsoicode/cmsplugin-filer,ImaginaryLandscape/cmsplugin-filer,wlanslovenija/cmsplugin-filer,yvess/cmsplugin-filer,yvess/cmsplugin-filer,divio/cmsplugin-filer,jschneier/cmsplugin-filer,brightinteractive/cmsplugin-filer,yvess/cmsplugin-filer,alsoicode/cmsplugin-filer,skirsdeda/cmsplugin-filer,divio/cmsplugin-filer,nephila/cmsplugin-filer,wlanslovenija/cmsplugin-filer,creimers/cmsplugin-filer,nephila/cmsplugin-filer,wlanslovenija/cmsplugin-filer,divio/cmsplugin-filer,sephii/cmsplugin-filer,ImaginaryLandscape/cmsplugin-filer,skirsdeda/cmsplugin-filer,jschneier/cmsplugin-filer,douwevandermeij/cmsplugin-filer,creimers/cmsplugin-filer,stefanfoulis/cmsplugin-filer,stefanfoulis/cmsplugin-filer,centralniak/cmsplugin-filer,sephii/cmsplugin-filer,sephii/cmsplugin-filer,douwevandermeij/cmsplugin-filer,stefanfoulis/cmsplugin-filer,douwevandermeij/cmsplugin-filer
|
# -*- coding: utf-8 -*-
from django.db import models
class FilerPluginManager(models.Manager):
def __init__(self, select_related=None):
self._select_related = select_related
super(FilerPluginManager, self).__init__()
def get_query_set(self):
qs = super(FilerPluginManager, self).get_query_set()
if self._select_related:
qs = qs.select_related(*self._select_related)
return qs
Use prefetch related to trigger polymorphic downcast.
|
# -*- coding: utf-8 -*-
from django.db import models
class FilerPluginManager(models.Manager):
def __init__(self, select_related=None):
self._select_related = select_related
super(FilerPluginManager, self).__init__()
def get_query_set(self):
qs = super(FilerPluginManager, self).get_query_set()
if self._select_related:
qs = qs.prefetch_related(*self._select_related)
return qs
|
<commit_before># -*- coding: utf-8 -*-
from django.db import models
class FilerPluginManager(models.Manager):
def __init__(self, select_related=None):
self._select_related = select_related
super(FilerPluginManager, self).__init__()
def get_query_set(self):
qs = super(FilerPluginManager, self).get_query_set()
if self._select_related:
qs = qs.select_related(*self._select_related)
return qs
<commit_msg>Use prefetch related to trigger polymorphic downcast.<commit_after>
|
# -*- coding: utf-8 -*-
from django.db import models
class FilerPluginManager(models.Manager):
def __init__(self, select_related=None):
self._select_related = select_related
super(FilerPluginManager, self).__init__()
def get_query_set(self):
qs = super(FilerPluginManager, self).get_query_set()
if self._select_related:
qs = qs.prefetch_related(*self._select_related)
return qs
|
# -*- coding: utf-8 -*-
from django.db import models
class FilerPluginManager(models.Manager):
def __init__(self, select_related=None):
self._select_related = select_related
super(FilerPluginManager, self).__init__()
def get_query_set(self):
qs = super(FilerPluginManager, self).get_query_set()
if self._select_related:
qs = qs.select_related(*self._select_related)
return qs
Use prefetch related to trigger polymorphic downcast.# -*- coding: utf-8 -*-
from django.db import models
class FilerPluginManager(models.Manager):
def __init__(self, select_related=None):
self._select_related = select_related
super(FilerPluginManager, self).__init__()
def get_query_set(self):
qs = super(FilerPluginManager, self).get_query_set()
if self._select_related:
qs = qs.prefetch_related(*self._select_related)
return qs
|
<commit_before># -*- coding: utf-8 -*-
from django.db import models
class FilerPluginManager(models.Manager):
def __init__(self, select_related=None):
self._select_related = select_related
super(FilerPluginManager, self).__init__()
def get_query_set(self):
qs = super(FilerPluginManager, self).get_query_set()
if self._select_related:
qs = qs.select_related(*self._select_related)
return qs
<commit_msg>Use prefetch related to trigger polymorphic downcast.<commit_after># -*- coding: utf-8 -*-
from django.db import models
class FilerPluginManager(models.Manager):
def __init__(self, select_related=None):
self._select_related = select_related
super(FilerPluginManager, self).__init__()
def get_query_set(self):
qs = super(FilerPluginManager, self).get_query_set()
if self._select_related:
qs = qs.prefetch_related(*self._select_related)
return qs
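
A usage sketch to go with the change above (the plugin model below is invented for illustration; only FilerPluginManager itself comes from this package): the commit's premise is that select_related() joins and hands back related objects typed as the base class, whereas prefetch_related() re-queries the related model through its own manager, which is what lets django-polymorphic return the concrete subclass. Attaching the manager to a plugin model looks roughly like this, assuming a configured Django project with django-filer installed:

from django.db import models
from cmsplugin_filer_utils import FilerPluginManager

class FilerImagePlugin(models.Model):
    # Hypothetical plugin model; cmsplugin-filer's real plugins live in
    # their own apps, and the FK target name here is illustrative.
    image = models.ForeignKey('filer.Image')

    # Related objects listed here are now fetched via prefetch_related(),
    # i.e. through filer's own (polymorphic) manager.
    objects = FilerPluginManager(select_related=('image',))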
|
628907dc438f80f9f587c612fa756f2ffbb0eaaf
|
src/account.py
|
src/account.py
|
#!/usr/bin/env python3
import sqlite3
# Connexion database
database = "../data/storage.sq3"
connexion = sqlite3.connect(database)
cursor = connexion.cursor()
for row in cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='users' AND name='password';"):
print(row)
if ("users" not in row):
cursor.execute("CREATE TABLE users(id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE, nickname VARCHAR(55), fullname VARCHAR(128), password VARCHAR(255), email VARCHAR(128))")
elif ("password" not in row):
cursor.execute("CREATE TABLE password(id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE, host VARCHAR(255), user_id INTEGER, password VARCHAR(255), dt datetime DEFAULT CURRENT_TIMESTAMP, FOREIGN KEY(user_id) REFERENCES users(id));")
connexion.commit()
connexion.close()
|
Connect to database and create if tables not exists
|
Connect to database and create if tables not exists
|
Python
|
mit
|
cboin/becon
|
Connect to database and create if tables not exists
|
#!/usr/bin/env python3
import sqlite3
# Connexion database
database = "../data/storage.sq3"
connexion = sqlite3.connect(database)
cursor = connexion.cursor()
for row in cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='users' AND name='password';"):
print(row)
if ("users" not in row):
cursor.execute("CREATE TABLE users(id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE, nickname VARCHAR(55), fullname VARCHAR(128), password VARCHAR(255), email VARCHAR(128))")
elif ("password" not in row):
cursor.execute("CREATE TABLE password(id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE, host VARCHAR(255), user_id INTEGER, password VARCHAR(255), dt datetime DEFAULT CURRENT_TIMESTAMP, FOREIGN KEY(user_id) REFERENCES users(id));")
connexion.commit()
connexion.close()
|
<commit_before><commit_msg>Connect to database and create if tables not exists<commit_after>
|
#!/usr/bin/env python3
import sqlite3
# Connexion database
database = "../data/storage.sq3"
connexion = sqlite3.connect(database)
cursor = connexion.cursor()
for row in cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='users' AND name='password';"):
print(row)
if ("users" not in row):
cursor.execute("CREATE TABLE users(id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE, nickname VARCHAR(55), fullname VARCHAR(128), password VARCHAR(255), email VARCHAR(128))")
elif ("password" not in row):
cursor.execute("CREATE TABLE password(id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE, host VARCHAR(255), user_id INTEGER, password VARCHAR(255), dt datetime DEFAULT CURRENT_TIMESTAMP, FOREIGN KEY(user_id) REFERENCES users(id));")
connexion.commit()
connexion.close()
|
Connect to database and create if tables not exists#!/usr/bin/env python3
import sqlite3
# Connexion database
database = "../data/storage.sq3"
connexion = sqlite3.connect(database)
cursor = connexion.cursor()
for row in cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='users' AND name='password';"):
print(row)
if ("users" not in row):
cursor.execute("CREATE TABLE users(id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE, nickname VARCHAR(55), fullname VARCHAR(128), password VARCHAR(255), email VARCHAR(128))")
elif ("password" not in row):
cursor.execute("CREATE TABLE password(id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE, host VARCHAR(255), user_id INTEGER, password VARCHAR(255), dt datetime DEFAULT CURRENT_TIMESTAMP, FOREIGN KEY(user_id) REFERENCES users(id));")
connexion.commit()
connexion.close()
|
<commit_before><commit_msg>Connect to database and create if tables not exists<commit_after>#!/usr/bin/env python3
import sqlite3
# Connexion database
database = "../data/storage.sq3"
connexion = sqlite3.connect(database)
cursor = connexion.cursor()
for row in cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='users' AND name='password';"):
print(row)
if ("users" not in row):
cursor.execute("CREATE TABLE users(id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE, nickname VARCHAR(55), fullname VARCHAR(128), password VARCHAR(255), email VARCHAR(128))")
elif ("password" not in row):
cursor.execute("CREATE TABLE password(id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE, host VARCHAR(255), user_id INTEGER, password VARCHAR(255), dt datetime DEFAULT CURRENT_TIMESTAMP, FOREIGN KEY(user_id) REFERENCES users(id));")
connexion.commit()
connexion.close()
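
One caveat about the snippet above, and a hedged alternative: the WHERE clause asks for a row whose name is both 'users' and 'password', which can never match, so the loop body, and with it both CREATE TABLE statements, never runs. SQLite can perform the existence check itself with CREATE TABLE IF NOT EXISTS; the sketch below keeps the original column definitions and the ../data/storage.sq3 path (the directory is assumed to exist) but is an illustration, not a drop-in replacement for the project's module.

#!/usr/bin/env python3
import sqlite3

database = "../data/storage.sq3"

connexion = sqlite3.connect(database)
cursor = connexion.cursor()

# Let SQLite skip creation when the tables already exist.
cursor.execute(
    "CREATE TABLE IF NOT EXISTS users("
    "id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE, "
    "nickname VARCHAR(55), fullname VARCHAR(128), "
    "password VARCHAR(255), email VARCHAR(128))")
cursor.execute(
    "CREATE TABLE IF NOT EXISTS password("
    "id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE, "
    "host VARCHAR(255), user_id INTEGER, password VARCHAR(255), "
    "dt datetime DEFAULT CURRENT_TIMESTAMP, "
    "FOREIGN KEY(user_id) REFERENCES users(id))")

connexion.commit()
connexion.close()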
|
|
adf12bd0561fb92705346f09ec6740c664fd656f
|
misc/automata/main.py
|
misc/automata/main.py
|
data = "00000000000000000000100000000000000000000"
str2idx = {
"111": 0,
"110": 1,
"101": 2,
"100": 3,
"011": 4,
"010": 5,
"001": 6,
"000": 7
}
def step(data, rule):
newdata = ""
newdata += rule[str2idx["0" + data[:2]]]
for i in range(1,len(data)-1):
newdata += rule[str2idx[data[i-1:i+2]]]
newdata += rule[str2idx[data[-2:] + "0"]]
return newdata
if __name__ == "__main__":
for _ in range(17):
print(data)
data = step(data, "01011010")
|
Add simple wolfram-style cellular automata
|
Add simple wolfram-style cellular automata
|
Python
|
mit
|
WesleyAC/toybox,WesleyAC/toybox,WesleyAC/toybox,WesleyAC/toybox,WesleyAC/toybox
|
Add simple wolfram-style cellular automata
|
data = "00000000000000000000100000000000000000000"
str2idx = {
"111": 0,
"110": 1,
"101": 2,
"100": 3,
"011": 4,
"010": 5,
"001": 6,
"000": 7
}
def step(data, rule):
newdata = ""
newdata += rule[str2idx["0" + data[:2]]]
for i in range(1,len(data)-1):
newdata += rule[str2idx[data[i-1:i+2]]]
newdata += rule[str2idx[data[-2:] + "0"]]
return newdata
if __name__ == "__main__":
for _ in range(17):
print(data)
data = step(data, "01011010")
|
<commit_before><commit_msg>Add simple wolfram-style cellular automata<commit_after>
|
data = "00000000000000000000100000000000000000000"
str2idx = {
"111": 0,
"110": 1,
"101": 2,
"100": 3,
"011": 4,
"010": 5,
"001": 6,
"000": 7
}
def step(data, rule):
newdata = ""
newdata += rule[str2idx["0" + data[:2]]]
for i in range(1,len(data)-1):
newdata += rule[str2idx[data[i-1:i+2]]]
newdata += rule[str2idx[data[-2:] + "0"]]
return newdata
if __name__ == "__main__":
for _ in range(17):
print(data)
data = step(data, "01011010")
|
Add simple wolfram-style cellular automatadata = "00000000000000000000100000000000000000000"
str2idx = {
"111": 0,
"110": 1,
"101": 2,
"100": 3,
"011": 4,
"010": 5,
"001": 6,
"000": 7
}
def step(data, rule):
newdata = ""
newdata += rule[str2idx["0" + data[:2]]]
for i in range(1,len(data)-1):
newdata += rule[str2idx[data[i-1:i+2]]]
newdata += rule[str2idx[data[-2:] + "0"]]
return newdata
if __name__ == "__main__":
for _ in range(17):
print(data)
data = step(data, "01011010")
|
<commit_before><commit_msg>Add simple wolfram-style cellular automata<commit_after>data = "00000000000000000000100000000000000000000"
str2idx = {
"111": 0,
"110": 1,
"101": 2,
"100": 3,
"011": 4,
"010": 5,
"001": 6,
"000": 7
}
def step(data, rule):
newdata = ""
newdata += rule[str2idx["0" + data[:2]]]
for i in range(1,len(data)-1):
newdata += rule[str2idx[data[i-1:i+2]]]
newdata += rule[str2idx[data[-2:] + "0"]]
return newdata
if __name__ == "__main__":
for _ in range(17):
print(data)
data = step(data, "01011010")
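
A note on where "01011010" comes from: with str2idx ordering the neighborhoods from "111" down to "000", the rule string is just the Wolfram rule number written as eight binary digits, most significant bit first. "01011010" is 64+16+8+2 = 90, the XOR/Sierpinski rule, which matches the triangle the printout produces. A tiny helper (its name is invented for this note) makes that explicit and plugs into the step() function above:

def wolfram_rule(number):
    """Return the rule string for a Wolfram elementary CA rule in 0..255,
    ordered for str2idx: neighborhood '111' first, '000' last."""
    return format(number, "08b")

# Rule 90 reproduces the hard-coded string; other rules can be fed straight
# into step() from the module above.
assert wolfram_rule(90) == "01011010"
row = "0" * 20 + "1" + "0" * 20
for _ in range(17):
    print(row)
    row = step(row, wolfram_rule(30))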
|
|
8c6e294de4c70d97bed685f433a6541dd80cfb0b
|
src/ggrc/migrations/versions/20150428152310_3be12e136921_remove_programdirective_table.py
|
src/ggrc/migrations/versions/20150428152310_3be12e136921_remove_programdirective_table.py
|
"""Remove ProgramDirective table
Revision ID: 3be12e136921
Revises: 57cc398ad417
Create Date: 2015-04-28 15:23:10.503624
"""
# revision identifiers, used by Alembic.
revision = '3be12e136921'
down_revision = '57cc398ad417'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
sql = """
INSERT INTO relationships (
modified_by_id, created_at, updated_at, source_id,
source_type, destination_id, destination_type, context_id
)
SELECT pd.modified_by_id, pd.created_at, pd.updated_at, pd.program_id as source_id,
'Program' as source_type, pd.directive_id, IFNULL(d.kind,"Policy") as destination_type, pd.context_id
FROM program_directives as pd JOIN directives as d ON pd.directive_id = d.id;
"""
op.execute(sql)
op.drop_table('program_directives')
def downgrade():
op.create_table('calendar_entries',
sa.Column('id', mysql.INTEGER(display_width=11), nullable=False),
sa.Column('name', mysql.VARCHAR(length=250), nullable=True),
sa.Column('calendar_id', mysql.VARCHAR(length=250), nullable=True),
sa.Column('created_at', mysql.DATETIME(), nullable=True),
sa.Column('modified_by_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.Column('updated_at', mysql.DATETIME(), nullable=True),
sa.Column('context_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.Column('owner_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['context_id'], [u'contexts.id'], name=u'calendar_entries_ibfk_1'),
sa.ForeignKeyConstraint(['owner_id'], [u'people.id'], name=u'calendar_entries_ibfk_2'),
sa.PrimaryKeyConstraint('id'),
mysql_default_charset=u'utf8',
mysql_engine=u'InnoDB'
)
|
Add drop ProgramDirective table migration
|
Add drop ProgramDirective table migration
|
Python
|
apache-2.0
|
prasannav7/ggrc-core,plamut/ggrc-core,uskudnik/ggrc-core,NejcZupec/ggrc-core,hyperNURb/ggrc-core,VinnieJohns/ggrc-core,kr41/ggrc-core,vladan-m/ggrc-core,NejcZupec/ggrc-core,jmakov/ggrc-core,prasannav7/ggrc-core,hasanalom/ggrc-core,hyperNURb/ggrc-core,plamut/ggrc-core,josthkko/ggrc-core,jmakov/ggrc-core,j0gurt/ggrc-core,josthkko/ggrc-core,prasannav7/ggrc-core,andrei-karalionak/ggrc-core,prasannav7/ggrc-core,jmakov/ggrc-core,selahssea/ggrc-core,hyperNURb/ggrc-core,uskudnik/ggrc-core,hyperNURb/ggrc-core,hasanalom/ggrc-core,AleksNeStu/ggrc-core,andrei-karalionak/ggrc-core,jmakov/ggrc-core,kr41/ggrc-core,kr41/ggrc-core,plamut/ggrc-core,vladan-m/ggrc-core,hasanalom/ggrc-core,edofic/ggrc-core,uskudnik/ggrc-core,NejcZupec/ggrc-core,andrei-karalionak/ggrc-core,edofic/ggrc-core,uskudnik/ggrc-core,andrei-karalionak/ggrc-core,selahssea/ggrc-core,vladan-m/ggrc-core,edofic/ggrc-core,AleksNeStu/ggrc-core,NejcZupec/ggrc-core,uskudnik/ggrc-core,hasanalom/ggrc-core,jmakov/ggrc-core,vladan-m/ggrc-core,j0gurt/ggrc-core,VinnieJohns/ggrc-core,vladan-m/ggrc-core,selahssea/ggrc-core,plamut/ggrc-core,hyperNURb/ggrc-core,josthkko/ggrc-core,josthkko/ggrc-core,VinnieJohns/ggrc-core,VinnieJohns/ggrc-core,j0gurt/ggrc-core,j0gurt/ggrc-core,kr41/ggrc-core,AleksNeStu/ggrc-core,edofic/ggrc-core,hasanalom/ggrc-core,selahssea/ggrc-core,AleksNeStu/ggrc-core
|
Add drop ProgramDirective table migration
|
"""Remove ProgramDirective table
Revision ID: 3be12e136921
Revises: 57cc398ad417
Create Date: 2015-04-28 15:23:10.503624
"""
# revision identifiers, used by Alembic.
revision = '3be12e136921'
down_revision = '57cc398ad417'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
sql = """
INSERT INTO relationships (
modified_by_id, created_at, updated_at, source_id,
source_type, destination_id, destination_type, context_id
)
SELECT pd.modified_by_id, pd.created_at, pd.updated_at, pd.program_id as source_id,
'Program' as source_type, pd.directive_id, IFNULL(d.kind,"Policy") as destination_type, pd.context_id
FROM program_directives as pd JOIN directives as d ON pd.directive_id = d.id;
"""
op.execute(sql)
op.drop_table('program_directives')
def downgrade():
op.create_table('calendar_entries',
sa.Column('id', mysql.INTEGER(display_width=11), nullable=False),
sa.Column('name', mysql.VARCHAR(length=250), nullable=True),
sa.Column('calendar_id', mysql.VARCHAR(length=250), nullable=True),
sa.Column('created_at', mysql.DATETIME(), nullable=True),
sa.Column('modified_by_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.Column('updated_at', mysql.DATETIME(), nullable=True),
sa.Column('context_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.Column('owner_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['context_id'], [u'contexts.id'], name=u'calendar_entries_ibfk_1'),
sa.ForeignKeyConstraint(['owner_id'], [u'people.id'], name=u'calendar_entries_ibfk_2'),
sa.PrimaryKeyConstraint('id'),
mysql_default_charset=u'utf8',
mysql_engine=u'InnoDB'
)
|
<commit_before><commit_msg>Add drop ProgramDirective table migration<commit_after>
|
"""Remove ProgramDirective table
Revision ID: 3be12e136921
Revises: 57cc398ad417
Create Date: 2015-04-28 15:23:10.503624
"""
# revision identifiers, used by Alembic.
revision = '3be12e136921'
down_revision = '57cc398ad417'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
sql = """
INSERT INTO relationships (
modified_by_id, created_at, updated_at, source_id,
source_type, destination_id, destination_type, context_id
)
SELECT pd.modified_by_id, pd.created_at, pd.updated_at, pd.program_id as source_id,
'Program' as source_type, pd.directive_id, IFNULL(d.kind,"Policy") as destination_type, pd.context_id
FROM program_directives as pd JOIN directives as d ON pd.directive_id = d.id;
"""
op.execute(sql)
op.drop_table('program_directives')
def downgrade():
op.create_table('calendar_entries',
sa.Column('id', mysql.INTEGER(display_width=11), nullable=False),
sa.Column('name', mysql.VARCHAR(length=250), nullable=True),
sa.Column('calendar_id', mysql.VARCHAR(length=250), nullable=True),
sa.Column('created_at', mysql.DATETIME(), nullable=True),
sa.Column('modified_by_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.Column('updated_at', mysql.DATETIME(), nullable=True),
sa.Column('context_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.Column('owner_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['context_id'], [u'contexts.id'], name=u'calendar_entries_ibfk_1'),
sa.ForeignKeyConstraint(['owner_id'], [u'people.id'], name=u'calendar_entries_ibfk_2'),
sa.PrimaryKeyConstraint('id'),
mysql_default_charset=u'utf8',
mysql_engine=u'InnoDB'
)
|
Add drop ProgramDirective table migration
"""Remove ProgramDirective table
Revision ID: 3be12e136921
Revises: 57cc398ad417
Create Date: 2015-04-28 15:23:10.503624
"""
# revision identifiers, used by Alembic.
revision = '3be12e136921'
down_revision = '57cc398ad417'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
sql = """
INSERT INTO relationships (
modified_by_id, created_at, updated_at, source_id,
source_type, destination_id, destination_type, context_id
)
SELECT pd.modified_by_id, pd.created_at, pd.updated_at, pd.program_id as source_id,
'Program' as source_type, pd.directive_id, IFNULL(d.kind,"Policy") as destination_type, pd.context_id
FROM program_directives as pd JOIN directives as d ON pd.directive_id = d.id;
"""
op.execute(sql)
op.drop_table('program_directives')
def downgrade():
op.create_table('calendar_entries',
sa.Column('id', mysql.INTEGER(display_width=11), nullable=False),
sa.Column('name', mysql.VARCHAR(length=250), nullable=True),
sa.Column('calendar_id', mysql.VARCHAR(length=250), nullable=True),
sa.Column('created_at', mysql.DATETIME(), nullable=True),
sa.Column('modified_by_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.Column('updated_at', mysql.DATETIME(), nullable=True),
sa.Column('context_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.Column('owner_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['context_id'], [u'contexts.id'], name=u'calendar_entries_ibfk_1'),
sa.ForeignKeyConstraint(['owner_id'], [u'people.id'], name=u'calendar_entries_ibfk_2'),
sa.PrimaryKeyConstraint('id'),
mysql_default_charset=u'utf8',
mysql_engine=u'InnoDB'
)
|
<commit_before><commit_msg>Add drop ProgramDirective table migration<commit_after>
"""Remove ProgramDirective table
Revision ID: 3be12e136921
Revises: 57cc398ad417
Create Date: 2015-04-28 15:23:10.503624
"""
# revision identifiers, used by Alembic.
revision = '3be12e136921'
down_revision = '57cc398ad417'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
sql = """
INSERT INTO relationships (
modified_by_id, created_at, updated_at, source_id,
source_type, destination_id, destination_type, context_id
)
SELECT pd.modified_by_id, pd.created_at, pd.updated_at, pd.program_id as source_id,
'Program' as source_type, pd.directive_id, IFNULL(d.kind,"Policy") as destination_type, pd.context_id
FROM program_directives as pd JOIN directives as d ON pd.directive_id = d.id;
"""
op.execute(sql)
op.drop_table('program_directives')
def downgrade():
op.create_table('calendar_entries',
sa.Column('id', mysql.INTEGER(display_width=11), nullable=False),
sa.Column('name', mysql.VARCHAR(length=250), nullable=True),
sa.Column('calendar_id', mysql.VARCHAR(length=250), nullable=True),
sa.Column('created_at', mysql.DATETIME(), nullable=True),
sa.Column('modified_by_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.Column('updated_at', mysql.DATETIME(), nullable=True),
sa.Column('context_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.Column('owner_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['context_id'], [u'contexts.id'], name=u'calendar_entries_ibfk_1'),
sa.ForeignKeyConstraint(['owner_id'], [u'people.id'], name=u'calendar_entries_ibfk_2'),
sa.PrimaryKeyConstraint('id'),
mysql_default_charset=u'utf8',
mysql_engine=u'InnoDB'
)
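
One asymmetry worth flagging in the migration above, with a hedged sketch: upgrade() drops program_directives, but the downgrade() shown recreates calendar_entries, so downgrading would not restore the dropped table. A symmetric downgrade would recreate program_directives; the column list below is inferred only from the columns the INSERT ... SELECT reads, not from the real historical schema, and it does not attempt to copy rows back out of relationships (that would require knowing which destination_type values correspond to directives).

def downgrade():
    # Hypothetical inverse of upgrade(): recreate the dropped table with the
    # columns referenced above. Row data is not restored here.
    op.create_table(
        'program_directives',
        sa.Column('id', sa.Integer(), primary_key=True),
        sa.Column('program_id', sa.Integer(), nullable=False),
        sa.Column('directive_id', sa.Integer(), nullable=False),
        sa.Column('modified_by_id', sa.Integer(), nullable=True),
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('updated_at', sa.DateTime(), nullable=True),
        sa.Column('context_id', sa.Integer(), nullable=True),
    )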
|
|
192bf9ea05fb5ebcf46a6f1707ca339eed96bcf0
|
cli/commands/cmd_secret.py
|
cli/commands/cmd_secret.py
|
import binascii
import logging
import os
import click
@click.command()
@click.argument('bytes', default=128)
def cli(bytes):
"""
Generate a random secret token.
:return: str
"""
return logging.info(binascii.b2a_hex(os.urandom(bytes)))
|
Add CLI command to generate a secret token
|
Add CLI command to generate a secret token
|
Python
|
mit
|
nickjj/build-a-saas-app-with-flask,nickjj/build-a-saas-app-with-flask,z123/build-a-saas-app-with-flask,nickjj/build-a-saas-app-with-flask,nickjj/build-a-saas-app-with-flask,z123/build-a-saas-app-with-flask,z123/build-a-saas-app-with-flask
|
Add CLI command to generate a secret token
|
import binascii
import logging
import os
import click
@click.command()
@click.argument('bytes', default=128)
def cli(bytes):
"""
Generate a random secret token.
:return: str
"""
return logging.info(binascii.b2a_hex(os.urandom(bytes)))
|
<commit_before><commit_msg>Add CLI command to generate a secret token<commit_after>
|
import binascii
import logging
import os
import click
@click.command()
@click.argument('bytes', default=128)
def cli(bytes):
"""
Generate a random secret token.
:return: str
"""
return logging.info(binascii.b2a_hex(os.urandom(bytes)))
|
Add CLI command to generate a secret tokenimport binascii
import logging
import os
import click
@click.command()
@click.argument('bytes', default=128)
def cli(bytes):
"""
Generate a random secret token.
:return: str
"""
return logging.info(binascii.b2a_hex(os.urandom(bytes)))
|
<commit_before><commit_msg>Add CLI command to generate a secret token<commit_after>import binascii
import logging
import os
import click
@click.command()
@click.argument('bytes', default=128)
def cli(bytes):
"""
Generate a random secret token.
:return: str
"""
return logging.info(binascii.b2a_hex(os.urandom(bytes)))
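
Two details about the command above, plus a stdlib-only sketch (an illustration, not a change to the project's CLI): binascii.b2a_hex(os.urandom(n)) produces 2*n hex characters (256 for the default of 128 bytes), and logging.info() emits nothing unless the root logger is configured at INFO or lower, so with default logging the token never appears. Printing it directly, via the secrets module (Python 3.6+), avoids both issues:

import secrets


def generate_token(nbytes=128):
    """Return a random hex token of 2 * nbytes characters."""
    return secrets.token_hex(nbytes)


if __name__ == '__main__':
    print(generate_token())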
|
|
976f1881ad97df2f393bb1a090419531ce11eca7
|
test/trainer_test.py
|
test/trainer_test.py
|
import theanets
import util
class TestTrainer(util.MNIST):
def setUp(self):
super(TestTrainer, self).setUp()
self.exp = theanets.Experiment(
theanets.Autoencoder,
layers=(self.DIGIT_SIZE, 10, self.DIGIT_SIZE))
def assert_progress(self, algo, **kwargs):
trainer = self.exp.itertrain(self.images, optimize=algo, **kwargs)
costs0 = next(trainer)
costs1 = next(trainer)
costs2 = next(trainer)
assert costs2['J'] < costs0['J']
def test_sgd(self):
self.assert_progress('sgd', learning_rate=1e-4)
def test_nag(self):
self.assert_progress('nag', learning_rate=1e-4)
def test_rprop(self):
self.assert_progress('rprop', learning_rate=1e-4)
def test_rmsprop(self):
self.assert_progress('rmsprop', learning_rate=1e-4)
def test_adadelta(self):
self.assert_progress('adadelta', learning_rate=1e-4)
def test_hf(self):
self.assert_progress('hf')
def test_cg(self):
self.assert_progress('cg')
def test_layerwise(self):
self.assert_progress('layerwise')
|
import theanets
import util
class TestTrainer(util.MNIST):
def setUp(self):
super(TestTrainer, self).setUp()
self.exp = theanets.Experiment(
theanets.Autoencoder,
layers=(self.DIGIT_SIZE, 10, self.DIGIT_SIZE))
def assert_progress(self, algo, **kwargs):
trainer = self.exp.itertrain(self.images, optimize=algo, **kwargs)
costs0 = next(trainer)
costs1 = next(trainer)
costs2 = next(trainer)
assert costs2['J'] < costs0['J']
def test_sgd(self):
self.assert_progress('sgd', learning_rate=1e-4)
def test_nag(self):
self.assert_progress('nag', learning_rate=1e-4)
def test_rprop(self):
self.assert_progress('rprop', learning_rate=1e-4)
def test_rmsprop(self):
self.assert_progress('rmsprop', learning_rate=1e-4)
def test_adadelta(self):
self.assert_progress('adadelta', learning_rate=1e-4)
def test_hf(self):
self.assert_progress('hf', num_updates=3)
def test_cg(self):
self.assert_progress('cg')
def test_layerwise(self):
self.assert_progress('layerwise')
|
Make HF test take much less time!
|
Make HF test take much less time!
|
Python
|
mit
|
devdoer/theanets,chrinide/theanets,lmjohns3/theanets
|
import theanets
import util
class TestTrainer(util.MNIST):
def setUp(self):
super(TestTrainer, self).setUp()
self.exp = theanets.Experiment(
theanets.Autoencoder,
layers=(self.DIGIT_SIZE, 10, self.DIGIT_SIZE))
def assert_progress(self, algo, **kwargs):
trainer = self.exp.itertrain(self.images, optimize=algo, **kwargs)
costs0 = next(trainer)
costs1 = next(trainer)
costs2 = next(trainer)
assert costs2['J'] < costs0['J']
def test_sgd(self):
self.assert_progress('sgd', learning_rate=1e-4)
def test_nag(self):
self.assert_progress('nag', learning_rate=1e-4)
def test_rprop(self):
self.assert_progress('rprop', learning_rate=1e-4)
def test_rmsprop(self):
self.assert_progress('rmsprop', learning_rate=1e-4)
def test_adadelta(self):
self.assert_progress('adadelta', learning_rate=1e-4)
def test_hf(self):
self.assert_progress('hf')
def test_cg(self):
self.assert_progress('cg')
def test_layerwise(self):
self.assert_progress('layerwise')
Make HF test take much less time!
|
import theanets
import util
class TestTrainer(util.MNIST):
def setUp(self):
super(TestTrainer, self).setUp()
self.exp = theanets.Experiment(
theanets.Autoencoder,
layers=(self.DIGIT_SIZE, 10, self.DIGIT_SIZE))
def assert_progress(self, algo, **kwargs):
trainer = self.exp.itertrain(self.images, optimize=algo, **kwargs)
costs0 = next(trainer)
costs1 = next(trainer)
costs2 = next(trainer)
assert costs2['J'] < costs0['J']
def test_sgd(self):
self.assert_progress('sgd', learning_rate=1e-4)
def test_nag(self):
self.assert_progress('nag', learning_rate=1e-4)
def test_rprop(self):
self.assert_progress('rprop', learning_rate=1e-4)
def test_rmsprop(self):
self.assert_progress('rmsprop', learning_rate=1e-4)
def test_adadelta(self):
self.assert_progress('adadelta', learning_rate=1e-4)
def test_hf(self):
self.assert_progress('hf', num_updates=3)
def test_cg(self):
self.assert_progress('cg')
def test_layerwise(self):
self.assert_progress('layerwise')
|
<commit_before>import theanets
import util
class TestTrainer(util.MNIST):
def setUp(self):
super(TestTrainer, self).setUp()
self.exp = theanets.Experiment(
theanets.Autoencoder,
layers=(self.DIGIT_SIZE, 10, self.DIGIT_SIZE))
def assert_progress(self, algo, **kwargs):
trainer = self.exp.itertrain(self.images, optimize=algo, **kwargs)
costs0 = next(trainer)
costs1 = next(trainer)
costs2 = next(trainer)
assert costs2['J'] < costs0['J']
def test_sgd(self):
self.assert_progress('sgd', learning_rate=1e-4)
def test_nag(self):
self.assert_progress('nag', learning_rate=1e-4)
def test_rprop(self):
self.assert_progress('rprop', learning_rate=1e-4)
def test_rmsprop(self):
self.assert_progress('rmsprop', learning_rate=1e-4)
def test_adadelta(self):
self.assert_progress('adadelta', learning_rate=1e-4)
def test_hf(self):
self.assert_progress('hf')
def test_cg(self):
self.assert_progress('cg')
def test_layerwise(self):
self.assert_progress('layerwise')
<commit_msg>Make HF test take much less time!<commit_after>
|
import theanets
import util
class TestTrainer(util.MNIST):
def setUp(self):
super(TestTrainer, self).setUp()
self.exp = theanets.Experiment(
theanets.Autoencoder,
layers=(self.DIGIT_SIZE, 10, self.DIGIT_SIZE))
def assert_progress(self, algo, **kwargs):
trainer = self.exp.itertrain(self.images, optimize=algo, **kwargs)
costs0 = next(trainer)
costs1 = next(trainer)
costs2 = next(trainer)
assert costs2['J'] < costs0['J']
def test_sgd(self):
self.assert_progress('sgd', learning_rate=1e-4)
def test_nag(self):
self.assert_progress('nag', learning_rate=1e-4)
def test_rprop(self):
self.assert_progress('rprop', learning_rate=1e-4)
def test_rmsprop(self):
self.assert_progress('rmsprop', learning_rate=1e-4)
def test_adadelta(self):
self.assert_progress('adadelta', learning_rate=1e-4)
def test_hf(self):
self.assert_progress('hf', num_updates=3)
def test_cg(self):
self.assert_progress('cg')
def test_layerwise(self):
self.assert_progress('layerwise')
|
import theanets
import util
class TestTrainer(util.MNIST):
def setUp(self):
super(TestTrainer, self).setUp()
self.exp = theanets.Experiment(
theanets.Autoencoder,
layers=(self.DIGIT_SIZE, 10, self.DIGIT_SIZE))
def assert_progress(self, algo, **kwargs):
trainer = self.exp.itertrain(self.images, optimize=algo, **kwargs)
costs0 = next(trainer)
costs1 = next(trainer)
costs2 = next(trainer)
assert costs2['J'] < costs0['J']
def test_sgd(self):
self.assert_progress('sgd', learning_rate=1e-4)
def test_nag(self):
self.assert_progress('nag', learning_rate=1e-4)
def test_rprop(self):
self.assert_progress('rprop', learning_rate=1e-4)
def test_rmsprop(self):
self.assert_progress('rmsprop', learning_rate=1e-4)
def test_adadelta(self):
self.assert_progress('adadelta', learning_rate=1e-4)
def test_hf(self):
self.assert_progress('hf')
def test_cg(self):
self.assert_progress('cg')
def test_layerwise(self):
self.assert_progress('layerwise')
Make HF test take much less time!import theanets
import util
class TestTrainer(util.MNIST):
def setUp(self):
super(TestTrainer, self).setUp()
self.exp = theanets.Experiment(
theanets.Autoencoder,
layers=(self.DIGIT_SIZE, 10, self.DIGIT_SIZE))
def assert_progress(self, algo, **kwargs):
trainer = self.exp.itertrain(self.images, optimize=algo, **kwargs)
costs0 = next(trainer)
costs1 = next(trainer)
costs2 = next(trainer)
assert costs2['J'] < costs0['J']
def test_sgd(self):
self.assert_progress('sgd', learning_rate=1e-4)
def test_nag(self):
self.assert_progress('nag', learning_rate=1e-4)
def test_rprop(self):
self.assert_progress('rprop', learning_rate=1e-4)
def test_rmsprop(self):
self.assert_progress('rmsprop', learning_rate=1e-4)
def test_adadelta(self):
self.assert_progress('adadelta', learning_rate=1e-4)
def test_hf(self):
self.assert_progress('hf', num_updates=3)
def test_cg(self):
self.assert_progress('cg')
def test_layerwise(self):
self.assert_progress('layerwise')
|
<commit_before>import theanets
import util
class TestTrainer(util.MNIST):
def setUp(self):
super(TestTrainer, self).setUp()
self.exp = theanets.Experiment(
theanets.Autoencoder,
layers=(self.DIGIT_SIZE, 10, self.DIGIT_SIZE))
def assert_progress(self, algo, **kwargs):
trainer = self.exp.itertrain(self.images, optimize=algo, **kwargs)
costs0 = next(trainer)
costs1 = next(trainer)
costs2 = next(trainer)
assert costs2['J'] < costs0['J']
def test_sgd(self):
self.assert_progress('sgd', learning_rate=1e-4)
def test_nag(self):
self.assert_progress('nag', learning_rate=1e-4)
def test_rprop(self):
self.assert_progress('rprop', learning_rate=1e-4)
def test_rmsprop(self):
self.assert_progress('rmsprop', learning_rate=1e-4)
def test_adadelta(self):
self.assert_progress('adadelta', learning_rate=1e-4)
def test_hf(self):
self.assert_progress('hf')
def test_cg(self):
self.assert_progress('cg')
def test_layerwise(self):
self.assert_progress('layerwise')
<commit_msg>Make HF test take much less time!<commit_after>import theanets
import util
class TestTrainer(util.MNIST):
def setUp(self):
super(TestTrainer, self).setUp()
self.exp = theanets.Experiment(
theanets.Autoencoder,
layers=(self.DIGIT_SIZE, 10, self.DIGIT_SIZE))
def assert_progress(self, algo, **kwargs):
trainer = self.exp.itertrain(self.images, optimize=algo, **kwargs)
costs0 = next(trainer)
costs1 = next(trainer)
costs2 = next(trainer)
assert costs2['J'] < costs0['J']
def test_sgd(self):
self.assert_progress('sgd', learning_rate=1e-4)
def test_nag(self):
self.assert_progress('nag', learning_rate=1e-4)
def test_rprop(self):
self.assert_progress('rprop', learning_rate=1e-4)
def test_rmsprop(self):
self.assert_progress('rmsprop', learning_rate=1e-4)
def test_adadelta(self):
self.assert_progress('adadelta', learning_rate=1e-4)
def test_hf(self):
self.assert_progress('hf', num_updates=3)
def test_cg(self):
self.assert_progress('cg')
def test_layerwise(self):
self.assert_progress('layerwise')
|
59915f5fd782cef4f5805a2f7ca616b8d615b7e1
|
osf/migrations/0009_auto_20170406_1614.py
|
osf/migrations/0009_auto_20170406_1614.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-04-06 21:14
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('osf', '0008_merge'),
]
operations = [
migrations.AlterField(
model_name='noderelation',
name='child',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='_parents', to='osf.AbstractNode'),
),
]
|
Add pointless migration to ensure python functionality
|
Add pointless migration to ensure python functionality
|
Python
|
apache-2.0
|
caneruguz/osf.io,binoculars/osf.io,laurenrevere/osf.io,hmoco/osf.io,leb2dg/osf.io,mattclark/osf.io,felliott/osf.io,chrisseto/osf.io,felliott/osf.io,caseyrollins/osf.io,baylee-d/osf.io,brianjgeiger/osf.io,chennan47/osf.io,cwisecarver/osf.io,caneruguz/osf.io,caseyrollins/osf.io,aaxelb/osf.io,cwisecarver/osf.io,caseyrollins/osf.io,laurenrevere/osf.io,CenterForOpenScience/osf.io,felliott/osf.io,adlius/osf.io,Nesiehr/osf.io,HalcyonChimera/osf.io,erinspace/osf.io,mfraezz/osf.io,TomBaxter/osf.io,baylee-d/osf.io,CenterForOpenScience/osf.io,cwisecarver/osf.io,chennan47/osf.io,CenterForOpenScience/osf.io,adlius/osf.io,Johnetordoff/osf.io,icereval/osf.io,sloria/osf.io,crcresearch/osf.io,pattisdr/osf.io,Johnetordoff/osf.io,Nesiehr/osf.io,HalcyonChimera/osf.io,mattclark/osf.io,leb2dg/osf.io,Johnetordoff/osf.io,aaxelb/osf.io,pattisdr/osf.io,sloria/osf.io,mfraezz/osf.io,icereval/osf.io,hmoco/osf.io,icereval/osf.io,cslzchen/osf.io,cslzchen/osf.io,brianjgeiger/osf.io,brianjgeiger/osf.io,hmoco/osf.io,sloria/osf.io,caneruguz/osf.io,Nesiehr/osf.io,cslzchen/osf.io,chrisseto/osf.io,brianjgeiger/osf.io,crcresearch/osf.io,saradbowman/osf.io,cwisecarver/osf.io,cslzchen/osf.io,mattclark/osf.io,leb2dg/osf.io,pattisdr/osf.io,aaxelb/osf.io,HalcyonChimera/osf.io,chrisseto/osf.io,hmoco/osf.io,TomBaxter/osf.io,HalcyonChimera/osf.io,erinspace/osf.io,chennan47/osf.io,adlius/osf.io,saradbowman/osf.io,mfraezz/osf.io,laurenrevere/osf.io,leb2dg/osf.io,caneruguz/osf.io,CenterForOpenScience/osf.io,felliott/osf.io,baylee-d/osf.io,Nesiehr/osf.io,mfraezz/osf.io,Johnetordoff/osf.io,chrisseto/osf.io,aaxelb/osf.io,crcresearch/osf.io,adlius/osf.io,binoculars/osf.io,binoculars/osf.io,erinspace/osf.io,TomBaxter/osf.io
|
Add pointless migration to ensure python functionality
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-04-06 21:14
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('osf', '0008_merge'),
]
operations = [
migrations.AlterField(
model_name='noderelation',
name='child',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='_parents', to='osf.AbstractNode'),
),
]
|
<commit_before><commit_msg>Add pointless migration to ensure python functionality<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-04-06 21:14
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('osf', '0008_merge'),
]
operations = [
migrations.AlterField(
model_name='noderelation',
name='child',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='_parents', to='osf.AbstractNode'),
),
]
|
Add pointless migration to ensure python functionality# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-04-06 21:14
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('osf', '0008_merge'),
]
operations = [
migrations.AlterField(
model_name='noderelation',
name='child',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='_parents', to='osf.AbstractNode'),
),
]
|
<commit_before><commit_msg>Add pointless migration to ensure python functionality<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-04-06 21:14
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('osf', '0008_merge'),
]
operations = [
migrations.AlterField(
model_name='noderelation',
name='child',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='_parents', to='osf.AbstractNode'),
),
]
|
|
a53fba0f648b3472834443fa3dc31c0611bcb6a3
|
test/test_mcmc_serial.py
|
test/test_mcmc_serial.py
|
import time
import numpy as np
import yaml
import quantitation
# Set parameters
path_cfg = 'examples/basic.yml'
# Load config
cfg = yaml.load(open(path_cfg, 'rb'))
# Load data
mapping_peptides = np.loadtxt(cfg['data']['path_mapping_peptides'],
dtype=np.int)
mapping_states_obs, intensities_obs = np.loadtxt(cfg['data']['path_data_state'],
dtype=[('peptide', np.int),
('intensity',
np.float)],
unpack=True)
# Run MCMC sampler
time_start = time.time()
draws, accept_stats = quantitation.mcmc_serial(intensities_obs,
mapping_states_obs,
mapping_peptides, cfg)
time_done = time.time()
# Print timing information
print "%f seconds for %d iterations" % (time_done-time_start,
cfg['settings']['n_iterations'])
print "%f seconds per iteration" % ((time_done-time_start) /
(0.+cfg['settings']['n_iterations']))
# Extract posterior means
means = {}
for k, x in draws.iteritems():
means[k] = np.mean(x, 0)
|
Add basic test script for mcmc_serial. Code now passes with conditions on prior.
|
Add basic test script for mcmc_serial. Code now passes with conditions on prior.
Code runs with all prior inputs on ups2 and simulated data. However, variance
hyperparameters exhibit issues when used with improper priors on the rate
parameter. The shape and rate parameters diverge towards infinity as their ratio
(the expected precision) remains fixed. All logic and mathematics have been
checked extremely carefully and no errors appear to remain. I believe, at this
point, that the given problem arises from posterior impropriety. This needs to
be checked mathematically, likely on a simpler hierarchical model with no
missing data, two layers of variance parameters, and three layers of normal
means/observations.
|
Python
|
bsd-3-clause
|
awblocker/quantitation,awblocker/quantitation,awblocker/quantitation
|
Add basic test script for mcmc_serial. Code now passes with conditions on prior.
Code runs with all prior inputs on ups2 and simulated data. However, variance
hyperparameters exhibit issues when used with improper priors on the rate
parameter. The shape and rate parameters diverge towards infinity as their ratio
(the expected precision) remains fixed. All logic and mathematics have been
checked extremely carefully and no errors appear to remain. I believe, at this
point, that the given problem arises from posterior impropriety. This needs to
be checked mathematically, likely on a simpler hierarchical model with no
missing data, two layers of variance parameters, and three layers of normal
means/observations.
|
import time
import numpy as np
import yaml
import quantitation
# Set parameters
path_cfg = 'examples/basic.yml'
# Load config
cfg = yaml.load(open(path_cfg, 'rb'))
# Load data
mapping_peptides = np.loadtxt(cfg['data']['path_mapping_peptides'],
dtype=np.int)
mapping_states_obs, intensities_obs = np.loadtxt(cfg['data']['path_data_state'],
dtype=[('peptide', np.int),
('intensity',
np.float)],
unpack=True)
# Run MCMC sampler
time_start = time.time()
draws, accept_stats = quantitation.mcmc_serial(intensities_obs,
mapping_states_obs,
mapping_peptides, cfg)
time_done = time.time()
# Print timing information
print "%f seconds for %d iterations" % (time_done-time_start,
cfg['settings']['n_iterations'])
print "%f seconds per iteration" % ((time_done-time_start) /
(0.+cfg['settings']['n_iterations']))
# Extract posterior means
means = {}
for k, x in draws.iteritems():
means[k] = np.mean(x, 0)
|
<commit_before><commit_msg>Add basic test script for mcmc_serial. Code now passes with conditions on prior.
Code runs with all prior inputs on ups2 and simulated data. However, variance
hyperparameters exhibit issues when used with improper priors on the rate
parameter. The shape and rate parameters diverge towards infinity as their ratio
(the expected precision) remains fixed. All logic and mathematics have been
checked extremely carefully and no errors appear to remain. I believe, at this
point, that the given problem arises from posterior impropriety. This needs to
be checked mathematically, likely on a simpler hierarchical model with no
missing data, two layers of variance parameters, and three layers of normal
means/observations.<commit_after>
|
import time
import numpy as np
import yaml
import quantitation
# Set parameters
path_cfg = 'examples/basic.yml'
# Load config
cfg = yaml.load(open(path_cfg, 'rb'))
# Load data
mapping_peptides = np.loadtxt(cfg['data']['path_mapping_peptides'],
dtype=np.int)
mapping_states_obs, intensities_obs = np.loadtxt(cfg['data']['path_data_state'],
dtype=[('peptide', np.int),
('intensity',
np.float)],
unpack=True)
# Run MCMC sampler
time_start = time.time()
draws, accept_stats = quantitation.mcmc_serial(intensities_obs,
mapping_states_obs,
mapping_peptides, cfg)
time_done = time.time()
# Print timing information
print "%f seconds for %d iterations" % (time_done-time_start,
cfg['settings']['n_iterations'])
print "%f seconds per iteration" % ((time_done-time_start) /
(0.+cfg['settings']['n_iterations']))
# Extract posterior means
means = {}
for k, x in draws.iteritems():
means[k] = np.mean(x, 0)
|
Add basic test script for mcmc_serial. Code now passes with conditions on prior.
Code runs with all prior inputs on ups2 and simulated data. However, variance
hyperparameters exhibit issues when used with improper priors on the rate
parameter. The shape and rate parameters diverge towards infinity as their ratio
(the expected precision) remains fixed. All logic and mathematics have been
checked extremely carefully and no errors appear to remain. I believe, at this
point, that the given problem arises from posterior impropriety. This needs to
be checked mathematically, likely on a simpler hierarchical model with no
missing data, two layers of variance parameters, and three layers of normal
means/observations.import time
import numpy as np
import yaml
import quantitation
# Set parameters
path_cfg = 'examples/basic.yml'
# Load config
cfg = yaml.load(open(path_cfg, 'rb'))
# Load data
mapping_peptides = np.loadtxt(cfg['data']['path_mapping_peptides'],
dtype=np.int)
mapping_states_obs, intensities_obs = np.loadtxt(cfg['data']['path_data_state'],
dtype=[('peptide', np.int),
('intensity',
np.float)],
unpack=True)
# Run MCMC sampler
time_start = time.time()
draws, accept_stats = quantitation.mcmc_serial(intensities_obs,
mapping_states_obs,
mapping_peptides, cfg)
time_done = time.time()
# Print timing information
print "%f seconds for %d iterations" % (time_done-time_start,
cfg['settings']['n_iterations'])
print "%f seconds per iteration" % ((time_done-time_start) /
(0.+cfg['settings']['n_iterations']))
# Extract posterior means
means = {}
for k, x in draws.iteritems():
means[k] = np.mean(x, 0)
|
<commit_before><commit_msg>Add basic test script for mcmc_serial. Code now passes with conditions on prior.
Code runs with all prior inputs on ups2 and simulated data. However, variance
hyperparameters exhibit issues when used with improper priors on the rate
parameter. The shape and rate parameters diverge towards infinity as their ratio
(the expected precision) remains fixed. All logic and mathematics have been
checked extremely carefully and no errors appear to remain. I believe, at this
point, that the given problem arises from posterior impropriety. This needs to
be checked mathematically, likely on a simpler hierarchical model with no
missing data, two layers of variance parameters, and three layers of normal
means/observations.<commit_after>import time
import numpy as np
import yaml
import quantitation
# Set parameters
path_cfg = 'examples/basic.yml'
# Load config
cfg = yaml.load(open(path_cfg, 'rb'))
# Load data
mapping_peptides = np.loadtxt(cfg['data']['path_mapping_peptides'],
dtype=np.int)
mapping_states_obs, intensities_obs = np.loadtxt(cfg['data']['path_data_state'],
dtype=[('peptide', np.int),
('intensity',
np.float)],
unpack=True)
# Run MCMC sampler
time_start = time.time()
draws, accept_stats = quantitation.mcmc_serial(intensities_obs,
mapping_states_obs,
mapping_peptides, cfg)
time_done = time.time()
# Print timing information
print "%f seconds for %d iterations" % (time_done-time_start,
cfg['settings']['n_iterations'])
print "%f seconds per iteration" % ((time_done-time_start) /
(0.+cfg['settings']['n_iterations']))
# Extract posterior means
means = {}
for k, x in draws.iteritems():
means[k] = np.mean(x, 0)
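
Purely as an illustration of the check the commit message proposes (the notation below is invented for this note and is not the quantitation model): the reported behaviour, shape and rate heading to infinity while their ratio, the expected precision, stays fixed, is the kind of thing one would probe on a stripped-down Normal-Gamma hierarchy such as

\begin{aligned}
  y_{ij} \mid \mu_i, \tau &\sim \mathcal{N}(\mu_i,\ \tau^{-1}), \qquad i=1,\dots,m,\ j=1,\dots,n_i,\\
  \mu_i \mid \tau_\mu &\sim \mathcal{N}(0,\ \tau_\mu^{-1}),\\
  \tau &\sim \operatorname{Gamma}(\alpha, \beta),\\
  p(\alpha, \beta) &\propto 1/\beta \quad \text{(improper hyperprior on the rate),}
\end{aligned}

and then checking analytically whether the marginal posterior of (\alpha, \beta) is integrable, which would make precise the observation that only the ratio \alpha/\beta appears well identified.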
|
|
342d96b2bc0665ec43ef51415bbc8a7c4f6f9b63
|
test/test_old_version.py
|
test/test_old_version.py
|
#!bin/env python
import subprocess
import os.path
import unittest, re
class TestOldVersion(unittest.TestCase):
@classmethod
def setUpClass(self):
subprocess.call('rm -rf remote local 2>> /dev/null', shell=True)
subprocess.call('mkdir remote; mkdir local', shell=True)
subprocess.call('cd remote; mkdir parent; cd parent; git init --bare', shell=True)
subprocess.call('cd remote; mkdir child; cd child; git init --bare', shell=True)
subprocess.call('cd remote; mkdir child2; cd child2; git init --bare', shell=True)
subprocess.call('cd local; git clone ../remote/parent', shell=True)
subprocess.call('cd local; git clone ../remote/child', shell=True)
subprocess.call('cd local; git clone ../remote/child2', shell=True)
subprocess.call('cd local/parent; echo "version: 99999999.9.9" >> .gitproj', shell=True)
subprocess.call('cd local/parent; echo "repos:" >> .gitproj', shell=True)
subprocess.call('cd local/parent; echo "\tc child ../../remote/child" >> .gitproj', shell=True)
subprocess.call('cd local/parent; echo "\tc2 child/child2 ../../remote/child2" >> .gitproj', shell=True)
subprocess.call('cd local/parent; git add .gitproj; git commit -m "Initial Commit"; git push -u origin master', shell=True)
def test_init(self):
version = subprocess.check_output('git project version', shell=True).strip()
try:
subprocess.check_output('cd local/parent; git project init', shell=True)
except subprocess.CalledProcessError as err:
self.assertEqual('git-project install is out of date. .gitproj version: 99999999.9.9, git-project version: {}. Aborting'.format(version), err.output.strip())
self.assertEqual(1, err.returncode)
subprocess.call('cd local/parent; rm .gitproj', shell=True)
@classmethod
def tearDownClass(self):
subprocess.call('rm -rf remote local', shell=True)
if __name__ == '__main__':
unittest.main()
|
Test to ensure git-project exits early if out of date
|
Test to ensure git-project exits early if out of date
See: (CLD-2913)
|
Python
|
mit
|
3ptscience/git-project,aranzgeo/git-project
|
Test to ensure git-project exits early if out of date
See: (CLD-2913)
|
#!bin/env python
import subprocess
import os.path
import unittest, re
class TestOldVersion(unittest.TestCase):
@classmethod
def setUpClass(self):
subprocess.call('rm -rf remote local 2>> /dev/null', shell=True)
subprocess.call('mkdir remote; mkdir local', shell=True)
subprocess.call('cd remote; mkdir parent; cd parent; git init --bare', shell=True)
subprocess.call('cd remote; mkdir child; cd child; git init --bare', shell=True)
subprocess.call('cd remote; mkdir child2; cd child2; git init --bare', shell=True)
subprocess.call('cd local; git clone ../remote/parent', shell=True)
subprocess.call('cd local; git clone ../remote/child', shell=True)
subprocess.call('cd local; git clone ../remote/child2', shell=True)
subprocess.call('cd local/parent; echo "version: 99999999.9.9" >> .gitproj', shell=True)
subprocess.call('cd local/parent; echo "repos:" >> .gitproj', shell=True)
subprocess.call('cd local/parent; echo "\tc child ../../remote/child" >> .gitproj', shell=True)
subprocess.call('cd local/parent; echo "\tc2 child/child2 ../../remote/child2" >> .gitproj', shell=True)
subprocess.call('cd local/parent; git add .gitproj; git commit -m "Initial Commit"; git push -u origin master', shell=True)
def test_init(self):
version = subprocess.check_output('git project version', shell=True).strip()
try:
subprocess.check_output('cd local/parent; git project init', shell=True)
except subprocess.CalledProcessError as err:
self.assertEqual('git-project install is out of date. .gitproj version: 99999999.9.9, git-project version: {}. Aborting'.format(version), err.output.strip())
self.assertEqual(1, err.returncode)
subprocess.call('cd local/parent; rm .gitproj', shell=True)
@classmethod
def tearDownClass(self):
subprocess.call('rm -rf remote local', shell=True)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Test to ensure git-project exits early if out of date
See: (CLD-2913)<commit_after>
|
#!bin/env python
import subprocess
import os.path
import unittest, re
class TestOldVersion(unittest.TestCase):
@classmethod
def setUpClass(self):
subprocess.call('rm -rf remote local 2>> /dev/null', shell=True)
subprocess.call('mkdir remote; mkdir local', shell=True)
subprocess.call('cd remote; mkdir parent; cd parent; git init --bare', shell=True)
subprocess.call('cd remote; mkdir child; cd child; git init --bare', shell=True)
subprocess.call('cd remote; mkdir child2; cd child2; git init --bare', shell=True)
subprocess.call('cd local; git clone ../remote/parent', shell=True)
subprocess.call('cd local; git clone ../remote/child', shell=True)
subprocess.call('cd local; git clone ../remote/child2', shell=True)
subprocess.call('cd local/parent; echo "version: 99999999.9.9" >> .gitproj', shell=True)
subprocess.call('cd local/parent; echo "repos:" >> .gitproj', shell=True)
subprocess.call('cd local/parent; echo "\tc child ../../remote/child" >> .gitproj', shell=True)
subprocess.call('cd local/parent; echo "\tc2 child/child2 ../../remote/child2" >> .gitproj', shell=True)
subprocess.call('cd local/parent; git add .gitproj; git commit -m "Initial Commit"; git push -u origin master', shell=True)
def test_init(self):
version = subprocess.check_output('git project version', shell=True).strip()
try:
subprocess.check_output('cd local/parent; git project init', shell=True)
except subprocess.CalledProcessError as err:
self.assertEqual('git-project install is out of date. .gitproj version: 99999999.9.9, git-project version: {}. Aborting'.format(version), err.output.strip())
self.assertEqual(1, err.returncode)
subprocess.call('cd local/parent; rm .gitproj', shell=True)
@classmethod
def tearDownClass(self):
subprocess.call('rm -rf remote local', shell=True)
if __name__ == '__main__':
unittest.main()
|
Test to ensure git-project exits early if out of date
See: (CLD-2913)#!bin/env python
import subprocess
import os.path
import unittest, re
class TestOldVersion(unittest.TestCase):
@classmethod
def setUpClass(self):
subprocess.call('rm -rf remote local 2>> /dev/null', shell=True)
subprocess.call('mkdir remote; mkdir local', shell=True)
subprocess.call('cd remote; mkdir parent; cd parent; git init --bare', shell=True)
subprocess.call('cd remote; mkdir child; cd child; git init --bare', shell=True)
subprocess.call('cd remote; mkdir child2; cd child2; git init --bare', shell=True)
subprocess.call('cd local; git clone ../remote/parent', shell=True)
subprocess.call('cd local; git clone ../remote/child', shell=True)
subprocess.call('cd local; git clone ../remote/child2', shell=True)
subprocess.call('cd local/parent; echo "version: 99999999.9.9" >> .gitproj', shell=True)
subprocess.call('cd local/parent; echo "repos:" >> .gitproj', shell=True)
subprocess.call('cd local/parent; echo "\tc child ../../remote/child" >> .gitproj', shell=True)
subprocess.call('cd local/parent; echo "\tc2 child/child2 ../../remote/child2" >> .gitproj', shell=True)
subprocess.call('cd local/parent; git add .gitproj; git commit -m "Initial Commit"; git push -u origin master', shell=True)
def test_init(self):
version = subprocess.check_output('git project version', shell=True).strip()
try:
subprocess.check_output('cd local/parent; git project init', shell=True)
except subprocess.CalledProcessError as err:
self.assertEqual('git-project install is out of date. .gitproj version: 99999999.9.9, git-project version: {}. Aborting'.format(version), err.output.strip())
self.assertEqual(1, err.returncode)
subprocess.call('cd local/parent; rm .gitproj', shell=True)
@classmethod
def tearDownClass(self):
subprocess.call('rm -rf remote local', shell=True)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Test to ensure git-project exits early if out of date
See: (CLD-2913)<commit_after>#!bin/env python
import subprocess
import os.path
import unittest, re
class TestOldVersion(unittest.TestCase):
@classmethod
def setUpClass(self):
subprocess.call('rm -rf remote local 2>> /dev/null', shell=True)
subprocess.call('mkdir remote; mkdir local', shell=True)
subprocess.call('cd remote; mkdir parent; cd parent; git init --bare', shell=True)
subprocess.call('cd remote; mkdir child; cd child; git init --bare', shell=True)
subprocess.call('cd remote; mkdir child2; cd child2; git init --bare', shell=True)
subprocess.call('cd local; git clone ../remote/parent', shell=True)
subprocess.call('cd local; git clone ../remote/child', shell=True)
subprocess.call('cd local; git clone ../remote/child2', shell=True)
subprocess.call('cd local/parent; echo "version: 99999999.9.9" >> .gitproj', shell=True)
subprocess.call('cd local/parent; echo "repos:" >> .gitproj', shell=True)
subprocess.call('cd local/parent; echo "\tc child ../../remote/child" >> .gitproj', shell=True)
subprocess.call('cd local/parent; echo "\tc2 child/child2 ../../remote/child2" >> .gitproj', shell=True)
subprocess.call('cd local/parent; git add .gitproj; git commit -m "Initial Commit"; git push -u origin master', shell=True)
def test_init(self):
version = subprocess.check_output('git project version', shell=True).strip()
try:
subprocess.check_output('cd local/parent; git project init', shell=True)
except subprocess.CalledProcessError as err:
self.assertEqual('git-project install is out of date. .gitproj version: 99999999.9.9, git-project version: {}. Aborting'.format(version), err.output.strip())
self.assertEqual(1, err.returncode)
subprocess.call('cd local/parent; rm .gitproj', shell=True)
@classmethod
def tearDownClass(self):
subprocess.call('rm -rf remote local', shell=True)
if __name__ == '__main__':
unittest.main()
|
|
1c3871b5f9effcbd859d530b3105c67eaf385ed2
|
tests/test_delete.py
|
tests/test_delete.py
|
import os, os.path
from quilt.patch import Patch
from six.moves import cStringIO
import sys
from helpers import QuiltTest, make_file, tmp_mapping, tmp_series
from quilt.delete import Delete
from quilt.cli.delete import DeleteCommand
class Test(QuiltTest):
def test_next_first(self):
""" Delete the next patch with only unapplied patches """
with tmp_series() as [dir, patches]:
patches.add_patch(Patch("patch"))
patches.save()
cmd = Delete(dir, quilt_pc=dir, quilt_patches=patches.dirname)
cmd.delete_next()
patches.read()
self.assertTrue(patches.is_empty())
def test_no_backup_next(self):
""" Remove the next patch without leaving a backup """
with tmp_series() as [dir, patches]:
patches.add_patch(Patch("patch"))
patches.save()
patch = os.path.join(patches.dirname, "patch")
make_file(b"", patch)
class args:
next = True
patch = None
remove = True
backup = False
with tmp_mapping(os.environ) as env, \
tmp_mapping(vars(sys)) as tmp_sys:
env.set("QUILT_PATCHES", patches.dirname)
env.set("QUILT_PC", dir)
tmp_sys.set("stdout", cStringIO())
DeleteCommand().run(args)
self.assertFalse(os.path.exists(patch))
self.assertFalse(os.path.exists(patch + "~"))
def test_no_backup_named(self):
""" Remove a specified patch without leaving a backup """
with tmp_series() as [dir, patches]:
patches.add_patch(Patch("patch"))
patches.save()
patch = os.path.join(patches.dirname, "patch")
make_file(b"", patch)
class args:
patch = "patch"
next = False
remove = True
backup = False
with tmp_mapping(os.environ) as env, \
tmp_mapping(vars(sys)) as tmp_sys:
env.set("QUILT_PATCHES", patches.dirname)
env.set("QUILT_PC", dir)
tmp_sys.set("stdout", cStringIO())
DeleteCommand().run(args)
self.assertFalse(os.path.exists(patch))
self.assertFalse(os.path.exists(patch + "~"))
|
Test deletion with only unapplied patches, and without the --backup option
|
Test deletion with only unapplied patches, and without the --backup option
|
Python
|
mit
|
bjoernricks/python-quilt
|
Test deletion with only unapplied patches, and without the --backup option
|
import os, os.path
from quilt.patch import Patch
from six.moves import cStringIO
import sys
from helpers import QuiltTest, make_file, tmp_mapping, tmp_series
from quilt.delete import Delete
from quilt.cli.delete import DeleteCommand
class Test(QuiltTest):
def test_next_first(self):
""" Delete the next patch with only unapplied patches """
with tmp_series() as [dir, patches]:
patches.add_patch(Patch("patch"))
patches.save()
cmd = Delete(dir, quilt_pc=dir, quilt_patches=patches.dirname)
cmd.delete_next()
patches.read()
self.assertTrue(patches.is_empty())
def test_no_backup_next(self):
""" Remove the next patch without leaving a backup """
with tmp_series() as [dir, patches]:
patches.add_patch(Patch("patch"))
patches.save()
patch = os.path.join(patches.dirname, "patch")
make_file(b"", patch)
class args:
next = True
patch = None
remove = True
backup = False
with tmp_mapping(os.environ) as env, \
tmp_mapping(vars(sys)) as tmp_sys:
env.set("QUILT_PATCHES", patches.dirname)
env.set("QUILT_PC", dir)
tmp_sys.set("stdout", cStringIO())
DeleteCommand().run(args)
self.assertFalse(os.path.exists(patch))
self.assertFalse(os.path.exists(patch + "~"))
def test_no_backup_named(self):
""" Remove a specified patch without leaving a backup """
with tmp_series() as [dir, patches]:
patches.add_patch(Patch("patch"))
patches.save()
patch = os.path.join(patches.dirname, "patch")
make_file(b"", patch)
class args:
patch = "patch"
next = False
remove = True
backup = False
with tmp_mapping(os.environ) as env, \
tmp_mapping(vars(sys)) as tmp_sys:
env.set("QUILT_PATCHES", patches.dirname)
env.set("QUILT_PC", dir)
tmp_sys.set("stdout", cStringIO())
DeleteCommand().run(args)
self.assertFalse(os.path.exists(patch))
self.assertFalse(os.path.exists(patch + "~"))
|
<commit_before><commit_msg>Test deletion with only unapplied patches, and without the --backup option<commit_after>
|
import os, os.path
from quilt.patch import Patch
from six.moves import cStringIO
import sys
from helpers import QuiltTest, make_file, tmp_mapping, tmp_series
from quilt.delete import Delete
from quilt.cli.delete import DeleteCommand
class Test(QuiltTest):
def test_next_first(self):
""" Delete the next patch with only unapplied patches """
with tmp_series() as [dir, patches]:
patches.add_patch(Patch("patch"))
patches.save()
cmd = Delete(dir, quilt_pc=dir, quilt_patches=patches.dirname)
cmd.delete_next()
patches.read()
self.assertTrue(patches.is_empty())
def test_no_backup_next(self):
""" Remove the next patch without leaving a backup """
with tmp_series() as [dir, patches]:
patches.add_patch(Patch("patch"))
patches.save()
patch = os.path.join(patches.dirname, "patch")
make_file(b"", patch)
class args:
next = True
patch = None
remove = True
backup = False
with tmp_mapping(os.environ) as env, \
tmp_mapping(vars(sys)) as tmp_sys:
env.set("QUILT_PATCHES", patches.dirname)
env.set("QUILT_PC", dir)
tmp_sys.set("stdout", cStringIO())
DeleteCommand().run(args)
self.assertFalse(os.path.exists(patch))
self.assertFalse(os.path.exists(patch + "~"))
def test_no_backup_named(self):
""" Remove a specified patch without leaving a backup """
with tmp_series() as [dir, patches]:
patches.add_patch(Patch("patch"))
patches.save()
patch = os.path.join(patches.dirname, "patch")
make_file(b"", patch)
class args:
patch = "patch"
next = False
remove = True
backup = False
with tmp_mapping(os.environ) as env, \
tmp_mapping(vars(sys)) as tmp_sys:
env.set("QUILT_PATCHES", patches.dirname)
env.set("QUILT_PC", dir)
tmp_sys.set("stdout", cStringIO())
DeleteCommand().run(args)
self.assertFalse(os.path.exists(patch))
self.assertFalse(os.path.exists(patch + "~"))
|
Test deletion with only unapplied patches, and without the --backup optionimport os, os.path
from quilt.patch import Patch
from six.moves import cStringIO
import sys
from helpers import QuiltTest, make_file, tmp_mapping, tmp_series
from quilt.delete import Delete
from quilt.cli.delete import DeleteCommand
class Test(QuiltTest):
def test_next_first(self):
""" Delete the next patch with only unapplied patches """
with tmp_series() as [dir, patches]:
patches.add_patch(Patch("patch"))
patches.save()
cmd = Delete(dir, quilt_pc=dir, quilt_patches=patches.dirname)
cmd.delete_next()
patches.read()
self.assertTrue(patches.is_empty())
def test_no_backup_next(self):
""" Remove the next patch without leaving a backup """
with tmp_series() as [dir, patches]:
patches.add_patch(Patch("patch"))
patches.save()
patch = os.path.join(patches.dirname, "patch")
make_file(b"", patch)
class args:
next = True
patch = None
remove = True
backup = False
with tmp_mapping(os.environ) as env, \
tmp_mapping(vars(sys)) as tmp_sys:
env.set("QUILT_PATCHES", patches.dirname)
env.set("QUILT_PC", dir)
tmp_sys.set("stdout", cStringIO())
DeleteCommand().run(args)
self.assertFalse(os.path.exists(patch))
self.assertFalse(os.path.exists(patch + "~"))
def test_no_backup_named(self):
""" Remove a specified patch without leaving a backup """
with tmp_series() as [dir, patches]:
patches.add_patch(Patch("patch"))
patches.save()
patch = os.path.join(patches.dirname, "patch")
make_file(b"", patch)
class args:
patch = "patch"
next = False
remove = True
backup = False
with tmp_mapping(os.environ) as env, \
tmp_mapping(vars(sys)) as tmp_sys:
env.set("QUILT_PATCHES", patches.dirname)
env.set("QUILT_PC", dir)
tmp_sys.set("stdout", cStringIO())
DeleteCommand().run(args)
self.assertFalse(os.path.exists(patch))
self.assertFalse(os.path.exists(patch + "~"))
|
<commit_before><commit_msg>Test deletion with only unapplied patches, and without the --backup option<commit_after>import os, os.path
from quilt.patch import Patch
from six.moves import cStringIO
import sys
from helpers import QuiltTest, make_file, tmp_mapping, tmp_series
from quilt.delete import Delete
from quilt.cli.delete import DeleteCommand
class Test(QuiltTest):
def test_next_first(self):
""" Delete the next patch with only unapplied patches """
with tmp_series() as [dir, patches]:
patches.add_patch(Patch("patch"))
patches.save()
cmd = Delete(dir, quilt_pc=dir, quilt_patches=patches.dirname)
cmd.delete_next()
patches.read()
self.assertTrue(patches.is_empty())
def test_no_backup_next(self):
""" Remove the next patch without leaving a backup """
with tmp_series() as [dir, patches]:
patches.add_patch(Patch("patch"))
patches.save()
patch = os.path.join(patches.dirname, "patch")
make_file(b"", patch)
class args:
next = True
patch = None
remove = True
backup = False
with tmp_mapping(os.environ) as env, \
tmp_mapping(vars(sys)) as tmp_sys:
env.set("QUILT_PATCHES", patches.dirname)
env.set("QUILT_PC", dir)
tmp_sys.set("stdout", cStringIO())
DeleteCommand().run(args)
self.assertFalse(os.path.exists(patch))
self.assertFalse(os.path.exists(patch + "~"))
def test_no_backup_named(self):
""" Remove a specified patch without leaving a backup """
with tmp_series() as [dir, patches]:
patches.add_patch(Patch("patch"))
patches.save()
patch = os.path.join(patches.dirname, "patch")
make_file(b"", patch)
class args:
patch = "patch"
next = False
remove = True
backup = False
with tmp_mapping(os.environ) as env, \
tmp_mapping(vars(sys)) as tmp_sys:
env.set("QUILT_PATCHES", patches.dirname)
env.set("QUILT_PC", dir)
tmp_sys.set("stdout", cStringIO())
DeleteCommand().run(args)
self.assertFalse(os.path.exists(patch))
self.assertFalse(os.path.exists(patch + "~"))
|
|
42f32ddcd33d1c8325400405b27e20a3cd54ce36
|
merge_in_place.py
|
merge_in_place.py
|
a = [1, 3, 8, 12, 15]
b = [4, 12, 17, 0, 0, 0, 0, 0]
i = len(a) - 1
j = len(b) - len(a) - 1
k = len(b) - 1
while i >= 0:
if j >= 0:
if a[i] > b[j]:
while i >= 0 and a[i] >= b[j]:
b[k] = a[i]
i -= 1
k -= 1
else:
while j >= 0 and a[i] < b[j]:
b[k] = b[j]
j -= 1
k -= 1
else:
while i >= 0:
b[k] = a[i]
i -= 1
k -= 1
print(b)
|
Add algorithm for merging arrays in place
|
Add algorithm for merging arrays in place
|
Python
|
mit
|
dnl-blkv/algorithms
|
Add algorithm for merging arrays in place
|
a = [1, 3, 8, 12, 15]
b = [4, 12, 17, 0, 0, 0, 0, 0]
i = len(a) - 1
j = len(b) - len(a) - 1
k = len(b) - 1
while i >= 0:
if j >= 0:
if a[i] > b[j]:
while i >= 0 and a[i] >= b[j]:
b[k] = a[i]
i -= 1
k -= 1
else:
while j >= 0 and a[i] < b[j]:
b[k] = b[j]
j -= 1
k -= 1
else:
while i >= 0:
b[k] = a[i]
i -= 1
k -= 1
print(b)
|
<commit_before><commit_msg>Add algorithm for merging arrays in place<commit_after>
|
a = [1, 3, 8, 12, 15]
b = [4, 12, 17, 0, 0, 0, 0, 0]
i = len(a) - 1
j = len(b) - len(a) - 1
k = len(b) - 1
while i >= 0:
if j >= 0:
if a[i] > b[j]:
while i >= 0 and a[i] >= b[j]:
b[k] = a[i]
i -= 1
k -= 1
else:
while j >= 0 and a[i] < b[j]:
b[k] = b[j]
j -= 1
k -= 1
else:
while i >= 0:
b[k] = a[i]
i -= 1
k -= 1
print(b)
|
Add algorithm for merging arrays in placea = [1, 3, 8, 12, 15]
b = [4, 12, 17, 0, 0, 0, 0, 0]
i = len(a) - 1
j = len(b) - len(a) - 1
k = len(b) - 1
while i >= 0:
if j >= 0:
if a[i] > b[j]:
while i >= 0 and a[i] >= b[j]:
b[k] = a[i]
i -= 1
k -= 1
else:
while j >= 0 and a[i] < b[j]:
b[k] = b[j]
j -= 1
k -= 1
else:
while i >= 0:
b[k] = a[i]
i -= 1
k -= 1
print(b)
|
<commit_before><commit_msg>Add algorithm for merging arrays in place<commit_after>a = [1, 3, 8, 12, 15]
b = [4, 12, 17, 0, 0, 0, 0, 0]
i = len(a) - 1
j = len(b) - len(a) - 1
k = len(b) - 1
while i >= 0:
if j >= 0:
if a[i] > b[j]:
while i >= 0 and a[i] >= b[j]:
b[k] = a[i]
i -= 1
k -= 1
else:
while j >= 0 and a[i] < b[j]:
b[k] = b[j]
j -= 1
k -= 1
else:
while i >= 0:
b[k] = a[i]
i -= 1
k -= 1
print(b)
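One way to package the in-place merge above as a reusable function; this is a sketch, not from the original commit, and it assumes b reserves exactly len(a) unused slots at its end:
def merge_into(a, b):
    # Merge sorted a into sorted b in place; the last len(a) slots of b
    # are buffer space. Fill b from the back, taking the larger tail value.
    i = len(a) - 1            # last real element of a
    j = len(b) - len(a) - 1   # last real element of b
    k = len(b) - 1            # next write position, from the end
    while i >= 0:
        if j >= 0 and b[j] > a[i]:
            b[k] = b[j]
            j -= 1
        else:
            b[k] = a[i]
            i -= 1
        k -= 1
    return b
print(merge_into([1, 3, 8, 12, 15], [4, 12, 17, 0, 0, 0, 0, 0]))
# -> [1, 3, 4, 8, 12, 12, 15, 17]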
|
|
b5b65ce4c7a592372690b6d1fecc68a6d3f33bfb
|
doc/example1.py
|
doc/example1.py
|
"""
Draw the example used in the README file.
"""
# Standard library modules.
# Third party modules.
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cbook as cbook
# Local modules.
from matplotlib_colorbar.colorbar import ColorBar
# Globals and constants variables.
plt.figure()
data = np.array(plt.imread(cbook.get_sample_data('grace_hopper.png')))
mappable = plt.imshow(data[..., 0], cmap='viridis')
colorbar = ColorBar(mappable, location='lower left')
colorbar.set_ticks([0.0, 0.5, 1.0])
plt.gca().add_artist(colorbar)
plt.savefig('example1.png', bbox_inches='tight')
|
Add script to generate README figure.
|
Add script to generate README figure.
|
Python
|
bsd-2-clause
|
ppinard/matplotlib-colorbar
|
Add script to generate README figure.
|
"""
Draw the example used in the README file.
"""
# Standard library modules.
# Third party modules.
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cbook as cbook
# Local modules.
from matplotlib_colorbar.colorbar import ColorBar
# Globals and constants variables.
plt.figure()
data = np.array(plt.imread(cbook.get_sample_data('grace_hopper.png')))
mappable = plt.imshow(data[..., 0], cmap='viridis')
colorbar = ColorBar(mappable, location='lower left')
colorbar.set_ticks([0.0, 0.5, 1.0])
plt.gca().add_artist(colorbar)
plt.savefig('example1.png', bbox_inches='tight')
|
<commit_before><commit_msg>Add script to generate README figure.<commit_after>
|
"""
Draw the example used in the README file.
"""
# Standard library modules.
# Third party modules.
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cbook as cbook
# Local modules.
from matplotlib_colorbar.colorbar import ColorBar
# Globals and constants variables.
plt.figure()
data = np.array(plt.imread(cbook.get_sample_data('grace_hopper.png')))
mappable = plt.imshow(data[..., 0], cmap='viridis')
colorbar = ColorBar(mappable, location='lower left')
colorbar.set_ticks([0.0, 0.5, 1.0])
plt.gca().add_artist(colorbar)
plt.savefig('example1.png', bbox_inches='tight')
|
Add script to generate README figure."""
Draw the example used in the README file.
"""
# Standard library modules.
# Third party modules.
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cbook as cbook
# Local modules.
from matplotlib_colorbar.colorbar import ColorBar
# Globals and constants variables.
plt.figure()
data = np.array(plt.imread(cbook.get_sample_data('grace_hopper.png')))
mappable = plt.imshow(data[..., 0], cmap='viridis')
colorbar = ColorBar(mappable, location='lower left')
colorbar.set_ticks([0.0, 0.5, 1.0])
plt.gca().add_artist(colorbar)
plt.savefig('example1.png', bbox_inches='tight')
|
<commit_before><commit_msg>Add script to generate README figure.<commit_after>"""
Draw the example used in the README file.
"""
# Standard library modules.
# Third party modules.
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cbook as cbook
# Local modules.
from matplotlib_colorbar.colorbar import ColorBar
# Globals and constants variables.
plt.figure()
data = np.array(plt.imread(cbook.get_sample_data('grace_hopper.png')))
mappable = plt.imshow(data[..., 0], cmap='viridis')
colorbar = ColorBar(mappable, location='lower left')
colorbar.set_ticks([0.0, 0.5, 1.0])
plt.gca().add_artist(colorbar)
plt.savefig('example1.png', bbox_inches='tight')
|
|
ffb8292bc4d91771458126677016d8f883c29bbf
|
tests/test_config.py
|
tests/test_config.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
from os import path
import unittest
import click
from click.testing import CliRunner
import mock
import yaml
from tldr.config import get_config
from tldr import cli
class TestConfig(unittest.TestCase):
def setUp(self):
self.config_path = path.join(path.expanduser('~'), '.tldrrc')
if path.exists(self.config_path):
os.remove(self.config_path)
self.runner = CliRunner()
with mock.patch('click.prompt', side_effect=['/tmp/tldr', 'linux']):
result = self.runner.invoke(cli.init)
def tearDown(self):
if path.exists(self.config_path):
os.remove(self.config_path)
def test_config_not_exist(self):
with mock.patch('os.path.exists', side_effect=[False, True]):
with self.assertRaises(SystemExit) as error:
get_config()
assert error.exception.message == (
("Can't find config file at: {0}. You may use `tldr init` to "
"init the config file.").format(self.config_path)
)
def test_invalid_yaml_file(self):
with mock.patch('__builtin__.open',
mock.mock_open(read_data="%YAML:1.0\nname:jhon")):
with self.assertRaises(SystemExit) as error:
get_config()
assert error.exception.message == (
"The config file is not a valid YAML file."
)
def test_unsupported_color_in_config(self):
mock_config = {
'colors': {
'command': 'indigo',
'description': 'orange',
'usage': 'green'
},
'platform': 'linux',
'repo_directory': '/tmp/tldr'
}
with mock.patch('yaml.safe_load', return_value=mock_config):
with self.assertRaises(SystemExit) as error:
get_config()
assert error.exception.message == (
"Unsupported colors in config file: orange, indigo."
)
def test_repo_directory_not_exist(self):
with mock.patch('os.path.exists', side_effect=[True, False]):
with self.assertRaises(SystemExit) as error:
get_config()
assert error.exception.message == (
"Can't find the tldr repo, check the `repo_direcotry` "
"setting in config file."
)
|
Add test for the config file
|
Add test for the config file
|
Python
|
mit
|
lord63/tldr.py
|
Add test for the config file
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
from os import path
import unittest
import click
from click.testing import CliRunner
import mock
import yaml
from tldr.config import get_config
from tldr import cli
class TestConfig(unittest.TestCase):
def setUp(self):
self.config_path = path.join(path.expanduser('~'), '.tldrrc')
if path.exists(self.config_path):
os.remove(self.config_path)
self.runner = CliRunner()
with mock.patch('click.prompt', side_effect=['/tmp/tldr', 'linux']):
result = self.runner.invoke(cli.init)
def tearDown(self):
if path.exists(self.config_path):
os.remove(self.config_path)
def test_config_not_exist(self):
with mock.patch('os.path.exists', side_effect=[False, True]):
with self.assertRaises(SystemExit) as error:
get_config()
assert error.exception.message == (
("Can't find config file at: {0}. You may use `tldr init` to "
"init the config file.").format(self.config_path)
)
def test_invalid_yaml_file(self):
with mock.patch('__builtin__.open',
mock.mock_open(read_data="%YAML:1.0\nname:jhon")):
with self.assertRaises(SystemExit) as error:
get_config()
assert error.exception.message == (
"The config file is not a valid YAML file."
)
def test_unsupported_color_in_config(self):
mock_config = {
'colors': {
'command': 'indigo',
'description': 'orange',
'usage': 'green'
},
'platform': 'linux',
'repo_directory': '/tmp/tldr'
}
with mock.patch('yaml.safe_load', return_value=mock_config):
with self.assertRaises(SystemExit) as error:
get_config()
assert error.exception.message == (
"Unsupported colors in config file: orange, indigo."
)
def test_repo_directory_not_exist(self):
with mock.patch('os.path.exists', side_effect=[True, False]):
with self.assertRaises(SystemExit) as error:
get_config()
assert error.exception.message == (
"Can't find the tldr repo, check the `repo_direcotry` "
"setting in config file."
)
|
<commit_before><commit_msg>Add test for the config file<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
from os import path
import unittest
import click
from click.testing import CliRunner
import mock
import yaml
from tldr.config import get_config
from tldr import cli
class TestConfig(unittest.TestCase):
def setUp(self):
self.config_path = path.join(path.expanduser('~'), '.tldrrc')
if path.exists(self.config_path):
os.remove(self.config_path)
self.runner = CliRunner()
with mock.patch('click.prompt', side_effect=['/tmp/tldr', 'linux']):
result = self.runner.invoke(cli.init)
def tearDown(self):
if path.exists(self.config_path):
os.remove(self.config_path)
def test_config_not_exist(self):
with mock.patch('os.path.exists', side_effect=[False, True]):
with self.assertRaises(SystemExit) as error:
get_config()
assert error.exception.message == (
("Can't find config file at: {0}. You may use `tldr init` to "
"init the config file.").format(self.config_path)
)
def test_invalid_yaml_file(self):
with mock.patch('__builtin__.open',
mock.mock_open(read_data="%YAML:1.0\nname:jhon")):
with self.assertRaises(SystemExit) as error:
get_config()
assert error.exception.message == (
"The config file is not a valid YAML file."
)
def test_unsupported_color_in_config(self):
mock_config = {
'colors': {
'command': 'indigo',
'description': 'orange',
'usage': 'green'
},
'platform': 'linux',
'repo_directory': '/tmp/tldr'
}
with mock.patch('yaml.safe_load', return_value=mock_config):
with self.assertRaises(SystemExit) as error:
get_config()
assert error.exception.message == (
"Unsupported colors in config file: orange, indigo."
)
def test_repo_directory_not_exist(self):
with mock.patch('os.path.exists', side_effect=[True, False]):
with self.assertRaises(SystemExit) as error:
get_config()
assert error.exception.message == (
"Can't find the tldr repo, check the `repo_direcotry` "
"setting in config file."
)
|
Add test for the config file#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
from os import path
import unittest
import click
from click.testing import CliRunner
import mock
import yaml
from tldr.config import get_config
from tldr import cli
class TestConfig(unittest.TestCase):
def setUp(self):
self.config_path = path.join(path.expanduser('~'), '.tldrrc')
if path.exists(self.config_path):
os.remove(self.config_path)
self.runner = CliRunner()
with mock.patch('click.prompt', side_effect=['/tmp/tldr', 'linux']):
result = self.runner.invoke(cli.init)
def tearDown(self):
if path.exists(self.config_path):
os.remove(self.config_path)
def test_config_not_exist(self):
with mock.patch('os.path.exists', side_effect=[False, True]):
with self.assertRaises(SystemExit) as error:
get_config()
assert error.exception.message == (
("Can't find config file at: {0}. You may use `tldr init` to "
"init the config file.").format(self.config_path)
)
def test_invalid_yaml_file(self):
with mock.patch('__builtin__.open',
mock.mock_open(read_data="%YAML:1.0\nname:jhon")):
with self.assertRaises(SystemExit) as error:
get_config()
assert error.exception.message == (
"The config file is not a valid YAML file."
)
def test_unsupported_color_in_config(self):
mock_config = {
'colors': {
'command': 'indigo',
'description': 'orange',
'usage': 'green'
},
'platform': 'linux',
'repo_directory': '/tmp/tldr'
}
with mock.patch('yaml.safe_load', return_value=mock_config):
with self.assertRaises(SystemExit) as error:
get_config()
assert error.exception.message == (
"Unsupported colors in config file: orange, indigo."
)
def test_repo_directory_not_exist(self):
with mock.patch('os.path.exists', side_effect=[True, False]):
with self.assertRaises(SystemExit) as error:
get_config()
assert error.exception.message == (
"Can't find the tldr repo, check the `repo_direcotry` "
"setting in config file."
)
|
<commit_before><commit_msg>Add test for the config file<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
from os import path
import unittest
import click
from click.testing import CliRunner
import mock
import yaml
from tldr.config import get_config
from tldr import cli
class TestConfig(unittest.TestCase):
def setUp(self):
self.config_path = path.join(path.expanduser('~'), '.tldrrc')
if path.exists(self.config_path):
os.remove(self.config_path)
self.runner = CliRunner()
with mock.patch('click.prompt', side_effect=['/tmp/tldr', 'linux']):
result = self.runner.invoke(cli.init)
def tearDown(self):
if path.exists(self.config_path):
os.remove(self.config_path)
def test_config_not_exist(self):
with mock.patch('os.path.exists', side_effect=[False, True]):
with self.assertRaises(SystemExit) as error:
get_config()
assert error.exception.message == (
("Can't find config file at: {0}. You may use `tldr init` to "
"init the config file.").format(self.config_path)
)
def test_invalid_yaml_file(self):
with mock.patch('__builtin__.open',
mock.mock_open(read_data="%YAML:1.0\nname:jhon")):
with self.assertRaises(SystemExit) as error:
get_config()
assert error.exception.message == (
"The config file is not a valid YAML file."
)
def test_unsupported_color_in_config(self):
mock_config = {
'colors': {
'command': 'indigo',
'description': 'orange',
'usage': 'green'
},
'platform': 'linux',
'repo_directory': '/tmp/tldr'
}
with mock.patch('yaml.safe_load', return_value=mock_config):
with self.assertRaises(SystemExit) as error:
get_config()
assert error.exception.message == (
"Unsupported colors in config file: orange, indigo."
)
def test_repo_directory_not_exist(self):
with mock.patch('os.path.exists', side_effect=[True, False]):
with self.assertRaises(SystemExit) as error:
get_config()
assert error.exception.message == (
"Can't find the tldr repo, check the `repo_direcotry` "
"setting in config file."
)
|
|
4bb1c93f1e88b6472c1e3ec058a52156160a0aaf
|
dipy/reconst/tests/test_dsi.py
|
dipy/reconst/tests/test_dsi.py
|
import numpy as np
from nose.tools import assert_true, assert_false, assert_equal, assert_raises
from numpy.testing import assert_array_equal, assert_array_almost_equal
import nibabel as nib
from dipy.data import get_data, get_sphere
from dipy.reconst.recspeed import peak_finding
from dipy.reconst.gqi import GeneralizedQSampling
from dipy.reconst.dsi import DiffusionSpectrumImaging
def test_dandelion():
fimg,fbvals,fbvecs=get_data('small_101D')
bvals=np.loadtxt(fbvals)
gradients=np.loadtxt(fbvecs).T
data=nib.load(fimg).get_data()
"""
print(bvals.shape, gradients.shape, data.shape)
sd=SphericalDandelion(data,bvals,gradients)
sdf=sd.spherical_diffusivity(data[5,5,5])
XA=sd.xa()
np.set_printoptions(2)
print XA.min(),XA.max(),XA.mean()
print sdf*10**4
"""
|
TEST added test for dsi
|
TEST added test for dsi
|
Python
|
bsd-3-clause
|
rfdougherty/dipy,oesteban/dipy,nilgoyyou/dipy,samuelstjean/dipy,matthieudumont/dipy,sinkpoint/dipy,beni55/dipy,mdesco/dipy,oesteban/dipy,samuelstjean/dipy,rfdougherty/dipy,JohnGriffiths/dipy,jyeatman/dipy,maurozucchelli/dipy,StongeEtienne/dipy,Messaoud-Boudjada/dipy,villalonreina/dipy,samuelstjean/dipy,demianw/dipy,nilgoyyou/dipy,maurozucchelli/dipy,FrancoisRheaultUS/dipy,JohnGriffiths/dipy,StongeEtienne/dipy,sinkpoint/dipy,mdesco/dipy,Messaoud-Boudjada/dipy,maurozucchelli/dipy,villalonreina/dipy,jyeatman/dipy,FrancoisRheaultUS/dipy,beni55/dipy,demianw/dipy,matthieudumont/dipy
|
TEST added test for dsi
|
import numpy as np
from nose.tools import assert_true, assert_false, assert_equal, assert_raises
from numpy.testing import assert_array_equal, assert_array_almost_equal
import nibabel as nib
from dipy.data import get_data, get_sphere
from dipy.reconst.recspeed import peak_finding
from dipy.reconst.gqi import GeneralizedQSampling
from dipy.reconst.dsi import DiffusionSpectrumImaging
def test_dandelion():
fimg,fbvals,fbvecs=get_data('small_101D')
bvals=np.loadtxt(fbvals)
gradients=np.loadtxt(fbvecs).T
data=nib.load(fimg).get_data()
"""
print(bvals.shape, gradients.shape, data.shape)
sd=SphericalDandelion(data,bvals,gradients)
sdf=sd.spherical_diffusivity(data[5,5,5])
XA=sd.xa()
np.set_printoptions(2)
print XA.min(),XA.max(),XA.mean()
print sdf*10**4
"""
|
<commit_before><commit_msg>TEST added test for dsi<commit_after>
|
import numpy as np
from nose.tools import assert_true, assert_false, assert_equal, assert_raises
from numpy.testing import assert_array_equal, assert_array_almost_equal
import nibabel as nib
from dipy.data import get_data, get_sphere
from dipy.reconst.recspeed import peak_finding
from dipy.reconst.gqi import GeneralizedQSampling
from dipy.reconst.dsi import DiffusionSpectrumImaging
def test_dandelion():
fimg,fbvals,fbvecs=get_data('small_101D')
bvals=np.loadtxt(fbvals)
gradients=np.loadtxt(fbvecs).T
data=nib.load(fimg).get_data()
"""
print(bvals.shape, gradients.shape, data.shape)
sd=SphericalDandelion(data,bvals,gradients)
sdf=sd.spherical_diffusivity(data[5,5,5])
XA=sd.xa()
np.set_printoptions(2)
print XA.min(),XA.max(),XA.mean()
print sdf*10**4
"""
|
TEST added test for dsiimport numpy as np
from nose.tools import assert_true, assert_false, assert_equal, assert_raises
from numpy.testing import assert_array_equal, assert_array_almost_equal
import nibabel as nib
from dipy.data import get_data, get_sphere
from dipy.reconst.recspeed import peak_finding
from dipy.reconst.gqi import GeneralizedQSampling
from dipy.reconst.dsi import DiffusionSpectrumImaging
def test_dandelion():
fimg,fbvals,fbvecs=get_data('small_101D')
bvals=np.loadtxt(fbvals)
gradients=np.loadtxt(fbvecs).T
data=nib.load(fimg).get_data()
"""
print(bvals.shape, gradients.shape, data.shape)
sd=SphericalDandelion(data,bvals,gradients)
sdf=sd.spherical_diffusivity(data[5,5,5])
XA=sd.xa()
np.set_printoptions(2)
print XA.min(),XA.max(),XA.mean()
print sdf*10**4
"""
|
<commit_before><commit_msg>TEST added test for dsi<commit_after>import numpy as np
from nose.tools import assert_true, assert_false, assert_equal, assert_raises
from numpy.testing import assert_array_equal, assert_array_almost_equal
import nibabel as nib
from dipy.data import get_data, get_sphere
from dipy.reconst.recspeed import peak_finding
from dipy.reconst.gqi import GeneralizedQSampling
from dipy.reconst.dsi import DiffusionSpectrumImaging
def test_dandelion():
fimg,fbvals,fbvecs=get_data('small_101D')
bvals=np.loadtxt(fbvals)
gradients=np.loadtxt(fbvecs).T
data=nib.load(fimg).get_data()
"""
print(bvals.shape, gradients.shape, data.shape)
sd=SphericalDandelion(data,bvals,gradients)
sdf=sd.spherical_diffusivity(data[5,5,5])
XA=sd.xa()
np.set_printoptions(2)
print XA.min(),XA.max(),XA.mean()
print sdf*10**4
"""
|
|
dc6b2014b7b8cefeb1314a09184cfc2755501735
|
CodeFights/deleteDigit.py
|
CodeFights/deleteDigit.py
|
#!/usr/local/bin/python
# Code Fights Delete Digit Problem
def deleteDigit(n):
s = str(n)
return max(int(''.join(s[:i] + s[i + 1:])) for i in range(len(s)))
# idxs = [i for i in range(len(s) - 1) if int(s[i]) < int(s[i + 1])]
# if idxs:
# return int(s[:idxs[0]] + s[idxs[0] + 1:])
# else:
# return int(s[:-1])
def main():
tests = [
[152, 52],
[1001, 101],
[10, 1],
[222219, 22229]
]
for t in tests:
res = deleteDigit(t[0])
ans = t[1]
if ans == res:
print("PASSED: deleteDigit({}) returned {}"
.format(t[0], res))
else:
print("FAILED: deleteDigit({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights delete digit problem
|
Solve Code Fights delete digit problem
|
Python
|
mit
|
HKuz/Test_Code
|
Solve Code Fights delete digit problem
|
#!/usr/local/bin/python
# Code Fights Delete Digit Problem
def deleteDigit(n):
s = str(n)
return max(int(''.join(s[:i] + s[i + 1:])) for i in range(len(s)))
# idxs = [i for i in range(len(s) - 1) if int(s[i]) < int(s[i + 1])]
# if idxs:
# return int(s[:idxs[0]] + s[idxs[0] + 1:])
# else:
# return int(s[:-1])
def main():
tests = [
[152, 52],
[1001, 101],
[10, 1],
[222219, 22229]
]
for t in tests:
res = deleteDigit(t[0])
ans = t[1]
if ans == res:
print("PASSED: deleteDigit({}) returned {}"
.format(t[0], res))
else:
print("FAILED: deleteDigit({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights delete digit problem<commit_after>
|
#!/usr/local/bin/python
# Code Fights Delete Digit Problem
def deleteDigit(n):
s = str(n)
return max(int(''.join(s[:i] + s[i + 1:])) for i in range(len(s)))
# idxs = [i for i in range(len(s) - 1) if int(s[i]) < int(s[i + 1])]
# if idxs:
# return int(s[:idxs[0]] + s[idxs[0] + 1:])
# else:
# return int(s[:-1])
def main():
tests = [
[152, 52],
[1001, 101],
[10, 1],
[222219, 22229]
]
for t in tests:
res = deleteDigit(t[0])
ans = t[1]
if ans == res:
print("PASSED: deleteDigit({}) returned {}"
.format(t[0], res))
else:
print("FAILED: deleteDigit({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights delete digit problem#!/usr/local/bin/python
# Code Fights Delete Digit Problem
def deleteDigit(n):
s = str(n)
return max(int(''.join(s[:i] + s[i + 1:])) for i in range(len(s)))
# idxs = [i for i in range(len(s) - 1) if int(s[i]) < int(s[i + 1])]
# if idxs:
# return int(s[:idxs[0]] + s[idxs[0] + 1:])
# else:
# return int(s[:-1])
def main():
tests = [
[152, 52],
[1001, 101],
[10, 1],
[222219, 22229]
]
for t in tests:
res = deleteDigit(t[0])
ans = t[1]
if ans == res:
print("PASSED: deleteDigit({}) returned {}"
.format(t[0], res))
else:
print("FAILED: deleteDigit({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights delete digit problem<commit_after>#!/usr/local/bin/python
# Code Fights Delete Digit Problem
def deleteDigit(n):
s = str(n)
return max(int(''.join(s[:i] + s[i + 1:])) for i in range(len(s)))
# idxs = [i for i in range(len(s) - 1) if int(s[i]) < int(s[i + 1])]
# if idxs:
# return int(s[:idxs[0]] + s[idxs[0] + 1:])
# else:
# return int(s[:-1])
def main():
tests = [
[152, 52],
[1001, 101],
[10, 1],
[222219, 22229]
]
for t in tests:
res = deleteDigit(t[0])
ans = t[1]
if ans == res:
print("PASSED: deleteDigit({}) returned {}"
.format(t[0], res))
else:
print("FAILED: deleteDigit({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()
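The commented-out lines above sketch a linear-time greedy alternative: drop the first digit that is smaller than its successor, or the last digit if the digits never increase. A standalone version of that idea (an illustration, not part of the original solution), checked against the same test values:
def delete_digit_greedy(n):
    # Character comparison works here because ASCII order matches digit order.
    s = str(n)
    for i in range(len(s) - 1):
        if s[i] < s[i + 1]:
            return int(s[:i] + s[i + 1:])
    return int(s[:-1])  # digits never increase: dropping the last is best
for value, expected in [(152, 52), (1001, 101), (10, 1), (222219, 22229)]:
    assert delete_digit_greedy(value) == expected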
|
|
cb7515358fcc9ee2daf030c5eb0ffa68db0d95d3
|
test/test_nap_inherit.py
|
test/test_nap_inherit.py
|
"""
Tests for nap module.
These tests only focus that requests is called properly.
Everything related to HTTP requests should be tested in requests' own tests.
"""
from mock import MagicMock, patch
import unittest
import requests
from nap.api import Api
class NewApi(Api):
def before_request(self, method, request_kwargs):
request_kwargs['test'] = 'test'
return request_kwargs
def after_request(self, response):
return None
def default_kwargs(self):
# Remove all default kwargs
return {}
class TestNapInherit(unittest.TestCase):
@patch('requests.request')
def test_before_request_and_after_request(self, r_request):
"""Test overriding before_request() and after_request()"""
api = NewApi('')
r_request = MagicMock(return_value=1)
# Make sure defaults are passed for each request
response = api.resource.get()
requests.request.assert_called_with(
'GET',
'/resource',
test='test'
)
# Mocker will return 1 to after_request and we have modified it
# to return just None
self.assertEquals(
response,
None,
'after_request overriding not working'
)
@patch('requests.request')
def test_default_kwargs(self, r_request):
"""Test overriding default_kwargs()"""
# We give default arguments, but the overriding implementation
# of default_kwargs() method should throw them away
api = NewApi('', auth=('user', 'pass'))
r_request = MagicMock(return_value=1)
# Make sure defaults were removed from kwargs
api.resource.get()
requests.request.assert_called_with(
'GET',
'/resource',
# This will still be in kwargs because manipulation
# of default_kwargs and request_kwargs are separated
test='test'
)
|
Add tests for inheriting Nap class
|
Add tests for inheriting Nap class
|
Python
|
mit
|
kimmobrunfeldt/nap
|
Add tests for inheriting Nap class
|
"""
Tests for nap module.
These tests only focus that requests is called properly.
Everything related to HTTP requests should be tested in requests' own tests.
"""
from mock import MagicMock, patch
import unittest
import requests
from nap.api import Api
class NewApi(Api):
def before_request(self, method, request_kwargs):
request_kwargs['test'] = 'test'
return request_kwargs
def after_request(self, response):
return None
def default_kwargs(self):
# Remove all default kwargs
return {}
class TestNapInherit(unittest.TestCase):
@patch('requests.request')
def test_before_request_and_after_request(self, r_request):
"""Test overriding before_request() and after_request()"""
api = NewApi('')
r_request = MagicMock(return_value=1)
# Make sure defaults are passed for each request
response = api.resource.get()
requests.request.assert_called_with(
'GET',
'/resource',
test='test'
)
# Mocker will return 1 to after_request and we have modified it
# to return just None
self.assertEquals(
response,
None,
'after_request overriding not working'
)
@patch('requests.request')
def test_default_kwargs(self, r_request):
"""Test overriding default_kwargs()"""
# We give default arguments, but the overriding implementation
# of default_kwargs() method should throw them away
api = NewApi('', auth=('user', 'pass'))
r_request = MagicMock(return_value=1)
# Make sure defaults were removed from kwargs
api.resource.get()
requests.request.assert_called_with(
'GET',
'/resource',
# This will still be in kwargs because manipulation
# of default_kwargs and request_kwargs are separated
test='test'
)
|
<commit_before><commit_msg>Add tests for inheriting Nap class<commit_after>
|
"""
Tests for nap module.
These tests only focus that requests is called properly.
Everything related to HTTP requests should be tested in requests' own tests.
"""
from mock import MagicMock, patch
import unittest
import requests
from nap.api import Api
class NewApi(Api):
def before_request(self, method, request_kwargs):
request_kwargs['test'] = 'test'
return request_kwargs
def after_request(self, response):
return None
def default_kwargs(self):
# Remove all default kwargs
return {}
class TestNapInherit(unittest.TestCase):
@patch('requests.request')
def test_before_request_and_after_request(self, r_request):
"""Test overriding before_request() and after_request()"""
api = NewApi('')
r_request = MagicMock(return_value=1)
# Make sure defaults are passed for each request
response = api.resource.get()
requests.request.assert_called_with(
'GET',
'/resource',
test='test'
)
# Mocker will return 1 to after_request and we have modified it
# to return just None
self.assertEquals(
response,
None,
'after_request overriding not working'
)
@patch('requests.request')
def test_default_kwargs(self, r_request):
"""Test overriding default_kwargs()"""
# We give default arguments, but the overriding implementation
# of default_kwargs() method should throw them away
api = NewApi('', auth=('user', 'pass'))
r_request = MagicMock(return_value=1)
# Make sure defaults were removed from kwargs
api.resource.get()
requests.request.assert_called_with(
'GET',
'/resource',
# This will still be in kwargs because manipulation
# of default_kwargs and request_kwargs are separated
test='test'
)
|
Add tests for inheriting Nap class"""
Tests for nap module.
These tests only focus that requests is called properly.
Everything related to HTTP requests should be tested in requests' own tests.
"""
from mock import MagicMock, patch
import unittest
import requests
from nap.api import Api
class NewApi(Api):
def before_request(self, method, request_kwargs):
request_kwargs['test'] = 'test'
return request_kwargs
def after_request(self, response):
return None
def default_kwargs(self):
# Remove all default kwargs
return {}
class TestNapInherit(unittest.TestCase):
@patch('requests.request')
def test_before_request_and_after_request(self, r_request):
"""Test overriding before_request() and after_request()"""
api = NewApi('')
r_request = MagicMock(return_value=1)
# Make sure defaults are passed for each request
response = api.resource.get()
requests.request.assert_called_with(
'GET',
'/resource',
test='test'
)
# Mocker will return 1 to after_request and we have modified it
# to return just None
self.assertEquals(
response,
None,
'after_request overriding not working'
)
@patch('requests.request')
def test_default_kwargs(self, r_request):
"""Test overriding default_kwargs()"""
# We give default arguments, but the overriding implementation
# of default_kwargs() method should throw them away
api = NewApi('', auth=('user', 'pass'))
r_request = MagicMock(return_value=1)
# Make sure defaults were removed from kwargs
api.resource.get()
requests.request.assert_called_with(
'GET',
'/resource',
# This will still be in kwargs because manipulation
# of default_kwargs and request_kwargs are separated
test='test'
)
|
<commit_before><commit_msg>Add tests for inheriting Nap class<commit_after>"""
Tests for nap module.
These tests only focus that requests is called properly.
Everything related to HTTP requests should be tested in requests' own tests.
"""
from mock import MagicMock, patch
import unittest
import requests
from nap.api import Api
class NewApi(Api):
def before_request(self, method, request_kwargs):
request_kwargs['test'] = 'test'
return request_kwargs
def after_request(self, response):
return None
def default_kwargs(self):
# Remove all default kwargs
return {}
class TestNapInherit(unittest.TestCase):
@patch('requests.request')
def test_before_request_and_after_request(self, r_request):
"""Test overriding before_request() and after_request()"""
api = NewApi('')
r_request = MagicMock(return_value=1)
# Make sure defaults are passed for each request
response = api.resource.get()
requests.request.assert_called_with(
'GET',
'/resource',
test='test'
)
# Mocker will return 1 to after_request and we have modified it
# to return just None
self.assertEquals(
response,
None,
'after_request overriding not working'
)
@patch('requests.request')
def test_default_kwargs(self, r_request):
"""Test overriding default_kwargs()"""
# We give default arguments, but the overriding implementation
# of default_kwargs() method should throw them away
api = NewApi('', auth=('user', 'pass'))
r_request = MagicMock(return_value=1)
# Make sure defaults were removed from kwargs
api.resource.get()
requests.request.assert_called_with(
'GET',
'/resource',
# This will still be in kwargs because manipulation
# of default_kwargs and request_kwargs are separated
test='test'
)
|
|
984dd9d20814e3190ee197b47c756f2b8f4ecb52
|
django_prometheus/testutils.py
|
django_prometheus/testutils.py
|
from prometheus_client import REGISTRY
METRIC_EQUALS_ERR_EXPLANATION = """
%s%s = %s, expected %s.
The values for %s are:
%s"""
class PrometheusTestCaseMixin(object):
"""A collection of utilities that make it easier to write test cases
that interact with metrics.
"""
def setUp(self):
self.clearRegistry()
def clearRegistry(self):
"""Resets the values of all collectors in the global registry.
This is so we can test the value of exported metrics in unit
tests.
This is quite a hack since it relies on the internal
representation of the prometheus_client, and it should
probably be provided as a function there instead.
"""
with REGISTRY._lock:
for c in REGISTRY._collectors:
if hasattr(c, '_metrics'):
c._metrics = {}
if hasattr(c, '_value'):
c._value = 0.0
if hasattr(c, '_count'):
c._count = 0.0
if hasattr(c, '_sum'):
c._sum = 0.0
if hasattr(c, '_buckets'):
c._buckets = [0.0] * len(c._buckets)
def getMetric(self, metric_name, **labels):
return REGISTRY.get_sample_value(metric_name, labels=labels)
def getMetricVector(self, metric_name):
"""Returns the values for all labels of a given metric.
The result is returned as a list of (labels, value) tuples,
where `labels` is a dict.
This is quite a hack since it relies on the internal
representation of the prometheus_client, and it should
probably be provided as a function there instead.
"""
all_metrics = REGISTRY.collect()
output = []
for metric in all_metrics:
for n, l, value in metric._samples:
if n == metric_name:
output.append((l, value))
return output
def formatLabels(self, labels):
"""Format a set of labels to Prometheus representation.
In:
{'method': 'GET', 'port': '80'}
Out:
'{method="GET",port="80"}'
"""
return '{%s}' % ','.join([
'%s="%s"' % (k, v) for k, v in labels.items()])
def formatVector(self, vector):
"""Formats a list of (labels, value) where labels is a dict into a
human-readable representation.
"""
return '\n'.join([
'%s = %s' % (self.formatLabels(labels), value)
for labels, value in vector])
def assertMetricEquals(self, expected_value, metric_name, **labels):
"""Asserts that metric_name{**labels} == expected_value."""
value = self.getMetric(metric_name, **labels)
self.assertEqual(
expected_value, value, METRIC_EQUALS_ERR_EXPLANATION % (
metric_name, self.formatLabels(labels), value,
expected_value, metric_name,
self.formatVector(self.getMetricVector(metric_name))))
|
Add a mixin to test exported metrics.
|
Add a mixin to test exported metrics.
Most of this mixin should be moved to prometheus_client eventually,
since it relies heavily on its internals.
|
Python
|
apache-2.0
|
wangwanzhong/django-prometheus,obytes/django-prometheus,obytes/django-prometheus,wangwanzhong/django-prometheus,korfuri/django-prometheus,DingaGa/django-prometheus,DingaGa/django-prometheus,korfuri/django-prometheus
|
Add a mixin to test exported metrics.
Most of this mixin should be moved to prometheus_client eventually,
since it relies heavily on its internals.
|
from prometheus_client import REGISTRY
METRIC_EQUALS_ERR_EXPLANATION = """
%s%s = %s, expected %s.
The values for %s are:
%s"""
class PrometheusTestCaseMixin(object):
"""A collection of utilities that make it easier to write test cases
that interact with metrics.
"""
def setUp(self):
self.clearRegistry()
def clearRegistry(self):
"""Resets the values of all collectors in the global registry.
This is so we can test the value of exported metrics in unit
tests.
This is quite a hack since it relies on the internal
representation of the prometheus_client, and it should
probably be provided as a function there instead.
"""
with REGISTRY._lock:
for c in REGISTRY._collectors:
if hasattr(c, '_metrics'):
c._metrics = {}
if hasattr(c, '_value'):
c._value = 0.0
if hasattr(c, '_count'):
c._count = 0.0
if hasattr(c, '_sum'):
c._sum = 0.0
if hasattr(c, '_buckets'):
c._buckets = [0.0] * len(c._buckets)
def getMetric(self, metric_name, **labels):
return REGISTRY.get_sample_value(metric_name, labels=labels)
def getMetricVector(self, metric_name):
"""Returns the values for all labels of a given metric.
The result is returned as a list of (labels, value) tuples,
where `labels` is a dict.
This is quite a hack since it relies on the internal
representation of the prometheus_client, and it should
probably be provided as a function there instead.
"""
all_metrics = REGISTRY.collect()
output = []
for metric in all_metrics:
for n, l, value in metric._samples:
if n == metric_name:
output.append((l, value))
return output
def formatLabels(self, labels):
"""Format a set of labels to Prometheus representation.
In:
{'method': 'GET', 'port': '80'}
Out:
'{method="GET",port="80"}'
"""
return '{%s}' % ','.join([
'%s="%s"' % (k, v) for k, v in labels.items()])
def formatVector(self, vector):
"""Formats a list of (labels, value) where labels is a dict into a
human-readable representation.
"""
return '\n'.join([
'%s = %s' % (self.formatLabels(labels), value)
for labels, value in vector])
def assertMetricEquals(self, expected_value, metric_name, **labels):
"""Asserts that metric_name{**labels} == expected_value."""
value = self.getMetric(metric_name, **labels)
self.assertEqual(
expected_value, value, METRIC_EQUALS_ERR_EXPLANATION % (
metric_name, self.formatLabels(labels), value,
expected_value, metric_name,
self.formatVector(self.getMetricVector(metric_name))))
|
<commit_before><commit_msg>Add a mixin to test exported metrics.
Most of this mixin should be moved to prometheus_client eventually,
since it relies heavily on its internals.<commit_after>
|
from prometheus_client import REGISTRY
METRIC_EQUALS_ERR_EXPLANATION = """
%s%s = %s, expected %s.
The values for %s are:
%s"""
class PrometheusTestCaseMixin(object):
"""A collection of utilities that make it easier to write test cases
that interact with metrics.
"""
def setUp(self):
self.clearRegistry()
def clearRegistry(self):
"""Resets the values of all collectors in the global registry.
This is so we can test the value of exported metrics in unit
tests.
This is quite a hack since it relies on the internal
representation of the prometheus_client, and it should
probably be provided as a function there instead.
"""
with REGISTRY._lock:
for c in REGISTRY._collectors:
if hasattr(c, '_metrics'):
c._metrics = {}
if hasattr(c, '_value'):
c._value = 0.0
if hasattr(c, '_count'):
c._count = 0.0
if hasattr(c, '_sum'):
c._sum = 0.0
if hasattr(c, '_buckets'):
c._buckets = [0.0] * len(c._buckets)
def getMetric(self, metric_name, **labels):
return REGISTRY.get_sample_value(metric_name, labels=labels)
def getMetricVector(self, metric_name):
"""Returns the values for all labels of a given metric.
The result is returned as a list of (labels, value) tuples,
where `labels` is a dict.
This is quite a hack since it relies on the internal
representation of the prometheus_client, and it should
probably be provided as a function there instead.
"""
all_metrics = REGISTRY.collect()
output = []
for metric in all_metrics:
for n, l, value in metric._samples:
if n == metric_name:
output.append((l, value))
return output
def formatLabels(self, labels):
"""Format a set of labels to Prometheus representation.
In:
{'method': 'GET', 'port': '80'}
Out:
'{method="GET",port="80"}'
"""
return '{%s}' % ','.join([
'%s="%s"' % (k, v) for k, v in labels.items()])
def formatVector(self, vector):
"""Formats a list of (labels, value) where labels is a dict into a
human-readable representation.
"""
return '\n'.join([
'%s = %s' % (self.formatLabels(labels), value)
for labels, value in vector])
def assertMetricEquals(self, expected_value, metric_name, **labels):
"""Asserts that metric_name{**labels} == expected_value."""
value = self.getMetric(metric_name, **labels)
self.assertEqual(
expected_value, value, METRIC_EQUALS_ERR_EXPLANATION % (
metric_name, self.formatLabels(labels), value,
expected_value, metric_name,
self.formatVector(self.getMetricVector(metric_name))))
|
Add a mixin to test exported metrics.
Most of this mixin should be moved to prometheus_client eventually,
since it relies heavily on its internals.from prometheus_client import REGISTRY
METRIC_EQUALS_ERR_EXPLANATION = """
%s%s = %s, expected %s.
The values for %s are:
%s"""
class PrometheusTestCaseMixin(object):
"""A collection of utilities that make it easier to write test cases
that interact with metrics.
"""
def setUp(self):
self.clearRegistry()
def clearRegistry(self):
"""Resets the values of all collectors in the global registry.
This is so we can test the value of exported metrics in unit
tests.
This is quite a hack since it relies on the internal
representation of the prometheus_client, and it should
probably be provided as a function there instead.
"""
with REGISTRY._lock:
for c in REGISTRY._collectors:
if hasattr(c, '_metrics'):
c._metrics = {}
if hasattr(c, '_value'):
c._value = 0.0
if hasattr(c, '_count'):
c._count = 0.0
if hasattr(c, '_sum'):
c._sum = 0.0
if hasattr(c, '_buckets'):
c._buckets = [0.0] * len(c._buckets)
def getMetric(self, metric_name, **labels):
return REGISTRY.get_sample_value(metric_name, labels=labels)
def getMetricVector(self, metric_name):
"""Returns the values for all labels of a given metric.
The result is returned as a list of (labels, value) tuples,
where `labels` is a dict.
This is quite a hack since it relies on the internal
representation of the prometheus_client, and it should
probably be provided as a function there instead.
"""
all_metrics = REGISTRY.collect()
output = []
for metric in all_metrics:
for n, l, value in metric._samples:
if n == metric_name:
output.append((l, value))
return output
def formatLabels(self, labels):
"""Format a set of labels to Prometheus representation.
In:
{'method': 'GET', 'port': '80'}
Out:
'{method="GET",port="80"}'
"""
return '{%s}' % ','.join([
'%s="%s"' % (k, v) for k, v in labels.items()])
def formatVector(self, vector):
"""Formats a list of (labels, value) where labels is a dict into a
human-readable representation.
"""
return '\n'.join([
'%s = %s' % (self.formatLabels(labels), value)
for labels, value in vector])
def assertMetricEquals(self, expected_value, metric_name, **labels):
"""Asserts that metric_name{**labels} == expected_value."""
value = self.getMetric(metric_name, **labels)
self.assertEqual(
expected_value, value, METRIC_EQUALS_ERR_EXPLANATION % (
metric_name, self.formatLabels(labels), value,
expected_value, metric_name,
self.formatVector(self.getMetricVector(metric_name))))
|
<commit_before><commit_msg>Add a mixin to test exported metrics.
Most of this mixin should be moved to prometheus_client eventually,
since it relies heavily on its internals.<commit_after>from prometheus_client import REGISTRY
METRIC_EQUALS_ERR_EXPLANATION = """
%s%s = %s, expected %s.
The values for %s are:
%s"""
class PrometheusTestCaseMixin(object):
"""A collection of utilities that make it easier to write test cases
that interact with metrics.
"""
def setUp(self):
self.clearRegistry()
def clearRegistry(self):
"""Resets the values of all collectors in the global registry.
This is so we can test the value of exported metrics in unit
tests.
This is quite a hack since it relies on the internal
representation of the prometheus_client, and it should
probably be provided as a function there instead.
"""
with REGISTRY._lock:
for c in REGISTRY._collectors:
if hasattr(c, '_metrics'):
c._metrics = {}
if hasattr(c, '_value'):
c._value = 0.0
if hasattr(c, '_count'):
c._count = 0.0
if hasattr(c, '_sum'):
c._sum = 0.0
if hasattr(c, '_buckets'):
c._buckets = [0.0] * len(c._buckets)
def getMetric(self, metric_name, **labels):
return REGISTRY.get_sample_value(metric_name, labels=labels)
def getMetricVector(self, metric_name):
"""Returns the values for all labels of a given metric.
The result is returned as a list of (labels, value) tuples,
where `labels` is a dict.
This is quite a hack since it relies on the internal
representation of the prometheus_client, and it should
probably be provided as a function there instead.
"""
all_metrics = REGISTRY.collect()
output = []
for metric in all_metrics:
for n, l, value in metric._samples:
if n == metric_name:
output.append((l, value))
return output
def formatLabels(self, labels):
"""Format a set of labels to Prometheus representation.
In:
{'method': 'GET', 'port': '80'}
Out:
'{method="GET",port="80"}'
"""
return '{%s}' % ','.join([
'%s="%s"' % (k, v) for k, v in labels.items()])
def formatVector(self, vector):
"""Formats a list of (labels, value) where labels is a dict into a
human-readable representation.
"""
return '\n'.join([
'%s = %s' % (self.formatLabels(labels), value)
for labels, value in vector])
def assertMetricEquals(self, expected_value, metric_name, **labels):
"""Asserts that metric_name{**labels} == expected_value."""
value = self.getMetric(metric_name, **labels)
self.assertEqual(
expected_value, value, METRIC_EQUALS_ERR_EXPLANATION % (
metric_name, self.formatLabels(labels), value,
expected_value, metric_name,
self.formatVector(self.getMetricVector(metric_name))))
|
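A minimal usage sketch for the mixin above, assuming it is importable and that an illustrative Counter named 'requests' is registered with the default REGISTRY; depending on the prometheus_client version the counter's sample may be exported as 'requests' or 'requests_total', so the metric name is an assumption, not part of the commit.
import unittest
from prometheus_client import Counter
# Illustrative metric; any collector registered with the global REGISTRY works.
REQUESTS = Counter('requests', 'Requests handled.', ['method'])
class ExampleMetricsTest(PrometheusTestCaseMixin, unittest.TestCase):
    def test_counter_increments(self):
        # setUp() from the mixin has already reset all collector values.
        REQUESTS.labels(method='GET').inc()
        self.assertMetricEquals(1, 'requests', method='GET')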
|
bc97749944eeb5b5b1366f4da6f2f5c04eaba434
|
deepLearningWithNeuralNetworks/regularDeepLearningWithNeuralNetworks.py
|
deepLearningWithNeuralNetworks/regularDeepLearningWithNeuralNetworks.py
|
# -*- coding: utf-8 -*-
"""Deep Learning with Neural Networks and TensorFlow.
Deep learning is part of a broader family of machine learning methods based on
learning data representations, as opposed to task-specific algorithms. Learning
can be supervised, partially supervised or unsupervised.
A deep neural network (DNN) is an artificial neural network with multiple
hidden layers between the input and output layers.
We will use the library Tensorflow to do number crunching. A package like
TensorFlow allows us to perform specific machine learning number-crunching
operations on tensors with high efficiency. We can also easily distribute this
processing across our CPU cores, GPU cores, or even multiple devices like
multiple GPUs. But that's not all! We can even distribute computations across
a distributed network of computers with TensorFlow.
Example:
$ python regularDeepLearningWithNeuralNetworks.py
Todo:
*
"""
|
Add Deep Learning with Neural Networks and TensorFlow
|
Add Deep Learning with Neural Networks and TensorFlow
|
Python
|
mit
|
a-holm/MachinelearningAlgorithms,a-holm/MachinelearningAlgorithms
|
Add Deep Learning with Neural Networks and TensorFlow
|
# -*- coding: utf-8 -*-
"""Deep Learning with Neural Networks and TensorFlow.
Deep learning is part of a broader family of machine learning methods based on
learning data representations, as opposed to task-specific algorithms. Learning
can be supervised, partially supervised or unsupervised.
A deep neural network (DNN) is an artificial neural network with multiple
hidden layers between the input and output layers.
We will use the library Tensorflow to do number crunching. A package like
TensorFlow allows us to perform specific machine learning number-crunching
operations on tensors with high efficiency. We can also easily distribute this
processing across our CPU cores, GPU cores, or even multiple devices like
multiple GPUs. But that's not all! We can even distribute computations across
a distributed network of computers with TensorFlow.
Example:
$ python regularDeepLearningWithNeuralNetworks.py
Todo:
*
"""
|
<commit_before><commit_msg>Add Deep Learning with Neural Networks and TensorFlow<commit_after>
|
# -*- coding: utf-8 -*-
"""Deep Learning with Neural Networks and TensorFlow.
Deep learning is part of a broader family of machine learning methods based on
learning data representations, as opposed to task-specific algorithms. Learning
can be supervised, partially supervised or unsupervised.
A deep neural network (DNN) is an artificial neural network with multiple
hidden layers between the input and output layers.
We will use the library Tensorflow to do number crunching. A package like
TensorFlow allows us to perform specific machine learning number-crunching
operations on tensors with high efficiency. We can also easily distribute this
processing across our CPU cores, GPU cores, or even multiple devices like
multiple GPUs. But that's not all! We can even distribute computations across
a distributed network of computers with TensorFlow.
Example:
$ python regularDeepLearningWithNeuralNetworks.py
Todo:
*
"""
|
Add Deep Learning with Neural Networks and TensorFlow# -*- coding: utf-8 -*-
"""Deep Learning with Neural Networks and TensorFlow.
Deep learning is part of a broader family of machine learning methods based on
learning data representations, as opposed to task-specific algorithms. Learning
can be supervised, partially supervised or unsupervised.
A deep neural network (DNN) is an artificial neural network with multiple
hidden layers between the input and output layers.
We will use the library Tensorflow to do number crunching. A package like
TensorFlow allows us to perform specific machine learning number-crunching
operations on tensors with high efficiency. We can also easily distribute this
processing across our CPU cores, GPU cores, or even multiple devices like
multiple GPUs. But that's not all! We can even distribute computations across
a distributed network of computers with TensorFlow.
Example:
$ python regularDeepLearningWithNeuralNetworks.py
Todo:
*
"""
|
<commit_before><commit_msg>Add Deep Learning with Neural Networks and TensorFlow<commit_after># -*- coding: utf-8 -*-
"""Deep Learning with Neural Networks and TensorFlow.
Deep learning is part of a broader family of machine learning methods based on
learning data representations, as opposed to task-specific algorithms. Learning
can be supervised, partially supervised or unsupervised.
A deep neural network (DNN) is an artificial neural network with multiple
hidden layers between the input and output layers.
We will use the library Tensorflow to do number crunching. A package like
TensorFlow allows us to perform specific machine learning number-crunching
operations on tensors with high efficiency. We can also easily distribute this
processing across our CPU cores, GPU cores, or even multiple devices like
multiple GPUs. But that's not all! We can even distribute computations across
a distributed network of computers with TensorFlow.
Example:
$ python regularDeepLearningWithNeuralNetworks.py
Todo:
*
"""
|
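The docstring above stays at the level of prose; as a rough illustration of the tensor number-crunching it describes, here is a minimal sketch, assuming a TensorFlow 2.x install with eager execution (the original file targets the earlier 1.x API and defines no code yet).
import tensorflow as tf
# Two small constant tensors; matmul runs on CPU or GPU, whichever is available.
a = tf.constant([[1.0, 2.0], [3.0, 4.0]])
b = tf.constant([[5.0], [6.0]])
product = tf.matmul(a, b)  # shape (2, 1)
print(product.numpy())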
|
d8bcdced24e9787711cbf5787011d88a086d4956
|
seleniumbase/console_scripts/logo_helper.py
|
seleniumbase/console_scripts/logo_helper.py
|
""" SeleniumBase Logo Processing (for the console scripts interface)
Logo generated from:
http://www.patorjk.com/software/taag/#p=display&f=Slant&t=SeleniumBase """
import colorama
r'''
______ __ _ ____
/ ____/__ / /__ ____ (_)_ ______ ___ / __ `____ ________
\__ \/ _ \/ / _ \/ __ \/ / / / / __ `__ \/ /_/ / __ `/ ___/ _ \
___/ / __/ / __/ / / / / /_/ / / / / / / /_) / /_/ (__ ) __/
/____/\___/_/\___/_/ /_/_/\__,_/_/ /_/ /_/_____/\__,_/____/\___/
'''
def get_seleniumbase_logo():
colorama.init(autoreset=True)
c1 = colorama.Fore.BLUE + colorama.Back.CYAN
c2 = colorama.Fore.CYAN + colorama.Back.BLUE
cr = colorama.Style.RESET_ALL
sb = c1
sb += "\n"
sb += " ______ __ _ "
sb += c2
sb += "____ "
sb += c1
sb += "\n"
sb += c1
sb += " / ____/__ / /__ ____ (_)_ ______ ___ "
sb += c2
sb += "/ __ `____ ________ "
sb += c1
sb += "\n"
sb += c1
sb += " \\__ \\/ _ \\/ / _ \\/ __ \\/ / / / / __ `__ \\"
sb += c2
sb += "/ /_/ / __ `/ ___/ _ \\"
sb += c1
sb += "\n"
sb += c1
sb += " ___/ / __/ / __/ / / / / /_/ / / / / / "
sb += c2
sb += "/ /_) / /_/ (__ ) __/"
sb += c1
sb += "\n"
sb += c1
sb += "/____/\\___/_/\\___/_/ /_/_/\\__,_/_/ /_/ /_"
sb += c2
sb += "/_____/\\__,_/____/\\___/ "
sb += c1
sb += "\n"
sb += c1
sb += " "
sb += c2
sb += " "
sb += c1
sb += cr
return sb
|
Create a SeleniumBase logo for console interfaces
|
Create a SeleniumBase logo for console interfaces
|
Python
|
mit
|
mdmintz/seleniumspot,seleniumbase/SeleniumBase,seleniumbase/SeleniumBase,seleniumbase/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/seleniumspot,mdmintz/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase
|
Create a SeleniumBase logo for console interfaces
|
""" SeleniumBase Logo Processing (for the console scripts interface)
Logo generated from:
http://www.patorjk.com/software/taag/#p=display&f=Slant&t=SeleniumBase """
import colorama
r'''
______ __ _ ____
/ ____/__ / /__ ____ (_)_ ______ ___ / __ `____ ________
\__ \/ _ \/ / _ \/ __ \/ / / / / __ `__ \/ /_/ / __ `/ ___/ _ \
___/ / __/ / __/ / / / / /_/ / / / / / / /_) / /_/ (__ ) __/
/____/\___/_/\___/_/ /_/_/\__,_/_/ /_/ /_/_____/\__,_/____/\___/
'''
def get_seleniumbase_logo():
colorama.init(autoreset=True)
c1 = colorama.Fore.BLUE + colorama.Back.CYAN
c2 = colorama.Fore.CYAN + colorama.Back.BLUE
cr = colorama.Style.RESET_ALL
sb = c1
sb += "\n"
sb += " ______ __ _ "
sb += c2
sb += "____ "
sb += c1
sb += "\n"
sb += c1
sb += " / ____/__ / /__ ____ (_)_ ______ ___ "
sb += c2
sb += "/ __ `____ ________ "
sb += c1
sb += "\n"
sb += c1
sb += " \\__ \\/ _ \\/ / _ \\/ __ \\/ / / / / __ `__ \\"
sb += c2
sb += "/ /_/ / __ `/ ___/ _ \\"
sb += c1
sb += "\n"
sb += c1
sb += " ___/ / __/ / __/ / / / / /_/ / / / / / "
sb += c2
sb += "/ /_) / /_/ (__ ) __/"
sb += c1
sb += "\n"
sb += c1
sb += "/____/\\___/_/\\___/_/ /_/_/\\__,_/_/ /_/ /_"
sb += c2
sb += "/_____/\\__,_/____/\\___/ "
sb += c1
sb += "\n"
sb += c1
sb += " "
sb += c2
sb += " "
sb += c1
sb += cr
return sb
|
<commit_before><commit_msg>Create a SeleniumBase logo for console interfaces<commit_after>
|
""" SeleniumBase Logo Processing (for the console scripts interface)
Logo generated from:
http://www.patorjk.com/software/taag/#p=display&f=Slant&t=SeleniumBase """
import colorama
r'''
______ __ _ ____
/ ____/__ / /__ ____ (_)_ ______ ___ / __ `____ ________
\__ \/ _ \/ / _ \/ __ \/ / / / / __ `__ \/ /_/ / __ `/ ___/ _ \
___/ / __/ / __/ / / / / /_/ / / / / / / /_) / /_/ (__ ) __/
/____/\___/_/\___/_/ /_/_/\__,_/_/ /_/ /_/_____/\__,_/____/\___/
'''
def get_seleniumbase_logo():
colorama.init(autoreset=True)
c1 = colorama.Fore.BLUE + colorama.Back.CYAN
c2 = colorama.Fore.CYAN + colorama.Back.BLUE
cr = colorama.Style.RESET_ALL
sb = c1
sb += "\n"
sb += " ______ __ _ "
sb += c2
sb += "____ "
sb += c1
sb += "\n"
sb += c1
sb += " / ____/__ / /__ ____ (_)_ ______ ___ "
sb += c2
sb += "/ __ `____ ________ "
sb += c1
sb += "\n"
sb += c1
sb += " \\__ \\/ _ \\/ / _ \\/ __ \\/ / / / / __ `__ \\"
sb += c2
sb += "/ /_/ / __ `/ ___/ _ \\"
sb += c1
sb += "\n"
sb += c1
sb += " ___/ / __/ / __/ / / / / /_/ / / / / / "
sb += c2
sb += "/ /_) / /_/ (__ ) __/"
sb += c1
sb += "\n"
sb += c1
sb += "/____/\\___/_/\\___/_/ /_/_/\\__,_/_/ /_/ /_"
sb += c2
sb += "/_____/\\__,_/____/\\___/ "
sb += c1
sb += "\n"
sb += c1
sb += " "
sb += c2
sb += " "
sb += c1
sb += cr
return sb
|
Create a SeleniumBase logo for console interfaces""" SeleniumBase Logo Processing (for the console scripts interface)
Logo generated from:
http://www.patorjk.com/software/taag/#p=display&f=Slant&t=SeleniumBase """
import colorama
r'''
______ __ _ ____
/ ____/__ / /__ ____ (_)_ ______ ___ / __ `____ ________
\__ \/ _ \/ / _ \/ __ \/ / / / / __ `__ \/ /_/ / __ `/ ___/ _ \
___/ / __/ / __/ / / / / /_/ / / / / / / /_) / /_/ (__ ) __/
/____/\___/_/\___/_/ /_/_/\__,_/_/ /_/ /_/_____/\__,_/____/\___/
'''
def get_seleniumbase_logo():
colorama.init(autoreset=True)
c1 = colorama.Fore.BLUE + colorama.Back.CYAN
c2 = colorama.Fore.CYAN + colorama.Back.BLUE
cr = colorama.Style.RESET_ALL
sb = c1
sb += "\n"
sb += " ______ __ _ "
sb += c2
sb += "____ "
sb += c1
sb += "\n"
sb += c1
sb += " / ____/__ / /__ ____ (_)_ ______ ___ "
sb += c2
sb += "/ __ `____ ________ "
sb += c1
sb += "\n"
sb += c1
sb += " \\__ \\/ _ \\/ / _ \\/ __ \\/ / / / / __ `__ \\"
sb += c2
sb += "/ /_/ / __ `/ ___/ _ \\"
sb += c1
sb += "\n"
sb += c1
sb += " ___/ / __/ / __/ / / / / /_/ / / / / / "
sb += c2
sb += "/ /_) / /_/ (__ ) __/"
sb += c1
sb += "\n"
sb += c1
sb += "/____/\\___/_/\\___/_/ /_/_/\\__,_/_/ /_/ /_"
sb += c2
sb += "/_____/\\__,_/____/\\___/ "
sb += c1
sb += "\n"
sb += c1
sb += " "
sb += c2
sb += " "
sb += c1
sb += cr
return sb
|
<commit_before><commit_msg>Create a SeleniumBase logo for console interfaces<commit_after>""" SeleniumBase Logo Processing (for the console scripts interface)
Logo generated from:
http://www.patorjk.com/software/taag/#p=display&f=Slant&t=SeleniumBase """
import colorama
r'''
______ __ _ ____
/ ____/__ / /__ ____ (_)_ ______ ___ / __ `____ ________
\__ \/ _ \/ / _ \/ __ \/ / / / / __ `__ \/ /_/ / __ `/ ___/ _ \
___/ / __/ / __/ / / / / /_/ / / / / / / /_) / /_/ (__ ) __/
/____/\___/_/\___/_/ /_/_/\__,_/_/ /_/ /_/_____/\__,_/____/\___/
'''
def get_seleniumbase_logo():
colorama.init(autoreset=True)
c1 = colorama.Fore.BLUE + colorama.Back.CYAN
c2 = colorama.Fore.CYAN + colorama.Back.BLUE
cr = colorama.Style.RESET_ALL
sb = c1
sb += "\n"
sb += " ______ __ _ "
sb += c2
sb += "____ "
sb += c1
sb += "\n"
sb += c1
sb += " / ____/__ / /__ ____ (_)_ ______ ___ "
sb += c2
sb += "/ __ `____ ________ "
sb += c1
sb += "\n"
sb += c1
sb += " \\__ \\/ _ \\/ / _ \\/ __ \\/ / / / / __ `__ \\"
sb += c2
sb += "/ /_/ / __ `/ ___/ _ \\"
sb += c1
sb += "\n"
sb += c1
sb += " ___/ / __/ / __/ / / / / /_/ / / / / / "
sb += c2
sb += "/ /_) / /_/ (__ ) __/"
sb += c1
sb += "\n"
sb += c1
sb += "/____/\\___/_/\\___/_/ /_/_/\\__,_/_/ /_/ /_"
sb += c2
sb += "/_____/\\__,_/____/\\___/ "
sb += c1
sb += "\n"
sb += c1
sb += " "
sb += c2
sb += " "
sb += c1
sb += cr
return sb
|
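A short sketch of the colorama behaviour the helper above relies on; the banner text is illustrative, not part of the commit.
import colorama
colorama.init(autoreset=True)  # colours reset automatically after each print
banner = colorama.Fore.BLUE + colorama.Back.CYAN + ' SeleniumBase ' + colorama.Style.RESET_ALL
print(banner)
# print(get_seleniumbase_logo())  # would print the two-tone ASCII logo built above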
|
dde3f2e15c0c8db29140cd2d26e2c75e89661a41
|
analysis/data_process/uk_2017/generate_notebook.py
|
analysis/data_process/uk_2017/generate_notebook.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__: 'Olivier PHILIPPE'
__licence__: 'BSD3-clause'
"""
Script to programmatically generate a notebook for exploratory analysis
Use the code from: https://gist.github.com/fperez/9716279
"""
import nbformat as nbf
class GenerateNotebook:
"""
"""
def __init__(self, notebook_filename):
"""
"""
self.outfilename = notebook_filename
# Generate an empty notebook
self.nb = nbf.v4.new_notebook()
def add_text(self, text_to_add):
"""
"""
formatting_text = nbf.v4.new_markdown_cell(text_to_add)
self._append_notebook(formatting_text)
def add_code(self, code_to_add):
"""
"""
        formatting_code = nbf.v4.new_code_cell(code_to_add)
self._append_notebook(formatting_code)
def _append_notebook(self, cell_to_add):
"""
"""
        self.nb['cells'].append(cell_to_add)
def example_generate_notebook():
"""
        Script to programmatically generate a notebook for exploratory analysis
Use the code from: https://gist.github.com/fperez/9716279
"""
text = """\
# My first automatic Jupyter Notebook
This is an auto-generated notebook.\
"""
code = """\
%pylab inline
hist(normal(size=2000), bins=50);"""
nb['cells'] = [nbf.v4.new_markdown_cell(text),
nbf.v4.new_code_cell(code)]
fname = './notebooks/test.ipynb'
def save_notebook(self):
"""
Save the notebook on the hard drive
"""
with open(self.outfilename, 'w') as f:
            nbf.write(self.nb, f)
def main():
pass
if __name__ == "__main__":
main()
|
Move into uk_2017 folder and transforming the notebook into a class
|
Move into uk_2017 folder and transforming the notebook into a class
|
Python
|
bsd-3-clause
|
softwaresaved/international-survey
|
Move into uk_2017 folder and transforming the notebook into a class
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__: 'Olivier PHILIPPE'
__licence__: 'BSD3-clause'
"""
Script to programmatically generate a notebook for exploratory analysis
Use the code from: https://gist.github.com/fperez/9716279
"""
import nbformat as nbf
class GenerateNotebook:
"""
"""
def __init__(self, notebook_filename):
"""
"""
self.outfilename = notebook_filename
# Generate an empty notebook
self.nb = nbf.v4.new_notebook()
def add_text(self, text_to_add):
"""
"""
formatting_text = nbf.v4.new_markdown_cell(text_to_add)
self._append_notebook(formatting_text)
def add_code(self, code_to_add):
"""
"""
        formatting_code = nbf.v4.new_code_cell(code_to_add)
self._append_notebook(formatting_code)
def _append_notebook(self, cell_to_add):
"""
"""
        self.nb['cells'].append(cell_to_add)
def example_generate_notebook():
"""
        Script to programmatically generate a notebook for exploratory analysis
Use the code from: https://gist.github.com/fperez/9716279
"""
text = """\
# My first automatic Jupyter Notebook
This is an auto-generated notebook.\
"""
code = """\
%pylab inline
hist(normal(size=2000), bins=50);"""
nb['cells'] = [nbf.v4.new_markdown_cell(text),
nbf.v4.new_code_cell(code)]
fname = './notebooks/test.ipynb'
def save_notebook(self):
"""
Save the notebook on the hard drive
"""
with open(self.outfilename, 'w') as f:
            nbf.write(self.nb, f)
def main():
pass
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Move into uk_2017 folder and transforming the notebook into a class<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__: 'Olivier PHILIPPE'
__licence__: 'BSD3-clause'
"""
Script to programmatically generate a notebook for exploratory analysis
Use the code from: https://gist.github.com/fperez/9716279
"""
import nbformat as nbf
class GenerateNotebook:
"""
"""
def __init__(self, notebook_filename):
"""
"""
self.outfilename = notebook_filename
# Generate an empty notebook
self.nb = nbf.v4.new_notebook()
def add_text(self, text_to_add):
"""
"""
formatting_text = nbf.v4.new_markdown_cell(text_to_add)
self._append_notebook(formatting_text)
def add_code(self, code_to_add):
"""
"""
        formatting_code = nbf.v4.new_code_cell(code_to_add)
self._append_notebook(formatting_code)
def _append_notebook(self, cell_to_add):
"""
"""
        self.nb['cells'].append(cell_to_add)
def example_generate_notebook():
"""
        Script to programmatically generate a notebook for exploratory analysis
Use the code from: https://gist.github.com/fperez/9716279
"""
text = """\
# My first automatic Jupyter Notebook
This is an auto-generated notebook.\
"""
code = """\
%pylab inline
hist(normal(size=2000), bins=50);"""
nb['cells'] = [nbf.v4.new_markdown_cell(text),
nbf.v4.new_code_cell(code)]
fname = './notebooks/test.ipynb'
def save_notebook(self):
"""
Save the notebook on the hard drive
"""
with open(self.outfilename, 'w') as f:
            nbf.write(self.nb, f)
def main():
pass
if __name__ == "__main__":
main()
|
Move into uk_2017 folder and transforming the notebook into a class#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__: 'Olivier PHILIPPE'
__licence__: 'BSD3-clause'
"""
Script to programmatically generate a notebook for exploratory analysis
Use the code from: https://gist.github.com/fperez/9716279
"""
import nbformat as nbf
class GenerateNotebook:
"""
"""
def __init__(self, notebook_filename):
"""
"""
self.outfilename = notebook_filename
# Generate an empty notebook
self.nb = nbf.v4.new_notebook()
def add_text(self, text_to_add):
"""
"""
formatting_text = nbf.v4.new_markdown_cell(text_to_add)
self._append_notebook(formatting_text)
def add_code(self, code_to_add):
"""
"""
        formatting_code = nbf.v4.new_code_cell(code_to_add)
self._append_notebook(formatting_code)
def _append_notebook(self, cell_to_add):
"""
"""
        self.nb['cells'].append(cell_to_add)
def example_generate_notebook():
"""
        Script to programmatically generate a notebook for exploratory analysis
Use the code from: https://gist.github.com/fperez/9716279
"""
text = """\
# My first automatic Jupyter Notebook
This is an auto-generated notebook.\
"""
code = """\
%pylab inline
hist(normal(size=2000), bins=50);"""
nb['cells'] = [nbf.v4.new_markdown_cell(text),
nbf.v4.new_code_cell(code)]
fname = './notebooks/test.ipynb'
def save_notebook(self):
"""
Save the notebook on the hard drive
"""
with open(self.outfilename, 'w') as f:
            nbf.write(self.nb, f)
def main():
pass
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Move into uk_2017 folder and transforming the notebook into a class<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__: 'Olivier PHILIPPE'
__licence__: 'BSD3-clause'
"""
Script to programmatically generate a notebook for exploratory analysis
Use the code from: https://gist.github.com/fperez/9716279
"""
import nbformat as nbf
class GenerateNotebook:
"""
"""
def __init__(self, notebook_filename):
"""
"""
self.outfilename = notebook_filename
# Generate an empty notebook
self.nb = nbf.v4.new_notebook()
def add_text(self, text_to_add):
"""
"""
formatting_text = nbf.v4.new_markdown_cell(text_to_add)
self._append_notebook(formatting_text)
def add_code(self, code_to_add):
"""
"""
        formatting_code = nbf.v4.new_code_cell(code_to_add)
self._append_notebook(formatting_code)
def _append_notebook(self, cell_to_add):
"""
"""
        self.nb['cells'].append(cell_to_add)
def example_generate_notebook():
"""
        Script to programmatically generate a notebook for exploratory analysis
Use the code from: https://gist.github.com/fperez/9716279
"""
text = """\
# My first automatic Jupyter Notebook
This is an auto-generated notebook.\
"""
code = """\
%pylab inline
hist(normal(size=2000), bins=50);"""
nb['cells'] = [nbf.v4.new_markdown_cell(text),
nbf.v4.new_code_cell(code)]
fname = './notebooks/test.ipynb'
def save_notebook(self):
"""
Save the notebook on the hard drive
"""
with open(self.outfilename, 'w') as f:
            nbf.write(self.nb, f)
def main():
pass
if __name__ == "__main__":
main()
|
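For reference, a self-contained sketch of the nbformat calls the class above wraps (the same API as the referenced gist); the cell contents and filename are illustrative.
import nbformat as nbf
nb = nbf.v4.new_notebook()
nb['cells'] = [
    nbf.v4.new_markdown_cell('# My first automatic Jupyter Notebook'),
    nbf.v4.new_code_cell('print("hello")'),
]
with open('example.ipynb', 'w') as f:
    nbf.write(nb, f)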
|
7ace1149a2aa2c1725438cabf123cadfb326e82b
|
meinberlin/apps/contrib/management/commands/cleanup_unverified_users.py
|
meinberlin/apps/contrib/management/commands/cleanup_unverified_users.py
|
from datetime import timedelta
from django.core.management.base import BaseCommand
from django.utils import timezone
from meinberlin.apps.users.models import User
class Command(BaseCommand):
help = 'Remove all users that registered more than n days ago but never ' \
'logged in. This implies they never verified their email or ' \
'added an external / social account'
def add_arguments(self, parser):
parser.add_argument('days')
parser.add_argument('test')
def handle(self, *args, **options):
test = options['test'] != "False"
days = int(options['days'])
all_users = User.objects.all()
qs = User.objects.filter(last_login=None,
date_joined__lt=(
timezone.now() - timedelta(days=days)))
if qs:
print("Users: {} Removing: {} Left: {}".format(
all_users.count(), qs.count(), all_users.count() - qs.count()))
for user in qs:
if test:
print("Would remove user {} (date_joined: {})".format(
user.username, user.date_joined
))
else:
print("Removing user {} (date_joined: {})".format(
user.username, user.date_joined
))
qs.delete()
|
Add command to clean up unverified users
|
contrib/management: Add command to clean up unverified users
Strictly speaking we should check for the email verified status here,
but if a user never logged in that pretty much implies the same.
Prints a list of deleted users, which we'll receive as mail from
a cron job.
|
Python
|
agpl-3.0
|
liqd/a4-meinberlin,liqd/a4-meinberlin,liqd/a4-meinberlin,liqd/a4-meinberlin
|
contrib/management: Add command to clean up unverified users
Strictly speaking we should check for the email verified status here,
but if a user never logged in that pretty much implies the same.
Prints a list of deleted users, which we'll receive as mail from
a cron job.
|
from datetime import timedelta
from django.core.management.base import BaseCommand
from django.utils import timezone
from meinberlin.apps.users.models import User
class Command(BaseCommand):
help = 'Remove all users that registered more than n days ago but never ' \
'logged in. This implies they never verified their email or ' \
'added an external / social account'
def add_arguments(self, parser):
parser.add_argument('days')
parser.add_argument('test')
def handle(self, *args, **options):
test = options['test'] != "False"
days = int(options['days'])
all_users = User.objects.all()
qs = User.objects.filter(last_login=None,
date_joined__lt=(
timezone.now() - timedelta(days=days)))
if qs:
print("Users: {} Removing: {} Left: {}".format(
all_users.count(), qs.count(), all_users.count() - qs.count()))
for user in qs:
if test:
print("Would remove user {} (date_joined: {})".format(
user.username, user.date_joined
))
else:
print("Removing user {} (date_joined: {})".format(
user.username, user.date_joined
))
qs.delete()
|
<commit_before><commit_msg>contrib/management: Add command to clean up unverified users
Strictly speaking we should check for the email verified status here,
but if a user never logged in that pretty much implies the same.
Prints a list of deleted users, which we'll receive as mail from
a cron job.<commit_after>
|
from datetime import timedelta
from django.core.management.base import BaseCommand
from django.utils import timezone
from meinberlin.apps.users.models import User
class Command(BaseCommand):
help = 'Remove all users that registered more than n days ago but never ' \
'logged in. This implies they never verified their email or ' \
'added an external / social account'
def add_arguments(self, parser):
parser.add_argument('days')
parser.add_argument('test')
def handle(self, *args, **options):
test = options['test'] != "False"
days = int(options['days'])
all_users = User.objects.all()
qs = User.objects.filter(last_login=None,
date_joined__lt=(
timezone.now() - timedelta(days=days)))
if qs:
print("Users: {} Removing: {} Left: {}".format(
all_users.count(), qs.count(), all_users.count() - qs.count()))
for user in qs:
if test:
print("Would remove user {} (date_joined: {})".format(
user.username, user.date_joined
))
else:
print("Removing user {} (date_joined: {})".format(
user.username, user.date_joined
))
qs.delete()
|
contrib/management: Add command to clean up unverified users
Strictly speaking we should check for the email verified status here,
but if a user never logged in that pretty much implies the same.
Prints a list of deleted users, which we'll receive as mail from
a cron job.from datetime import timedelta
from django.core.management.base import BaseCommand
from django.utils import timezone
from meinberlin.apps.users.models import User
class Command(BaseCommand):
help = 'Remove all users that registered more than n days ago but never ' \
'logged in. This implies they never verified their email or ' \
'added an external / social account'
def add_arguments(self, parser):
parser.add_argument('days')
parser.add_argument('test')
def handle(self, *args, **options):
test = options['test'] != "False"
days = int(options['days'])
all_users = User.objects.all()
qs = User.objects.filter(last_login=None,
date_joined__lt=(
timezone.now() - timedelta(days=days)))
if qs:
print("Users: {} Removing: {} Left: {}".format(
all_users.count(), qs.count(), all_users.count() - qs.count()))
for user in qs:
if test:
print("Would remove user {} (date_joined: {})".format(
user.username, user.date_joined
))
else:
print("Removing user {} (date_joined: {})".format(
user.username, user.date_joined
))
qs.delete()
|
<commit_before><commit_msg>contrib/management: Add command to clean up unverified users
Strictly speaking we should check for the email verified status here,
but if a user never logged in that pretty much implies the same.
Prints a list of deleted users, which we'll receive as mail from
a cron job.<commit_after>from datetime import timedelta
from django.core.management.base import BaseCommand
from django.utils import timezone
from meinberlin.apps.users.models import User
class Command(BaseCommand):
help = 'Remove all users that registered more than n days ago but never ' \
'logged in. This implies they never verified their email or ' \
'added an external / social account'
def add_arguments(self, parser):
parser.add_argument('days')
parser.add_argument('test')
def handle(self, *args, **options):
test = options['test'] != "False"
days = int(options['days'])
all_users = User.objects.all()
qs = User.objects.filter(last_login=None,
date_joined__lt=(
timezone.now() - timedelta(days=days)))
if qs:
print("Users: {} Removing: {} Left: {}".format(
all_users.count(), qs.count(), all_users.count() - qs.count()))
for user in qs:
if test:
print("Would remove user {} (date_joined: {})".format(
user.username, user.date_joined
))
else:
print("Removing user {} (date_joined: {})".format(
user.username, user.date_joined
))
qs.delete()
|
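A sketch of how the command above might be invoked and of the queryset it builds; the 30-day cutoff is illustrative, and the second positional argument acts as a dry-run flag (anything other than the string "False" only prints what would be removed).
# Dry run, printing the users that would be removed:
#   python manage.py cleanup_unverified_users 30 True
# Actually delete them:
#   python manage.py cleanup_unverified_users 30 False
from datetime import timedelta
from django.utils import timezone
from meinberlin.apps.users.models import User
cutoff = timezone.now() - timedelta(days=30)
stale = User.objects.filter(last_login=None, date_joined__lt=cutoff)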
|
9f7ed73485f94a1a23f7318a5daab82dbafab413
|
tests/graphics/ticket2925.py
|
tests/graphics/ticket2925.py
|
# Copyright (C) 2007, Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
"""
Test the style of toggle and radio buttons inside a palette. The buttons
contain only an icon and should be rendered similarly to the toolbar
controls. Ticket #2855.
"""
import gtk
from sugar.graphics.palette import Palette
import common
test = common.TestPalette()
palette = Palette('Test radio and toggle')
test.set_palette(palette)
scale = gtk.HScale(gtk.Adjustment(upper=100))
palette.set_content(scale)
scale.show()
if __name__ == "__main__":
common.main(test)
|
Test for ticket 2925, it doesn't reproduce the issue for some reason.
|
Test for ticket 2925, it doesn't reproduce the issue for some reason.
|
Python
|
lgpl-2.1
|
sugarlabs/sugar-toolkit-gtk3,samdroid-apps/sugar-toolkit-gtk3,gusDuarte/sugar-toolkit-gtk3,tchx84/debian-pkg-sugar-toolkit-gtk3,i5o/sugar-toolkit-gtk3,tchx84/debian-pkg-sugar-toolkit,quozl/sugar-toolkit-gtk3,gusDuarte/sugar-toolkit-gtk3,ceibal-tatu/sugar-toolkit,ceibal-tatu/sugar-toolkit,puneetgkaur/sugar-toolkit-gtk3,godiard/sugar-toolkit-gtk3,manuq/sugar-toolkit-gtk3,manuq/sugar-toolkit-gtk3,Daksh/sugar-toolkit-gtk3,ceibal-tatu/sugar-toolkit-gtk3,puneetgkaur/sugar-toolkit-gtk3,puneetgkaur/sugar-toolkit-gtk3,quozl/sugar-toolkit-gtk3,puneetgkaur/backup_sugar_sugartoolkit,sugarlabs/sugar-toolkit-gtk3,puneetgkaur/backup_sugar_sugartoolkit,i5o/sugar-toolkit-gtk3,tchx84/debian-pkg-sugar-toolkit-gtk3,puneetgkaur/backup_sugar_sugartoolkit,sugarlabs/sugar-toolkit,tchx84/sugar-toolkit-gtk3,Daksh/sugar-toolkit-gtk3,tchx84/debian-pkg-sugar-toolkit-gtk3,gusDuarte/sugar-toolkit-gtk3,i5o/sugar-toolkit-gtk3,i5o/sugar-toolkit-gtk3,godiard/sugar-toolkit-gtk3,manuq/sugar-toolkit-gtk3,sugarlabs/sugar-toolkit,sugarlabs/sugar-toolkit,samdroid-apps/sugar-toolkit-gtk3,ceibal-tatu/sugar-toolkit-gtk3,godiard/sugar-toolkit-gtk3,sugarlabs/sugar-toolkit,tchx84/debian-pkg-sugar-toolkit,tchx84/sugar-toolkit-gtk3,quozl/sugar-toolkit-gtk3,sugarlabs/sugar-toolkit-gtk3,gusDuarte/sugar-toolkit-gtk3,tchx84/debian-pkg-sugar-toolkit,samdroid-apps/sugar-toolkit-gtk3,ceibal-tatu/sugar-toolkit-gtk3,ceibal-tatu/sugar-toolkit,quozl/sugar-toolkit-gtk3,tchx84/sugar-toolkit-gtk3,Daksh/sugar-toolkit-gtk3,samdroid-apps/sugar-toolkit-gtk3
|
Test for ticket 2925, it doesn't reproduce the issue for some reason.
|
# Copyright (C) 2007, Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
"""
Test the style of toggle and radio buttons inside a palette. The buttons
contain only an icon and should be rendered similarly to the toolbar
controls. Ticket #2855.
"""
import gtk
from sugar.graphics.palette import Palette
import common
test = common.TestPalette()
palette = Palette('Test radio and toggle')
test.set_palette(palette)
scale = gtk.HScale(gtk.Adjustment(upper=100))
palette.set_content(scale)
scale.show()
if __name__ == "__main__":
common.main(test)
|
<commit_before><commit_msg>Test for ticket 2925, it doesn't reproduce the issue for some reason.<commit_after>
|
# Copyright (C) 2007, Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
"""
Test the style of toggle and radio buttons inside a palette. The buttons
contain only an icon and should be rendered similarly to the toolbar
controls. Ticket #2855.
"""
import gtk
from sugar.graphics.palette import Palette
import common
test = common.TestPalette()
palette = Palette('Test radio and toggle')
test.set_palette(palette)
scale = gtk.HScale(gtk.Adjustment(upper=100))
palette.set_content(scale)
scale.show()
if __name__ == "__main__":
common.main(test)
|
Test for ticket 2925, it doesn't reproduce the issue for some reason.# Copyright (C) 2007, Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
"""
Test the style of toggle and radio buttons inside a palette. The buttons
contain only an icon and should be rendered similarly to the toolbar
controls. Ticket #2855.
"""
import gtk
from sugar.graphics.palette import Palette
import common
test = common.TestPalette()
palette = Palette('Test radio and toggle')
test.set_palette(palette)
scale = gtk.HScale(gtk.Adjustment(upper=100))
palette.set_content(scale)
scale.show()
if __name__ == "__main__":
common.main(test)
|
<commit_before><commit_msg>Test for ticket 2925, it doesn't reproduce the issue for some reason.<commit_after># Copyright (C) 2007, Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
"""
Test the style of toggle and radio buttons inside a palette. The buttons
contain only an icon and should be rendered similarly to the toolbar
controls. Ticket #2855.
"""
import gtk
from sugar.graphics.palette import Palette
import common
test = common.TestPalette()
palette = Palette('Test radio and toggle')
test.set_palette(palette)
scale = gtk.HScale(gtk.Adjustment(upper=100))
palette.set_content(scale)
scale.show()
if __name__ == "__main__":
common.main(test)
|
|
ba1a32ca744d001697e52be7a0c36dd76c81446c
|
tools/droplets/add_mentor.py
|
tools/droplets/add_mentor.py
|
# Allows a mentor to ssh into a Digital Ocean droplet. This is designed to be
# executed on the target machine.
#
# This script takes the username of the mentor as an argument:
#
# $ python3 add_mentor.py <mentor's username>
#
# Alternatively you can pass in --remove to remove their ssh key from the
# machine:
#
# $ python3 add_mentor.py --remove <mentor's username>
import os
import sys
from argparse import ArgumentParser
from typing import List
import socket
import re
import requests
parser = ArgumentParser(description='Give a mentor ssh access to this machine.')
parser.add_argument('username', help='Github username of the mentor.')
parser.add_argument('--remove', help='Remove his/her key from the machine.',
action='store_true', default=False)
# Wrap keys with line comments for easier key removal.
append_key = """\
#<{username}>{{{{
{key}
#}}}}<{username}>
"""
def get_mentor_keys(username: str) -> List[str]:
url = 'https://api.github.com/users/{}/keys'.format(username)
r = requests.get(url)
if r.status_code != 200:
print('Cannot connect to Github...')
sys.exit(1)
keys = r.json()
if not keys:
print('Mentor "{}" has no public key.'.format(username))
sys.exit(1)
return [key['key'] for key in keys]
if __name__ == '__main__':
args = parser.parse_args()
authorized_keys = os.path.expanduser('~/.ssh/authorized_keys')
if args.remove:
remove_re = re.compile('#<{0}>{{{{.+}}}}<{0}>(\n)?'.format(args.username),
re.DOTALL | re.MULTILINE)
with open(authorized_keys, 'r+') as f:
old_content = f.read()
new_content = re.sub(remove_re, '', old_content)
f.seek(0)
f.write(new_content)
f.truncate()
        print('Successfully removed {}\'s SSH key!'.format(args.username))
else:
keys = get_mentor_keys(args.username)
with open(authorized_keys, 'a') as f:
for key in keys:
f.write(append_key.format(username=args.username, key=key))
print('Successfully added {}\'s SSH key!'.format(args.username))
print('Can you let your mentor know that they can connect to this machine with:\n')
print(' $ ssh zulipdev@{}\n'.format(socket.gethostname()))
|
Create script to add and remove mentor's ssh key from DO droplets.
|
Create script to add and remove mentor's ssh key from DO droplets.
|
Python
|
apache-2.0
|
andersk/zulip,zulip/zulip,rishig/zulip,eeshangarg/zulip,dhcrzf/zulip,timabbott/zulip,rishig/zulip,hackerkid/zulip,tommyip/zulip,jackrzhang/zulip,showell/zulip,andersk/zulip,brainwane/zulip,punchagan/zulip,jackrzhang/zulip,showell/zulip,brainwane/zulip,eeshangarg/zulip,timabbott/zulip,mahim97/zulip,rht/zulip,synicalsyntax/zulip,kou/zulip,shubhamdhama/zulip,tommyip/zulip,zulip/zulip,timabbott/zulip,dhcrzf/zulip,hackerkid/zulip,rht/zulip,rht/zulip,jackrzhang/zulip,punchagan/zulip,dhcrzf/zulip,punchagan/zulip,kou/zulip,zulip/zulip,showell/zulip,rishig/zulip,showell/zulip,timabbott/zulip,mahim97/zulip,jackrzhang/zulip,hackerkid/zulip,andersk/zulip,punchagan/zulip,mahim97/zulip,tommyip/zulip,rht/zulip,shubhamdhama/zulip,brainwane/zulip,shubhamdhama/zulip,kou/zulip,hackerkid/zulip,synicalsyntax/zulip,tommyip/zulip,zulip/zulip,hackerkid/zulip,showell/zulip,andersk/zulip,shubhamdhama/zulip,kou/zulip,jackrzhang/zulip,rht/zulip,tommyip/zulip,andersk/zulip,dhcrzf/zulip,synicalsyntax/zulip,brainwane/zulip,showell/zulip,kou/zulip,synicalsyntax/zulip,timabbott/zulip,eeshangarg/zulip,dhcrzf/zulip,andersk/zulip,showell/zulip,hackerkid/zulip,punchagan/zulip,shubhamdhama/zulip,mahim97/zulip,synicalsyntax/zulip,synicalsyntax/zulip,kou/zulip,eeshangarg/zulip,rht/zulip,shubhamdhama/zulip,shubhamdhama/zulip,jackrzhang/zulip,timabbott/zulip,kou/zulip,andersk/zulip,rishig/zulip,synicalsyntax/zulip,brainwane/zulip,rishig/zulip,rishig/zulip,eeshangarg/zulip,punchagan/zulip,zulip/zulip,zulip/zulip,zulip/zulip,dhcrzf/zulip,eeshangarg/zulip,tommyip/zulip,punchagan/zulip,tommyip/zulip,timabbott/zulip,mahim97/zulip,eeshangarg/zulip,mahim97/zulip,rishig/zulip,dhcrzf/zulip,brainwane/zulip,jackrzhang/zulip,hackerkid/zulip,rht/zulip,brainwane/zulip
|
Create script to add and remove mentor's ssh key from DO droplets.
|
# Allows a mentor to ssh into a Digital Ocean droplet. This is designed to be
# executed on the target machine.
#
# This script takes the username of the mentor as an argument:
#
# $ python3 add_mentor.py <mentor's username>
#
# Alternatively you can pass in --remove to remove their ssh key from the
# machine:
#
# $ python3 add_mentor.py --remove <mentor's username>
import os
import sys
from argparse import ArgumentParser
from typing import List
import socket
import re
import requests
parser = ArgumentParser(description='Give a mentor ssh access to this machine.')
parser.add_argument('username', help='Github username of the mentor.')
parser.add_argument('--remove', help='Remove his/her key from the machine.',
action='store_true', default=False)
# Wrap keys with line comments for easier key removal.
append_key = """\
#<{username}>{{{{
{key}
#}}}}<{username}>
"""
def get_mentor_keys(username: str) -> List[str]:
url = 'https://api.github.com/users/{}/keys'.format(username)
r = requests.get(url)
if r.status_code != 200:
print('Cannot connect to Github...')
sys.exit(1)
keys = r.json()
if not keys:
print('Mentor "{}" has no public key.'.format(username))
sys.exit(1)
return [key['key'] for key in keys]
if __name__ == '__main__':
args = parser.parse_args()
authorized_keys = os.path.expanduser('~/.ssh/authorized_keys')
if args.remove:
remove_re = re.compile('#<{0}>{{{{.+}}}}<{0}>(\n)?'.format(args.username),
re.DOTALL | re.MULTILINE)
with open(authorized_keys, 'r+') as f:
old_content = f.read()
new_content = re.sub(remove_re, '', old_content)
f.seek(0)
f.write(new_content)
f.truncate()
        print('Successfully removed {}\'s SSH key!'.format(args.username))
else:
keys = get_mentor_keys(args.username)
with open(authorized_keys, 'a') as f:
for key in keys:
f.write(append_key.format(username=args.username, key=key))
print('Successfully added {}\'s SSH key!'.format(args.username))
print('Can you let your mentor know that they can connect to this machine with:\n')
print(' $ ssh zulipdev@{}\n'.format(socket.gethostname()))
|
<commit_before><commit_msg>Create script to add and remove mentor's ssh key from DO droplets.<commit_after>
|
# Allows a mentor to ssh into a Digital Ocean droplet. This is designed to be
# executed on the target machine.
#
# This script takes the username of the mentor as an argument:
#
# $ python3 add_mentor.py <mentor's username>
#
# Alternatively you can pass in --remove to remove their ssh key from the
# machine:
#
# $ python3 add_mentor.py --remove <mentor's username>
import os
import sys
from argparse import ArgumentParser
from typing import List
import socket
import re
import requests
parser = ArgumentParser(description='Give a mentor ssh access to this machine.')
parser.add_argument('username', help='Github username of the mentor.')
parser.add_argument('--remove', help='Remove his/her key from the machine.',
action='store_true', default=False)
# Wrap keys with line comments for easier key removal.
append_key = """\
#<{username}>{{{{
{key}
#}}}}<{username}>
"""
def get_mentor_keys(username: str) -> List[str]:
url = 'https://api.github.com/users/{}/keys'.format(username)
r = requests.get(url)
if r.status_code != 200:
print('Cannot connect to Github...')
sys.exit(1)
keys = r.json()
if not keys:
print('Mentor "{}" has no public key.'.format(username))
sys.exit(1)
return [key['key'] for key in keys]
if __name__ == '__main__':
args = parser.parse_args()
authorized_keys = os.path.expanduser('~/.ssh/authorized_keys')
if args.remove:
remove_re = re.compile('#<{0}>{{{{.+}}}}<{0}>(\n)?'.format(args.username),
re.DOTALL | re.MULTILINE)
with open(authorized_keys, 'r+') as f:
old_content = f.read()
new_content = re.sub(remove_re, '', old_content)
f.seek(0)
f.write(new_content)
f.truncate()
        print('Successfully removed {}\'s SSH key!'.format(args.username))
else:
keys = get_mentor_keys(args.username)
with open(authorized_keys, 'a') as f:
for key in keys:
f.write(append_key.format(username=args.username, key=key))
print('Successfully added {}\'s SSH key!'.format(args.username))
print('Can you let your mentor know that they can connect to this machine with:\n')
print(' $ ssh zulipdev@{}\n'.format(socket.gethostname()))
|
Create script to add and remove mentor's ssh key from DO droplets.# Allows a mentor to ssh into a Digital Ocean droplet. This is designed to be
# executed on the target machine.
#
# This script takes the username of the mentor as an argument:
#
# $ python3 add_mentor.py <mentor's username>
#
# Alternatively you can pass in --remove to remove their ssh key from the
# machine:
#
# $ python3 add_mentor.py --remove <mentor's username>
import os
import sys
from argparse import ArgumentParser
from typing import List
import socket
import re
import requests
parser = ArgumentParser(description='Give a mentor ssh access to this machine.')
parser.add_argument('username', help='Github username of the mentor.')
parser.add_argument('--remove', help='Remove his/her key from the machine.',
action='store_true', default=False)
# Wrap keys with line comments for easier key removal.
append_key = """\
#<{username}>{{{{
{key}
#}}}}<{username}>
"""
def get_mentor_keys(username: str) -> List[str]:
url = 'https://api.github.com/users/{}/keys'.format(username)
r = requests.get(url)
if r.status_code != 200:
print('Cannot connect to Github...')
sys.exit(1)
keys = r.json()
if not keys:
print('Mentor "{}" has no public key.'.format(username))
sys.exit(1)
return [key['key'] for key in keys]
if __name__ == '__main__':
args = parser.parse_args()
authorized_keys = os.path.expanduser('~/.ssh/authorized_keys')
if args.remove:
remove_re = re.compile('#<{0}>{{{{.+}}}}<{0}>(\n)?'.format(args.username),
re.DOTALL | re.MULTILINE)
with open(authorized_keys, 'r+') as f:
old_content = f.read()
new_content = re.sub(remove_re, '', old_content)
f.seek(0)
f.write(new_content)
f.truncate()
        print('Successfully removed {}\'s SSH key!'.format(args.username))
else:
keys = get_mentor_keys(args.username)
with open(authorized_keys, 'a') as f:
for key in keys:
f.write(append_key.format(username=args.username, key=key))
print('Successfully added {}\'s SSH key!'.format(args.username))
print('Can you let your mentor know that they can connect to this machine with:\n')
print(' $ ssh zulipdev@{}\n'.format(socket.gethostname()))
|
<commit_before><commit_msg>Create script to add and remove mentor's ssh key from DO droplets.<commit_after># Allows a mentor to ssh into a Digital Ocean droplet. This is designed to be
# executed on the target machine.
#
# This script takes the username of the mentor as an argument:
#
# $ python3 add_mentor.py <mentor's username>
#
# Alternatively you can pass in --remove to remove their ssh key from the
# machine:
#
# $ python3 add_mentor.py --remove <mentor's username>
import os
import sys
from argparse import ArgumentParser
from typing import List
import socket
import re
import requests
parser = ArgumentParser(description='Give a mentor ssh access to this machine.')
parser.add_argument('username', help='Github username of the mentor.')
parser.add_argument('--remove', help='Remove his/her key from the machine.',
action='store_true', default=False)
# Wrap keys with line comments for easier key removal.
append_key = """\
#<{username}>{{{{
{key}
#}}}}<{username}>
"""
def get_mentor_keys(username: str) -> List[str]:
url = 'https://api.github.com/users/{}/keys'.format(username)
r = requests.get(url)
if r.status_code != 200:
print('Cannot connect to Github...')
sys.exit(1)
keys = r.json()
if not keys:
print('Mentor "{}" has no public key.'.format(username))
sys.exit(1)
return [key['key'] for key in keys]
if __name__ == '__main__':
args = parser.parse_args()
authorized_keys = os.path.expanduser('~/.ssh/authorized_keys')
if args.remove:
remove_re = re.compile('#<{0}>{{{{.+}}}}<{0}>(\n)?'.format(args.username),
re.DOTALL | re.MULTILINE)
with open(authorized_keys, 'r+') as f:
old_content = f.read()
new_content = re.sub(remove_re, '', old_content)
f.seek(0)
f.write(new_content)
f.truncate()
        print('Successfully removed {}\'s SSH key!'.format(args.username))
else:
keys = get_mentor_keys(args.username)
with open(authorized_keys, 'a') as f:
for key in keys:
f.write(append_key.format(username=args.username, key=key))
print('Successfully added {}\'s SSH key!'.format(args.username))
print('Can you let your mentor know that they can connect to this machine with:\n')
print(' $ ssh zulipdev@{}\n'.format(socket.gethostname()))
|
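A small sketch of why the script wraps each key in comment markers: removal then reduces to a single regex substitution. The username and key text below are illustrative.
import re
authorized = (
    'ssh-rsa AAAA...existing-key admin@host\n'
    '#<octocat>{{\n'
    'ssh-rsa AAAA...mentor-key octocat\n'
    '#}}<octocat>\n'
)
# The same pattern the script builds for --remove.
remove_re = re.compile('#<octocat>{{.+}}<octocat>(\n)?', re.DOTALL | re.MULTILINE)
print(re.sub(remove_re, '', authorized))  # only the original key line remains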
|
0f6056989079323277329825534f9290cad8c019
|
grow/deployments/scp.py
|
grow/deployments/scp.py
|
from grow.deployments import base
import errno
import os
import paramiko
class ScpDeployment(base.BaseDeployment):
def get_destination_address(self):
return '{}:{}'.format(self.host, self.root_dir)
def set_params(self, host, root_dir, port=22):
# TODO(jeremydw): Behavior of set_params and __init__
# really needs to be switched...
self.ssh = paramiko.SSHClient()
self.host = host
self.port = port
self.root_dir = root_dir
# One SSH client cannot accept multiple connections, so
# this deployment is not parallelized (for now).
self.threaded = False
def prelaunch(self):
super(ScpDeployment, self).prelaunch()
self.ssh.load_system_host_keys()
self.ssh.connect(self.host, port=self.port)
self.sftp = self.ssh.open_sftp()
def postlaunch(self):
super(ScpDeployment, self).postlaunch()
self.sftp.close()
self.ssh.close()
def read_file(self, path):
path = os.path.join(self.root_dir, path.lstrip('/'))
fp = self.sftp.open(path)
content = fp.read()
fp.close()
return content
def delete_file(self, path):
path = os.path.join(self.root_dir, path.lstrip('/'))
self.sftp.remove(path)
def write_file(self, path, content):
if isinstance(content, unicode):
content = content.encode('utf-8')
path = os.path.join(self.root_dir, path.lstrip('/'))
self._mkdirs(os.path.dirname(path))
fp = self.sftp.open(path, 'w')
fp.write(content)
fp.close()
return content
def _mkdirs(self, path):
"""Recursively creates directories."""
base = ''
for part in path.split('/'):
base += part + '/'
try:
self.sftp.lstat(base)
except IOError, e:
if e.errno == errno.ENOENT:
self.sftp.mkdir(base)
|
Add missing SCP deployment file.
|
Add missing SCP deployment file.
|
Python
|
mit
|
codedcolors/pygrow,grow/grow,grow/pygrow,vitorio/pygrow,denmojo/pygrow,codedcolors/pygrow,codedcolors/pygrow,grow/pygrow,denmojo/pygrow,vitorio/pygrow,grow/grow,vitorio/pygrow,denmojo/pygrow,grow/grow,grow/grow,grow/pygrow,denmojo/pygrow
|
Add missing SCP deployment file.
|
from grow.deployments import base
import errno
import os
import paramiko
class ScpDeployment(base.BaseDeployment):
def get_destination_address(self):
return '{}:{}'.format(self.host, self.root_dir)
def set_params(self, host, root_dir, port=22):
# TODO(jeremydw): Behavior of set_params and __init__
# really needs to be switched...
self.ssh = paramiko.SSHClient()
self.host = host
self.port = port
self.root_dir = root_dir
# One SSH client cannot accept multiple connections, so
# this deployment is not parallelized (for now).
self.threaded = False
def prelaunch(self):
super(ScpDeployment, self).prelaunch()
self.ssh.load_system_host_keys()
self.ssh.connect(self.host, port=self.port)
self.sftp = self.ssh.open_sftp()
def postlaunch(self):
super(ScpDeployment, self).postlaunch()
self.sftp.close()
self.ssh.close()
def read_file(self, path):
path = os.path.join(self.root_dir, path.lstrip('/'))
fp = self.sftp.open(path)
content = fp.read()
fp.close()
return content
def delete_file(self, path):
path = os.path.join(self.root_dir, path.lstrip('/'))
self.sftp.remove(path)
def write_file(self, path, content):
if isinstance(content, unicode):
content = content.encode('utf-8')
path = os.path.join(self.root_dir, path.lstrip('/'))
self._mkdirs(os.path.dirname(path))
fp = self.sftp.open(path, 'w')
fp.write(content)
fp.close()
return content
def _mkdirs(self, path):
"""Recursively creates directories."""
base = ''
for part in path.split('/'):
base += part + '/'
try:
self.sftp.lstat(base)
except IOError, e:
if e.errno == errno.ENOENT:
self.sftp.mkdir(base)
|
<commit_before><commit_msg>Add missing SCP deployment file.<commit_after>
|
from grow.deployments import base
import errno
import os
import paramiko
class ScpDeployment(base.BaseDeployment):
def get_destination_address(self):
return '{}:{}'.format(self.host, self.root_dir)
def set_params(self, host, root_dir, port=22):
# TODO(jeremydw): Behavior of set_params and __init__
# really needs to be switched...
self.ssh = paramiko.SSHClient()
self.host = host
self.port = port
self.root_dir = root_dir
# One SSH client cannot accept multiple connections, so
# this deployment is not parallelized (for now).
self.threaded = False
def prelaunch(self):
super(ScpDeployment, self).prelaunch()
self.ssh.load_system_host_keys()
self.ssh.connect(self.host, port=self.port)
self.sftp = self.ssh.open_sftp()
def postlaunch(self):
super(ScpDeployment, self).postlaunch()
self.sftp.close()
self.ssh.close()
def read_file(self, path):
path = os.path.join(self.root_dir, path.lstrip('/'))
fp = self.sftp.open(path)
content = fp.read()
fp.close()
return content
def delete_file(self, path):
path = os.path.join(self.root_dir, path.lstrip('/'))
self.sftp.remove(path)
def write_file(self, path, content):
if isinstance(content, unicode):
content = content.encode('utf-8')
path = os.path.join(self.root_dir, path.lstrip('/'))
self._mkdirs(os.path.dirname(path))
fp = self.sftp.open(path, 'w')
fp.write(content)
fp.close()
return content
def _mkdirs(self, path):
"""Recursively creates directories."""
base = ''
for part in path.split('/'):
base += part + '/'
try:
self.sftp.lstat(base)
except IOError, e:
if e.errno == errno.ENOENT:
self.sftp.mkdir(base)
|
Add missing SCP deployment file.from grow.deployments import base
import errno
import os
import paramiko
class ScpDeployment(base.BaseDeployment):
def get_destination_address(self):
return '{}:{}'.format(self.host, self.root_dir)
def set_params(self, host, root_dir, port=22):
# TODO(jeremydw): Behavior of set_params and __init__
# really needs to be switched...
self.ssh = paramiko.SSHClient()
self.host = host
self.port = port
self.root_dir = root_dir
# One SSH client cannot accept multiple connections, so
# this deployment is not parallelized (for now).
self.threaded = False
def prelaunch(self):
super(ScpDeployment, self).prelaunch()
self.ssh.load_system_host_keys()
self.ssh.connect(self.host, port=self.port)
self.sftp = self.ssh.open_sftp()
def postlaunch(self):
super(ScpDeployment, self).postlaunch()
self.sftp.close()
self.ssh.close()
def read_file(self, path):
path = os.path.join(self.root_dir, path.lstrip('/'))
fp = self.sftp.open(path)
content = fp.read()
fp.close()
return content
def delete_file(self, path):
path = os.path.join(self.root_dir, path.lstrip('/'))
self.sftp.remove(path)
def write_file(self, path, content):
if isinstance(content, unicode):
content = content.encode('utf-8')
path = os.path.join(self.root_dir, path.lstrip('/'))
self._mkdirs(os.path.dirname(path))
fp = self.sftp.open(path, 'w')
fp.write(content)
fp.close()
return content
def _mkdirs(self, path):
"""Recursively creates directories."""
base = ''
for part in path.split('/'):
base += part + '/'
try:
self.sftp.lstat(base)
except IOError, e:
if e.errno == errno.ENOENT:
self.sftp.mkdir(base)
|
<commit_before><commit_msg>Add missing SCP deployment file.<commit_after>from grow.deployments import base
import errno
import os
import paramiko
class ScpDeployment(base.BaseDeployment):
def get_destination_address(self):
return '{}:{}'.format(self.host, self.root_dir)
def set_params(self, host, root_dir, port=22):
# TODO(jeremydw): Behavior of set_params and __init__
# really needs to be switched...
self.ssh = paramiko.SSHClient()
self.host = host
self.port = port
self.root_dir = root_dir
# One SSH client cannot accept multiple connections, so
# this deployment is not parallelized (for now).
self.threaded = False
def prelaunch(self):
super(ScpDeployment, self).prelaunch()
self.ssh.load_system_host_keys()
self.ssh.connect(self.host, port=self.port)
self.sftp = self.ssh.open_sftp()
def postlaunch(self):
super(ScpDeployment, self).postlaunch()
self.sftp.close()
self.ssh.close()
def read_file(self, path):
path = os.path.join(self.root_dir, path.lstrip('/'))
fp = self.sftp.open(path)
content = fp.read()
fp.close()
return content
def delete_file(self, path):
path = os.path.join(self.root_dir, path.lstrip('/'))
self.sftp.remove(path)
def write_file(self, path, content):
if isinstance(content, unicode):
content = content.encode('utf-8')
path = os.path.join(self.root_dir, path.lstrip('/'))
self._mkdirs(os.path.dirname(path))
fp = self.sftp.open(path, 'w')
fp.write(content)
fp.close()
return content
def _mkdirs(self, path):
"""Recursively creates directories."""
base = ''
for part in path.split('/'):
base += part + '/'
try:
self.sftp.lstat(base)
except IOError, e:
if e.errno == errno.ENOENT:
self.sftp.mkdir(base)
|
|
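A minimal usage sketch for the SCP deployment above. The module path, the no-argument constructor, and the target host and paths are assumptions for illustration; the method calls follow the class as shown.

from grow.deployments.scp import ScpDeployment  # assumed module path

deployment = ScpDeployment()
deployment.set_params(host='example.com', root_dir='/var/www/site', port=22)
deployment.prelaunch()                          # opens the SSH and SFTP sessions
deployment.write_file('/index.html', u'<h1>Hello</h1>')
print(deployment.read_file('/index.html'))      # round-trip check
deployment.delete_file('/index.html')
deployment.postlaunch()                         # closes SFTP, then SSH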
66543a7330a0898146076adb5269b75708f763a9
|
boardinghouse/tests/test_sql.py
|
boardinghouse/tests/test_sql.py
|
"""
Tests for the RAW sql functions.
"""
from django.conf import settings
from django.test import TestCase
from django.db import connection
from boardinghouse.models import Schema
class TestRejectSchemaColumnChange(TestCase):
def test_exception_is_raised(self):
Schema.objects.mass_create('a')
cursor = connection.cursor()
UPDATE = "UPDATE boardinghouse_schema SET schema='foo' WHERE schema='a'"
self.assertRaises(Exception, cursor.execute, UPDATE)
|
Add test for exception raising.
|
Add test for exception raising.
|
Python
|
bsd-3-clause
|
schinckel/django-boardinghouse,schinckel/django-boardinghouse,schinckel/django-boardinghouse
|
Add test for exception raising.
|
"""
Tests for the RAW sql functions.
"""
from django.conf import settings
from django.test import TestCase
from django.db import connection
from boardinghouse.models import Schema
class TestRejectSchemaColumnChange(TestCase):
def test_exception_is_raised(self):
Schema.objects.mass_create('a')
cursor = connection.cursor()
UPDATE = "UPDATE boardinghouse_schema SET schema='foo' WHERE schema='a'"
self.assertRaises(Exception, cursor.execute, UPDATE)
|
<commit_before><commit_msg>Add test for exception raising.<commit_after>
|
"""
Tests for the RAW sql functions.
"""
from django.conf import settings
from django.test import TestCase
from django.db import connection
from boardinghouse.models import Schema
class TestRejectSchemaColumnChange(TestCase):
def test_exception_is_raised(self):
Schema.objects.mass_create('a')
cursor = connection.cursor()
UPDATE = "UPDATE boardinghouse_schema SET schema='foo' WHERE schema='a'"
self.assertRaises(Exception, cursor.execute, UPDATE)
|
Add test for exception raising."""
Tests for the RAW sql functions.
"""
from django.conf import settings
from django.test import TestCase
from django.db import connection
from boardinghouse.models import Schema
class TestRejectSchemaColumnChange(TestCase):
def test_exception_is_raised(self):
Schema.objects.mass_create('a')
cursor = connection.cursor()
UPDATE = "UPDATE boardinghouse_schema SET schema='foo' WHERE schema='a'"
self.assertRaises(Exception, cursor.execute, UPDATE)
|
<commit_before><commit_msg>Add test for exception raising.<commit_after>"""
Tests for the RAW sql functions.
"""
from django.conf import settings
from django.test import TestCase
from django.db import connection
from boardinghouse.models import Schema
class TestRejectSchemaColumnChange(TestCase):
def test_exception_is_raised(self):
Schema.objects.mass_create('a')
cursor = connection.cursor()
UPDATE = "UPDATE boardinghouse_schema SET schema='foo' WHERE schema='a'"
self.assertRaises(Exception, cursor.execute, UPDATE)
|
|
d691fa14cc7ecf1b2dc53473553407008591b503
|
git-lang-guesser/gitRequester.py
|
git-lang-guesser/gitRequester.py
|
import http.client
import requests
from . import exceptions
USER_URL = 'https://api.github.com/users/{username}'
USER_PUBLIC_REPO_URL = 'https://api.github.com/users/{username}/repos'
def do_request(url_format, format_args):
r = requests.get(url_format.format(**format_args))
if r.status_code == http.client.OK:
return r.json()
else:
raise exceptions.RequestFailed(r)
def get_user(username):
return do_request(USER_URL, {"username": username})
def get_public_projects_for_user(username):
return do_request(USER_PUBLIC_REPO_URL, {"username": username})
if __name__ == "__main__":
import pprint
pprint.pprint(get_user("robbie-c"))
pprint.pprint(get_public_projects_for_user("robbie-c"))
|
Add git api request helpers
|
Add git api request helpers
Add helper functions for getting info on a user, and all public repos
owned by a user
|
Python
|
mit
|
robbie-c/git-lang-guesser
|
Add git api request helpers
Add helper functions for getting info on a user, and all public repos
owned by a user
|
import http.client
import requests
from . import exceptions
USER_URL = 'https://api.github.com/users/{username}'
USER_PUBLIC_REPO_URL = 'https://api.github.com/users/{username}/repos'
def do_request(url_format, format_args):
r = requests.get(url_format.format(**format_args))
if r.status_code == http.client.OK:
return r.json()
else:
raise exceptions.RequestFailed(r)
def get_user(username):
return do_request(USER_URL, {"username": username})
def get_public_projects_for_user(username):
return do_request(USER_PUBLIC_REPO_URL, {"username": username})
if __name__ == "__main__":
import pprint
pprint.pprint(get_user("robbie-c"))
pprint.pprint(get_public_projects_for_user("robbie-c"))
|
<commit_before><commit_msg>Add git api request helpers
Add helper functions for getting info on a user, and all public repos
owned by a user<commit_after>
|
import http.client
import requests
from . import exceptions
USER_URL = 'https://api.github.com/users/{username}'
USER_PUBLIC_REPO_URL = 'https://api.github.com/users/{username}/repos'
def do_request(url_format, format_args):
r = requests.get(url_format.format(**format_args))
if r.status_code == http.client.OK:
return r.json()
else:
raise exceptions.RequestFailed(r)
def get_user(username):
return do_request(USER_URL, {"username": username})
def get_public_projects_for_user(username):
return do_request(USER_PUBLIC_REPO_URL, {"username": username})
if __name__ == "__main__":
import pprint
pprint.pprint(get_user("robbie-c"))
pprint.pprint(get_public_projects_for_user("robbie-c"))
|
Add git api request helpers
Add helper functions for getting info on a user, and all public repos
owned by a userimport http.client
import requests
from . import exceptions
USER_URL = 'https://api.github.com/users/{username}'
USER_PUBLIC_REPO_URL = 'https://api.github.com/users/{username}/repos'
def do_request(url_format, format_args):
r = requests.get(url_format.format(**format_args))
if r.status_code == http.client.OK:
return r.json()
else:
raise exceptions.RequestFailed(r)
def get_user(username):
return do_request(USER_URL, {"username": username})
def get_public_projects_for_user(username):
return do_request(USER_PUBLIC_REPO_URL, {"username": username})
if __name__ == "__main__":
import pprint
pprint.pprint(get_user("robbie-c"))
pprint.pprint(get_public_projects_for_user("robbie-c"))
|
<commit_before><commit_msg>Add git api request helpers
Add helper functions for getting info on a user, and all public repos
owned by a user<commit_after>import http.client
import requests
from . import exceptions
USER_URL = 'https://api.github.com/users/{username}'
USER_PUBLIC_REPO_URL = 'https://api.github.com/users/{username}/repos'
def do_request(url_format, format_args):
r = requests.get(url_format.format(**format_args))
if r.status_code == http.client.OK:
return r.json()
else:
raise exceptions.RequestFailed(r)
def get_user(username):
return do_request(USER_URL, {"username": username})
def get_public_projects_for_user(username):
return do_request(USER_PUBLIC_REPO_URL, {"username": username})
if __name__ == "__main__":
import pprint
pprint.pprint(get_user("robbie-c"))
pprint.pprint(get_public_projects_for_user("robbie-c"))
|
|
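A short sketch of how these helpers compose for the guesser itself: tally the `language` field GitHub reports for each public repo and pick the most common one. The package-relative import mirrors the file above; the function name is illustrative.

from collections import Counter

from . import gitRequester  # assumes this module sits alongside gitRequester.py in the package

def guess_favourite_language(username):
    repos = gitRequester.get_public_projects_for_user(username)
    counts = Counter(repo["language"] for repo in repos if repo["language"])
    return counts.most_common(1)[0][0] if counts else None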
cb99e3f0ae298fb7065b661190a88d144d663183
|
gffutils-utils/scripts/gtf_to_bed.py
|
gffutils-utils/scripts/gtf_to_bed.py
|
import tempfile
from argparse import ArgumentParser
import gffutils
import gzip
import os
def disable_infer_extent(gtf_file):
"""
guess if we need to use the gene extent option when making a gffutils
database by making a tiny database of 1000 lines from the original
GTF and looking for all of the features
"""
_, ext = os.path.splitext(gtf_file)
tmp_out = tempfile.NamedTemporaryFile(suffix=".gtf", delete=False).name
with open(tmp_out, "w") as out_handle:
count = 0
in_handle = open(gtf_file) if ext != ".gz" else gzip.open(gtf_file)
for line in in_handle:
if count > 1000:
break
out_handle.write(line)
count += 1
in_handle.close()
db = gffutils.create_db(tmp_out, dbfn=":memory:",
disable_infer_transcripts=False,
disable_infer_genes=False)
os.remove(tmp_out)
features = [x for x in db.featuretypes()]
if "gene" in features and "transcript" in features:
return True
else:
return False
def get_gtf_db(gtf, in_memory=False):
"""
create a gffutils DB from a GTF file and will use an existing gffutils
database if it is named {gtf}.db
"""
db_file = ":memory:" if in_memory else gtf + ".db"
disable_infer = disable_infer_extent(gtf)
if in_memory or not os.path.exists(db_file):
db = gffutils.create_db(gtf, dbfn=db_file,
disable_infer_transcripts=disable_infer,
disable_infer_genes=disable_infer)
if in_memory:
return db
else:
return gffutils.FeatureDB(db_file)
def gtf_to_bed(gtf):
db = get_gtf_db(gtf)
out_file = os.path.splitext(gtf)[0] + ".bed"
if os.path.exists(out_file):
return out_file
with open(out_file, "w") as out_handle:
for feature in db.all_features():
chrom = feature.chrom
start = feature.start
end = feature.end
attributes = feature.attributes.keys()
strand = feature.strand
name = (feature['gene_name'][0] if 'gene_name' in attributes else
feature['gene_id'][0])
line = "\t".join(map(str, [chrom, start, end, name, ".", strand]))
out_handle.write(line + "\n")
return out_file
if __name__ == "__main__":
description = ("Convert a GTF file to a BED file.")
parser = ArgumentParser(description)
parser.add_argument("gtf", help="GTF to convert")
args = parser.parse_args()
gtf_to_bed(args.gtf)
|
Add script to convert GTF to BED file.
|
Add script to convert GTF to BED file.
|
Python
|
mit
|
roryk/junkdrawer,roryk/junkdrawer
|
Add script to convert GTF to BED file.
|
import tempfile
from argparse import ArgumentParser
import gffutils
import gzip
import os
def disable_infer_extent(gtf_file):
"""
guess if we need to use the gene extent option when making a gffutils
database by making a tiny database of 1000 lines from the original
GTF and looking for all of the features
"""
_, ext = os.path.splitext(gtf_file)
tmp_out = tempfile.NamedTemporaryFile(suffix=".gtf", delete=False).name
with open(tmp_out, "w") as out_handle:
count = 0
in_handle = open(gtf_file) if ext != ".gz" else gzip.open(gtf_file)
for line in in_handle:
if count > 1000:
break
out_handle.write(line)
count += 1
in_handle.close()
db = gffutils.create_db(tmp_out, dbfn=":memory:",
disable_infer_transcripts=False,
disable_infer_genes=False)
os.remove(tmp_out)
features = [x for x in db.featuretypes()]
if "gene" in features and "transcript" in features:
return True
else:
return False
def get_gtf_db(gtf, in_memory=False):
"""
create a gffutils DB from a GTF file and will use an existing gffutils
database if it is named {gtf}.db
"""
db_file = ":memory:" if in_memory else gtf + ".db"
disable_infer = disable_infer_extent(gtf)
if in_memory or not os.path.exists(db_file):
db = gffutils.create_db(gtf, dbfn=db_file,
disable_infer_transcripts=disable_infer,
disable_infer_genes=disable_infer)
if in_memory:
return db
else:
return gffutils.FeatureDB(db_file)
def gtf_to_bed(gtf):
db = get_gtf_db(gtf)
out_file = os.path.splitext(gtf)[0] + ".bed"
if os.path.exists(out_file):
return out_file
with open(out_file, "w") as out_handle:
for feature in db.all_features():
chrom = feature.chrom
start = feature.start
end = feature.end
attributes = feature.attributes.keys()
strand = feature.strand
name = (feature['gene_name'][0] if 'gene_name' in attributes else
feature['gene_id'][0])
line = "\t".join(map(str, [chrom, start, end, name, ".", strand]))
out_handle.write(line + "\n")
return out_file
if __name__ == "__main__":
description = ("Convert a GTF file to a BED file.")
parser = ArgumentParser(description)
parser.add_argument("gtf", help="GTF to convert")
args = parser.parse_args()
gtf_to_bed(args.gtf)
|
<commit_before><commit_msg>Add script to convert GTF to BED file.<commit_after>
|
import tempfile
from argparse import ArgumentParser
import gffutils
import gzip
import os
def disable_infer_extent(gtf_file):
"""
guess if we need to use the gene extent option when making a gffutils
database by making a tiny database of 1000 lines from the original
GTF and looking for all of the features
"""
_, ext = os.path.splitext(gtf_file)
tmp_out = tempfile.NamedTemporaryFile(suffix=".gtf", delete=False).name
with open(tmp_out, "w") as out_handle:
count = 0
in_handle = open(gtf_file) if ext != ".gz" else gzip.open(gtf_file)
for line in in_handle:
if count > 1000:
break
out_handle.write(line)
count += 1
in_handle.close()
db = gffutils.create_db(tmp_out, dbfn=":memory:",
disable_infer_transcripts=False,
disable_infer_genes=False)
os.remove(tmp_out)
features = [x for x in db.featuretypes()]
if "gene" in features and "transcript" in features:
return True
else:
return False
def get_gtf_db(gtf, in_memory=False):
"""
create a gffutils DB from a GTF file and will use an existing gffutils
database if it is named {gtf}.db
"""
db_file = ":memory:" if in_memory else gtf + ".db"
disable_infer = disable_infer_extent(gtf)
if in_memory or not os.path.exists(db_file):
db = gffutils.create_db(gtf, dbfn=db_file,
disable_infer_transcripts=disable_infer,
disable_infer_genes=disable_infer)
if in_memory:
return db
else:
return gffutils.FeatureDB(db_file)
def gtf_to_bed(gtf):
db = get_gtf_db(gtf)
out_file = os.path.splitext(gtf)[0] + ".bed"
if os.path.exists(out_file):
return out_file
with open(out_file, "w") as out_handle:
for feature in db.all_features():
chrom = feature.chrom
start = feature.start
end = feature.end
attributes = feature.attributes.keys()
strand = feature.strand
name = (feature['gene_name'][0] if 'gene_name' in attributes else
feature['gene_id'][0])
line = "\t".join(map(str, [chrom, start, end, name, ".", strand]))
out_handle.write(line + "\n")
return out_file
if __name__ == "__main__":
description = ("Convert a GTF file to a BED file.")
parser = ArgumentParser(description)
parser.add_argument("gtf", help="GTF to convert")
args = parser.parse_args()
gtf_to_bed(args.gtf)
|
Add script to convert GTF to BED file.import tempfile
from argparse import ArgumentParser
import gffutils
import gzip
import os
def disable_infer_extent(gtf_file):
"""
guess if we need to use the gene extent option when making a gffutils
database by making a tiny database of 1000 lines from the original
GTF and looking for all of the features
"""
_, ext = os.path.splitext(gtf_file)
tmp_out = tempfile.NamedTemporaryFile(suffix=".gtf", delete=False).name
with open(tmp_out, "w") as out_handle:
count = 0
in_handle = open(gtf_file) if ext != ".gz" else gzip.open(gtf_file)
for line in in_handle:
if count > 1000:
break
out_handle.write(line)
count += 1
in_handle.close()
db = gffutils.create_db(tmp_out, dbfn=":memory:",
disable_infer_transcripts=False,
disable_infer_genes=False)
os.remove(tmp_out)
features = [x for x in db.featuretypes()]
if "gene" in features and "transcript" in features:
return True
else:
return False
def get_gtf_db(gtf, in_memory=False):
"""
create a gffutils DB from a GTF file and will use an existing gffutils
database if it is named {gtf}.db
"""
db_file = ":memory:" if in_memory else gtf + ".db"
disable_infer = disable_infer_extent(gtf)
if in_memory or not os.path.exists(db_file):
db = gffutils.create_db(gtf, dbfn=db_file,
disable_infer_transcripts=disable_infer,
disable_infer_genes=disable_infer)
if in_memory:
return db
else:
return gffutils.FeatureDB(db_file)
def gtf_to_bed(gtf):
db = get_gtf_db(gtf)
out_file = os.path.splitext(gtf)[0] + ".bed"
if os.path.exists(out_file):
return out_file
with open(out_file, "w") as out_handle:
for feature in db.all_features():
chrom = feature.chrom
start = feature.start
end = feature.end
attributes = feature.attributes.keys()
strand = feature.strand
name = (feature['gene_name'][0] if 'gene_name' in attributes else
feature['gene_id'][0])
line = "\t".join(map(str, [chrom, start, end, name, ".", strand]))
out_handle.write(line + "\n")
return out_file
if __name__ == "__main__":
description = ("Convert a GTF file to a BED file.")
parser = ArgumentParser(description)
parser.add_argument("gtf", help="GTF to convert")
args = parser.parse_args()
gtf_to_bed(args.gtf)
|
<commit_before><commit_msg>Add script to convert GTF to BED file.<commit_after>import tempfile
from argparse import ArgumentParser
import gffutils
import gzip
import os
def disable_infer_extent(gtf_file):
"""
guess if we need to use the gene extent option when making a gffutils
database by making a tiny database of 1000 lines from the original
GTF and looking for all of the features
"""
_, ext = os.path.splitext(gtf_file)
tmp_out = tempfile.NamedTemporaryFile(suffix=".gtf", delete=False).name
with open(tmp_out, "w") as out_handle:
count = 0
in_handle = open(gtf_file) if ext != ".gz" else gzip.open(gtf_file)
for line in in_handle:
if count > 1000:
break
out_handle.write(line)
count += 1
in_handle.close()
db = gffutils.create_db(tmp_out, dbfn=":memory:",
disable_infer_transcripts=False,
disable_infer_genes=False)
os.remove(tmp_out)
features = [x for x in db.featuretypes()]
if "gene" in features and "transcript" in features:
return True
else:
return False
def get_gtf_db(gtf, in_memory=False):
"""
create a gffutils DB from a GTF file and will use an existing gffutils
database if it is named {gtf}.db
"""
db_file = ":memory:" if in_memory else gtf + ".db"
disable_infer = disable_infer_extent(gtf)
if in_memory or not os.path.exists(db_file):
db = gffutils.create_db(gtf, dbfn=db_file,
disable_infer_transcripts=disable_infer,
disable_infer_genes=disable_infer)
if in_memory:
return db
else:
return gffutils.FeatureDB(db_file)
def gtf_to_bed(gtf):
db = get_gtf_db(gtf)
out_file = os.path.splitext(gtf)[0] + ".bed"
if os.path.exists(out_file):
return out_file
with open(out_file, "w") as out_handle:
for feature in db.all_features():
chrom = feature.chrom
start = feature.start
end = feature.end
attributes = feature.attributes.keys()
strand = feature.strand
name = (feature['gene_name'][0] if 'gene_name' in attributes else
feature['gene_id'][0])
line = "\t".join(map(str, [chrom, start, end, name, ".", strand]))
out_handle.write(line + "\n")
return out_file
if __name__ == "__main__":
description = ("Convert a GTF file to a BED file.")
parser = ArgumentParser(description)
parser.add_argument("gtf", help="GTF to convert")
args = parser.parse_args()
gtf_to_bed(args.gtf)
|
|
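Besides the command-line entry point (python gtf_to_bed.py annotation.gtf), the converter can be reused as a library. A small sketch with placeholder file names; note that .gz inputs rely on the gzip module being importable.

from gtf_to_bed import gtf_to_bed, get_gtf_db  # assumes the script is on PYTHONPATH

bed_path = gtf_to_bed("annotation.gtf")    # writes annotation.bed next to the GTF
db = get_gtf_db("annotation.gtf")          # reuses annotation.gtf.db when it exists
print(bed_path, len(list(db.all_features())))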
c711928fd84f37d1e9f7035eaa3fc010d7a4355f
|
docker/switch-server.py
|
docker/switch-server.py
|
#!/usr/bin/env python3
import argparse
import subprocess
parser = argparse.ArgumentParser()
parser.add_argument('server', choices=['blue', 'green',],
help='Specify server to switch to')
args = parser.parse_args()
server = 'muzhack-{}'.format(args.server)
subprocess.check_call(['tutum', 'service', 'set', '--link', '{0}:{0}'.format(server), 'lb'],
stdout=subprocess.PIPE)
print('* Successfully switched production server to {}'.format(server))
|
Add script for switching production server
|
Add script for switching production server
|
Python
|
mit
|
muzhack/musitechhub,muzhack/muzhack,muzhack/muzhack,muzhack/muzhack,muzhack/muzhack,muzhack/musitechhub,muzhack/musitechhub,muzhack/musitechhub
|
Add script for switching production server
|
#!/usr/bin/env python3
import argparse
import subprocess
parser = argparse.ArgumentParser()
parser.add_argument('server', choices=['blue', 'green',],
help='Specify server to switch to')
args = parser.parse_args()
server = 'muzhack-{}'.format(args.server)
subprocess.check_call(['tutum', 'service', 'set', '--link', '{0}:{0}'.format(server), 'lb'],
stdout=subprocess.PIPE)
print('* Successfully switched production server to {}'.format(server))
|
<commit_before><commit_msg>Add script for switching production server<commit_after>
|
#!/usr/bin/env python3
import argparse
import subprocess
parser = argparse.ArgumentParser()
parser.add_argument('server', choices=['blue', 'green',],
help='Specify server to switch to')
args = parser.parse_args()
server = 'muzhack-{}'.format(args.server)
subprocess.check_call(['tutum', 'service', 'set', '--link', '{0}:{0}'.format(server), 'lb'],
stdout=subprocess.PIPE)
print('* Successfully switched production server to {}'.format(server))
|
Add script for switching production server#!/usr/bin/env python3
import argparse
import subprocess
parser = argparse.ArgumentParser()
parser.add_argument('server', choices=['blue', 'green',],
help='Specify server to switch to')
args = parser.parse_args()
server = 'muzhack-{}'.format(args.server)
subprocess.check_call(['tutum', 'service', 'set', '--link', '{0}:{0}'.format(server), 'lb'],
stdout=subprocess.PIPE)
print('* Successfully switched production server to {}'.format(server))
|
<commit_before><commit_msg>Add script for switching production server<commit_after>#!/usr/bin/env python3
import argparse
import subprocess
parser = argparse.ArgumentParser()
parser.add_argument('server', choices=['blue', 'green',],
help='Specify server to switch to')
args = parser.parse_args()
server = 'muzhack-{}'.format(args.server)
subprocess.check_call(['tutum', 'service', 'set', '--link', '{0}:{0}'.format(server), 'lb'],
stdout=subprocess.PIPE)
print('* Successfully switched production server to {}'.format(server))
|
|
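A hedged variation on the same script: identical tutum invocation, plus a --dry-run flag (not part of the original) so the command can be inspected before the load balancer is re-linked.

#!/usr/bin/env python3
import argparse
import subprocess

parser = argparse.ArgumentParser()
parser.add_argument('server', choices=['blue', 'green'],
                    help='Specify server to switch to')
parser.add_argument('--dry-run', action='store_true',
                    help='Print the tutum command without running it')
args = parser.parse_args()

server = 'muzhack-{}'.format(args.server)
cmd = ['tutum', 'service', 'set', '--link', '{0}:{0}'.format(server), 'lb']
if args.dry_run:
    print(' '.join(cmd))
else:
    subprocess.check_call(cmd, stdout=subprocess.PIPE)
    print('* Successfully switched production server to {}'.format(server))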
40a1730100091c6eee03ee5fe7687bd542e92077
|
cli_progress_bar.py
|
cli_progress_bar.py
|
# Print iterations progress
def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
"""
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '-' * (length - filledLength)
print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\r')
# Print New Line on Complete
if iteration == total:
print()
from time import sleep
items = list(range(0, 57))
l = len(items)
printProgressBar(0, l, prefix = 'Progress:', suffix = 'Complete', length = 50)
for i, item in enumerate(items):
sleep(0.1)
printProgressBar(i + 1, l, prefix = 'Progress:', suffix = 'Complete', length = 50)
|
Add cli progress bar example
|
Add cli progress bar example
|
Python
|
mit
|
voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts
|
Add cli progress bar example
|
# Print iterations progress
def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
"""
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '-' * (length - filledLength)
print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\r')
# Print New Line on Complete
if iteration == total:
print()
from time import sleep
items = list(range(0, 57))
l = len(items)
printProgressBar(0, l, prefix = 'Progress:', suffix = 'Complete', length = 50)
for i, item in enumerate(items):
sleep(0.1)
printProgressBar(i + 1, l, prefix = 'Progress:', suffix = 'Complete', length = 50)
|
<commit_before><commit_msg>Add cli progress bar example<commit_after>
|
# Print iterations progress
def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
"""
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '-' * (length - filledLength)
print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\r')
# Print New Line on Complete
if iteration == total:
print()
from time import sleep
items = list(range(0, 57))
l = len(items)
printProgressBar(0, l, prefix = 'Progress:', suffix = 'Complete', length = 50)
for i, item in enumerate(items):
sleep(0.1)
printProgressBar(i + 1, l, prefix = 'Progress:', suffix = 'Complete', length = 50)
|
Add cli progress bar example# Print iterations progress
def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
"""
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '-' * (length - filledLength)
print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\r')
# Print New Line on Complete
if iteration == total:
print()
from time import sleep
items = list(range(0, 57))
l = len(items)
printProgressBar(0, l, prefix = 'Progress:', suffix = 'Complete', length = 50)
for i, item in enumerate(items):
sleep(0.1)
printProgressBar(i + 1, l, prefix = 'Progress:', suffix = 'Complete', length = 50)
|
<commit_before><commit_msg>Add cli progress bar example<commit_after># Print iterations progress
def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
"""
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '-' * (length - filledLength)
print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\r')
# Print New Line on Complete
if iteration == total:
print()
from time import sleep
items = list(range(0, 57))
l = len(items)
printProgressBar(0, l, prefix = 'Progress:', suffix = 'Complete', length = 50)
for i, item in enumerate(items):
sleep(0.1)
printProgressBar(i + 1, l, prefix = 'Progress:', suffix = 'Complete', length = 50)
|
|
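A small wrapper sketch reusing the same printProgressBar so callers can iterate a sized sequence without tracking indices themselves; printProgressBar and sleep come from the snippet above, the wrapper name is illustrative.

def progress(sequence, prefix='Progress:', suffix='Complete', length=50):
    # Wrap a sized sequence and redraw the bar after each yielded item.
    total = len(sequence)
    printProgressBar(0, total, prefix=prefix, suffix=suffix, length=length)
    for i, item in enumerate(sequence, 1):
        yield item
        printProgressBar(i, total, prefix=prefix, suffix=suffix, length=length)

for item in progress(list(range(0, 57))):
    sleep(0.1)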
b4f7bbe1d018316dce0c82b64c74efabbeea523e
|
backend/scripts/conversion/addprojs.py
|
backend/scripts/conversion/addprojs.py
|
#!/usr/bin/env python
import rethinkdb as r
from optparse import OptionParser
def main(conn):
groups = list(r.table('usergroups').run(conn))
for group in groups:
owner = group['owner']
projects = list(r.table('projects').filter({'owner': owner})
.pluck('id', 'name').run(conn))
group['projects'] = projects
r.table('usergroups').get(group['id']).update(group).run(conn)
samples = list(r.table('samples').run(conn))
for sample in samples:
owner = sample['owner']
projects = list(r.table('projects').filter({'owner': owner})
.pluck('id', 'name').run(conn))
sample['projects'] = projects
r.table('samples').get(sample['id']).update(sample).run(conn)
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-P", "--port", dest="port", type="int",
help="rethinkdb port", default=30815)
(options, args) = parser.parse_args()
conn = r.connect('localhost', options.port, db='materialscommons')
main(conn)
|
Add projects field to samples and usergroups.
|
Add projects field to samples and usergroups.
|
Python
|
mit
|
materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org
|
Add projects field to samples and usergroups.
|
#!/usr/bin/env python
import rethinkdb as r
from optparse import OptionParser
def main(conn):
groups = list(r.table('usergroups').run(conn))
for group in groups:
owner = group['owner']
projects = list(r.table('projects').filter({'owner': owner})
.pluck('id', 'name').run(conn))
group['projects'] = projects
r.table('usergroups').get(group['id']).update(group).run(conn)
samples = list(r.table('samples').run(conn))
for sample in samples:
owner = sample['owner']
projects = list(r.table('projects').filter({'owner': owner})
.pluck('id', 'name').run(conn))
sample['projects'] = projects
r.table('samples').get(sample['id']).update(sample).run(conn)
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-P", "--port", dest="port", type="int",
help="rethinkdb port", default=30815)
(options, args) = parser.parse_args()
conn = r.connect('localhost', options.port, db='materialscommons')
main(conn)
|
<commit_before><commit_msg>Add projects field to samples and usergroups.<commit_after>
|
#!/usr/bin/env python
import rethinkdb as r
from optparse import OptionParser
def main(conn):
groups = list(r.table('usergroups').run(conn))
for group in groups:
owner = group['owner']
projects = list(r.table('projects').filter({'owner': owner})
.pluck('id', 'name').run(conn))
group['projects'] = projects
r.table('usergroups').get(group['id']).update(group).run(conn)
samples = list(r.table('samples').run(conn))
for sample in samples:
owner = sample['owner']
projects = list(r.table('projects').filter({'owner': owner})
.pluck('id', 'name').run(conn))
sample['projects'] = projects
r.table('samples').get(sample['id']).update(sample).run(conn)
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-P", "--port", dest="port", type="int",
help="rethinkdb port", default=30815)
(options, args) = parser.parse_args()
conn = r.connect('localhost', options.port, db='materialscommons')
main(conn)
|
Add projects field to samples and usergroups.#!/usr/bin/env python
import rethinkdb as r
from optparse import OptionParser
def main(conn):
groups = list(r.table('usergroups').run(conn))
for group in groups:
owner = group['owner']
projects = list(r.table('projects').filter({'owner': owner})
.pluck('id', 'name').run(conn))
group['projects'] = projects
r.table('usergroups').get(group['id']).update(group).run(conn)
samples = list(r.table('samples').run(conn))
for sample in samples:
owner = sample['owner']
projects = list(r.table('projects').filter({'owner': owner})
.pluck('id', 'name').run(conn))
sample['projects'] = projects
r.table('samples').get(sample['id']).update(sample).run(conn)
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-P", "--port", dest="port", type="int",
help="rethinkdb port", default=30815)
(options, args) = parser.parse_args()
conn = r.connect('localhost', options.port, db='materialscommons')
main(conn)
|
<commit_before><commit_msg>Add projects field to samples and usergroups.<commit_after>#!/usr/bin/env python
import rethinkdb as r
from optparse import OptionParser
def main(conn):
groups = list(r.table('usergroups').run(conn))
for group in groups:
owner = group['owner']
projects = list(r.table('projects').filter({'owner': owner})
.pluck('id', 'name').run(conn))
group['projects'] = projects
r.table('usergroups').get(group['id']).update(group).run(conn)
samples = list(r.table('samples').run(conn))
for sample in samples:
owner = sample['owner']
projects = list(r.table('projects').filter({'owner': owner})
.pluck('id', 'name').run(conn))
sample['projects'] = projects
r.table('samples').get(sample['id']).update(sample).run(conn)
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-P", "--port", dest="port", type="int",
help="rethinkdb port", default=30815)
(options, args) = parser.parse_args()
conn = r.connect('localhost', options.port, db='materialscommons')
main(conn)
|
|
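A read-only verification sketch for the same database: count documents the backfill may have missed. Connection options mirror the script above; has_fields and not_ are standard ReQL terms in the Python driver.

import rethinkdb as r

conn = r.connect('localhost', 30815, db='materialscommons')
for table in ('usergroups', 'samples'):
    # Documents still lacking a 'projects' field after the migration run.
    missing = (r.table(table)
                .filter(lambda doc: doc.has_fields('projects').not_())
                .count().run(conn))
    print('{0}: {1} documents without a projects field'.format(table, missing))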
f595b62b53b25155e6e5aa65cd15e7393b979e82
|
tools/fix_from_default.py
|
tools/fix_from_default.py
|
import datetime
from appcomposer import app
from appcomposer.db import db
from appcomposer.models import ActiveTranslationMessage, TranslationBundle, TranslationMessageHistory
from appcomposer.translator.ops import get_golab_default_user
with app.app_context():
total = 0
for bundle in db.session.query(TranslationBundle).all():
if bundle.language == 'en_ALL':
continue
if not bundle.from_developer:
continue
original_bundle = ([ b for b in bundle.translation_url.bundles if b.language == 'en_ALL' and b.target == 'ALL' ] or [None])[0]
if original_bundle is None:
continue
original_messages = {
# key: value
}
for active_message in original_bundle.active_messages:
original_messages[active_message.key] = active_message.value
for active_message in bundle.active_messages:
if active_message.value == '{0}':
continue
if not active_message.taken_from_default and active_message.from_developer and active_message.value == original_messages.get(active_message.key, '________this.will.never.be'):
total += 1
print "Processing {0}::{1}_{2}::{3} ({4!r})".format(bundle.translation_url.url, bundle.language, bundle.target, active_message.key, active_message.value)
ph = active_message.history
new_history = TranslationMessageHistory(bundle = ph.bundle, key = ph.key, value = ph.value, user = get_golab_default_user(), datetime = datetime.datetime.utcnow(), parent_translation_id = ph.id, taken_from_default = True, same_tool = ph.same_tool, tool_id = ph.tool_id, fmt = ph.fmt, position = ph.position, category = ph.category, from_developer = ph.from_developer, namespace = ph.namespace)
db.session.add(new_history)
db.session.flush()
db.session.refresh(new_history)
am = active_message
new_active_message = ActiveTranslationMessage(bundle = bundle, key = am.key, value = am.value, history = new_history, datetime = datetime.datetime.utcnow(), taken_from_default = True, position = am.position, category = am.category, from_developer = am.from_developer, namespace = am.namespace, tool_id = am.tool_id, same_tool = am.same_tool, fmt = am.fmt)
db.session.delete(active_message)
db.session.add(new_active_message)
db.session.commit()
print("{0} records processed".format(total))
|
Fix the from_default field in the database
|
Fix the from_default field in the database
|
Python
|
bsd-2-clause
|
morelab/appcomposer,go-lab/appcomposer,porduna/appcomposer,morelab/appcomposer,porduna/appcomposer,go-lab/appcomposer,go-lab/appcomposer,morelab/appcomposer,morelab/appcomposer,porduna/appcomposer,porduna/appcomposer,go-lab/appcomposer
|
Fix the from_default field in the database
|
import datetime
from appcomposer import app
from appcomposer.db import db
from appcomposer.models import ActiveTranslationMessage, TranslationBundle, TranslationMessageHistory
from appcomposer.translator.ops import get_golab_default_user
with app.app_context():
total = 0
for bundle in db.session.query(TranslationBundle).all():
if bundle.language == 'en_ALL':
continue
if not bundle.from_developer:
continue
original_bundle = ([ b for b in bundle.translation_url.bundles if b.language == 'en_ALL' and b.target == 'ALL' ] or [None])[0]
if original_bundle is None:
continue
original_messages = {
# key: value
}
for active_message in original_bundle.active_messages:
original_messages[active_message.key] = active_message.value
for active_message in bundle.active_messages:
if active_message.value == '{0}':
continue
if not active_message.taken_from_default and active_message.from_developer and active_message.value == original_messages.get(active_message.key, '________this.will.never.be'):
total += 1
print "Processing {0}::{1}_{2}::{3} ({4!r})".format(bundle.translation_url.url, bundle.language, bundle.target, active_message.key, active_message.value)
ph = active_message.history
new_history = TranslationMessageHistory(bundle = ph.bundle, key = ph.key, value = ph.value, user = get_golab_default_user(), datetime = datetime.datetime.utcnow(), parent_translation_id = ph.id, taken_from_default = True, same_tool = ph.same_tool, tool_id = ph.tool_id, fmt = ph.fmt, position = ph.position, category = ph.category, from_developer = ph.from_developer, namespace = ph.namespace)
db.session.add(new_history)
db.session.flush()
db.session.refresh(new_history)
am = active_message
new_active_message = ActiveTranslationMessage(bundle = bundle, key = am.key, value = am.value, history = new_history, datetime = datetime.datetime.utcnow(), taken_from_default = True, position = am.position, category = am.category, from_developer = am.from_developer, namespace = am.namespace, tool_id = am.tool_id, same_tool = am.same_tool, fmt = am.fmt)
db.session.delete(active_message)
db.session.add(new_active_message)
db.session.commit()
print("{0} records processed".format(total))
|
<commit_before><commit_msg>Fix the from_default field in the database<commit_after>
|
import datetime
from appcomposer import app
from appcomposer.db import db
from appcomposer.models import ActiveTranslationMessage, TranslationBundle, TranslationMessageHistory
from appcomposer.translator.ops import get_golab_default_user
with app.app_context():
total = 0
for bundle in db.session.query(TranslationBundle).all():
if bundle.language == 'en_ALL':
continue
if not bundle.from_developer:
continue
original_bundle = ([ b for b in bundle.translation_url.bundles if b.language == 'en_ALL' and b.target == 'ALL' ] or [None])[0]
if original_bundle is None:
continue
original_messages = {
# key: value
}
for active_message in original_bundle.active_messages:
original_messages[active_message.key] = active_message.value
for active_message in bundle.active_messages:
if active_message.value == '{0}':
continue
if not active_message.taken_from_default and active_message.from_developer and active_message.value == original_messages.get(active_message.key, '________this.will.never.be'):
total += 1
print "Processing {0}::{1}_{2}::{3} ({4!r})".format(bundle.translation_url.url, bundle.language, bundle.target, active_message.key, active_message.value)
ph = active_message.history
new_history = TranslationMessageHistory(bundle = ph.bundle, key = ph.key, value = ph.value, user = get_golab_default_user(), datetime = datetime.datetime.utcnow(), parent_translation_id = ph.id, taken_from_default = True, same_tool = ph.same_tool, tool_id = ph.tool_id, fmt = ph.fmt, position = ph.position, category = ph.category, from_developer = ph.from_developer, namespace = ph.namespace)
db.session.add(new_history)
db.session.flush()
db.session.refresh(new_history)
am = active_message
new_active_message = ActiveTranslationMessage(bundle = bundle, key = am.key, value = am.value, history = new_history, datetime = datetime.datetime.utcnow(), taken_from_default = True, position = am.position, category = am.category, from_developer = am.from_developer, namespace = am.namespace, tool_id = am.tool_id, same_tool = am.same_tool, fmt = am.fmt)
db.session.delete(active_message)
db.session.add(new_active_message)
db.session.commit()
print("{0} records processed".format(total))
|
Fix the from_default field in the databaseimport datetime
from appcomposer import app
from appcomposer.db import db
from appcomposer.models import ActiveTranslationMessage, TranslationBundle, TranslationMessageHistory
from appcomposer.translator.ops import get_golab_default_user
with app.app_context():
total = 0
for bundle in db.session.query(TranslationBundle).all():
if bundle.language == 'en_ALL':
continue
if not bundle.from_developer:
continue
original_bundle = ([ b for b in bundle.translation_url.bundles if b.language == 'en_ALL' and b.target == 'ALL' ] or [None])[0]
if original_bundle is None:
continue
original_messages = {
# key: value
}
for active_message in original_bundle.active_messages:
original_messages[active_message.key] = active_message.value
for active_message in bundle.active_messages:
if active_message.value == '{0}':
continue
if not active_message.taken_from_default and active_message.from_developer and active_message.value == original_messages.get(active_message.key, '________this.will.never.be'):
total += 1
print "Processing {0}::{1}_{2}::{3} ({4!r})".format(bundle.translation_url.url, bundle.language, bundle.target, active_message.key, active_message.value)
ph = active_message.history
new_history = TranslationMessageHistory(bundle = ph.bundle, key = ph.key, value = ph.value, user = get_golab_default_user(), datetime = datetime.datetime.utcnow(), parent_translation_id = ph.id, taken_from_default = True, same_tool = ph.same_tool, tool_id = ph.tool_id, fmt = ph.fmt, position = ph.position, category = ph.category, from_developer = ph.from_developer, namespace = ph.namespace)
db.session.add(new_history)
db.session.flush()
db.session.refresh(new_history)
am = active_message
new_active_message = ActiveTranslationMessage(bundle = bundle, key = am.key, value = am.value, history = new_history, datetime = datetime.datetime.utcnow(), taken_from_default = True, position = am.position, category = am.category, from_developer = am.from_developer, namespace = am.namespace, tool_id = am.tool_id, same_tool = am.same_tool, fmt = am.fmt)
db.session.delete(active_message)
db.session.add(new_active_message)
db.session.commit()
print("{0} records processed".format(total))
|
<commit_before><commit_msg>Fix the from_default field in the database<commit_after>import datetime
from appcomposer import app
from appcomposer.db import db
from appcomposer.models import ActiveTranslationMessage, TranslationBundle, TranslationMessageHistory
from appcomposer.translator.ops import get_golab_default_user
with app.app_context():
total = 0
for bundle in db.session.query(TranslationBundle).all():
if bundle.language == 'en_ALL':
continue
if not bundle.from_developer:
continue
original_bundle = ([ b for b in bundle.translation_url.bundles if b.language == 'en_ALL' and b.target == 'ALL' ] or [None])[0]
if original_bundle is None:
continue
original_messages = {
# key: value
}
for active_message in original_bundle.active_messages:
original_messages[active_message.key] = active_message.value
for active_message in bundle.active_messages:
if active_message.value == '{0}':
continue
if not active_message.taken_from_default and active_message.from_developer and active_message.value == original_messages.get(active_message.key, '________this.will.never.be'):
total += 1
print "Processing {0}::{1}_{2}::{3} ({4!r})".format(bundle.translation_url.url, bundle.language, bundle.target, active_message.key, active_message.value)
ph = active_message.history
new_history = TranslationMessageHistory(bundle = ph.bundle, key = ph.key, value = ph.value, user = get_golab_default_user(), datetime = datetime.datetime.utcnow(), parent_translation_id = ph.id, taken_from_default = True, same_tool = ph.same_tool, tool_id = ph.tool_id, fmt = ph.fmt, position = ph.position, category = ph.category, from_developer = ph.from_developer, namespace = ph.namespace)
db.session.add(new_history)
db.session.flush()
db.session.refresh(new_history)
am = active_message
new_active_message = ActiveTranslationMessage(bundle = bundle, key = am.key, value = am.value, history = new_history, datetime = datetime.datetime.utcnow(), taken_from_default = True, position = am.position, category = am.category, from_developer = am.from_developer, namespace = am.namespace, tool_id = am.tool_id, same_tool = am.same_tool, fmt = am.fmt)
db.session.delete(active_message)
db.session.add(new_active_message)
db.session.commit()
print("{0} records processed".format(total))
|
|
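Because the script mutates rows as it scans, a read-only pass is useful first. A counting-only sketch that reuses the same predicate and models, changing nothing:

from appcomposer import app
from appcomposer.db import db
from appcomposer.models import TranslationBundle

with app.app_context():
    total = 0
    for bundle in db.session.query(TranslationBundle).all():
        if bundle.language == 'en_ALL' or not bundle.from_developer:
            continue
        original_bundle = ([ b for b in bundle.translation_url.bundles if b.language == 'en_ALL' and b.target == 'ALL' ] or [None])[0]
        if original_bundle is None:
            continue
        originals = dict((m.key, m.value) for m in original_bundle.active_messages)
        for m in bundle.active_messages:
            if m.value == '{0}':
                continue
            if not m.taken_from_default and m.from_developer and m.value == originals.get(m.key, '________this.will.never.be'):
                total += 1
    print("{0} records would be updated".format(total))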
ce736e08082b9c5048851a2db77aefbb389b39cb
|
openrcv/test/test_resource.py
|
openrcv/test/test_resource.py
|
from openrcv.resource import tracked
from openrcv.utiltest.helpers import skipIfTravis, UnitCase
class TrackedTest(UnitCase):
"""Tests for tracked()."""
def test(self):
seq = [1, "a"]
with self.assertRaises(ValueError) as cm:
with tracked(seq) as items:
for item in items:
int(item)
# Check the exception text.
err = cm.exception
self.assertEqual(str(err), "during item number 2: 'a'")
|
Add a unit test for tracked().
|
Add a unit test for tracked().
|
Python
|
mit
|
cjerdonek/open-rcv,cjerdonek/open-rcv
|
Add a unit test for tracked().
|
from openrcv.resource import tracked
from openrcv.utiltest.helpers import skipIfTravis, UnitCase
class TrackedTest(UnitCase):
"""Tests for tracked()."""
def test(self):
seq = [1, "a"]
with self.assertRaises(ValueError) as cm:
with tracked(seq) as items:
for item in items:
int(item)
# Check the exception text.
err = cm.exception
self.assertEqual(str(err), "during item number 2: 'a'")
|
<commit_before><commit_msg>Add a unit test for tracked().<commit_after>
|
from openrcv.resource import tracked
from openrcv.utiltest.helpers import skipIfTravis, UnitCase
class TrackedTest(UnitCase):
"""Tests for tracked()."""
def test(self):
seq = [1, "a"]
with self.assertRaises(ValueError) as cm:
with tracked(seq) as items:
for item in items:
int(item)
# Check the exception text.
err = cm.exception
self.assertEqual(str(err), "during item number 2: 'a'")
|
Add a unit test for tracked().
from openrcv.resource import tracked
from openrcv.utiltest.helpers import skipIfTravis, UnitCase
class TrackedTest(UnitCase):
"""Tests for tracked()."""
def test(self):
seq = [1, "a"]
with self.assertRaises(ValueError) as cm:
with tracked(seq) as items:
for item in items:
int(item)
# Check the exception text.
err = cm.exception
self.assertEqual(str(err), "during item number 2: 'a'")
|
<commit_before><commit_msg>Add a unit test for tracked().<commit_after>
from openrcv.resource import tracked
from openrcv.utiltest.helpers import skipIfTravis, UnitCase
class TrackedTest(UnitCase):
"""Tests for tracked()."""
def test(self):
seq = [1, "a"]
with self.assertRaises(ValueError) as cm:
with tracked(seq) as items:
for item in items:
int(item)
# Check the exception text.
err = cm.exception
self.assertEqual(str(err), "during item number 2: 'a'")
|
|
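The helper under test is not shown here; purely as an assumption about its shape, a minimal sketch of a tracked() that would satisfy this assertion — a context manager yielding a wrapped iterator and re-raising with the 1-based item number:

from contextlib import contextmanager

@contextmanager
def tracked(seq):
    # Assumed implementation, not the project's actual code.
    state = {'index': 0, 'item': None}

    def iterate():
        for i, item in enumerate(seq, start=1):
            state['index'], state['item'] = i, item
            yield item

    try:
        yield iterate()
    except Exception as exc:
        raise type(exc)("during item number {0}: {1!r}".format(state['index'], state['item']))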
a0deef06e23e4c81e55d83afb63d4bbab1bdaaa5
|
migrations/versions/0217_default_email_branding.py
|
migrations/versions/0217_default_email_branding.py
|
"""
Revision ID: 0217_default_email_branding
Revises: 0216_remove_colours
Create Date: 2018-08-24 13:36:49.346156
"""
from alembic import op
from app.models import BRANDING_ORG
revision = '0217_default_email_branding'
down_revision = '0216_remove_colours'
def upgrade():
op.execute("""
update
email_branding
set
brand_type = '{}'
where
brand_type = null
""".format(BRANDING_ORG))
def downgrade():
pass
|
Set branding_type to org if it’s none
|
Set branding_type to org if it’s none
So later we can:
- make it non-nullable later
- remove `govuk` as an option
This is mostly for people’s local databases, the manual work here has
been done on production already.
|
Python
|
mit
|
alphagov/notifications-api,alphagov/notifications-api
|
Set branding_type to org if it’s none
So later we can:
- make it non-nullable later
- remove `govuk` as an option
This is mostly for people’s local databases, the manual work here has
been done on production already.
|
"""
Revision ID: 0217_default_email_branding
Revises: 0216_remove_colours
Create Date: 2018-08-24 13:36:49.346156
"""
from alembic import op
from app.models import BRANDING_ORG
revision = '0217_default_email_branding'
down_revision = '0216_remove_colours'
def upgrade():
op.execute("""
update
email_branding
set
brand_type = '{}'
where
brand_type = null
""".format(BRANDING_ORG))
def downgrade():
pass
|
<commit_before><commit_msg>Set branding_type to org if it’s none
So later we can:
- make it non-nullable later
- remove `govuk` as an option
This is mostly for people’s local databases, the manual work here has
been done on production already.<commit_after>
|
"""
Revision ID: 0217_default_email_branding
Revises: 0216_remove_colours
Create Date: 2018-08-24 13:36:49.346156
"""
from alembic import op
from app.models import BRANDING_ORG
revision = '0217_default_email_branding'
down_revision = '0216_remove_colours'
def upgrade():
op.execute("""
update
email_branding
set
brand_type = '{}'
where
brand_type = null
""".format(BRANDING_ORG))
def downgrade():
pass
|
Set branding_type to org if it’s none
So later we can:
- make it non-nullable later
- remove `govuk` as an option
This is mostly for people’s local databases, the manual work here has
been done on production already."""
Revision ID: 0217_default_email_branding
Revises: 0216_remove_colours
Create Date: 2018-08-24 13:36:49.346156
"""
from alembic import op
from app.models import BRANDING_ORG
revision = '0217_default_email_branding'
down_revision = '0216_remove_colours'
def upgrade():
op.execute("""
update
email_branding
set
brand_type = '{}'
where
brand_type = null
""".format(BRANDING_ORG))
def downgrade():
pass
|
<commit_before><commit_msg>Set branding_type to org if it’s none
So later we can:
- make it non-nullable later
- remove `govuk` as an option
This is mostly for people’s local databases, the manual work here has
been done on production already.<commit_after>"""
Revision ID: 0217_default_email_branding
Revises: 0216_remove_colours
Create Date: 2018-08-24 13:36:49.346156
"""
from alembic import op
from app.models import BRANDING_ORG
revision = '0217_default_email_branding'
down_revision = '0216_remove_colours'
def upgrade():
op.execute("""
update
email_branding
set
brand_type = '{}'
where
brand_type = null
""".format(BRANDING_ORG))
def downgrade():
pass
|
|
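One caveat worth noting: in standard SQL, brand_type = null is never true (comparisons with NULL yield NULL), so the UPDATE above matches no rows. A corrected sketch of the same upgrade step uses IS NULL:

def upgrade():
    # Same statement, with the NULL test PostgreSQL actually honours.
    op.execute("""
        update
            email_branding
        set
            brand_type = '{}'
        where
            brand_type is null
    """.format(BRANDING_ORG))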
9f2231ecd546f9d29d3ebbd354c3376cb7ebd417
|
migrations/versions/53eb8abce4de_industry_index.py
|
migrations/versions/53eb8abce4de_industry_index.py
|
"""industry_index
Revision ID: 53eb8abce4de
Revises: 1e3725cda0b7
Create Date: 2015-06-19 10:38:31.398000
"""
# revision identifiers, used by Alembic.
revision = '53eb8abce4de'
down_revision = '1e3725cda0b7'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('industry_index',
sa.Column('solarsystem_id', sa.Integer(), nullable=False),
sa.Column('activity', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('date', sa.DateTime(timezone=True), nullable=False),
sa.Column('cost_index', sa.Numeric(precision=20, scale=19, decimal_return_scale=19), nullable=True),
sa.ForeignKeyConstraint(['solarsystem_id'], ['solar_system.id'], ),
sa.PrimaryKeyConstraint('solarsystem_id', 'activity', 'date')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('industry_index')
### end Alembic commands ###
|
Add new table for industry index history
|
Add new table for industry index history
|
Python
|
bsd-3-clause
|
Kyria/LazyBlacksmith,Kyria/LazyBlacksmith,Kyria/LazyBlacksmith,Kyria/LazyBlacksmith
|
Add new table for industry index history
|
"""industry_index
Revision ID: 53eb8abce4de
Revises: 1e3725cda0b7
Create Date: 2015-06-19 10:38:31.398000
"""
# revision identifiers, used by Alembic.
revision = '53eb8abce4de'
down_revision = '1e3725cda0b7'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('industry_index',
sa.Column('solarsystem_id', sa.Integer(), nullable=False),
sa.Column('activity', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('date', sa.DateTime(timezone=True), nullable=False),
sa.Column('cost_index', sa.Numeric(precision=20, scale=19, decimal_return_scale=19), nullable=True),
sa.ForeignKeyConstraint(['solarsystem_id'], ['solar_system.id'], ),
sa.PrimaryKeyConstraint('solarsystem_id', 'activity', 'date')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('industry_index')
### end Alembic commands ###
|
<commit_before><commit_msg>Add new table for industry index history<commit_after>
|
"""industry_index
Revision ID: 53eb8abce4de
Revises: 1e3725cda0b7
Create Date: 2015-06-19 10:38:31.398000
"""
# revision identifiers, used by Alembic.
revision = '53eb8abce4de'
down_revision = '1e3725cda0b7'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('industry_index',
sa.Column('solarsystem_id', sa.Integer(), nullable=False),
sa.Column('activity', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('date', sa.DateTime(timezone=True), nullable=False),
sa.Column('cost_index', sa.Numeric(precision=20, scale=19, decimal_return_scale=19), nullable=True),
sa.ForeignKeyConstraint(['solarsystem_id'], ['solar_system.id'], ),
sa.PrimaryKeyConstraint('solarsystem_id', 'activity', 'date')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('industry_index')
### end Alembic commands ###
|
Add new table for industry index history"""industry_index
Revision ID: 53eb8abce4de
Revises: 1e3725cda0b7
Create Date: 2015-06-19 10:38:31.398000
"""
# revision identifiers, used by Alembic.
revision = '53eb8abce4de'
down_revision = '1e3725cda0b7'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('industry_index',
sa.Column('solarsystem_id', sa.Integer(), nullable=False),
sa.Column('activity', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('date', sa.DateTime(timezone=True), nullable=False),
sa.Column('cost_index', sa.Numeric(precision=20, scale=19, decimal_return_scale=19), nullable=True),
sa.ForeignKeyConstraint(['solarsystem_id'], ['solar_system.id'], ),
sa.PrimaryKeyConstraint('solarsystem_id', 'activity', 'date')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('industry_index')
### end Alembic commands ###
|
<commit_before><commit_msg>Add new table for industry index history<commit_after>"""industry_index
Revision ID: 53eb8abce4de
Revises: 1e3725cda0b7
Create Date: 2015-06-19 10:38:31.398000
"""
# revision identifiers, used by Alembic.
revision = '53eb8abce4de'
down_revision = '1e3725cda0b7'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('industry_index',
sa.Column('solarsystem_id', sa.Integer(), nullable=False),
sa.Column('activity', sa.Integer(), autoincrement=False, nullable=False),
sa.Column('date', sa.DateTime(timezone=True), nullable=False),
sa.Column('cost_index', sa.Numeric(precision=20, scale=19, decimal_return_scale=19), nullable=True),
sa.ForeignKeyConstraint(['solarsystem_id'], ['solar_system.id'], ),
sa.PrimaryKeyConstraint('solarsystem_id', 'activity', 'date')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('industry_index')
### end Alembic commands ###
|
|
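For readers mapping the table above into the ORM, a declarative model with the same composite primary key might look like the following sketch; the class name is an illustrative assumption, and only the column definitions mirror the migration.

from sqlalchemy import Column, DateTime, ForeignKey, Integer, Numeric
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()


class IndustryIndex(Base):
    # Hypothetical mapping for the industry_index table created above.
    __tablename__ = 'industry_index'

    # One cost index per solar system, activity and timestamp.
    solarsystem_id = Column(Integer, ForeignKey('solar_system.id'), primary_key=True)
    activity = Column(Integer, primary_key=True, autoincrement=False)
    date = Column(DateTime(timezone=True), primary_key=True)
    cost_index = Column(Numeric(precision=20, scale=19, decimal_return_scale=19))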
71f3befd5bc3763c302a78d1147d4f203eac71c1
|
test/forward/TestForwardDeclaration.py
|
test/forward/TestForwardDeclaration.py
|
"""Test that forward declaration of a data structure gets resolved correctly."""
import os, time
import unittest2
import lldb
from lldbtest import *
class ForwardDeclarationTestCase(TestBase):
mydir = "forward"
@unittest2.skipUnless(sys.platform.startswith("darwin"), "requires Darwin")
def test_with_dsym_and_run_command(self):
"""Display *bar_ptr when stopped on a function with forward declaration of struct bar."""
self.buildDsym()
self.forward_declaration()
# rdar://problem/8546815
# './dotest.py -v -t forward' fails for test_with_dwarf_and_run_command
@unittest2.expectedFailure
def test_with_dwarf_and_run_command(self):
"""Display *bar_ptr when stopped on a function with forward declaration of struct bar."""
self.buildDwarf()
self.forward_declaration()
def forward_declaration(self):
"""Display *bar_ptr when stopped on a function with forward declaration of struct bar."""
exe = os.path.join(os.getcwd(), "a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
# Break inside the foo function which takes a bar_ptr argument.
self.expect("breakpoint set -n foo", BREAKPOINT_CREATED,
startstr = "Breakpoint created: 1: name = 'foo', locations = 1")
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs = ['state is Stopped',
'stop reason = breakpoint'])
# The breakpoint should have a hit count of 1.
self.expect("breakpoint list", BREAKPOINT_HIT_ONCE,
substrs = [' resolved, hit count = 1'])
# This should display correctly.
# Note that the member fields of a = 1 and b = 2 are by design.
self.expect("frame variable *bar_ptr", VARIABLES_DISPLAYED_CORRECTLY,
substrs = ['(struct bar *) bar_ptr = ',
'(int) a = 1',
'(int) b = 2'])
# And so should this.
self.expect("expr *bar_ptr", VARIABLES_DISPLAYED_CORRECTLY,
substrs = ['(struct bar)',
'(int) a = 1',
'(int) b = 2'])
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
|
Add a test case for the test/forward directory with @expectedFailure decorator for test_with_dwarf_and_run_command(self).
|
Add a test case for the test/forward directory with @expectedFailure decorator
for test_with_dwarf_and_run_command(self).
git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@116416 91177308-0d34-0410-b5e6-96231b3b80d8
|
Python
|
apache-2.0
|
apple/swift-lldb,llvm-mirror/lldb,llvm-mirror/lldb,apple/swift-lldb,apple/swift-lldb,llvm-mirror/lldb,apple/swift-lldb,apple/swift-lldb,apple/swift-lldb,llvm-mirror/lldb,llvm-mirror/lldb
|
Add a test case for the test/forward directory with @expectedFailure decorator
for test_with_dwarf_and_run_command(self).
git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@116416 91177308-0d34-0410-b5e6-96231b3b80d8
|
"""Test that forward declaration of a data structure gets resolved correctly."""
import os, time
import unittest2
import lldb
from lldbtest import *
class ForwardDeclarationTestCase(TestBase):
mydir = "forward"
@unittest2.skipUnless(sys.platform.startswith("darwin"), "requires Darwin")
def test_with_dsym_and_run_command(self):
"""Display *bar_ptr when stopped on a function with forward declaration of struct bar."""
self.buildDsym()
self.forward_declaration()
# rdar://problem/8546815
# './dotest.py -v -t forward' fails for test_with_dwarf_and_run_command
@unittest2.expectedFailure
def test_with_dwarf_and_run_command(self):
"""Display *bar_ptr when stopped on a function with forward declaration of struct bar."""
self.buildDwarf()
self.forward_declaration()
def forward_declaration(self):
"""Display *bar_ptr when stopped on a function with forward declaration of struct bar."""
exe = os.path.join(os.getcwd(), "a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
# Break inside the foo function which takes a bar_ptr argument.
self.expect("breakpoint set -n foo", BREAKPOINT_CREATED,
startstr = "Breakpoint created: 1: name = 'foo', locations = 1")
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs = ['state is Stopped',
'stop reason = breakpoint'])
# The breakpoint should have a hit count of 1.
self.expect("breakpoint list", BREAKPOINT_HIT_ONCE,
substrs = [' resolved, hit count = 1'])
# This should display correctly.
# Note that the member fields of a = 1 and b = 2 are by design.
self.expect("frame variable *bar_ptr", VARIABLES_DISPLAYED_CORRECTLY,
substrs = ['(struct bar *) bar_ptr = ',
'(int) a = 1',
'(int) b = 2'])
# And so should this.
self.expect("expr *bar_ptr", VARIABLES_DISPLAYED_CORRECTLY,
substrs = ['(struct bar)',
'(int) a = 1',
'(int) b = 2'])
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
|
<commit_before><commit_msg>Add a test case for the test/forward directory with @expectedFailure decorator
for test_with_dwarf_and_run_command(self).
git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@116416 91177308-0d34-0410-b5e6-96231b3b80d8<commit_after>
|
"""Test that forward declaration of a data structure gets resolved correctly."""
import os, time
import unittest2
import lldb
from lldbtest import *
class ForwardDeclarationTestCase(TestBase):
mydir = "forward"
@unittest2.skipUnless(sys.platform.startswith("darwin"), "requires Darwin")
def test_with_dsym_and_run_command(self):
"""Display *bar_ptr when stopped on a function with forward declaration of struct bar."""
self.buildDsym()
self.forward_declaration()
# rdar://problem/8546815
# './dotest.py -v -t forward' fails for test_with_dwarf_and_run_command
@unittest2.expectedFailure
def test_with_dwarf_and_run_command(self):
"""Display *bar_ptr when stopped on a function with forward declaration of struct bar."""
self.buildDwarf()
self.forward_declaration()
def forward_declaration(self):
"""Display *bar_ptr when stopped on a function with forward declaration of struct bar."""
exe = os.path.join(os.getcwd(), "a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
# Break inside the foo function which takes a bar_ptr argument.
self.expect("breakpoint set -n foo", BREAKPOINT_CREATED,
startstr = "Breakpoint created: 1: name = 'foo', locations = 1")
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs = ['state is Stopped',
'stop reason = breakpoint'])
# The breakpoint should have a hit count of 1.
self.expect("breakpoint list", BREAKPOINT_HIT_ONCE,
substrs = [' resolved, hit count = 1'])
# This should display correctly.
# Note that the member fields of a = 1 and b = 2 are by design.
self.expect("frame variable *bar_ptr", VARIABLES_DISPLAYED_CORRECTLY,
substrs = ['(struct bar *) bar_ptr = ',
'(int) a = 1',
'(int) b = 2'])
# And so should this.
self.expect("expr *bar_ptr", VARIABLES_DISPLAYED_CORRECTLY,
substrs = ['(struct bar)',
'(int) a = 1',
'(int) b = 2'])
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
|
Add a test case for the test/forward directory with @expectedFailure decorator
for test_with_dwarf_and_run_command(self).
git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@116416 91177308-0d34-0410-b5e6-96231b3b80d8"""Test that forward declaration of a data structure gets resolved correctly."""
import os, time
import unittest2
import lldb
from lldbtest import *
class ForwardDeclarationTestCase(TestBase):
mydir = "forward"
@unittest2.skipUnless(sys.platform.startswith("darwin"), "requires Darwin")
def test_with_dsym_and_run_command(self):
"""Display *bar_ptr when stopped on a function with forward declaration of struct bar."""
self.buildDsym()
self.forward_declaration()
# rdar://problem/8546815
# './dotest.py -v -t forward' fails for test_with_dwarf_and_run_command
@unittest2.expectedFailure
def test_with_dwarf_and_run_command(self):
"""Display *bar_ptr when stopped on a function with forward declaration of struct bar."""
self.buildDwarf()
self.forward_declaration()
def forward_declaration(self):
"""Display *bar_ptr when stopped on a function with forward declaration of struct bar."""
exe = os.path.join(os.getcwd(), "a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
# Break inside the foo function which takes a bar_ptr argument.
self.expect("breakpoint set -n foo", BREAKPOINT_CREATED,
startstr = "Breakpoint created: 1: name = 'foo', locations = 1")
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs = ['state is Stopped',
'stop reason = breakpoint'])
# The breakpoint should have a hit count of 1.
self.expect("breakpoint list", BREAKPOINT_HIT_ONCE,
substrs = [' resolved, hit count = 1'])
# This should display correctly.
# Note that the member fields of a = 1 and b = 2 are by design.
self.expect("frame variable *bar_ptr", VARIABLES_DISPLAYED_CORRECTLY,
substrs = ['(struct bar *) bar_ptr = ',
'(int) a = 1',
'(int) b = 2'])
# And so should this.
self.expect("expr *bar_ptr", VARIABLES_DISPLAYED_CORRECTLY,
substrs = ['(struct bar)',
'(int) a = 1',
'(int) b = 2'])
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
|
<commit_before><commit_msg>Add a test case for the test/forward directory with @expectedFailure decorator
for test_with_dwarf_and_run_command(self).
git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@116416 91177308-0d34-0410-b5e6-96231b3b80d8<commit_after>"""Test that forward declaration of a data structure gets resolved correctly."""
import os, time
import unittest2
import lldb
from lldbtest import *
class ForwardDeclarationTestCase(TestBase):
mydir = "forward"
@unittest2.skipUnless(sys.platform.startswith("darwin"), "requires Darwin")
def test_with_dsym_and_run_command(self):
"""Display *bar_ptr when stopped on a function with forward declaration of struct bar."""
self.buildDsym()
self.forward_declaration()
# rdar://problem/8546815
# './dotest.py -v -t forward' fails for test_with_dwarf_and_run_command
@unittest2.expectedFailure
def test_with_dwarf_and_run_command(self):
"""Display *bar_ptr when stopped on a function with forward declaration of struct bar."""
self.buildDwarf()
self.forward_declaration()
def forward_declaration(self):
"""Display *bar_ptr when stopped on a function with forward declaration of struct bar."""
exe = os.path.join(os.getcwd(), "a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
# Break inside the foo function which takes a bar_ptr argument.
self.expect("breakpoint set -n foo", BREAKPOINT_CREATED,
startstr = "Breakpoint created: 1: name = 'foo', locations = 1")
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs = ['state is Stopped',
'stop reason = breakpoint'])
# The breakpoint should have a hit count of 1.
self.expect("breakpoint list", BREAKPOINT_HIT_ONCE,
substrs = [' resolved, hit count = 1'])
# This should display correctly.
# Note that the member fields of a = 1 and b = 2 are by design.
self.expect("frame variable *bar_ptr", VARIABLES_DISPLAYED_CORRECTLY,
substrs = ['(struct bar *) bar_ptr = ',
'(int) a = 1',
'(int) b = 2'])
# And so should this.
self.expect("expr *bar_ptr", VARIABLES_DISPLAYED_CORRECTLY,
substrs = ['(struct bar)',
'(int) a = 1',
'(int) b = 2'])
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
|
|
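The @expectedFailure usage above is plain unittest2 behaviour: the decorated test still runs, but its failure is reported as expected, so the suite stays green while the tracked bug is open. A minimal self-contained sketch of the pattern, with invented test names:

import unittest2


class KnownIssueExample(unittest2.TestCase):

    @unittest2.expectedFailure
    def test_tracked_bug(self):
        # Fails today; counted as an expected failure, not an error.
        self.assertEqual(1, 2)

    def test_working_feature(self):
        self.assertEqual(1, 1)


if __name__ == '__main__':
    unittest2.main()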
18be114d831d9c7204a3eaefae769b83ca674651
|
moksha/hook.py
|
moksha/hook.py
|
# This file is part of Moksha.
#
# Moksha is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Moksha is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Moksha. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2008, Red Hat, Inc.
# Authors: Luke Macken <lmacken@redhat.com>
import logging
import sqlalchemy
import pkg_resources
from sqlalchemy.orm.interfaces import MapperExtension
log = logging.getLogger(__name__)
class Hook(object):
""" The parent Hook class """
def after_insert(self, instance):
pass
def after_update(self, instance):
pass
def after_delete(self, instance):
pass
class MokshaHookMapperExtension(MapperExtension):
"""
This is a SQLAlchemy MapperExtension that handles loading up all of the
moksha hooks, and running them when new objects are inserted, updated,
and deleted from the model.
"""
hooks = {'after_insert': [], 'after_update': [], 'after_delete': []}
def __init__(self):
super(MokshaHookMapperExtension, self).__init__()
for hook_entry in pkg_resources.iter_entry_points('moksha.hook'):
log.info('Loading %s hook' % hook_entry.name)
hook_class = hook_entry.load()
self.hooks[hook_entry.name].append(hook_class())
def after_insert(self, mapper, connection, instance):
for hook in self.hooks['after_insert']:
hook.after_insert(instance)
return sqlalchemy.orm.EXT_CONTINUE
def after_update(self, mapper, connection, instance):
for hook in self.hooks['after_update']:
hook.after_update(instance)
return sqlalchemy.orm.EXT_CONTINUE
def after_delete(self, mapper, connection, instance):
for hook in self.hooks['after_delete']:
hook.after_delete(instance)
return sqlalchemy.orm.EXT_CONTINUE
|
Add an initial "Hook" object and MokshaHookMapperExtension.
|
Add an initial "Hook" object and MokshaHookMapperExtension.
These are designed to make it simple to write plugins that
monitor and analyze database activity.
|
Python
|
apache-2.0
|
pombredanne/moksha,ralphbean/moksha,mokshaproject/moksha,pombredanne/moksha,lmacken/moksha,ralphbean/moksha,pombredanne/moksha,ralphbean/moksha,pombredanne/moksha,lmacken/moksha,lmacken/moksha,mokshaproject/moksha,mokshaproject/moksha,mokshaproject/moksha
|
Add an initial "Hook" object and MokshaHookMapperExtension.
These are designed to make it simple to write plugins that
monitor and analyze database activity.
|
# This file is part of Moksha.
#
# Moksha is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Moksha is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Moksha. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2008, Red Hat, Inc.
# Authors: Luke Macken <lmacken@redhat.com>
import logging
import sqlalchemy
import pkg_resources
from sqlalchemy.orm.interfaces import MapperExtension
log = logging.getLogger(__name__)
class Hook(object):
""" The parent Hook class """
def after_insert(self, instance):
pass
def after_update(self, instance):
pass
def after_delete(self, instance):
pass
class MokshaHookMapperExtension(MapperExtension):
"""
This is a SQLAlchemy MapperExtension that handles loading up all of the
moksha hooks, and running them when new objects are inserted, updated,
and deleted from the model.
"""
hooks = {'after_insert': [], 'after_update': [], 'after_delete': []}
def __init__(self):
super(MokshaHookMapperExtension, self).__init__()
for hook_entry in pkg_resources.iter_entry_points('moksha.hook'):
log.info('Loading %s hook' % hook_entry.name)
hook_class = hook_entry.load()
self.hooks[hook_entry.name].append(hook_class())
def after_insert(self, mapper, connection, instance):
for hook in self.hooks['after_insert']:
hook.after_insert(instance)
return sqlalchemy.orm.EXT_CONTINUE
def after_update(self, mapper, connection, instance):
for hook in self.hooks['after_update']:
hook.after_update(instance)
return sqlalchemy.orm.EXT_CONTINUE
def after_delete(self, mapper, connection, instance):
for hook in self.hooks['after_delete']:
hook.after_delete(instance)
return sqlalchemy.orm.EXT_CONTINUE
|
<commit_before><commit_msg>Add an initial "Hook" object and MokshaHookMapperExtension.
These are designed to make it simple to write plugins that
monitor and analyze database activity.<commit_after>
|
# This file is part of Moksha.
#
# Moksha is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Moksha is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Moksha. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2008, Red Hat, Inc.
# Authors: Luke Macken <lmacken@redhat.com>
import logging
import sqlalchemy
import pkg_resources
from sqlalchemy.orm.interfaces import MapperExtension
log = logging.getLogger(__name__)
class Hook(object):
""" The parent Hook class """
def after_insert(self, instance):
pass
def after_update(self, instance):
pass
def after_delete(self, instance):
pass
class MokshaHookMapperExtension(MapperExtension):
"""
This is a SQLAlchemy MapperExtension that handles loading up all of the
moksha hooks, and running them when new objects are inserted, updated,
and deleted from the model.
"""
hooks = {'after_insert': [], 'after_update': [], 'after_delete': []}
def __init__(self):
super(MokshaHookMapperExtension, self).__init__()
for hook_entry in pkg_resources.iter_entry_points('moksha.hook'):
log.info('Loading %s hook' % hook_entry.name)
hook_class = hook_entry.load()
self.hooks[hook_entry.name].append(hook_class())
def after_insert(self, mapper, connection, instance):
for hook in self.hooks['after_insert']:
hook.after_insert(instance)
return sqlalchemy.orm.EXT_CONTINUE
def after_update(self, mapper, connection, instance):
for hook in self.hooks['after_update']:
hook.after_update(instance)
return sqlalchemy.orm.EXT_CONTINUE
def after_delete(self, mapper, connection, instance):
for hook in self.hooks['after_delete']:
hook.after_delete(instance)
return sqlalchemy.orm.EXT_CONTINUE
|
Add an initial "Hook" object and MokshaHookMapperExtension.
These are designed to make it simple to write plugins that
monitor and analyze database activity.# This file is part of Moksha.
#
# Moksha is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Moksha is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Moksha. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2008, Red Hat, Inc.
# Authors: Luke Macken <lmacken@redhat.com>
import logging
import sqlalchemy
import pkg_resources
from sqlalchemy.orm.interfaces import MapperExtension
log = logging.getLogger(__name__)
class Hook(object):
""" The parent Hook class """
def after_insert(self, instance):
pass
def after_update(self, instance):
pass
def after_delete(self, instance):
pass
class MokshaHookMapperExtension(MapperExtension):
"""
This is a SQLAlchemy MapperExtension that handles loading up all of the
moksha hooks, and running them when new objects are inserted, updated,
and deleted from the model.
"""
hooks = {'after_insert': [], 'after_update': [], 'after_delete': []}
def __init__(self):
super(MokshaHookMapperExtension, self).__init__()
for hook_entry in pkg_resources.iter_entry_points('moksha.hook'):
log.info('Loading %s hook' % hook_entry.name)
hook_class = hook_entry.load()
self.hooks[hook_entry.name].append(hook_class())
def after_insert(self, mapper, connection, instance):
for hook in self.hooks['after_insert']:
hook.after_insert(instance)
return sqlalchemy.orm.EXT_CONTINUE
def after_update(self, mapper, connection, instance):
for hook in self.hooks['after_update']:
hook.after_update(instance)
return sqlalchemy.orm.EXT_CONTINUE
def after_delete(self, mapper, connection, instance):
for hook in self.hooks['after_delete']:
hook.after_delete(instance)
return sqlalchemy.orm.EXT_CONTINUE
|
<commit_before><commit_msg>Add an initial "Hook" object and MokshaHookMapperExtension.
These are designed to make it simple to write plugins that
monitor and analyze database activity.<commit_after># This file is part of Moksha.
#
# Moksha is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Moksha is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Moksha. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2008, Red Hat, Inc.
# Authors: Luke Macken <lmacken@redhat.com>
import logging
import sqlalchemy
import pkg_resources
from sqlalchemy.orm.interfaces import MapperExtension
log = logging.getLogger(__name__)
class Hook(object):
""" The parent Hook class """
def after_insert(self, instance):
pass
def after_update(self, instance):
pass
def after_delete(self, instance):
pass
class MokshaHookMapperExtension(MapperExtension):
"""
This is a SQLAlchemy MapperExtension that handles loading up all of the
moksha hooks, and running them when new objects are inserted, updated,
and deleted from the model.
"""
hooks = {'after_insert': [], 'after_update': [], 'after_delete': []}
def __init__(self):
super(MokshaHookMapperExtension, self).__init__()
for hook_entry in pkg_resources.iter_entry_points('moksha.hook'):
log.info('Loading %s hook' % hook_entry.name)
hook_class = hook_entry.load()
self.hooks[hook_entry.name].append(hook_class())
def after_insert(self, mapper, connection, instance):
for hook in self.hooks['after_insert']:
hook.after_insert(instance)
return sqlalchemy.orm.EXT_CONTINUE
def after_update(self, mapper, connection, instance):
for hook in self.hooks['after_update']:
hook.after_update(instance)
return sqlalchemy.orm.EXT_CONTINUE
def after_delete(self, mapper, connection, instance):
for hook in self.hooks['after_delete']:
hook.after_delete(instance)
return sqlalchemy.orm.EXT_CONTINUE
|
|
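To see how a plugin would attach to the mapper extension above, here is a hedged sketch of a Hook subclass and the moksha.hook entry point it would declare in its own setup.py; the package, class and distribution names are invented, and the entry point name has to match one of the keys the extension knows about ('after_insert', 'after_update' or 'after_delete').

# my_moksha_plugin/hooks.py -- hypothetical plugin module
import logging

from moksha.hook import Hook

log = logging.getLogger(__name__)


class AuditHook(Hook):
    # Logs every newly inserted model object.
    def after_insert(self, instance):
        log.info('New row created: %r', instance)


# setup.py of the hypothetical plugin distribution
from setuptools import setup

setup(
    name='my-moksha-plugin',
    entry_points={
        'moksha.hook': [
            'after_insert = my_moksha_plugin.hooks:AuditHook',
        ],
    },
)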
abe8bbd2e602b2f747947a584d752dad5159edfb
|
src/setup.py2app.py
|
src/setup.py2app.py
|
"""
This is a setup.py script generated by py2applet
Usage:
python setup.py py2app
"""
from setuptools import setup
import opencmiss.zinc.context
APP = ['opencmiss/neon/neon.py']
DATA_FILES = []
OPTIONS = {'argv_emulation': True,
# 'frameworks': ['/Users/hsor001/work/opencmiss-software/zinc-software/zinc/library-build-make/core/libzinc.r11285M.3.0.1.dylib']
}
setup(
app=APP,
data_files=DATA_FILES,
options={'py2app': OPTIONS},
setup_requires=['py2app'],
)
|
Add basic setup.py file to create application bundles on OS X.
|
Add basic setup.py file to create application bundles on OS X.
|
Python
|
apache-2.0
|
alan-wu/neon
|
Add basic setup.py file to create application bundles on OS X.
|
"""
This is a setup.py script generated by py2applet
Usage:
python setup.py py2app
"""
from setuptools import setup
import opencmiss.zinc.context
APP = ['opencmiss/neon/neon.py']
DATA_FILES = []
OPTIONS = {'argv_emulation': True,
# 'frameworks': ['/Users/hsor001/work/opencmiss-software/zinc-software/zinc/library-build-make/core/libzinc.r11285M.3.0.1.dylib']
}
setup(
app=APP,
data_files=DATA_FILES,
options={'py2app': OPTIONS},
setup_requires=['py2app'],
)
|
<commit_before><commit_msg>Add basic setup.py file to create application bundles on OS X.<commit_after>
|
"""
This is a setup.py script generated by py2applet
Usage:
python setup.py py2app
"""
from setuptools import setup
import opencmiss.zinc.context
APP = ['opencmiss/neon/neon.py']
DATA_FILES = []
OPTIONS = {'argv_emulation': True,
# 'frameworks': ['/Users/hsor001/work/opencmiss-software/zinc-software/zinc/library-build-make/core/libzinc.r11285M.3.0.1.dylib']
}
setup(
app=APP,
data_files=DATA_FILES,
options={'py2app': OPTIONS},
setup_requires=['py2app'],
)
|
Add basic setup.py file to create application bundles on OS X."""
This is a setup.py script generated by py2applet
Usage:
python setup.py py2app
"""
from setuptools import setup
import opencmiss.zinc.context
APP = ['opencmiss/neon/neon.py']
DATA_FILES = []
OPTIONS = {'argv_emulation': True,
# 'frameworks': ['/Users/hsor001/work/opencmiss-software/zinc-software/zinc/library-build-make/core/libzinc.r11285M.3.0.1.dylib']
}
setup(
app=APP,
data_files=DATA_FILES,
options={'py2app': OPTIONS},
setup_requires=['py2app'],
)
|
<commit_before><commit_msg>Add basic setup.py file to create application bundles on OS X.<commit_after>"""
This is a setup.py script generated by py2applet
Usage:
python setup.py py2app
"""
from setuptools import setup
import opencmiss.zinc.context
APP = ['opencmiss/neon/neon.py']
DATA_FILES = []
OPTIONS = {'argv_emulation': True,
# 'frameworks': ['/Users/hsor001/work/opencmiss-software/zinc-software/zinc/library-build-make/core/libzinc.r11285M.3.0.1.dylib']
}
setup(
app=APP,
data_files=DATA_FILES,
options={'py2app': OPTIONS},
setup_requires=['py2app'],
)
|
|
9022e13eb14ce0f031b968d9eb1375ebcae7e770
|
disasm/tests.py
|
disasm/tests.py
|
#!/usr/bin/env python
'''
Check that each file assembles and that the output binary
is identical to the original file.
'''
import filecmp
import os
import subprocess
import shutil
import sys
import tempfile
FILES = {
'prep-corvus-diag.asm': 'prep-corvus-diag.bin',
'prep-hardbox-configure.asm': 'prep-hardbox-configure.bin',
'rom-u62-c7.63.asm': 'rom-u62-c7.63.bin',
}
def main():
here = os.path.abspath(os.path.dirname(__file__))
failures = []
for src in sorted(FILES.keys()):
# find absolute path to original binary, if any
original = FILES[src]
if original is not None:
original = os.path.join(here, FILES[src])
# change to directory of source file
# this is necessary for files that use include directives
src_dirname = os.path.join(here, os.path.dirname(src))
os.chdir(src_dirname)
# filenames for assembly command
tmpdir = tempfile.mkdtemp(prefix='corvus')
srcfile = os.path.join(here, src)
outfile = os.path.join(tmpdir, 'a.bin')
lstfile = os.path.join(tmpdir, 'a.lst')
subs = {'srcfile': srcfile, 'outfile': outfile, 'lstfile': lstfile}
cmd = ("z80asm --list='%(lstfile)s' --output='%(outfile)s' "
"'%(srcfile)s'")
# try to assemble the file
try:
subprocess.check_output(cmd % subs, shell=True)
assembled = True
except subprocess.CalledProcessError as exc:
sys.stdout.write(exc.output)
assembled = False
# check assembled output is identical to original binary
if not assembled:
sys.stderr.write("%s: assembly failed\n" % src)
failures.append(src)
elif original is None:
sys.stdout.write("%s: ok\n" % src)
elif filecmp.cmp(original, outfile):
sys.stdout.write("%s: ok\n" % src)
else:
sys.stderr.write("%s: not ok\n" % src)
failures.append(src)
shutil.rmtree(tmpdir)
return len(failures)
if __name__ == '__main__':
if sys.version_info[:2] < (2, 7):
sys.stderr.write("Python 2.7 or later required\n")
sys.exit(1)
status = main()
sys.exit(status)
|
Add script to check assembly of each file
|
Add script to check assembly of each file
|
Python
|
bsd-3-clause
|
mnaberez/corvus
|
Add script to check assembly of each file
|
#!/usr/bin/env python
'''
Check that each file assembles and that the output binary
is identical to the original file.
'''
import filecmp
import os
import subprocess
import shutil
import sys
import tempfile
FILES = {
'prep-corvus-diag.asm': 'prep-corvus-diag.bin',
'prep-hardbox-configure.asm': 'prep-hardbox-configure.bin',
'rom-u62-c7.63.asm': 'rom-u62-c7.63.bin',
}
def main():
here = os.path.abspath(os.path.dirname(__file__))
failures = []
for src in sorted(FILES.keys()):
# find absolute path to original binary, if any
original = FILES[src]
if original is not None:
original = os.path.join(here, FILES[src])
# change to directory of source file
# this is necessary for files that use include directives
src_dirname = os.path.join(here, os.path.dirname(src))
os.chdir(src_dirname)
# filenames for assembly command
tmpdir = tempfile.mkdtemp(prefix='corvus')
srcfile = os.path.join(here, src)
outfile = os.path.join(tmpdir, 'a.bin')
lstfile = os.path.join(tmpdir, 'a.lst')
subs = {'srcfile': srcfile, 'outfile': outfile, 'lstfile': lstfile}
cmd = ("z80asm --list='%(lstfile)s' --output='%(outfile)s' "
"'%(srcfile)s'")
# try to assemble the file
try:
subprocess.check_output(cmd % subs, shell=True)
assembled = True
except subprocess.CalledProcessError as exc:
sys.stdout.write(exc.output)
assembled = False
# check assembled output is identical to original binary
if not assembled:
sys.stderr.write("%s: assembly failed\n" % src)
failures.append(src)
elif original is None:
sys.stdout.write("%s: ok\n" % src)
elif filecmp.cmp(original, outfile):
sys.stdout.write("%s: ok\n" % src)
else:
sys.stderr.write("%s: not ok\n" % src)
failures.append(src)
shutil.rmtree(tmpdir)
return len(failures)
if __name__ == '__main__':
if sys.version_info[:2] < (2, 7):
sys.stderr.write("Python 2.7 or later required\n")
sys.exit(1)
status = main()
sys.exit(status)
|
<commit_before><commit_msg>Add script to check assembly of each file<commit_after>
|
#!/usr/bin/env python
'''
Check that each file assembles and that the output binary
is identical to the original file.
'''
import filecmp
import os
import subprocess
import shutil
import sys
import tempfile
FILES = {
'prep-corvus-diag.asm': 'prep-corvus-diag.bin',
'prep-hardbox-configure.asm': 'prep-hardbox-configure.bin',
'rom-u62-c7.63.asm': 'rom-u62-c7.63.bin',
}
def main():
here = os.path.abspath(os.path.dirname(__file__))
failures = []
for src in sorted(FILES.keys()):
# find absolute path to original binary, if any
original = FILES[src]
if original is not None:
original = os.path.join(here, FILES[src])
# change to directory of source file
# this is necessary for files that use include directives
src_dirname = os.path.join(here, os.path.dirname(src))
os.chdir(src_dirname)
# filenames for assembly command
tmpdir = tempfile.mkdtemp(prefix='corvus')
srcfile = os.path.join(here, src)
outfile = os.path.join(tmpdir, 'a.bin')
lstfile = os.path.join(tmpdir, 'a.lst')
subs = {'srcfile': srcfile, 'outfile': outfile, 'lstfile': lstfile}
cmd = ("z80asm --list='%(lstfile)s' --output='%(outfile)s' "
"'%(srcfile)s'")
# try to assemble the file
try:
subprocess.check_output(cmd % subs, shell=True)
assembled = True
except subprocess.CalledProcessError as exc:
sys.stdout.write(exc.output)
assembled = False
# check assembled output is identical to original binary
if not assembled:
sys.stderr.write("%s: assembly failed\n" % src)
failures.append(src)
elif original is None:
sys.stdout.write("%s: ok\n" % src)
elif filecmp.cmp(original, outfile):
sys.stdout.write("%s: ok\n" % src)
else:
sys.stderr.write("%s: not ok\n" % src)
failures.append(src)
shutil.rmtree(tmpdir)
return len(failures)
if __name__ == '__main__':
if sys.version_info[:2] < (2, 7):
sys.stderr.write("Python 2.7 or later required\n")
sys.exit(1)
status = main()
sys.exit(status)
|
Add script to check assembly of each file#!/usr/bin/env python
'''
Check that each file assembles and that the output binary
is identical to the original file.
'''
import filecmp
import os
import subprocess
import shutil
import sys
import tempfile
FILES = {
'prep-corvus-diag.asm': 'prep-corvus-diag.bin',
'prep-hardbox-configure.asm': 'prep-hardbox-configure.bin',
'rom-u62-c7.63.asm': 'rom-u62-c7.63.bin',
}
def main():
here = os.path.abspath(os.path.dirname(__file__))
failures = []
for src in sorted(FILES.keys()):
# find absolute path to original binary, if any
original = FILES[src]
if original is not None:
original = os.path.join(here, FILES[src])
# change to directory of source file
# this is necessary for files that use include directives
src_dirname = os.path.join(here, os.path.dirname(src))
os.chdir(src_dirname)
# filenames for assembly command
tmpdir = tempfile.mkdtemp(prefix='corvus')
srcfile = os.path.join(here, src)
outfile = os.path.join(tmpdir, 'a.bin')
lstfile = os.path.join(tmpdir, 'a.lst')
subs = {'srcfile': srcfile, 'outfile': outfile, 'lstfile': lstfile}
cmd = ("z80asm --list='%(lstfile)s' --output='%(outfile)s' "
"'%(srcfile)s'")
# try to assemble the file
try:
subprocess.check_output(cmd % subs, shell=True)
assembled = True
except subprocess.CalledProcessError as exc:
sys.stdout.write(exc.output)
assembled = False
# check assembled output is identical to original binary
if not assembled:
sys.stderr.write("%s: assembly failed\n" % src)
failures.append(src)
elif original is None:
sys.stdout.write("%s: ok\n" % src)
elif filecmp.cmp(original, outfile):
sys.stdout.write("%s: ok\n" % src)
else:
sys.stderr.write("%s: not ok\n" % src)
failures.append(src)
shutil.rmtree(tmpdir)
return len(failures)
if __name__ == '__main__':
if sys.version_info[:2] < (2, 7):
sys.stderr.write("Python 2.7 or later required\n")
sys.exit(1)
status = main()
sys.exit(status)
|
<commit_before><commit_msg>Add script to check assembly of each file<commit_after>#!/usr/bin/env python
'''
Check that each file assembles and that the output binary
is identical to the original file.
'''
import filecmp
import os
import subprocess
import shutil
import sys
import tempfile
FILES = {
'prep-corvus-diag.asm': 'prep-corvus-diag.bin',
'prep-hardbox-configure.asm': 'prep-hardbox-configure.bin',
'rom-u62-c7.63.asm': 'rom-u62-c7.63.bin',
}
def main():
here = os.path.abspath(os.path.dirname(__file__))
failures = []
for src in sorted(FILES.keys()):
# find absolute path to original binary, if any
original = FILES[src]
if original is not None:
original = os.path.join(here, FILES[src])
# change to directory of source file
# this is necessary for files that use include directives
src_dirname = os.path.join(here, os.path.dirname(src))
os.chdir(src_dirname)
# filenames for assembly command
tmpdir = tempfile.mkdtemp(prefix='corvus')
srcfile = os.path.join(here, src)
outfile = os.path.join(tmpdir, 'a.bin')
lstfile = os.path.join(tmpdir, 'a.lst')
subs = {'srcfile': srcfile, 'outfile': outfile, 'lstfile': lstfile}
cmd = ("z80asm --list='%(lstfile)s' --output='%(outfile)s' "
"'%(srcfile)s'")
# try to assemble the file
try:
subprocess.check_output(cmd % subs, shell=True)
assembled = True
except subprocess.CalledProcessError as exc:
sys.stdout.write(exc.output)
assembled = False
# check assembled output is identical to original binary
if not assembled:
sys.stderr.write("%s: assembly failed\n" % src)
failures.append(src)
elif original is None:
sys.stdout.write("%s: ok\n" % src)
elif filecmp.cmp(original, outfile):
sys.stdout.write("%s: ok\n" % src)
else:
sys.stderr.write("%s: not ok\n" % src)
failures.append(src)
shutil.rmtree(tmpdir)
return len(failures)
if __name__ == '__main__':
if sys.version_info[:2] < (2, 7):
sys.stderr.write("Python 2.7 or later required\n")
sys.exit(1)
status = main()
sys.exit(status)
|
|
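One usage detail of the checker above: FILES maps each source to its reference binary, and mapping a source to None makes the script only verify that the file assembles (the original is None branch) without comparing bytes. A hedged example of registering a new listing that has no reference binary yet; the filename is a placeholder:

FILES = {
    'prep-corvus-diag.asm': 'prep-corvus-diag.bin',
    'prep-hardbox-configure.asm': 'prep-hardbox-configure.bin',
    'rom-u62-c7.63.asm': 'rom-u62-c7.63.bin',
    'new-listing.asm': None,  # hypothetical: assemble-only check
}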
a024a9a23cccbb8d72098dc1831770ece8b1aec8
|
ceph_deploy/tests/parser/test_uninstall.py
|
ceph_deploy/tests/parser/test_uninstall.py
|
import pytest
from ceph_deploy.cli import get_parser
class TestParserUninstall(object):
def setup(self):
self.parser = get_parser()
def test_uninstall_help(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('uninstall --help'.split())
out, err = capsys.readouterr()
assert 'usage: ceph-deploy uninstall' in out
assert 'positional arguments:' in out
assert 'optional arguments:' in out
def test_uninstall_host_required(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('uninstall'.split())
out, err = capsys.readouterr()
assert "error: too few arguments" in err
def test_uninstall_one_host(self):
args = self.parser.parse_args('uninstall host1'.split())
assert args.host == ['host1']
def test_uninstall_multiple_hosts(self):
hostnames = ['host1', 'host2', 'host3']
args = self.parser.parse_args(['uninstall'] + hostnames)
assert frozenset(args.host) == frozenset(hostnames)
|
Add tests for argparse uninstall
|
[RM-11742] Add tests for argparse uninstall
Signed-off-by: Travis Rhoden <e5e44d6dbac12e32e01c3bb8b67940d8b42e225b@redhat.com>
|
Python
|
mit
|
isyippee/ceph-deploy,ghxandsky/ceph-deploy,trhoden/ceph-deploy,imzhulei/ceph-deploy,shenhequnying/ceph-deploy,codenrhoden/ceph-deploy,osynge/ceph-deploy,Vicente-Cheng/ceph-deploy,zhouyuan/ceph-deploy,isyippee/ceph-deploy,ghxandsky/ceph-deploy,zhouyuan/ceph-deploy,trhoden/ceph-deploy,codenrhoden/ceph-deploy,SUSE/ceph-deploy-to-be-deleted,SUSE/ceph-deploy-to-be-deleted,branto1/ceph-deploy,SUSE/ceph-deploy,branto1/ceph-deploy,Vicente-Cheng/ceph-deploy,ceph/ceph-deploy,osynge/ceph-deploy,shenhequnying/ceph-deploy,SUSE/ceph-deploy,imzhulei/ceph-deploy,ceph/ceph-deploy
|
[RM-11742] Add tests for argparse uninstall
Signed-off-by: Travis Rhoden <e5e44d6dbac12e32e01c3bb8b67940d8b42e225b@redhat.com>
|
import pytest
from ceph_deploy.cli import get_parser
class TestParserUninstall(object):
def setup(self):
self.parser = get_parser()
def test_uninstall_help(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('uninstall --help'.split())
out, err = capsys.readouterr()
assert 'usage: ceph-deploy uninstall' in out
assert 'positional arguments:' in out
assert 'optional arguments:' in out
def test_uninstall_host_required(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('uninstall'.split())
out, err = capsys.readouterr()
assert "error: too few arguments" in err
def test_uninstall_one_host(self):
args = self.parser.parse_args('uninstall host1'.split())
assert args.host == ['host1']
def test_uninstall_multiple_hosts(self):
hostnames = ['host1', 'host2', 'host3']
args = self.parser.parse_args(['uninstall'] + hostnames)
assert frozenset(args.host) == frozenset(hostnames)
|
<commit_before><commit_msg>[RM-11742] Add tests for argparse uninstall
Signed-off-by: Travis Rhoden <e5e44d6dbac12e32e01c3bb8b67940d8b42e225b@redhat.com><commit_after>
|
import pytest
from ceph_deploy.cli import get_parser
class TestParserUninstall(object):
def setup(self):
self.parser = get_parser()
def test_uninstall_help(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('uninstall --help'.split())
out, err = capsys.readouterr()
assert 'usage: ceph-deploy uninstall' in out
assert 'positional arguments:' in out
assert 'optional arguments:' in out
def test_uninstall_host_required(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('uninstall'.split())
out, err = capsys.readouterr()
assert "error: too few arguments" in err
def test_uninstall_one_host(self):
args = self.parser.parse_args('uninstall host1'.split())
assert args.host == ['host1']
def test_uninstall_multiple_hosts(self):
hostnames = ['host1', 'host2', 'host3']
args = self.parser.parse_args(['uninstall'] + hostnames)
assert frozenset(args.host) == frozenset(hostnames)
|
[RM-11742] Add tests for argparse uninstall
Signed-off-by: Travis Rhoden <e5e44d6dbac12e32e01c3bb8b67940d8b42e225b@redhat.com>import pytest
from ceph_deploy.cli import get_parser
class TestParserUninstall(object):
def setup(self):
self.parser = get_parser()
def test_uninstall_help(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('uninstall --help'.split())
out, err = capsys.readouterr()
assert 'usage: ceph-deploy uninstall' in out
assert 'positional arguments:' in out
assert 'optional arguments:' in out
def test_uninstall_host_required(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('uninstall'.split())
out, err = capsys.readouterr()
assert "error: too few arguments" in err
def test_uninstall_one_host(self):
args = self.parser.parse_args('uninstall host1'.split())
assert args.host == ['host1']
def test_uninstall_multiple_hosts(self):
hostnames = ['host1', 'host2', 'host3']
args = self.parser.parse_args(['uninstall'] + hostnames)
assert frozenset(args.host) == frozenset(hostnames)
|
<commit_before><commit_msg>[RM-11742] Add tests for argparse uninstall
Signed-off-by: Travis Rhoden <e5e44d6dbac12e32e01c3bb8b67940d8b42e225b@redhat.com><commit_after>import pytest
from ceph_deploy.cli import get_parser
class TestParserUninstall(object):
def setup(self):
self.parser = get_parser()
def test_uninstall_help(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('uninstall --help'.split())
out, err = capsys.readouterr()
assert 'usage: ceph-deploy uninstall' in out
assert 'positional arguments:' in out
assert 'optional arguments:' in out
def test_uninstall_host_required(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('uninstall'.split())
out, err = capsys.readouterr()
assert "error: too few arguments" in err
def test_uninstall_one_host(self):
args = self.parser.parse_args('uninstall host1'.split())
assert args.host == ['host1']
def test_uninstall_multiple_hosts(self):
hostnames = ['host1', 'host2', 'host3']
args = self.parser.parse_args(['uninstall'] + hostnames)
assert frozenset(args.host) == frozenset(hostnames)
|
|
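The capsys plus SystemExit pattern in the tests above is generic pytest/argparse behaviour and is easy to reuse; here is a self-contained sketch against a throwaway parser, where the program name and arguments are illustrative rather than ceph-deploy's:

import argparse

import pytest


def build_parser():
    parser = argparse.ArgumentParser(prog='demo')
    parser.add_argument('host', nargs='+')
    return parser


def test_help_exits_and_prints_usage(capsys):
    with pytest.raises(SystemExit):
        build_parser().parse_args(['--help'])
    out, err = capsys.readouterr()
    assert 'usage: demo' in out


def test_host_is_required(capsys):
    with pytest.raises(SystemExit):
        build_parser().parse_args([])
    out, err = capsys.readouterr()
    # Wording differs between Python 2 and 3 argparse.
    assert 'too few arguments' in err or 'the following arguments are required' in err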
dff6d985b5b7152a3e5d378aa4919664cb2e1b51
|
examples/translations/french_test_1.py
|
examples/translations/french_test_1.py
|
# French Language Test - Python 3 Only!
from seleniumbase.translate.french import CasDeBase
class ClasseDeTest(CasDeBase):
def test_exemple_1(self):
self.ouvrir_url("https://fr.wikipedia.org/wiki/")
self.vérifier_le_texte("Wikipédia") # noqa
self.vérifier_un_élément('[title="Visiter la page d’accueil"]')
self.modifier_le_texte("#searchInput", "Crème brûlée")
self.cliquez_sur("#searchButton")
self.vérifier_le_texte("Crème brûlée", "#firstHeading")
self.vérifier_un_élément('img[alt*="Crème brûlée"]')
self.modifier_le_texte("#searchInput", "Jardin des Tuileries")
self.cliquez_sur("#searchButton")
self.vérifier_le_texte("Jardin des Tuileries", "#firstHeading")
self.vérifier_un_élément('img[alt*="Jardin des Tuileries"]')
self.retour()
self.vérifier_la_vérité("brûlée" in self.obtenir_url_actuelle())
self.en_avant()
self.vérifier_la_vérité("Jardin" in self.obtenir_url_actuelle())
|
Add the example test of SeleniumBase in French
|
Add the example test of SeleniumBase in French
|
Python
|
mit
|
seleniumbase/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase
|
Add the example test of SeleniumBase in French
|
# French Language Test - Python 3 Only!
from seleniumbase.translate.french import CasDeBase
class ClasseDeTest(CasDeBase):
def test_exemple_1(self):
self.ouvrir_url("https://fr.wikipedia.org/wiki/")
self.vérifier_le_texte("Wikipédia") # noqa
self.vérifier_un_élément('[title="Visiter la page d’accueil"]')
self.modifier_le_texte("#searchInput", "Crème brûlée")
self.cliquez_sur("#searchButton")
self.vérifier_le_texte("Crème brûlée", "#firstHeading")
self.vérifier_un_élément('img[alt*="Crème brûlée"]')
self.modifier_le_texte("#searchInput", "Jardin des Tuileries")
self.cliquez_sur("#searchButton")
self.vérifier_le_texte("Jardin des Tuileries", "#firstHeading")
self.vérifier_un_élément('img[alt*="Jardin des Tuileries"]')
self.retour()
self.vérifier_la_vérité("brûlée" in self.obtenir_url_actuelle())
self.en_avant()
self.vérifier_la_vérité("Jardin" in self.obtenir_url_actuelle())
|
<commit_before><commit_msg>Add the example test of SeleniumBase in French<commit_after>
|
# French Language Test - Python 3 Only!
from seleniumbase.translate.french import CasDeBase
class ClasseDeTest(CasDeBase):
def test_exemple_1(self):
self.ouvrir_url("https://fr.wikipedia.org/wiki/")
self.vérifier_le_texte("Wikipédia") # noqa
self.vérifier_un_élément('[title="Visiter la page d’accueil"]')
self.modifier_le_texte("#searchInput", "Crème brûlée")
self.cliquez_sur("#searchButton")
self.vérifier_le_texte("Crème brûlée", "#firstHeading")
self.vérifier_un_élément('img[alt*="Crème brûlée"]')
self.modifier_le_texte("#searchInput", "Jardin des Tuileries")
self.cliquez_sur("#searchButton")
self.vérifier_le_texte("Jardin des Tuileries", "#firstHeading")
self.vérifier_un_élément('img[alt*="Jardin des Tuileries"]')
self.retour()
self.vérifier_la_vérité("brûlée" in self.obtenir_url_actuelle())
self.en_avant()
self.vérifier_la_vérité("Jardin" in self.obtenir_url_actuelle())
|
Add the example test of SeleniumBase in French# French Language Test - Python 3 Only!
from seleniumbase.translate.french import CasDeBase
class ClasseDeTest(CasDeBase):
def test_exemple_1(self):
self.ouvrir_url("https://fr.wikipedia.org/wiki/")
self.vérifier_le_texte("Wikipédia") # noqa
self.vérifier_un_élément('[title="Visiter la page d’accueil"]')
self.modifier_le_texte("#searchInput", "Crème brûlée")
self.cliquez_sur("#searchButton")
self.vérifier_le_texte("Crème brûlée", "#firstHeading")
self.vérifier_un_élément('img[alt*="Crème brûlée"]')
self.modifier_le_texte("#searchInput", "Jardin des Tuileries")
self.cliquez_sur("#searchButton")
self.vérifier_le_texte("Jardin des Tuileries", "#firstHeading")
self.vérifier_un_élément('img[alt*="Jardin des Tuileries"]')
self.retour()
self.vérifier_la_vérité("brûlée" in self.obtenir_url_actuelle())
self.en_avant()
self.vérifier_la_vérité("Jardin" in self.obtenir_url_actuelle())
|
<commit_before><commit_msg>Add the example test of SeleniumBase in French<commit_after># French Language Test - Python 3 Only!
from seleniumbase.translate.french import CasDeBase
class ClasseDeTest(CasDeBase):
def test_exemple_1(self):
self.ouvrir_url("https://fr.wikipedia.org/wiki/")
self.vérifier_le_texte("Wikipédia") # noqa
self.vérifier_un_élément('[title="Visiter la page d’accueil"]')
self.modifier_le_texte("#searchInput", "Crème brûlée")
self.cliquez_sur("#searchButton")
self.vérifier_le_texte("Crème brûlée", "#firstHeading")
self.vérifier_un_élément('img[alt*="Crème brûlée"]')
self.modifier_le_texte("#searchInput", "Jardin des Tuileries")
self.cliquez_sur("#searchButton")
self.vérifier_le_texte("Jardin des Tuileries", "#firstHeading")
self.vérifier_un_élément('img[alt*="Jardin des Tuileries"]')
self.retour()
self.vérifier_la_vérité("brûlée" in self.obtenir_url_actuelle())
self.en_avant()
self.vérifier_la_vérité("Jardin" in self.obtenir_url_actuelle())
|
|
ca92d32b73bc6e62fb842f2e7382fbcb076973e1
|
indra/tests/test_deft_tools.py
|
indra/tests/test_deft_tools.py
|
from nose.plugins.attrib import attr
from indra.literature.deft_tools import universal_extract_text
from indra.literature import pmc_client, elsevier_client, pubmed_client
@attr('nonpublic', 'webservice')
def test_universal_extract_text_elsevier():
doi = '10.1016/B978-0-12-416673-8.00004-6'
xml_str = elsevier_client.download_article(doi)
text = universal_extract_text(xml_str)
assert text is not None
assert ' ER ' in text
@attr('webservice')
def test_universal_extract_text_pmc():
pmc_id = 'PMC3262597'
xml_str = pmc_client.get_xml(pmc_id)
text = universal_extract_text(xml_str)
assert text is not None
assert ' ER ' in text
@attr('webservice')
def test_universal_extract_text_abstract():
pmid = '16511588'
abstract = pubmed_client.get_abstract(pmid)
result = universal_extract_text(abstract)
assert result == abstract + '\n'
@attr('webservice')
def test_universal_extract_text_contains():
pmc_id = 'PMC3262597'
xml_str = pmc_client.get_xml(pmc_id)
text1 = universal_extract_text(xml_str)
text2 = universal_extract_text(xml_str, contains='ER')
assert text1 is not None
assert text2 is not None
assert ' ER ' in text1 and ' ER ' in text2
assert len(text2) < len(text1)
@attr('webservice')
def test_universal_extract_text_contains_union():
pmc_id = 'PMC4954987'
xml_str = pmc_client.get_xml(pmc_id)
text1 = universal_extract_text(xml_str)
text2 = universal_extract_text(xml_str, contains='NP')
text3 = universal_extract_text(xml_str, contains='NPs')
text4 = universal_extract_text(xml_str, contains=['NP', 'NPs'])
assert text1 is not None
assert text2 is not None
assert text3 is not None
assert text4 is not None
assert len(text2) < len(text3) < len(text4) < len(text1)
|
Write tests for text extraction for deft
|
Write tests for text extraction for deft
|
Python
|
bsd-2-clause
|
pvtodorov/indra,johnbachman/indra,sorgerlab/belpy,sorgerlab/belpy,sorgerlab/indra,pvtodorov/indra,johnbachman/belpy,johnbachman/indra,johnbachman/belpy,johnbachman/belpy,sorgerlab/indra,johnbachman/indra,bgyori/indra,sorgerlab/belpy,bgyori/indra,sorgerlab/indra,pvtodorov/indra,pvtodorov/indra,bgyori/indra
|
Write tests for text extraction for deft
|
from nose.plugins.attrib import attr
from indra.literature.deft_tools import universal_extract_text
from indra.literature import pmc_client, elsevier_client, pubmed_client
@attr('nonpublic', 'webservice')
def test_universal_extract_text_elsevier():
doi = '10.1016/B978-0-12-416673-8.00004-6'
xml_str = elsevier_client.download_article(doi)
text = universal_extract_text(xml_str)
assert text is not None
assert ' ER ' in text
@attr('webservice')
def test_universal_extract_text_pmc():
pmc_id = 'PMC3262597'
xml_str = pmc_client.get_xml(pmc_id)
text = universal_extract_text(xml_str)
assert text is not None
assert ' ER ' in text
@attr('webservice')
def test_universal_extract_text_abstract():
pmid = '16511588'
abstract = pubmed_client.get_abstract(pmid)
result = universal_extract_text(abstract)
assert result == abstract + '\n'
@attr('webservice')
def test_universal_extract_text_contains():
pmc_id = 'PMC3262597'
xml_str = pmc_client.get_xml(pmc_id)
text1 = universal_extract_text(xml_str)
text2 = universal_extract_text(xml_str, contains='ER')
assert text1 is not None
assert text2 is not None
assert ' ER ' in text1 and ' ER ' in text2
assert len(text2) < len(text1)
@attr('webservice')
def test_universal_extract_text_contains_union():
pmc_id = 'PMC4954987'
xml_str = pmc_client.get_xml(pmc_id)
text1 = universal_extract_text(xml_str)
text2 = universal_extract_text(xml_str, contains='NP')
text3 = universal_extract_text(xml_str, contains='NPs')
text4 = universal_extract_text(xml_str, contains=['NP', 'NPs'])
assert text1 is not None
assert text2 is not None
assert text3 is not None
assert text4 is not None
assert len(text2) < len(text3) < len(text4) < len(text1)
|
<commit_before><commit_msg>Write tests for text extraction for deft<commit_after>
|
from nose.plugins.attrib import attr
from indra.literature.deft_tools import universal_extract_text
from indra.literature import pmc_client, elsevier_client, pubmed_client
@attr('nonpublic', 'webservice')
def test_universal_extract_text_elsevier():
doi = '10.1016/B978-0-12-416673-8.00004-6'
xml_str = elsevier_client.download_article(doi)
text = universal_extract_text(xml_str)
assert text is not None
assert ' ER ' in text
@attr('webservice')
def test_universal_extract_text_pmc():
pmc_id = 'PMC3262597'
xml_str = pmc_client.get_xml(pmc_id)
text = universal_extract_text(xml_str)
assert text is not None
assert ' ER ' in text
@attr('webservice')
def test_universal_extract_text_abstract():
pmid = '16511588'
abstract = pubmed_client.get_abstract(pmid)
result = universal_extract_text(abstract)
assert result == abstract + '\n'
@attr('webservice')
def test_universal_extract_text_contains():
pmc_id = 'PMC3262597'
xml_str = pmc_client.get_xml(pmc_id)
text1 = universal_extract_text(xml_str)
text2 = universal_extract_text(xml_str, contains='ER')
assert text1 is not None
assert text2 is not None
assert ' ER ' in text1 and ' ER ' in text2
assert len(text2) < len(text1)
@attr('webservice')
def test_universal_extract_text_contains_union():
pmc_id = 'PMC4954987'
xml_str = pmc_client.get_xml(pmc_id)
text1 = universal_extract_text(xml_str)
text2 = universal_extract_text(xml_str, contains='NP')
text3 = universal_extract_text(xml_str, contains='NPs')
text4 = universal_extract_text(xml_str, contains=['NP', 'NPs'])
assert text1 is not None
assert text2 is not None
assert text3 is not None
assert text4 is not None
assert len(text2) < len(text3) < len(text4) < len(text1)
|
Write tests for text extraction for deftfrom nose.plugins.attrib import attr
from indra.literature.deft_tools import universal_extract_text
from indra.literature import pmc_client, elsevier_client, pubmed_client
@attr('nonpublic', 'webservice')
def test_universal_extract_text_elsevier():
doi = '10.1016/B978-0-12-416673-8.00004-6'
xml_str = elsevier_client.download_article(doi)
text = universal_extract_text(xml_str)
assert text is not None
assert ' ER ' in text
@attr('webservice')
def test_universal_extract_text_pmc():
pmc_id = 'PMC3262597'
xml_str = pmc_client.get_xml(pmc_id)
text = universal_extract_text(xml_str)
assert text is not None
assert ' ER ' in text
@attr('webservice')
def test_universal_extract_text_abstract():
pmid = '16511588'
abstract = pubmed_client.get_abstract(pmid)
result = universal_extract_text(abstract)
assert result == abstract + '\n'
@attr('webservice')
def test_universal_extract_text_contains():
pmc_id = 'PMC3262597'
xml_str = pmc_client.get_xml(pmc_id)
text1 = universal_extract_text(xml_str)
text2 = universal_extract_text(xml_str, contains='ER')
assert text1 is not None
assert text2 is not None
assert ' ER ' in text1 and ' ER ' in text2
assert len(text2) < len(text1)
@attr('webservice')
def test_universal_extract_text_contains_union():
pmc_id = 'PMC4954987'
xml_str = pmc_client.get_xml(pmc_id)
text1 = universal_extract_text(xml_str)
text2 = universal_extract_text(xml_str, contains='NP')
text3 = universal_extract_text(xml_str, contains='NPs')
text4 = universal_extract_text(xml_str, contains=['NP', 'NPs'])
assert text1 is not None
assert text2 is not None
assert text3 is not None
assert text4 is not None
assert len(text2) < len(text3) < len(text4) < len(text1)
|
<commit_before><commit_msg>Write tests for text extraction for deft<commit_after>from nose.plugins.attrib import attr
from indra.literature.deft_tools import universal_extract_text
from indra.literature import pmc_client, elsevier_client, pubmed_client
@attr('nonpublic', 'webservice')
def test_universal_extract_text_elsevier():
doi = '10.1016/B978-0-12-416673-8.00004-6'
xml_str = elsevier_client.download_article(doi)
text = universal_extract_text(xml_str)
assert text is not None
assert ' ER ' in text
@attr('webservice')
def test_universal_extract_text_pmc():
pmc_id = 'PMC3262597'
xml_str = pmc_client.get_xml(pmc_id)
text = universal_extract_text(xml_str)
assert text is not None
assert ' ER ' in text
@attr('webservice')
def test_universal_extract_text_abstract():
pmid = '16511588'
abstract = pubmed_client.get_abstract(pmid)
result = universal_extract_text(abstract)
assert result == abstract + '\n'
@attr('webservice')
def test_universal_extract_text_contains():
pmc_id = 'PMC3262597'
xml_str = pmc_client.get_xml(pmc_id)
text1 = universal_extract_text(xml_str)
text2 = universal_extract_text(xml_str, contains='ER')
assert text1 is not None
assert text2 is not None
assert ' ER ' in text1 and ' ER ' in text2
assert len(text2) < len(text1)
@attr('webservice')
def test_universal_extract_text_contains_union():
pmc_id = 'PMC4954987'
xml_str = pmc_client.get_xml(pmc_id)
text1 = universal_extract_text(xml_str)
text2 = universal_extract_text(xml_str, contains='NP')
text3 = universal_extract_text(xml_str, contains='NPs')
text4 = universal_extract_text(xml_str, contains=['NP', 'NPs'])
assert text1 is not None
assert text2 is not None
assert text3 is not None
assert text4 is not None
assert len(text2) < len(text3) < len(text4) < len(text1)
|
|
8a6121cbd594fcd5402d0170614f4e1340282145
|
eccodes/__main__.py
|
eccodes/__main__.py
|
#
# Copyright 2017-2019 European Centre for Medium-Range Weather Forecasts (ECMWF).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# Alessandro Amici - B-Open - https://bopen.eu
#
import os.path
import click
# NOTE: imports are executed inside functions so missing dependencies don't break all commands
@click.group()
def cfgrib_cli():
pass
@cfgrib_cli.command('selfcheck')
def selfcheck():
from . import bindings
print("Found: ecCodes v%s." % bindings.codes_get_api_version())
print("Your system is ready.")
@cfgrib_cli.command('to_netcdf')
@click.argument('inpaths', nargs=-1)
@click.option('--outpath', '-o', default=None)
@click.option('--cdm', '-c', default=None)
@click.option('--engine', '-e', default='cfgrib')
def to_netcdf(inpaths, outpath, cdm, engine):
import cf2cdm
import xarray as xr
# NOTE: noop if no input argument
if len(inpaths) == 0:
return
if not outpath:
outpath = os.path.splitext(inpaths[0])[0] + '.nc'
ds = xr.open_mfdataset(inpaths, engine=engine)
if cdm:
coord_model = getattr(cf2cdm, cdm)
ds = cf2cdm.translate_coords(ds, coord_model=coord_model)
ds.to_netcdf(outpath)
if __name__ == '__main__': # pragma: no cover
cfgrib_cli()
|
Enable python -m eccodes selfcheck.
|
Enable python -m eccodes selfcheck.
|
Python
|
apache-2.0
|
ecmwf/eccodes-python,ecmwf/eccodes-python
|
Enable python -m eccodes selfcheck.
|
#
# Copyright 2017-2019 European Centre for Medium-Range Weather Forecasts (ECMWF).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# Alessandro Amici - B-Open - https://bopen.eu
#
import os.path
import click
# NOTE: imports are executed inside functions so missing dependencies don't break all commands
@click.group()
def cfgrib_cli():
pass
@cfgrib_cli.command('selfcheck')
def selfcheck():
from . import bindings
print("Found: ecCodes v%s." % bindings.codes_get_api_version())
print("Your system is ready.")
@cfgrib_cli.command('to_netcdf')
@click.argument('inpaths', nargs=-1)
@click.option('--outpath', '-o', default=None)
@click.option('--cdm', '-c', default=None)
@click.option('--engine', '-e', default='cfgrib')
def to_netcdf(inpaths, outpath, cdm, engine):
import cf2cdm
import xarray as xr
# NOTE: noop if no input argument
if len(inpaths) == 0:
return
if not outpath:
outpath = os.path.splitext(inpaths[0])[0] + '.nc'
ds = xr.open_mfdataset(inpaths, engine=engine)
if cdm:
coord_model = getattr(cf2cdm, cdm)
ds = cf2cdm.translate_coords(ds, coord_model=coord_model)
ds.to_netcdf(outpath)
if __name__ == '__main__': # pragma: no cover
cfgrib_cli()
|
<commit_before><commit_msg>Enable python -m eccodes selfcheck.<commit_after>
|
#
# Copyright 2017-2019 European Centre for Medium-Range Weather Forecasts (ECMWF).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# Alessandro Amici - B-Open - https://bopen.eu
#
import os.path
import click
# NOTE: imports are executed inside functions so missing dependencies don't break all commands
@click.group()
def cfgrib_cli():
pass
@cfgrib_cli.command('selfcheck')
def selfcheck():
from . import bindings
print("Found: ecCodes v%s." % bindings.codes_get_api_version())
print("Your system is ready.")
@cfgrib_cli.command('to_netcdf')
@click.argument('inpaths', nargs=-1)
@click.option('--outpath', '-o', default=None)
@click.option('--cdm', '-c', default=None)
@click.option('--engine', '-e', default='cfgrib')
def to_netcdf(inpaths, outpath, cdm, engine):
import cf2cdm
import xarray as xr
# NOTE: noop if no input argument
if len(inpaths) == 0:
return
if not outpath:
outpath = os.path.splitext(inpaths[0])[0] + '.nc'
ds = xr.open_mfdataset(inpaths, engine=engine)
if cdm:
coord_model = getattr(cf2cdm, cdm)
ds = cf2cdm.translate_coords(ds, coord_model=coord_model)
ds.to_netcdf(outpath)
if __name__ == '__main__': # pragma: no cover
cfgrib_cli()
|
Enable python -m eccodes selfcheck.#
# Copyright 2017-2019 European Centre for Medium-Range Weather Forecasts (ECMWF).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# Alessandro Amici - B-Open - https://bopen.eu
#
import os.path
import click
# NOTE: imports are executed inside functions so missing dependencies don't break all commands
@click.group()
def cfgrib_cli():
pass
@cfgrib_cli.command('selfcheck')
def selfcheck():
from . import bindings
print("Found: ecCodes v%s." % bindings.codes_get_api_version())
print("Your system is ready.")
@cfgrib_cli.command('to_netcdf')
@click.argument('inpaths', nargs=-1)
@click.option('--outpath', '-o', default=None)
@click.option('--cdm', '-c', default=None)
@click.option('--engine', '-e', default='cfgrib')
def to_netcdf(inpaths, outpath, cdm, engine):
import cf2cdm
import xarray as xr
# NOTE: noop if no input argument
if len(inpaths) == 0:
return
if not outpath:
outpath = os.path.splitext(inpaths[0])[0] + '.nc'
ds = xr.open_mfdataset(inpaths, engine=engine)
if cdm:
coord_model = getattr(cf2cdm, cdm)
ds = cf2cdm.translate_coords(ds, coord_model=coord_model)
ds.to_netcdf(outpath)
if __name__ == '__main__': # pragma: no cover
cfgrib_cli()
|
<commit_before><commit_msg>Enable python -m eccodes selfcheck.<commit_after>#
# Copyright 2017-2019 European Centre for Medium-Range Weather Forecasts (ECMWF).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# Alessandro Amici - B-Open - https://bopen.eu
#
import os.path
import click
# NOTE: imports are executed inside functions so missing dependencies don't break all commands
@click.group()
def cfgrib_cli():
pass
@cfgrib_cli.command('selfcheck')
def selfcheck():
from . import bindings
print("Found: ecCodes v%s." % bindings.codes_get_api_version())
print("Your system is ready.")
@cfgrib_cli.command('to_netcdf')
@click.argument('inpaths', nargs=-1)
@click.option('--outpath', '-o', default=None)
@click.option('--cdm', '-c', default=None)
@click.option('--engine', '-e', default='cfgrib')
def to_netcdf(inpaths, outpath, cdm, engine):
import cf2cdm
import xarray as xr
# NOTE: noop if no input argument
if len(inpaths) == 0:
return
if not outpath:
outpath = os.path.splitext(inpaths[0])[0] + '.nc'
ds = xr.open_mfdataset(inpaths, engine=engine)
if cdm:
coord_model = getattr(cf2cdm, cdm)
ds = cf2cdm.translate_coords(ds, coord_model=coord_model)
ds.to_netcdf(outpath)
if __name__ == '__main__': # pragma: no cover
cfgrib_cli()
|
|
bd7e802e81d313805c47b3822b70e9180bf9af98
|
examples/widgets/compound_selection.py
|
examples/widgets/compound_selection.py
|
from kivy.uix.gridlayout import GridLayout
from kivy.uix.button import Button
from kivy.uix.behaviors import CompoundSelectionBehavior
from kivy.app import runTouchApp
from kivy.core.window import Window
class SelectableGrid(CompoundSelectionBehavior, GridLayout):
def __init__(self, **kwargs):
super(SelectableGrid, self).__init__(**kwargs)
keyboard = Window.request_keyboard(None, self)
keyboard.bind(on_key_down=self.select_with_key_down,
on_key_up=self.select_with_key_up)
def print_selection(*l):
print [x.text for x in self.selected_nodes]
self.bind(selected_nodes=print_selection)
def goto_node(self, key, last_node, last_node_idx):
''' This function is used to go to the node by typing the number
of the text of the button.
'''
node, idx = super(SelectableGrid, self).goto_node(key, last_node,
last_node_idx)
if node == last_node:
children = self.children
for i in range(len(children)):
if children[i].text == key:
return children[i], i
return node, idx
def select_node(self, node):
node.background_color = (1, 0, 0, 1)
return super(SelectableGrid, self).select_node(node)
def deselect_node(self, node):
node.background_color = (1, 1, 1, 1)
super(SelectableGrid, self).deselect_node(node)
def do_touch(self, instance, touch):
if ('button' in touch.profile and touch.button in
('scrollup', 'scrolldown', 'scrollleft', 'scrollright')) or\
instance.collide_point(*touch.pos):
self.select_with_touch(instance, touch)
else:
return False
return True
root = SelectableGrid(cols=5, up_count=5, multiselect=True, scroll_count=1)
for i in range(40):
c = Button(text=str(i))
c.bind(on_touch_down=root.do_touch)
root.add_widget(c)
runTouchApp(root)
|
Add compound selection usage example to examples.
|
Add compound selection usage example to examples.
|
Python
|
mit
|
arcticshores/kivy,mSenyor/kivy,Cheaterman/kivy,inclement/kivy,xpndlabs/kivy,habibmasuro/kivy,KeyWeeUsr/kivy,bob-the-hamster/kivy,xpndlabs/kivy,angryrancor/kivy,Cheaterman/kivy,Farkal/kivy,viralpandey/kivy,rafalo1333/kivy,adamkh/kivy,VinGarcia/kivy,bliz937/kivy,Shyam10/kivy,yoelk/kivy,bionoid/kivy,Shyam10/kivy,iamutkarshtiwari/kivy,rnixx/kivy,el-ethan/kivy,janssen/kivy,mSenyor/kivy,adamkh/kivy,Farkal/kivy,kivy/kivy,jkankiewicz/kivy,vipulroxx/kivy,autosportlabs/kivy,matham/kivy,gonzafirewall/kivy,ernstp/kivy,jffernandez/kivy,Cheaterman/kivy,bliz937/kivy,jkankiewicz/kivy,kivy/kivy,manashmndl/kivy,andnovar/kivy,darkopevec/kivy,autosportlabs/kivy,dirkjot/kivy,akshayaurora/kivy,bob-the-hamster/kivy,viralpandey/kivy,manthansharma/kivy,Ramalus/kivy,niavlys/kivy,yoelk/kivy,andnovar/kivy,aron-bordin/kivy,JohnHowland/kivy,LogicalDash/kivy,aron-bordin/kivy,jegger/kivy,inclement/kivy,niavlys/kivy,bob-the-hamster/kivy,dirkjot/kivy,bionoid/kivy,Ramalus/kivy,VinGarcia/kivy,janssen/kivy,Farkal/kivy,xiaoyanit/kivy,ernstp/kivy,rnixx/kivy,MiyamotoAkira/kivy,Ramalus/kivy,angryrancor/kivy,el-ethan/kivy,thezawad/kivy,manashmndl/kivy,ernstp/kivy,autosportlabs/kivy,xpndlabs/kivy,dirkjot/kivy,mSenyor/kivy,Shyam10/kivy,VinGarcia/kivy,tony/kivy,denys-duchier/kivy,gonzafirewall/kivy,kived/kivy,JohnHowland/kivy,angryrancor/kivy,CuriousLearner/kivy,yoelk/kivy,KeyWeeUsr/kivy,jehutting/kivy,dirkjot/kivy,bob-the-hamster/kivy,adamkh/kivy,gonzafirewall/kivy,manthansharma/kivy,kived/kivy,iamutkarshtiwari/kivy,cbenhagen/kivy,arcticshores/kivy,LogicalDash/kivy,Farkal/kivy,Cheaterman/kivy,manashmndl/kivy,adamkh/kivy,KeyWeeUsr/kivy,iamutkarshtiwari/kivy,youprofit/kivy,yoelk/kivy,MiyamotoAkira/kivy,jkankiewicz/kivy,KeyWeeUsr/kivy,vitorio/kivy,edubrunaldi/kivy,manthansharma/kivy,arlowhite/kivy,ernstp/kivy,janssen/kivy,andnovar/kivy,vitorio/kivy,aron-bordin/kivy,bliz937/kivy,thezawad/kivy,jegger/kivy,bionoid/kivy,CuriousLearner/kivy,denys-duchier/kivy,janssen/kivy,darkopevec/kivy,arlowhite/kivy,bhargav2408/kivy,LogicalDash/kivy,JohnHowland/kivy,akshayaurora/kivy,tony/kivy,arcticshores/kivy,aron-bordin/kivy,habibmasuro/kivy,CuriousLearner/kivy,thezawad/kivy,MiyamotoAkira/kivy,niavlys/kivy,jegger/kivy,gonzafirewall/kivy,kived/kivy,edubrunaldi/kivy,kivy/kivy,viralpandey/kivy,edubrunaldi/kivy,rnixx/kivy,arlowhite/kivy,habibmasuro/kivy,cbenhagen/kivy,jegger/kivy,rafalo1333/kivy,jehutting/kivy,angryrancor/kivy,jkankiewicz/kivy,el-ethan/kivy,matham/kivy,vipulroxx/kivy,vipulroxx/kivy,niavlys/kivy,denys-duchier/kivy,vitorio/kivy,darkopevec/kivy,tony/kivy,manthansharma/kivy,youprofit/kivy,xiaoyanit/kivy,Shyam10/kivy,darkopevec/kivy,matham/kivy,arcticshores/kivy,jffernandez/kivy,denys-duchier/kivy,rafalo1333/kivy,xiaoyanit/kivy,bhargav2408/kivy,cbenhagen/kivy,jehutting/kivy,matham/kivy,inclement/kivy,MiyamotoAkira/kivy,youprofit/kivy,vipulroxx/kivy,bhargav2408/kivy,LogicalDash/kivy,jffernandez/kivy,jffernandez/kivy,bionoid/kivy,JohnHowland/kivy,akshayaurora/kivy
|
Add compound selection usage example to examples.
|
from kivy.uix.gridlayout import GridLayout
from kivy.uix.button import Button
from kivy.uix.behaviors import CompoundSelectionBehavior
from kivy.app import runTouchApp
from kivy.core.window import Window
class SelectableGrid(CompoundSelectionBehavior, GridLayout):
def __init__(self, **kwargs):
super(SelectableGrid, self).__init__(**kwargs)
keyboard = Window.request_keyboard(None, self)
keyboard.bind(on_key_down=self.select_with_key_down,
on_key_up=self.select_with_key_up)
def print_selection(*l):
print [x.text for x in self.selected_nodes]
self.bind(selected_nodes=print_selection)
def goto_node(self, key, last_node, last_node_idx):
''' This function is used to go to the node by typing the number
of the text of the button.
'''
node, idx = super(SelectableGrid, self).goto_node(key, last_node,
last_node_idx)
if node == last_node:
children = self.children
for i in range(len(children)):
if children[i].text == key:
return children[i], i
return node, idx
def select_node(self, node):
node.background_color = (1, 0, 0, 1)
return super(SelectableGrid, self).select_node(node)
def deselect_node(self, node):
node.background_color = (1, 1, 1, 1)
super(SelectableGrid, self).deselect_node(node)
def do_touch(self, instance, touch):
if ('button' in touch.profile and touch.button in
('scrollup', 'scrolldown', 'scrollleft', 'scrollright')) or\
instance.collide_point(*touch.pos):
self.select_with_touch(instance, touch)
else:
return False
return True
root = SelectableGrid(cols=5, up_count=5, multiselect=True, scroll_count=1)
for i in range(40):
c = Button(text=str(i))
c.bind(on_touch_down=root.do_touch)
root.add_widget(c)
runTouchApp(root)
|
<commit_before><commit_msg>Add compound selection usage example to examples.<commit_after>
|
from kivy.uix.gridlayout import GridLayout
from kivy.uix.button import Button
from kivy.uix.behaviors import CompoundSelectionBehavior
from kivy.app import runTouchApp
from kivy.core.window import Window
class SelectableGrid(CompoundSelectionBehavior, GridLayout):
def __init__(self, **kwargs):
super(SelectableGrid, self).__init__(**kwargs)
keyboard = Window.request_keyboard(None, self)
keyboard.bind(on_key_down=self.select_with_key_down,
on_key_up=self.select_with_key_up)
def print_selection(*l):
print [x.text for x in self.selected_nodes]
self.bind(selected_nodes=print_selection)
def goto_node(self, key, last_node, last_node_idx):
''' This function is used to go to the node by typing the number
of the text of the button.
'''
node, idx = super(SelectableGrid, self).goto_node(key, last_node,
last_node_idx)
if node == last_node:
children = self.children
for i in range(len(children)):
if children[i].text == key:
return children[i], i
return node, idx
def select_node(self, node):
node.background_color = (1, 0, 0, 1)
return super(SelectableGrid, self).select_node(node)
def deselect_node(self, node):
node.background_color = (1, 1, 1, 1)
super(SelectableGrid, self).deselect_node(node)
def do_touch(self, instance, touch):
if ('button' in touch.profile and touch.button in
('scrollup', 'scrolldown', 'scrollleft', 'scrollright')) or\
instance.collide_point(*touch.pos):
self.select_with_touch(instance, touch)
else:
return False
return True
root = SelectableGrid(cols=5, up_count=5, multiselect=True, scroll_count=1)
for i in range(40):
c = Button(text=str(i))
c.bind(on_touch_down=root.do_touch)
root.add_widget(c)
runTouchApp(root)
|
Add compound selection usage example to examples.from kivy.uix.gridlayout import GridLayout
from kivy.uix.button import Button
from kivy.uix.behaviors import CompoundSelectionBehavior
from kivy.app import runTouchApp
from kivy.core.window import Window
class SelectableGrid(CompoundSelectionBehavior, GridLayout):
def __init__(self, **kwargs):
super(SelectableGrid, self).__init__(**kwargs)
keyboard = Window.request_keyboard(None, self)
keyboard.bind(on_key_down=self.select_with_key_down,
on_key_up=self.select_with_key_up)
def print_selection(*l):
print [x.text for x in self.selected_nodes]
self.bind(selected_nodes=print_selection)
def goto_node(self, key, last_node, last_node_idx):
''' This function is used to go to the node by typing the number
of the text of the button.
'''
node, idx = super(SelectableGrid, self).goto_node(key, last_node,
last_node_idx)
if node == last_node:
children = self.children
for i in range(len(children)):
if children[i].text == key:
return children[i], i
return node, idx
def select_node(self, node):
node.background_color = (1, 0, 0, 1)
return super(SelectableGrid, self).select_node(node)
def deselect_node(self, node):
node.background_color = (1, 1, 1, 1)
super(SelectableGrid, self).deselect_node(node)
def do_touch(self, instance, touch):
if ('button' in touch.profile and touch.button in
('scrollup', 'scrolldown', 'scrollleft', 'scrollright')) or\
instance.collide_point(*touch.pos):
self.select_with_touch(instance, touch)
else:
return False
return True
root = SelectableGrid(cols=5, up_count=5, multiselect=True, scroll_count=1)
for i in range(40):
c = Button(text=str(i))
c.bind(on_touch_down=root.do_touch)
root.add_widget(c)
runTouchApp(root)
|
<commit_before><commit_msg>Add compound selection usage example to examples.<commit_after>from kivy.uix.gridlayout import GridLayout
from kivy.uix.button import Button
from kivy.uix.behaviors import CompoundSelectionBehavior
from kivy.app import runTouchApp
from kivy.core.window import Window
class SelectableGrid(CompoundSelectionBehavior, GridLayout):
def __init__(self, **kwargs):
super(SelectableGrid, self).__init__(**kwargs)
keyboard = Window.request_keyboard(None, self)
keyboard.bind(on_key_down=self.select_with_key_down,
on_key_up=self.select_with_key_up)
def print_selection(*l):
print [x.text for x in self.selected_nodes]
self.bind(selected_nodes=print_selection)
def goto_node(self, key, last_node, last_node_idx):
''' This function is used to go to the node by typing the number
of the text of the button.
'''
node, idx = super(SelectableGrid, self).goto_node(key, last_node,
last_node_idx)
if node == last_node:
children = self.children
for i in range(len(children)):
if children[i].text == key:
return children[i], i
return node, idx
def select_node(self, node):
node.background_color = (1, 0, 0, 1)
return super(SelectableGrid, self).select_node(node)
def deselect_node(self, node):
node.background_color = (1, 1, 1, 1)
super(SelectableGrid, self).deselect_node(node)
def do_touch(self, instance, touch):
if ('button' in touch.profile and touch.button in
('scrollup', 'scrolldown', 'scrollleft', 'scrollright')) or\
instance.collide_point(*touch.pos):
self.select_with_touch(instance, touch)
else:
return False
return True
root = SelectableGrid(cols=5, up_count=5, multiselect=True, scroll_count=1)
for i in range(40):
c = Button(text=str(i))
c.bind(on_touch_down=root.do_touch)
root.add_widget(c)
runTouchApp(root)
|
|
8bdeefa23ce44a0c0aad3913ec59d4167d2b0eff
|
duralex/AddCommitMessageVisitor.py
|
duralex/AddCommitMessageVisitor.py
|
# -*- coding: utf-8 -*-
from AbstractVisitor import AbstractVisitor
from duralex.alinea_parser import *
import duralex.node_type
def int_to_roman(integer):
string = ''
table = [
['M',1000], ['CM',900], ['D',500], ['CD',400], ['C',100], ['XC',90], ['L',50], ['XL',40], ['X',10], ['IX',9],
['V',5], ['IV',4], ['I',1]
]
for pair in table:
while integer - pair[1] >= 0:
integer -= pair[1]
string += pair[0]
return string
class AddCommitMessageVisitor(AbstractVisitor):
def visit_edit_node(self, node, post):
if post:
return
messages = []
ancestors = get_node_ancestors(node)
for ancestor in ancestors:
if 'type' not in ancestor:
continue;
if ancestor['type'] == 'article':
messages.append('Article ' + str(ancestor['order']))
if ancestor['type'] == 'bill-header1':
messages.append(int_to_roman(ancestor['order']))
if ancestor['type'] == 'bill-header2':
messages.append(unicode(ancestor['order']) + u'°')
node['commitMessage'] = ', '.join(messages[::-1])
|
Add a visitor to generate commit messages on each 'edit' node.
|
Add a visitor to generate commit messages on each 'edit' node.
|
Python
|
mit
|
Legilibre/duralex
|
Add a visitor to generate commit messages on each 'edit' node.
|
# -*- coding: utf-8 -*-
from AbstractVisitor import AbstractVisitor
from duralex.alinea_parser import *
import duralex.node_type
def int_to_roman(integer):
string = ''
table = [
['M',1000], ['CM',900], ['D',500], ['CD',400], ['C',100], ['XC',90], ['L',50], ['XL',40], ['X',10], ['IX',9],
['V',5], ['IV',4], ['I',1]
]
for pair in table:
while integer - pair[1] >= 0:
integer -= pair[1]
string += pair[0]
return string
class AddCommitMessageVisitor(AbstractVisitor):
def visit_edit_node(self, node, post):
if post:
return
messages = []
ancestors = get_node_ancestors(node)
for ancestor in ancestors:
if 'type' not in ancestor:
continue;
if ancestor['type'] == 'article':
messages.append('Article ' + str(ancestor['order']))
if ancestor['type'] == 'bill-header1':
messages.append(int_to_roman(ancestor['order']))
if ancestor['type'] == 'bill-header2':
messages.append(unicode(ancestor['order']) + u'°')
node['commitMessage'] = ', '.join(messages[::-1])
|
<commit_before><commit_msg>Add a visitor to generate commit messages on each 'edit' node.<commit_after>
|
# -*- coding: utf-8 -*-
from AbstractVisitor import AbstractVisitor
from duralex.alinea_parser import *
import duralex.node_type
def int_to_roman(integer):
string = ''
table = [
['M',1000], ['CM',900], ['D',500], ['CD',400], ['C',100], ['XC',90], ['L',50], ['XL',40], ['X',10], ['IX',9],
['V',5], ['IV',4], ['I',1]
]
for pair in table:
while integer - pair[1] >= 0:
integer -= pair[1]
string += pair[0]
return string
class AddCommitMessageVisitor(AbstractVisitor):
def visit_edit_node(self, node, post):
if post:
return
messages = []
ancestors = get_node_ancestors(node)
for ancestor in ancestors:
if 'type' not in ancestor:
continue;
if ancestor['type'] == 'article':
messages.append('Article ' + str(ancestor['order']))
if ancestor['type'] == 'bill-header1':
messages.append(int_to_roman(ancestor['order']))
if ancestor['type'] == 'bill-header2':
messages.append(unicode(ancestor['order']) + u'°')
node['commitMessage'] = ', '.join(messages[::-1])
|
Add a visitor to generate commit messages on each 'edit' node.# -*- coding: utf-8 -*-
from AbstractVisitor import AbstractVisitor
from duralex.alinea_parser import *
import duralex.node_type
def int_to_roman(integer):
string = ''
table = [
['M',1000], ['CM',900], ['D',500], ['CD',400], ['C',100], ['XC',90], ['L',50], ['XL',40], ['X',10], ['IX',9],
['V',5], ['IV',4], ['I',1]
]
for pair in table:
while integer - pair[1] >= 0:
integer -= pair[1]
string += pair[0]
return string
class AddCommitMessageVisitor(AbstractVisitor):
def visit_edit_node(self, node, post):
if post:
return
messages = []
ancestors = get_node_ancestors(node)
for ancestor in ancestors:
if 'type' not in ancestor:
continue;
if ancestor['type'] == 'article':
messages.append('Article ' + str(ancestor['order']))
if ancestor['type'] == 'bill-header1':
messages.append(int_to_roman(ancestor['order']))
if ancestor['type'] == 'bill-header2':
messages.append(unicode(ancestor['order']) + u'°')
node['commitMessage'] = ', '.join(messages[::-1])
|
<commit_before><commit_msg>Add a visitor to generate commit messages on each 'edit' node.<commit_after># -*- coding: utf-8 -*-
from AbstractVisitor import AbstractVisitor
from duralex.alinea_parser import *
import duralex.node_type
def int_to_roman(integer):
string = ''
table = [
['M',1000], ['CM',900], ['D',500], ['CD',400], ['C',100], ['XC',90], ['L',50], ['XL',40], ['X',10], ['IX',9],
['V',5], ['IV',4], ['I',1]
]
for pair in table:
while integer - pair[1] >= 0:
integer -= pair[1]
string += pair[0]
return string
class AddCommitMessageVisitor(AbstractVisitor):
def visit_edit_node(self, node, post):
if post:
return
messages = []
ancestors = get_node_ancestors(node)
for ancestor in ancestors:
if 'type' not in ancestor:
continue;
if ancestor['type'] == 'article':
messages.append('Article ' + str(ancestor['order']))
if ancestor['type'] == 'bill-header1':
messages.append(int_to_roman(ancestor['order']))
if ancestor['type'] == 'bill-header2':
messages.append(unicode(ancestor['order']) + u'°')
node['commitMessage'] = ', '.join(messages[::-1])
|
|
44662a9b82f22de611a99722eb7763f31d723be6
|
plugins/configuration/configurationtype/validated_dictionary.py
|
plugins/configuration/configurationtype/validated_dictionary.py
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
#This software is distributed under the Creative Commons license (CC0) version 1.0. A copy of this license should have been distributed with this software.
#The license can also be read online: <https://creativecommons.org/publicdomain/zero/1.0/>. If this online license differs from the license provided with this software, the license provided with this software should be applied.
"""
Defines a dictionary-like data structure where all values are validated.
The purpose of this dictionary is to be used in configuration data structures
where the keys are more or less frozen after the application has completed its
start-up phase.
"""
class ValidatedDictionary:
"""
A dictionary-like data structure where all values are validated.
New keys can only be added by also providing a validator predicate. Adding
new keys can't be done with the usual ``dictionary[key] = value``, but via a
separate method ``add``. Adding new keys in the usual way would result in an
exception stating that the key cannot be found.
When setting a key to a new value, the predicate associated with the key
will be executed to determine whether the value is allowed. If the value is
not allowed, an exception will be raised.
For the rest, this class should behave like an ordinary dictionary.
"""
pass #Not yet implemented.
|
Add documentation plan for ValidatedDictionary
|
Add documentation plan for ValidatedDictionary
This is supposed to implement the latest idea for how we're going to do that flexible configuration properly...
|
Python
|
cc0-1.0
|
Ghostkeeper/Luna
|
Add documentation plan for ValidatedDictionary
This is supposed to implement the latest idea for how we're going to do that flexible configuration properly...
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
#This software is distributed under the Creative Commons license (CC0) version 1.0. A copy of this license should have been distributed with this software.
#The license can also be read online: <https://creativecommons.org/publicdomain/zero/1.0/>. If this online license differs from the license provided with this software, the license provided with this software should be applied.
"""
Defines a dictionary-like data structure where all values are validated.
The purpose of this dictionary is to be used in configuration data structures
where the keys are more or less frozen after the application has completed its
start-up phase.
"""
class ValidatedDictionary:
"""
A dictionary-like data structure where all values are validated.
New keys can only be added by also providing a validator predicate. Adding
new keys can't be done with the usual ``dictionary[key] = value``, but via a
separate method ``add``. Adding new keys in the usual way would result in an
exception stating that the key cannot be found.
When setting a key to a new value, the predicate associated with the key
will be executed to determine whether the value is allowed. If the value is
not allowed, an exception will be raised.
For the rest, this class should behave like an ordinary dictionary.
"""
pass #Not yet implemented.
|
<commit_before><commit_msg>Add documentation plan for ValidatedDictionary
This is supposed to implement the latest idea for how we're going to do that flexible configuration properly...<commit_after>
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
#This software is distributed under the Creative Commons license (CC0) version 1.0. A copy of this license should have been distributed with this software.
#The license can also be read online: <https://creativecommons.org/publicdomain/zero/1.0/>. If this online license differs from the license provided with this software, the license provided with this software should be applied.
"""
Defines a dictionary-like data structure where all values are validated.
The purpose of this dictionary is to be used in configuration data structures
where the keys are more or less frozen after the application has completed its
start-up phase.
"""
class ValidatedDictionary:
"""
A dictionary-like data structure where all values are validated.
New keys can only be added by also providing a validator predicate. Adding
new keys can't be done with the usual ``dictionary[key] = value``, but via a
separate method ``add``. Adding new keys in the usual way would result in an
exception stating that the key cannot be found.
When setting a key to a new value, the predicate associated with the key
will be executed to determine whether the value is allowed. If the value is
not allowed, an exception will be raised.
For the rest, this class should behave like an ordinary dictionary.
"""
pass #Not yet implemented.
|
Add documentation plan for ValidatedDictionary
This is supposed to implement the latest idea for how we're going to do that flexible configuration properly...#!/usr/bin/env python
#-*- coding: utf-8 -*-
#This software is distributed under the Creative Commons license (CC0) version 1.0. A copy of this license should have been distributed with this software.
#The license can also be read online: <https://creativecommons.org/publicdomain/zero/1.0/>. If this online license differs from the license provided with this software, the license provided with this software should be applied.
"""
Defines a dictionary-like data structure where all values are validated.
The purpose of this dictionary is to be used in configuration data structures
where the keys are more or less frozen after the application has completed its
start-up phase.
"""
class ValidatedDictionary:
"""
A dictionary-like data structure where all values are validated.
New keys can only be added by also providing a validator predicate. Adding
new keys can't be done with the usual ``dictionary[key] = value``, but via a
separate method ``add``. Adding new keys in the usual way would result in an
exception stating that the key cannot be found.
When setting a key to a new value, the predicate associated with the key
will be executed to determine whether the value is allowed. If the value is
not allowed, an exception will be raised.
For the rest, this class should behave like an ordinary dictionary.
"""
pass #Not yet implemented.
|
<commit_before><commit_msg>Add documentation plan for ValidatedDictionary
This is supposed to implement the latest idea for how we're going to do that flexible configuration properly...<commit_after>#!/usr/bin/env python
#-*- coding: utf-8 -*-
#This software is distributed under the Creative Commons license (CC0) version 1.0. A copy of this license should have been distributed with this software.
#The license can also be read online: <https://creativecommons.org/publicdomain/zero/1.0/>. If this online license differs from the license provided with this software, the license provided with this software should be applied.
"""
Defines a dictionary-like data structure where all values are validated.
The purpose of this dictionary is to be used in configuration data structures
where the keys are more or less frozen after the application has completed its
start-up phase.
"""
class ValidatedDictionary:
"""
A dictionary-like data structure where all values are validated.
New keys can only be added by also providing a validator predicate. Adding
new keys can't be done with the usual ``dictionary[key] = value``, but via a
separate method ``add``. Adding new keys in the usual way would result in an
exception stating that the key cannot be found.
When setting a key to a new value, the predicate associated with the key
will be executed to determine whether the value is allowed. If the value is
not allowed, an exception will be raised.
For the rest, this class should behave like an ordinary dictionary.
"""
pass #Not yet implemented.
|
|
ea06febec1d9bb3c288bade012f1d9c1144577fd
|
007.py
|
007.py
|
"""
Project Euler Problem 7
=======================
By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see
that the 6th prime is 13.
What is the 10001st prime number?
"""
from itertools import count
def is_prime(number):
"""
Takes a number and returns True if it's a prime number, otherwise returns False.
"""
if number == 2 or number == 3:
return True
if number <= 0 or number % 2 == 0 or number % 3 == 0:
return False
for i in range(2, int(number ** 0.5) + 1):
if number % i == 0:
return False
return True
def prime_by_position(position):
"""
Takes an integer and returns the prime number at that position.
"""
prime_count = 0
for i in count(start=2):
if is_prime(i):
prime_count += 1
if prime_count == position:
return i
def test_prime_by_position():
assert prime_by_position(6) == 13
print(prime_by_position(10001))
|
Add solution and unit tests for problem 7
|
Add solution and unit tests for problem 7
|
Python
|
mit
|
BeataBak/project-euler-problems
|
Add solution and unit tests for problem 7
|
"""
Project Euler Problem 7
=======================
By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see
that the 6th prime is 13.
What is the 10001st prime number?
"""
from itertools import count
def is_prime(number):
"""
Takes a number and returns True if it's a prime number, otherwise returns False.
"""
if number == 2 or number == 3:
return True
if number <= 0 or number % 2 == 0 or number % 3 == 0:
return False
for i in range(2, int(number ** 0.5) + 1):
if number % i == 0:
return False
return True
def prime_by_position(position):
"""
Takes an integer and returns the prime number at that position.
"""
prime_count = 0
for i in count(start=2):
if is_prime(i):
prime_count += 1
if prime_count == position:
return i
def test_prime_by_position():
assert prime_by_position(6) == 13
print(prime_by_position(10001))
|
<commit_before><commit_msg>Add solution and unit tests for problem 7<commit_after>
|
"""
Project Euler Problem 7
=======================
By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see
that the 6th prime is 13.
What is the 10001st prime number?
"""
from itertools import count
def is_prime(number):
"""
Takes a number and returns True if it's a prime number, otherwise returns False.
"""
if number == 2 or number == 3:
return True
if number <= 0 or number % 2 == 0 or number % 3 == 0:
return False
for i in range(2, int(number ** 0.5) + 1):
if number % i == 0:
return False
return True
def prime_by_position(position):
"""
Takes an integer and returns the prime number at that position.
"""
prime_count = 0
for i in count(start=2):
if is_prime(i):
prime_count += 1
if prime_count == position:
return i
def test_prime_by_position():
assert prime_by_position(6) == 13
print(prime_by_position(10001))
|
Add solution and unit tests for problem 7"""
Project Euler Problem 7
=======================
By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see
that the 6th prime is 13.
What is the 10001st prime number?
"""
from itertools import count
def is_prime(number):
"""
Takes a number and returns True if it's a prime number, otherwise returns False.
"""
if number == 2 or number == 3:
return True
if number <= 0 or number % 2 == 0 or number % 3 == 0:
return False
for i in range(2, int(number ** 0.5) + 1):
if number % i == 0:
return False
return True
def prime_by_position(position):
"""
Takes an integer and returns the prime number at that position.
"""
prime_count = 0
for i in count(start=2):
if is_prime(i):
prime_count += 1
if prime_count == position:
return i
def test_prime_by_position():
assert prime_by_position(6) == 13
print(prime_by_position(10001))
|
<commit_before><commit_msg>Add solution and unit tests for problem 7<commit_after>"""
Project Euler Problem 7
=======================
By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see
that the 6th prime is 13.
What is the 10001st prime number?
"""
from itertools import count
def is_prime(number):
"""
Takes a number and returns True if it's a prime number, otherwise returns False.
"""
if number == 2 or number == 3:
return True
if number <= 0 or number % 2 == 0 or number % 3 == 0:
return False
for i in range(2, int(number ** 0.5) + 1):
if number % i == 0:
return False
return True
def prime_by_position(position):
"""
Takes an integer and returns the prime number at that position.
"""
prime_count = 0
for i in count(start=2):
if is_prime(i):
prime_count += 1
if prime_count == position:
return i
def test_prime_by_position():
assert prime_by_position(6) == 13
print(prime_by_position(10001))
|
|
027c91860a610b28d42d3045cb461f5fc78e7e2a
|
course_discovery/apps/course_metadata/migrations/0114_auto_20180905_1547.py
|
course_discovery/apps/course_metadata/migrations/0114_auto_20180905_1547.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-09-05 15:47
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('course_metadata', '0113_brief_text_curriculum'),
]
operations = [
migrations.RemoveField(
model_name='degree',
name='campus_image_desktop',
),
migrations.RemoveField(
model_name='degree',
name='campus_image_mobile',
),
migrations.RemoveField(
model_name='degree',
name='campus_image_tablet',
),
]
|
Remove the no longer used header images from database
|
Remove the no longer used header images from database
|
Python
|
agpl-3.0
|
edx/course-discovery,edx/course-discovery,edx/course-discovery,edx/course-discovery
|
Remove the no longer used header images from database
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-09-05 15:47
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('course_metadata', '0113_brief_text_curriculum'),
]
operations = [
migrations.RemoveField(
model_name='degree',
name='campus_image_desktop',
),
migrations.RemoveField(
model_name='degree',
name='campus_image_mobile',
),
migrations.RemoveField(
model_name='degree',
name='campus_image_tablet',
),
]
|
<commit_before><commit_msg>Remove the no longer used header images from database<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-09-05 15:47
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('course_metadata', '0113_brief_text_curriculum'),
]
operations = [
migrations.RemoveField(
model_name='degree',
name='campus_image_desktop',
),
migrations.RemoveField(
model_name='degree',
name='campus_image_mobile',
),
migrations.RemoveField(
model_name='degree',
name='campus_image_tablet',
),
]
|
Remove the no longer used header images from database# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-09-05 15:47
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('course_metadata', '0113_brief_text_curriculum'),
]
operations = [
migrations.RemoveField(
model_name='degree',
name='campus_image_desktop',
),
migrations.RemoveField(
model_name='degree',
name='campus_image_mobile',
),
migrations.RemoveField(
model_name='degree',
name='campus_image_tablet',
),
]
|
<commit_before><commit_msg>Remove the no longer used header images from database<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-09-05 15:47
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('course_metadata', '0113_brief_text_curriculum'),
]
operations = [
migrations.RemoveField(
model_name='degree',
name='campus_image_desktop',
),
migrations.RemoveField(
model_name='degree',
name='campus_image_mobile',
),
migrations.RemoveField(
model_name='degree',
name='campus_image_tablet',
),
]
|
|
adc85e4680f7fd7bde12dcc374fa097afb876b22
|
redditscrape.py
|
redditscrape.py
|
#!/usr/bin/env python
__author__ = "Patrick Guelcher"
__copyright__ = "(C) 2016 Patrick Guelcher"
__license__ = "MIT"
__version__ = "1.0"
"""
Scrapes the list of provided subreddits for images and downloads them to a local directory
"""
import os
import praw
import wget
import urllib.error
# Configuration
path = 'images' # Download folder (Default: images)
sub_list = ['vexillology', 'mapporn', 'pics'] # Subreddit list
post_limit = 100 # Submission limit to check and download
user_agent = 'Image Scraper 1.0 by /u/aeroblitz' # Use your own reddit username
# Do not edit beyond this comment
def main():
create_folder()
def create_folder():
os.mkdir(path)
download_images()
def download_images():
u = praw.Reddit(user_agent=user_agent)
for sub in sub_list:
posts = u.get_subreddit(sub).get_hot(limit=post_limit)
for post in posts:
if post.url is not None:
file_name = post.url
extension = post.url[-4:]
if extension == '.jpg' or extension == '.png':
try:
print (' File Name ' + file_name)
print (' Path ' + path)
wget.download(file_name, path)
except urllib.error.HTTPError as err:
if err.code == 404:
pass
else:
continue
else:
pass
if __name__ == '__main__':
main()
|
Add image scraper for Reddit
|
Add image scraper for Reddit
Currently breaks after pulling a few images
|
Python
|
mit
|
aerovolts/python-scripts
|
Add image scraper for Reddit
Currently breaks after pulling a few images
|
#!/usr/bin/env python
__author__ = "Patrick Guelcher"
__copyright__ = "(C) 2016 Patrick Guelcher"
__license__ = "MIT"
__version__ = "1.0"
"""
Scrapes the list of provided subreddits for images and downloads them to a local directory
"""
import os
import praw
import wget
import urllib.error
# Configuration
path = 'images' # Download folder (Default: images)
sub_list = ['vexillology', 'mapporn', 'pics'] # Subreddit list
post_limit = 100 # Submission limit to check and download
user_agent = 'Image Scraper 1.0 by /u/aeroblitz' # Use your own reddit username
# Do not edit beyond this comment
def main():
create_folder()
def create_folder():
os.mkdir(path)
download_images()
def download_images():
u = praw.Reddit(user_agent=user_agent)
for sub in sub_list:
posts = u.get_subreddit(sub).get_hot(limit=post_limit)
for post in posts:
if post.url is not None:
file_name = post.url
extension = post.url[-4:]
if extension == '.jpg' or extension == '.png':
try:
print (' File Name ' + file_name)
print (' Path ' + path)
wget.download(file_name, path)
except urllib.error.HTTPError as err:
if err.code == 404:
pass
else:
continue
else:
pass
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add image scraper for Reddit
Currently breaks after pulling a few images<commit_after>
|
#!/usr/bin/env python
__author__ = "Patrick Guelcher"
__copyright__ = "(C) 2016 Patrick Guelcher"
__license__ = "MIT"
__version__ = "1.0"
"""
Scrapes the list of provided subreddits for images and downloads them to a local directory
"""
import os
import praw
import wget
import urllib.error
# Configuration
path = 'images' # Download folder (Default: images)
sub_list = ['vexillology', 'mapporn', 'pics'] # Subreddit list
post_limit = 100 # Submission limit to check and download
user_agent = 'Image Scraper 1.0 by /u/aeroblitz' # Use your own reddit username
# Do not edit beyond this comment
def main():
create_folder()
def create_folder():
os.mkdir(path)
download_images()
def download_images():
u = praw.Reddit(user_agent=user_agent)
for sub in sub_list:
posts = u.get_subreddit(sub).get_hot(limit=post_limit)
for post in posts:
if post.url is not None:
file_name = post.url
extension = post.url[-4:]
if extension == '.jpg' or extension == '.png':
try:
print (' File Name ' + file_name)
print (' Path ' + path)
wget.download(file_name, path)
except urllib.error.HTTPError as err:
if err.code == 404:
pass
else:
continue
else:
pass
if __name__ == '__main__':
main()
|
Add image scraper for Reddit
Currently breaks after pulling a few images#!/usr/bin/env python
__author__ = "Patrick Guelcher"
__copyright__ = "(C) 2016 Patrick Guelcher"
__license__ = "MIT"
__version__ = "1.0"
"""
Scrapes the list of provided subreddits for images and downloads them to a local directory
"""
import os
import praw
import wget
import urllib.error
# Configuration
path = 'images' # Download folder (Default: images)
sub_list = ['vexillology', 'mapporn', 'pics'] # Subreddit list
post_limit = 100 # Sumbission limit to check and download
user_agent = 'Image Scraper 1.0 by /u/aeroblitz' # Use your own reddit username
# Do not edit beyond this comment
def main():
create_folder()
def create_folder():
os.mkdir(path)
download_images()
def download_images():
u = praw.Reddit(user_agent=user_agent)
for sub in sub_list:
posts = u.get_subreddit(sub).get_hot(limit=post_limit)
for post in posts:
if post.url is not None:
file_name = post.url
extension = post.url[-4:]
if extension == '.jpg' or extension == '.png':
try:
print (' File Name ' + file_name)
print (' Path ' + path)
wget.download(file_name, path)
except urllib.error.HTTPError as err:
if err.code == 404:
pass
else:
continue
else:
pass
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add image scraper for Reddit
Currently breaks after pulling a few images<commit_after>#!/usr/bin/env python
__author__ = "Patrick Guelcher"
__copyright__ = "(C) 2016 Patrick Guelcher"
__license__ = "MIT"
__version__ = "1.0"
"""
Scrapes the list of provided subreddits for images and downloads them to a local directoy
"""
import os
import praw
import wget
import urllib.error
# Configuration
path = 'images' # Download folder (Default: images)
sub_list = ['vexillology', 'mapporn', 'pics'] # Subreddit list
post_limit = 100 # Submission limit to check and download
user_agent = 'Image Scraper 1.0 by /u/aeroblitz' # Use your own reddit username
# Do not edit beyond this comment
def main():
create_folder()
def create_folder():
os.mkdir(path)
download_images()
def download_images():
u = praw.Reddit(user_agent=user_agent)
for sub in sub_list:
posts = u.get_subreddit(sub).get_hot(limit=post_limit)
for post in posts:
if post.url is not None:
file_name = post.url
extension = post.url[-4:]
if extension == '.jpg' or extension == '.png':
try:
print (' File Name ' + file_name)
print (' Path ' + path)
wget.download(file_name, path)
except urllib.error.HTTPError as err:
if err.code == 404:
pass
else:
continue
else:
pass
if __name__ == '__main__':
main()
|
|
a2afb7983735f82695068e945fa5fe325f6b9813
|
cfp/management/commands/applications_for_scoring.py
|
cfp/management/commands/applications_for_scoring.py
|
from cfp.models import CallForPaper
from django.core.management.base import BaseCommand
from django.db.models import StdDev, Count, Avg
class Command(BaseCommand):
help = ('Dumps a list of applications with scoring info, ready to be '
'copy/pasted into a google drive spreadsheet.')
def add_arguments(self, parser):
parser.add_argument('cfp_id', type=int)
def dump_application(self, application):
user = application.applicant.user
track = None
for label in application.labels.all():
if label.name.startswith("Track: "):
track = label.name[7:]
break
parts = [
application.pk,
user.full_name,
application.title,
application.type,
application.skill_level.name,
user.tshirt_size.name[0], # sex
"✈" if application.travel_expenses_required else None,
"🛏" if application.accomodation_required else None,
application.score_count,
round(application.score_average, 1) if application.score_average else None,
round(application.score_stddev, 1) if application.score_stddev else None,
track,
]
print("\t".join(str(p) if p else "" for p in parts))
def handle(self, *args, **options):
cfp = CallForPaper.objects.get(pk=options.get('cfp_id'))
applications = (cfp.applications.talks()
.prefetch_related('applicant__user__tshirt_size', 'skill_level', 'labels')
.annotate(score_count=Count('committee_votes'))
.annotate(score_average=Avg('committee_votes__score'))
.annotate(score_stddev=StdDev('committee_votes__score'))
.order_by('created_at'))
for application in applications:
self.dump_application(application)
|
Add command for dumping the scoring sheet
|
Add command for dumping the scoring sheet
|
Python
|
bsd-3-clause
|
WebCampZg/conference-web,WebCampZg/conference-web,WebCampZg/conference-web
|
Add command for dumping the scoring sheet
|
from cfp.models import CallForPaper
from django.core.management.base import BaseCommand
from django.db.models import StdDev, Count, Avg
class Command(BaseCommand):
help = ('Dumps a list of applications with scoring info, ready to be '
'copy/pasted into a google drive spreadsheet.')
def add_arguments(self, parser):
parser.add_argument('cfp_id', type=int)
def dump_application(self, application):
user = application.applicant.user
track = None
for label in application.labels.all():
if label.name.startswith("Track: "):
track = label.name[7:]
break
parts = [
application.pk,
user.full_name,
application.title,
application.type,
application.skill_level.name,
user.tshirt_size.name[0], # sex
"✈" if application.travel_expenses_required else None,
"🛏" if application.accomodation_required else None,
application.score_count,
round(application.score_average, 1) if application.score_average else None,
round(application.score_stddev, 1) if application.score_stddev else None,
track,
]
print("\t".join(str(p) if p else "" for p in parts))
def handle(self, *args, **options):
cfp = CallForPaper.objects.get(pk=options.get('cfp_id'))
applications = (cfp.applications.talks()
.prefetch_related('applicant__user__tshirt_size', 'skill_level', 'labels')
.annotate(score_count=Count('committee_votes'))
.annotate(score_average=Avg('committee_votes__score'))
.annotate(score_stddev=StdDev('committee_votes__score'))
.order_by('created_at'))
for application in applications:
self.dump_application(application)
|
<commit_before><commit_msg>Add command for dumping the scoring sheet<commit_after>
|
from cfp.models import CallForPaper
from django.core.management.base import BaseCommand
from django.db.models import StdDev, Count, Avg
class Command(BaseCommand):
help = ('Dumps a list of applications with scoring info, ready to be '
'copy/pasted into a google drive spreadsheet.')
def add_arguments(self, parser):
parser.add_argument('cfp_id', type=int)
def dump_application(self, application):
user = application.applicant.user
track = None
for label in application.labels.all():
if label.name.startswith("Track: "):
track = label.name[7:]
break
parts = [
application.pk,
user.full_name,
application.title,
application.type,
application.skill_level.name,
user.tshirt_size.name[0], # sex
"✈" if application.travel_expenses_required else None,
"🛏" if application.accomodation_required else None,
application.score_count,
round(application.score_average, 1) if application.score_average else None,
round(application.score_stddev, 1) if application.score_stddev else None,
track,
]
print("\t".join(str(p) if p else "" for p in parts))
def handle(self, *args, **options):
cfp = CallForPaper.objects.get(pk=options.get('cfp_id'))
applications = (cfp.applications.talks()
.prefetch_related('applicant__user__tshirt_size', 'skill_level', 'labels')
.annotate(score_count=Count('committee_votes'))
.annotate(score_average=Avg('committee_votes__score'))
.annotate(score_stddev=StdDev('committee_votes__score'))
.order_by('created_at'))
for application in applications:
self.dump_application(application)
|
Add command for dumping the scoring sheetfrom cfp.models import CallForPaper
from django.core.management.base import BaseCommand
from django.db.models import StdDev, Count, Avg
class Command(BaseCommand):
help = ('Dumps a list of applications with scoring info, ready to be '
'copy/pasted into a google drive spreadsheet.')
def add_arguments(self, parser):
parser.add_argument('cfp_id', type=int)
def dump_application(self, application):
user = application.applicant.user
track = None
for label in application.labels.all():
if label.name.startswith("Track: "):
track = label.name[7:]
break
parts = [
application.pk,
user.full_name,
application.title,
application.type,
application.skill_level.name,
user.tshirt_size.name[0], # sex
"✈" if application.travel_expenses_required else None,
"🛏" if application.accomodation_required else None,
application.score_count,
round(application.score_average, 1) if application.score_average else None,
round(application.score_stddev, 1) if application.score_stddev else None,
track,
]
print("\t".join(str(p) if p else "" for p in parts))
def handle(self, *args, **options):
cfp = CallForPaper.objects.get(pk=options.get('cfp_id'))
applications = (cfp.applications.talks()
.prefetch_related('applicant__user__tshirt_size', 'skill_level', 'labels')
.annotate(score_count=Count('committee_votes'))
.annotate(score_average=Avg('committee_votes__score'))
.annotate(score_stddev=StdDev('committee_votes__score'))
.order_by('created_at'))
for application in applications:
self.dump_application(application)
|
<commit_before><commit_msg>Add command for dumping the scoring sheet<commit_after>from cfp.models import CallForPaper
from django.core.management.base import BaseCommand
from django.db.models import StdDev, Count, Avg
class Command(BaseCommand):
help = ('Dumps a list of applications with scoring info, ready to be '
'copy/pasted into a google drive spreadsheet.')
def add_arguments(self, parser):
parser.add_argument('cfp_id', type=int)
def dump_application(self, application):
user = application.applicant.user
track = None
for label in application.labels.all():
if label.name.startswith("Track: "):
track = label.name[7:]
break
parts = [
application.pk,
user.full_name,
application.title,
application.type,
application.skill_level.name,
user.tshirt_size.name[0], # sex
"✈" if application.travel_expenses_required else None,
"🛏" if application.accomodation_required else None,
application.score_count,
round(application.score_average, 1) if application.score_average else None,
round(application.score_stddev, 1) if application.score_stddev else None,
track,
]
print("\t".join(str(p) if p else "" for p in parts))
def handle(self, *args, **options):
cfp = CallForPaper.objects.get(pk=options.get('cfp_id'))
applications = (cfp.applications.talks()
.prefetch_related('applicant__user__tshirt_size', 'skill_level', 'labels')
.annotate(score_count=Count('committee_votes'))
.annotate(score_average=Avg('committee_votes__score'))
.annotate(score_stddev=StdDev('committee_votes__score'))
.order_by('created_at'))
for application in applications:
self.dump_application(application)
|
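A minimal usage sketch for the management command above (a hedged example: the command name dump_scoring_sheet and the pk "7" are assumptions, since only the class body is shown). Because the rows are tab-separated, redirecting stdout to a .tsv file pastes cleanly into a spreadsheet.
# Hypothetical driver; substitute the real command module name and a real
# CallForPaper primary key. Requires a configured Django project.
from django.core.management import call_command

call_command('dump_scoring_sheet', '7')  # prints one tab-separated row per talk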
|
6294766c3e30963380fda5b6c6f4f57d16be81d1
|
scripts/export_point_cloud_to_vtu.py
|
scripts/export_point_cloud_to_vtu.py
|
import pyevtk
import numpy as np
# Input and output file are hardcoded at this time but that can be changed if
# necessary
input_filename = 'point_cloud.txt'
# If output_path is ./point_cloud, the script will output in the current
# directory a file named point_cloud.vtu
output_path = './point_cloud'
point_cloud = np.genfromtxt(input_filename, delimiter=' ')
x = np.ascontiguousarray(point_cloud[:,0])
y = np.ascontiguousarray(point_cloud[:,1])
z = np.ascontiguousarray(point_cloud[:,2])
pyevtk.hl.pointsToVTK(output_path, x, y, z, data={"val" :
np.ones([len(point_cloud)])})
|
Add script to output point clouds as vtu files
|
Add script to output point clouds as vtu files
|
Python
|
bsd-3-clause
|
ORNL-CEES/DataTransferKit,ORNL-CEES/DataTransferKit,dalg24/DataTransferKit,Rombur/DataTransferKit,Rombur/DataTransferKit,ORNL-CEES/DataTransferKit,dalg24/DataTransferKit,ORNL-CEES/DataTransferKit,Rombur/DataTransferKit,dalg24/DataTransferKit,dalg24/DataTransferKit,Rombur/DataTransferKit
|
Add script to output point clouds as vtu files
|
import pyevtk
import numpy as np
# Input and output file are hardcoded at this time but that can be changed if
# necessary
input_filename = 'point_cloud.txt'
# If output_path is ./point_cloud, the script will output in the current
# directory a file named point_cloud.vtu
output_path = './point_cloud'
point_cloud = np.genfromtxt(input_filename, delimiter=' ')
x = np.ascontiguousarray(point_cloud[:,0])
y = np.ascontiguousarray(point_cloud[:,1])
z = np.ascontiguousarray(point_cloud[:,2])
pyevtk.hl.pointsToVTK(output_path, x, y, z, data={"val" :
np.ones([len(point_cloud)])})
|
<commit_before><commit_msg>Add script to output point clouds as vtu files<commit_after>
|
import pyevtk
import numpy as np
# Input and output file are hardcoded at this time but that can be changed if
# necessary
input_filename = 'point_cloud.txt'
# If output_path is ./point_cloud, the script will output in the current
# directory a file named point_cloud.vtu
output_path = './point_cloud'
point_cloud = np.genfromtxt(input_filename, delimiter=' ')
x = np.ascontiguousarray(point_cloud[:,0])
y = np.ascontiguousarray(point_cloud[:,1])
z = np.ascontiguousarray(point_cloud[:,2])
pyevtk.hl.pointsToVTK(output_path, x, y, z, data={"val" :
np.ones([len(point_cloud)])})
|
Add script to output point clouds as vtu filesimport pyevtk
import numpy as np
# Input and output file are hardcoded at this time but that can be changed if
# necessary
input_filename = 'point_cloud.txt'
# If output_path is ./point_cloud, the script will output in the current
# directory a file named point_cloud.vtu
output_path = './point_cloud'
point_cloud = np.genfromtxt(input_filename, delimiter=' ')
x = np.ascontiguousarray(point_cloud[:,0])
y = np.ascontiguousarray(point_cloud[:,1])
z = np.ascontiguousarray(point_cloud[:,2])
pyevtk.hl.pointsToVTK(output_path, x, y, z, data={"val" :
np.ones([len(point_cloud)])})
|
<commit_before><commit_msg>Add script to output point clouds as vtu files<commit_after>import pyevtk
import numpy as np
# Input and output file are hardcoded at this time but that can be changed if
# necessary
input_filename = 'point_cloud.txt'
# If output_path is ./point_cloud, the script will output in the current
# directory a file named point_cloud.vtu
output_path = './point_cloud'
point_cloud = np.genfromtxt(input_filename, delimiter=' ')
x = np.ascontiguousarray(point_cloud[:,0])
y = np.ascontiguousarray(point_cloud[:,1])
z = np.ascontiguousarray(point_cloud[:,2])
pyevtk.hl.pointsToVTK(output_path, x, y, z, data={"val" :
np.ones([len(point_cloud)])})
|
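For reference, a small sketch of the input the converter above expects: point_cloud.txt holds one point per line as three space-separated coordinates, which is what np.genfromtxt(..., delimiter=' ') parses. This is an illustrative assumption about how such a file might be produced, not part of the original script.
# Write a toy point_cloud.txt with 100 random points in the unit cube.
import numpy as np

points = np.random.rand(100, 3)
np.savetxt('point_cloud.txt', points, fmt='%.6f', delimiter=' ')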
|
db6dafeabdade2cc8f2e14be3ed06938d3dff644
|
tests/test_classes.py
|
tests/test_classes.py
|
import unittest
from classes import Paladin
from models.spells.loader import load_paladin_spells_for_level
class PaladinTests(unittest.TestCase):
def setUp(self):
self.name = "Netherblood"
self.level = 3
self.dummy = Paladin(name=self.name, level=self.level, health=100, mana=100, strength=10)
def test_init(self):
""" The __init__ should load/save all the spells for the Paladin"""
spells = [spell for level in range(1,self.level+1) for spell in load_paladin_spells_for_level(level)]
self.assertNotEqual(len(self.dummy.learned_spells), 0)
for spell in spells:
self.assertIn(spell.name, self.dummy.learned_spells)
char_spell = self.dummy.learned_spells[spell.name]
# find the largest rank in our spells list (the char has the highest rank only)
max_rank = list(sorted(filter(lambda x: x.name == spell.name, spells), key=lambda x: x.rank))[-1].rank
self.assertEqual(char_spell.rank, max_rank)
if __name__ == '__main__':
unittest.main()
|
Test for the __init__ function of the Paladin class
|
Test for the __init__ function of the Paladin class
|
Python
|
mit
|
Enether/python_wow
|
Test for the __init__ function of the Paladin class
|
import unittest
from classes import Paladin
from models.spells.loader import load_paladin_spells_for_level
class PaladinTests(unittest.TestCase):
def setUp(self):
self.name = "Netherblood"
self.level = 3
self.dummy = Paladin(name=self.name, level=self.level, health=100, mana=100, strength=10)
def test_init(self):
""" The __init__ should load/save all the spells for the Paladin"""
spells = [spell for level in range(1,self.level+1) for spell in load_paladin_spells_for_level(level)]
self.assertNotEqual(len(self.dummy.learned_spells), 0)
for spell in spells:
self.assertIn(spell.name, self.dummy.learned_spells)
char_spell = self.dummy.learned_spells[spell.name]
# find the largest rank in our spells list (the char has the highest rank only)
max_rank = list(sorted(filter(lambda x: x.name == spell.name, spells), key=lambda x: x.rank))[-1].rank
self.assertEqual(char_spell.rank, max_rank)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Test for the __init__ function of the Paladin class<commit_after>
|
import unittest
from classes import Paladin
from models.spells.loader import load_paladin_spells_for_level
class PaladinTests(unittest.TestCase):
def setUp(self):
self.name = "Netherblood"
self.level = 3
self.dummy = Paladin(name=self.name, level=self.level, health=100, mana=100, strength=10)
def test_init(self):
""" The __init__ should load/save all the spells for the Paladin"""
spells = [spell for level in range(1,self.level+1) for spell in load_paladin_spells_for_level(level)]
self.assertNotEqual(len(self.dummy.learned_spells), 0)
for spell in spells:
self.assertIn(spell.name, self.dummy.learned_spells)
char_spell = self.dummy.learned_spells[spell.name]
# find the largest rank in our spells list (the char has the highest rank only)
max_rank = list(sorted(filter(lambda x: x.name == spell.name, spells), key=lambda x: x.rank))[-1].rank
self.assertEqual(char_spell.rank, max_rank)
if __name__ == '__main__':
unittest.main()
|
Test for the __init__ function of the Paladin classimport unittest
from classes import Paladin
from models.spells.loader import load_paladin_spells_for_level
class PaladinTests(unittest.TestCase):
def setUp(self):
self.name = "Netherblood"
self.level = 3
self.dummy = Paladin(name=self.name, level=self.level, health=100, mana=100, strength=10)
def test_init(self):
""" The __init__ should load/save all the spells for the Paladin"""
spells = [spell for level in range(1,self.level+1) for spell in load_paladin_spells_for_level(level)]
self.assertNotEqual(len(self.dummy.learned_spells), 0)
for spell in spells:
self.assertIn(spell.name, self.dummy.learned_spells)
char_spell = self.dummy.learned_spells[spell.name]
# find the largest rank in our spells list (the char has the highest rank only)
max_rank = list(sorted(filter(lambda x: x.name == spell.name, spells), key=lambda x: x.rank))[-1].rank
self.assertEqual(char_spell.rank, max_rank)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Test for the __init__ function of the Paladin class<commit_after>import unittest
from classes import Paladin
from models.spells.loader import load_paladin_spells_for_level
class PaladinTests(unittest.TestCase):
def setUp(self):
self.name = "Netherblood"
self.level = 3
self.dummy = Paladin(name=self.name, level=self.level, health=100, mana=100, strength=10)
def test_init(self):
""" The __init__ should load/save all the spells for the Paladin"""
spells = [spell for level in range(1,self.level+1) for spell in load_paladin_spells_for_level(level)]
self.assertNotEqual(len(self.dummy.learned_spells), 0)
for spell in spells:
self.assertIn(spell.name, self.dummy.learned_spells)
char_spell = self.dummy.learned_spells[spell.name]
# find the largest rank in our spells list (the char has the highest rank only)
max_rank = list(sorted(filter(lambda x: x.name == spell.name, spells), key=lambda x: x.rank))[-1].rank
self.assertEqual(char_spell.rank, max_rank)
if __name__ == '__main__':
unittest.main()
|
|
289a17fe296f579b43d62c00d27ca691da2ac944
|
reporter-cli/sql-pdf/python/src/reportlabpkq/__init__.py
|
reporter-cli/sql-pdf/python/src/reportlabpkq/__init__.py
|
# -*- coding: utf-8 -*-
# reporter-cli/sql-pdf/python/src/reportlabpkq/__init__.py
# =============================================================================
# Reporter Multilang. Version 0.1
# =============================================================================
# A tool to generate human-readable reports based on data from various sources
# with the focus on its implementation using a series of programming languages.
# =============================================================================
# Written by Radislav (Radicchio) Golubtsov, 2016-2017
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# (See the LICENSE file at the top of the source tree.)
#
# vim:set nu:et:ts=4:sw=4:
|
Add package initializer (according to PEP 420).
|
SQL-PDF-Python: Add package initializer (according to PEP 420).
|
Python
|
unlicense
|
rgolubtsov/reporter-multilang,rgolubtsov/reporter-multilang,rgolubtsov/reporter-multilang,rgolubtsov/reporter-multilang
|
SQL-PDF-Python: Add package initializer (according to PEP 420).
|
# -*- coding: utf-8 -*-
# reporter-cli/sql-pdf/python/src/reportlabpkq/__init__.py
# =============================================================================
# Reporter Multilang. Version 0.1
# =============================================================================
# A tool to generate human-readable reports based on data from various sources
# with the focus on its implementation using a series of programming languages.
# =============================================================================
# Written by Radislav (Radicchio) Golubtsov, 2016-2017
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# (See the LICENSE file at the top of the source tree.)
#
# vim:set nu:et:ts=4:sw=4:
|
<commit_before><commit_msg>SQL-PDF-Python: Add package initializer (according to PEP 420).<commit_after>
|
# -*- coding: utf-8 -*-
# reporter-cli/sql-pdf/python/src/reportlabpkq/__init__.py
# =============================================================================
# Reporter Multilang. Version 0.1
# =============================================================================
# A tool to generate human-readable reports based on data from various sources
# with the focus on its implementation using a series of programming languages.
# =============================================================================
# Written by Radislav (Radicchio) Golubtsov, 2016-2017
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# (See the LICENSE file at the top of the source tree.)
#
# vim:set nu:et:ts=4:sw=4:
|
SQL-PDF-Python: Add package initializer (according to PEP 420).# -*- coding: utf-8 -*-
# reporter-cli/sql-pdf/python/src/reportlabpkq/__init__.py
# =============================================================================
# Reporter Multilang. Version 0.1
# =============================================================================
# A tool to generate human-readable reports based on data from various sources
# with the focus on its implementation using a series of programming languages.
# =============================================================================
# Written by Radislav (Radicchio) Golubtsov, 2016-2017
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# (See the LICENSE file at the top of the source tree.)
#
# vim:set nu:et:ts=4:sw=4:
|
<commit_before><commit_msg>SQL-PDF-Python: Add package initializer (according to PEP 420).<commit_after># -*- coding: utf-8 -*-
# reporter-cli/sql-pdf/python/src/reportlabpkq/__init__.py
# =============================================================================
# Reporter Multilang. Version 0.1
# =============================================================================
# A tool to generate human-readable reports based on data from various sources
# with the focus on its implementation using a series of programming languages.
# =============================================================================
# Written by Radislav (Radicchio) Golubtsov, 2016-2017
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# (See the LICENSE file at the top of the source tree.)
#
# vim:set nu:et:ts=4:sw=4:
|
|
cdb12a5536931c62652f4e7a329eb13969cac6cb
|
lintcode/Medium/124_Longest_Consecutive_Sequence.py
|
lintcode/Medium/124_Longest_Consecutive_Sequence.py
|
class Solution:
"""
@param num, a list of integer
@return an integer
"""
def longestConsecutive(self, num):
# write your code here
hashMap = {}
res = 0
for n in num:
hashMap[n] = False
for k in hashMap:
tmp = k - 1
tmpRes = 1
while (tmp in hashMap and not hashMap[tmp]):
hashMap[tmp] = True
tmp -= 1
tmpRes += 1
tmp = k + 1
while (tmp in hashMap and not hashMap[tmp]):
hashMap[tmp] = True
tmp += 1
tmpRes += 1
            res = res if tmpRes < res else tmpRes
return res
|
Add Solution to lintcode question 124
|
Add Solution to lintcode question 124
|
Python
|
mit
|
Rhadow/leetcode,Rhadow/leetcode,Rhadow/leetcode,Rhadow/leetcode
|
Add Solution to lintcode question 124
|
class Solution:
"""
@param num, a list of integer
@return an integer
"""
def longestConsecutive(self, num):
# write your code here
hashMap = {}
res = 0
for n in num:
hashMap[n] = False
for k in hashMap:
tmp = k - 1
tmpRes = 1
while (tmp in hashMap and not hashMap[tmp]):
hashMap[tmp] = True
tmp -= 1
tmpRes += 1
tmp = k + 1
while (tmp in hashMap and not hashMap[tmp]):
hashMap[tmp] = True
tmp += 1
tmpRes += 1
            res = res if tmpRes < res else tmpRes
return res
|
<commit_before><commit_msg>Add Solution to lintcode question 124<commit_after>
|
class Solution:
"""
@param num, a list of integer
@return an integer
"""
def longestConsecutive(self, num):
# write your code here
hashMap = {}
res = 0
for n in num:
hashMap[n] = False
for k in hashMap:
tmp = k - 1
tmpRes = 1
while (tmp in hashMap and not hashMap[tmp]):
hashMap[tmp] = True
tmp -= 1
tmpRes += 1
tmp = k + 1
while (tmp in hashMap and not hashMap[tmp]):
hashMap[tmp] = True
tmp += 1
tmpRes += 1
            res = res if tmpRes < res else tmpRes
return res
|
Add Solution to lintcode question 124class Solution:
"""
@param num, a list of integer
@return an integer
"""
def longestConsecutive(self, num):
# write your code here
hashMap = {}
res = 0
for n in num:
hashMap[n] = False
for k in hashMap:
tmp = k - 1
tmpRes = 1
while (tmp in hashMap and not hashMap[tmp]):
hashMap[tmp] = True
tmp -= 1
tmpRes += 1
tmp = k + 1
while (tmp in hashMap and not hashMap[tmp]):
hashMap[tmp] = True
tmp += 1
tmpRes += 1
            res = res if tmpRes < res else tmpRes
return res
|
<commit_before><commit_msg>Add Solution to lintcode question 124<commit_after>class Solution:
"""
@param num, a list of integer
@return an integer
"""
def longestConsecutive(self, num):
# write your code here
hashMap = {}
res = 0
for n in num:
hashMap[n] = False
for k in hashMap:
tmp = k - 1
tmpRes = 1
while (tmp in hashMap and not hashMap[tmp]):
hashMap[tmp] = True
tmp -= 1
tmpRes += 1
tmp = k + 1
while (tmp in hashMap and not hashMap[tmp]):
hashMap[tmp] = True
tmp += 1
tmpRes += 1
            res = res if tmpRes < res else tmpRes
return res
|
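A brief usage sketch of the hash-map solution above, assuming the Solution class is in scope: each unvisited key is expanded left and right until the chain breaks, so the run 1-2-3-4 wins here.
# Expected output: 4 (the consecutive run 1, 2, 3, 4).
solver = Solution()
print(solver.longestConsecutive([100, 4, 200, 1, 3, 2]))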
|
bd243742f65a8fd92f4a773ce485cdc6f03f4a84
|
kevin/leet/copy_list_with_random_pointers.py
|
kevin/leet/copy_list_with_random_pointers.py
|
"""
https://leetcode.com/explore/challenge/card/february-leetcoding-challenge-2021/585/week-2-february-8th-february-14th/3635/
"""
class Node:
def __init__(self, x: int, next: 'Node'=None, random: 'Node'=None):
self.val = int(x)
self.next = next
self.random = random
class Solution:
def copy_random_list(self, head: Node) -> Node:
# I am aware this solution is not at all optimal in terms of
# memory consumption
# O(1) space is possible by cleverly linking old and new nodes in
# an alternating fashion
if head is None:
return None
# construct map from nodes to their random nodes
current = head
random = dict() # map from old node to old random node
while current is not None:
random[current] = current.random
current = current.next
# set up new nodes, with map from new to old
current = head
new = Node(current.val)
new_head = new
back = {new: head} # map back from new to old
forward = {head: new} # map forward from old to new
while current.next is not None:
current = current.next
new.next = Node(current.val)
new = new.next
back[new] = current
forward[current] = new
# add correct random pointers in new nodes
current = new_head
while current is not None:
old = back[current]
old_random = random[old]
current.random = forward.get(old_random, None)
current = current.next
return new_head
|
Add Copy List with Random Pointer LeetCode problem
|
Add Copy List with Random Pointer LeetCode problem
- No tests though
|
Python
|
mit
|
kalyons11/kevin,kalyons11/kevin
|
Add Copy List with Random Pointer LeetCode problem
- No tests though
|
"""
https://leetcode.com/explore/challenge/card/february-leetcoding-challenge-2021/585/week-2-february-8th-february-14th/3635/
"""
class Node:
def __init__(self, x: int, next: 'Node'=None, random: 'Node'=None):
self.val = int(x)
self.next = next
self.random = random
class Solution:
def copy_random_list(self, head: Node) -> Node:
# I am aware this solution is not at all optimal in terms of
# memory consumption
# O(1) space is possible by cleverly linking old and new nodes in
# an alternating fashion
if head is None:
return None
# construct map from nodes to their random nodes
current = head
random = dict() # map from old node to old random node
while current is not None:
random[current] = current.random
current = current.next
# set up new nodes, with map from new to old
current = head
new = Node(current.val)
new_head = new
back = {new: head} # map back from new to old
forward = {head: new} # map forward from old to new
while current.next is not None:
current = current.next
new.next = Node(current.val)
new = new.next
back[new] = current
forward[current] = new
# add correct random pointers in new nodes
current = new_head
while current is not None:
old = back[current]
old_random = random[old]
current.random = forward.get(old_random, None)
current = current.next
return new_head
|
<commit_before><commit_msg>Add Copy List with Random Pointer LeetCode problem
- No tests though<commit_after>
|
"""
https://leetcode.com/explore/challenge/card/february-leetcoding-challenge-2021/585/week-2-february-8th-february-14th/3635/
"""
class Node:
def __init__(self, x: int, next: 'Node'=None, random: 'Node'=None):
self.val = int(x)
self.next = next
self.random = random
class Solution:
def copy_random_list(self, head: Node) -> Node:
# I am aware this solution is not at all optimal in terms of
# memory consumption
# O(1) space is possible by cleverly linking old and new nodes in
# an alternating fashion
if head is None:
return None
# construct map from nodes to their random nodes
current = head
random = dict() # map from old node to old random node
while current is not None:
random[current] = current.random
current = current.next
# set up new nodes, with map from new to old
current = head
new = Node(current.val)
new_head = new
back = {new: head} # map back from new to old
forward = {head: new} # map forward from old to new
while current.next is not None:
current = current.next
new.next = Node(current.val)
new = new.next
back[new] = current
forward[current] = new
# add correct random pointers in new nodes
current = new_head
while current is not None:
old = back[current]
old_random = random[old]
current.random = forward.get(old_random, None)
current = current.next
return new_head
|
Add Copy List with Random Pointer LeetCode problem
- No tests though"""
https://leetcode.com/explore/challenge/card/february-leetcoding-challenge-2021/585/week-2-february-8th-february-14th/3635/
"""
class Node:
def __init__(self, x: int, next: 'Node'=None, random: 'Node'=None):
self.val = int(x)
self.next = next
self.random = random
class Solution:
def copy_random_list(self, head: Node) -> Node:
# I am aware this solution is not at all optimal in terms of
# memory consumption
# O(1) space is possible by cleverly linking old and new nodes in
# an alternating fashion
if head is None:
return None
# construct map from nodes to their random nodes
current = head
random = dict() # map from old node to old random node
while current is not None:
random[current] = current.random
current = current.next
# set up new nodes, with map from new to old
current = head
new = Node(current.val)
new_head = new
back = {new: head} # map back from new to old
forward = {head: new} # map forward from old to new
while current.next is not None:
current = current.next
new.next = Node(current.val)
new = new.next
back[new] = current
forward[current] = new
# add correct random pointers in new nodes
current = new_head
while current is not None:
old = back[current]
old_random = random[old]
current.random = forward.get(old_random, None)
current = current.next
return new_head
|
<commit_before><commit_msg>Add Copy List with Random Pointer LeetCode problem
- No tests though<commit_after>"""
https://leetcode.com/explore/challenge/card/february-leetcoding-challenge-2021/585/week-2-february-8th-february-14th/3635/
"""
class Node:
def __init__(self, x: int, next: 'Node'=None, random: 'Node'=None):
self.val = int(x)
self.next = next
self.random = random
class Solution:
def copy_random_list(self, head: Node) -> Node:
# I am aware this solution is not at all optimal in terms of
# memory consumption
# O(1) space is possible by cleverly linking old and new nodes in
# an alternating fashion
if head is None:
return None
# construct map from nodes to their random nodes
current = head
random = dict() # map from old node to old random node
while current is not None:
random[current] = current.random
current = current.next
# set up new nodes, with map from new to old
current = head
new = Node(current.val)
new_head = new
back = {new: head} # map back from new to old
forward = {head: new} # map forward from old to new
while current.next is not None:
current = current.next
new.next = Node(current.val)
new = new.next
back[new] = current
forward[current] = new
# add correct random pointers in new nodes
current = new_head
while current is not None:
old = back[current]
old_random = random[old]
current.random = forward.get(old_random, None)
current = current.next
return new_head
|
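The comments in the solution above mention an O(1) extra-space variant without showing it; the sketch below illustrates that interleaving idea using the same Node class. It is an illustrative alternative, not the author's implementation: each copy is spliced in directly after its original, random pointers are wired through original.next, and the two lists are then unzipped.
def copy_random_list_o1(head: 'Node') -> 'Node':
    # O(1) extra space apart from the new nodes themselves.
    if head is None:
        return None
    # 1) Splice a copy of each node right after the original: o1, c1, o2, c2, ...
    node = head
    while node is not None:
        node.next = Node(node.val, node.next)
        node = node.next.next
    # 2) Wire each copy's random pointer via the original's random.
    node = head
    while node is not None:
        if node.random is not None:
            node.next.random = node.random.next
        node = node.next.next
    # 3) Unzip: restore the original list and extract the copied list.
    new_head = head.next
    node = head
    while node is not None:
        copy = node.next
        node.next = copy.next
        copy.next = copy.next.next if copy.next is not None else None
        node = node.next
    return new_head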
|
e51e1c14b1375249af90eff21978a316471c16b9
|
ichnaea/tests/test_migration.py
|
ichnaea/tests/test_migration.py
|
from alembic import command as alembic_command
from alembic.config import Config
from alembic.script import ScriptDirectory
from sqlalchemy import inspect
from sqlalchemy.schema import (
MetaData,
Table,
)
# make sure all models are imported
from ichnaea import models # NOQA
from ichnaea.content import models # NOQA
from ichnaea.tests.base import (
_make_db,
DBIsolation,
setup_package,
SQL_BASE_STRUCTURE,
TestCase,
)
class TestMigration(TestCase):
def setUp(self):
self.db = _make_db()
# capture state of fresh database
self.head_tables = self.inspect_tables()
DBIsolation.cleanup_tables(self.db.engine)
def tearDown(self):
self.db.engine.pool.dispose()
del self.db
# setup normal database schema again
setup_package(None)
def alembic_config(self):
alembic_cfg = Config()
alembic_cfg.set_section_option(
'alembic', 'script_location', 'alembic')
alembic_cfg.set_section_option(
'alembic', 'sqlalchemy.url', str(self.db.engine.url))
return alembic_cfg
def alembic_script(self):
return ScriptDirectory.from_config(self.alembic_config())
def current_db_revision(self):
with self.db.engine.connect() as conn:
result = conn.execute('select version_num from alembic_version')
alembic_rev = result.first()
if alembic_rev is None:
return None
return alembic_rev[0]
def inspect_tables(self):
metadata = MetaData()
inspector = inspect(self.db.engine)
tables = {}
for name in inspector.get_table_names():
tables[name] = Table(name, metadata)
return tables
def setup_base_db(self):
with open(SQL_BASE_STRUCTURE) as fd:
sql_text = fd.read()
with self.db.engine.connect() as conn:
conn.execute(sql_text)
def run_migration(self, target='head'):
engine = self.db.engine
with engine.connect() as conn:
trans = conn.begin()
alembic_command.upgrade(self.alembic_config(), target)
trans.commit()
def test_migration(self):
self.setup_base_db()
# we have no alembic base revision
self.assertTrue(self.current_db_revision() is None)
self.run_migration()
# after the migration, the DB is stamped
db_revision = self.current_db_revision()
self.assertTrue(db_revision is not None)
# db revision matches latest alembic revision
alembic_head = self.alembic_script().get_current_head()
self.assertEqual(db_revision, alembic_head)
# compare the tables from a migrated database to those
# created fresh from the model definitions
migrated_tables = self.inspect_tables()
head_tables = self.head_tables
self.assertEqual(set(head_tables.keys()),
set(migrated_tables.keys()))
|
Add a test to execute all migrations.
|
Add a test to execute all migrations.
|
Python
|
apache-2.0
|
mozilla/ichnaea,therewillbecode/ichnaea,mozilla/ichnaea,mozilla/ichnaea,mozilla/ichnaea,therewillbecode/ichnaea,therewillbecode/ichnaea
|
Add a test to execute all migrations.
|
from alembic import command as alembic_command
from alembic.config import Config
from alembic.script import ScriptDirectory
from sqlalchemy import inspect
from sqlalchemy.schema import (
MetaData,
Table,
)
# make sure all models are imported
from ichnaea import models # NOQA
from ichnaea.content import models # NOQA
from ichnaea.tests.base import (
_make_db,
DBIsolation,
setup_package,
SQL_BASE_STRUCTURE,
TestCase,
)
class TestMigration(TestCase):
def setUp(self):
self.db = _make_db()
# capture state of fresh database
self.head_tables = self.inspect_tables()
DBIsolation.cleanup_tables(self.db.engine)
def tearDown(self):
self.db.engine.pool.dispose()
del self.db
# setup normal database schema again
setup_package(None)
def alembic_config(self):
alembic_cfg = Config()
alembic_cfg.set_section_option(
'alembic', 'script_location', 'alembic')
alembic_cfg.set_section_option(
'alembic', 'sqlalchemy.url', str(self.db.engine.url))
return alembic_cfg
def alembic_script(self):
return ScriptDirectory.from_config(self.alembic_config())
def current_db_revision(self):
with self.db.engine.connect() as conn:
result = conn.execute('select version_num from alembic_version')
alembic_rev = result.first()
if alembic_rev is None:
return None
return alembic_rev[0]
def inspect_tables(self):
metadata = MetaData()
inspector = inspect(self.db.engine)
tables = {}
for name in inspector.get_table_names():
tables[name] = Table(name, metadata)
return tables
def setup_base_db(self):
with open(SQL_BASE_STRUCTURE) as fd:
sql_text = fd.read()
with self.db.engine.connect() as conn:
conn.execute(sql_text)
def run_migration(self, target='head'):
engine = self.db.engine
with engine.connect() as conn:
trans = conn.begin()
alembic_command.upgrade(self.alembic_config(), target)
trans.commit()
def test_migration(self):
self.setup_base_db()
# we have no alembic base revision
self.assertTrue(self.current_db_revision() is None)
self.run_migration()
# after the migration, the DB is stamped
db_revision = self.current_db_revision()
self.assertTrue(db_revision is not None)
# db revision matches latest alembic revision
alembic_head = self.alembic_script().get_current_head()
self.assertEqual(db_revision, alembic_head)
# compare the tables from a migrated database to those
# created fresh from the model definitions
migrated_tables = self.inspect_tables()
head_tables = self.head_tables
self.assertEqual(set(head_tables.keys()),
set(migrated_tables.keys()))
|
<commit_before><commit_msg>Add a test to execute all migrations.<commit_after>
|
from alembic import command as alembic_command
from alembic.config import Config
from alembic.script import ScriptDirectory
from sqlalchemy import inspect
from sqlalchemy.schema import (
MetaData,
Table,
)
# make sure all models are imported
from ichnaea import models # NOQA
from ichnaea.content import models # NOQA
from ichnaea.tests.base import (
_make_db,
DBIsolation,
setup_package,
SQL_BASE_STRUCTURE,
TestCase,
)
class TestMigration(TestCase):
def setUp(self):
self.db = _make_db()
# capture state of fresh database
self.head_tables = self.inspect_tables()
DBIsolation.cleanup_tables(self.db.engine)
def tearDown(self):
self.db.engine.pool.dispose()
del self.db
# setup normal database schema again
setup_package(None)
def alembic_config(self):
alembic_cfg = Config()
alembic_cfg.set_section_option(
'alembic', 'script_location', 'alembic')
alembic_cfg.set_section_option(
'alembic', 'sqlalchemy.url', str(self.db.engine.url))
return alembic_cfg
def alembic_script(self):
return ScriptDirectory.from_config(self.alembic_config())
def current_db_revision(self):
with self.db.engine.connect() as conn:
result = conn.execute('select version_num from alembic_version')
alembic_rev = result.first()
if alembic_rev is None:
return None
return alembic_rev[0]
def inspect_tables(self):
metadata = MetaData()
inspector = inspect(self.db.engine)
tables = {}
for name in inspector.get_table_names():
tables[name] = Table(name, metadata)
return tables
def setup_base_db(self):
with open(SQL_BASE_STRUCTURE) as fd:
sql_text = fd.read()
with self.db.engine.connect() as conn:
conn.execute(sql_text)
def run_migration(self, target='head'):
engine = self.db.engine
with engine.connect() as conn:
trans = conn.begin()
alembic_command.upgrade(self.alembic_config(), target)
trans.commit()
def test_migration(self):
self.setup_base_db()
# we have no alembic base revision
self.assertTrue(self.current_db_revision() is None)
self.run_migration()
# after the migration, the DB is stamped
db_revision = self.current_db_revision()
self.assertTrue(db_revision is not None)
# db revision matches latest alembic revision
alembic_head = self.alembic_script().get_current_head()
self.assertEqual(db_revision, alembic_head)
# compare the tables from a migrated database to those
# created fresh from the model definitions
migrated_tables = self.inspect_tables()
head_tables = self.head_tables
self.assertEqual(set(head_tables.keys()),
set(migrated_tables.keys()))
|
Add a test to execute all migrations.from alembic import command as alembic_command
from alembic.config import Config
from alembic.script import ScriptDirectory
from sqlalchemy import inspect
from sqlalchemy.schema import (
MetaData,
Table,
)
# make sure all models are imported
from ichnaea import models # NOQA
from ichnaea.content import models # NOQA
from ichnaea.tests.base import (
_make_db,
DBIsolation,
setup_package,
SQL_BASE_STRUCTURE,
TestCase,
)
class TestMigration(TestCase):
def setUp(self):
self.db = _make_db()
# capture state of fresh database
self.head_tables = self.inspect_tables()
DBIsolation.cleanup_tables(self.db.engine)
def tearDown(self):
self.db.engine.pool.dispose()
del self.db
# setup normal database schema again
setup_package(None)
def alembic_config(self):
alembic_cfg = Config()
alembic_cfg.set_section_option(
'alembic', 'script_location', 'alembic')
alembic_cfg.set_section_option(
'alembic', 'sqlalchemy.url', str(self.db.engine.url))
return alembic_cfg
def alembic_script(self):
return ScriptDirectory.from_config(self.alembic_config())
def current_db_revision(self):
with self.db.engine.connect() as conn:
result = conn.execute('select version_num from alembic_version')
alembic_rev = result.first()
if alembic_rev is None:
return None
return alembic_rev[0]
def inspect_tables(self):
metadata = MetaData()
inspector = inspect(self.db.engine)
tables = {}
for name in inspector.get_table_names():
tables[name] = Table(name, metadata)
return tables
def setup_base_db(self):
with open(SQL_BASE_STRUCTURE) as fd:
sql_text = fd.read()
with self.db.engine.connect() as conn:
conn.execute(sql_text)
def run_migration(self, target='head'):
engine = self.db.engine
with engine.connect() as conn:
trans = conn.begin()
alembic_command.upgrade(self.alembic_config(), target)
trans.commit()
def test_migration(self):
self.setup_base_db()
# we have no alembic base revision
self.assertTrue(self.current_db_revision() is None)
self.run_migration()
# after the migration, the DB is stamped
db_revision = self.current_db_revision()
self.assertTrue(db_revision is not None)
# db revision matches latest alembic revision
alembic_head = self.alembic_script().get_current_head()
self.assertEqual(db_revision, alembic_head)
# compare the tables from a migrated database to those
# created fresh from the model definitions
migrated_tables = self.inspect_tables()
head_tables = self.head_tables
self.assertEqual(set(head_tables.keys()),
set(migrated_tables.keys()))
|
<commit_before><commit_msg>Add a test to execute all migrations.<commit_after>from alembic import command as alembic_command
from alembic.config import Config
from alembic.script import ScriptDirectory
from sqlalchemy import inspect
from sqlalchemy.schema import (
MetaData,
Table,
)
# make sure all models are imported
from ichnaea import models # NOQA
from ichnaea.content import models # NOQA
from ichnaea.tests.base import (
_make_db,
DBIsolation,
setup_package,
SQL_BASE_STRUCTURE,
TestCase,
)
class TestMigration(TestCase):
def setUp(self):
self.db = _make_db()
# capture state of fresh database
self.head_tables = self.inspect_tables()
DBIsolation.cleanup_tables(self.db.engine)
def tearDown(self):
self.db.engine.pool.dispose()
del self.db
# setup normal database schema again
setup_package(None)
def alembic_config(self):
alembic_cfg = Config()
alembic_cfg.set_section_option(
'alembic', 'script_location', 'alembic')
alembic_cfg.set_section_option(
'alembic', 'sqlalchemy.url', str(self.db.engine.url))
return alembic_cfg
def alembic_script(self):
return ScriptDirectory.from_config(self.alembic_config())
def current_db_revision(self):
with self.db.engine.connect() as conn:
result = conn.execute('select version_num from alembic_version')
alembic_rev = result.first()
if alembic_rev is None:
return None
return alembic_rev[0]
def inspect_tables(self):
metadata = MetaData()
inspector = inspect(self.db.engine)
tables = {}
for name in inspector.get_table_names():
tables[name] = Table(name, metadata)
return tables
def setup_base_db(self):
with open(SQL_BASE_STRUCTURE) as fd:
sql_text = fd.read()
with self.db.engine.connect() as conn:
conn.execute(sql_text)
def run_migration(self, target='head'):
engine = self.db.engine
with engine.connect() as conn:
trans = conn.begin()
alembic_command.upgrade(self.alembic_config(), target)
trans.commit()
def test_migration(self):
self.setup_base_db()
# we have no alembic base revision
self.assertTrue(self.current_db_revision() is None)
self.run_migration()
# after the migration, the DB is stamped
db_revision = self.current_db_revision()
self.assertTrue(db_revision is not None)
# db revision matches latest alembic revision
alembic_head = self.alembic_script().get_current_head()
self.assertEqual(db_revision, alembic_head)
# compare the tables from a migrated database to those
# created fresh from the model definitions
migrated_tables = self.inspect_tables()
head_tables = self.head_tables
self.assertEqual(set(head_tables.keys()),
set(migrated_tables.keys()))
|
|
5b2d5446b178cc49934192d42de73b29ed0707e1
|
del_no_available.py
|
del_no_available.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import os
import re
import shutil
import imagehash
from PIL import Image
TRAIN_DIR = 'flickr_logos_27_dataset'
DISTRACT_IMAGE_DIR = os.path.join(TRAIN_DIR,
'flickr_logos_27_dataset_distractor_images')
NO_AVAILABLE_IMG = 'no_available.jpg'
def hash_value(img_fn, htype):
img = Image.open(img_fn)
if htype == 'a':
hval = imagehash.average_hash(img)
elif htype == 'p':
hval = imagehash.phash(img)
elif htype == 'd':
hval = imagehash.dhash(img)
elif htype == 'w':
hval = imagehash.whash(img)
else:
hval = imagehash.average_hash(img)
return hval
def main():
imgs = [img for img in os.listdir(DISTRACT_IMAGE_DIR)
if re.match('^(\d+)_', img)]
src_hash = hash_value(
os.path.join(DISTRACT_IMAGE_DIR, NO_AVAILABLE_IMG), htype='p')
for img in imgs:
target_hash = hash_value(
os.path.join(DISTRACT_IMAGE_DIR, img), htype='p')
if src_hash == target_hash:
os.remove(os.path.join(DISTRACT_IMAGE_DIR, img))
print('Delete:', img)
if __name__ == '__main__':
main()
|
Add a new script to delete no available image file.
|
Add a new script to delete no available image file.
|
Python
|
mit
|
satojkovic/DeepLogo
|
Add a new script to delete no available image file.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import os
import re
import shutil
import imagehash
from PIL import Image
TRAIN_DIR = 'flickr_logos_27_dataset'
DISTRACT_IMAGE_DIR = os.path.join(TRAIN_DIR,
'flickr_logos_27_dataset_distractor_images')
NO_AVAILABLE_IMG = 'no_available.jpg'
def hash_value(img_fn, htype):
img = Image.open(img_fn)
if htype == 'a':
hval = imagehash.average_hash(img)
elif htype == 'p':
hval = imagehash.phash(img)
elif htype == 'd':
hval = imagehash.dhash(img)
elif htype == 'w':
hval = imagehash.whash(img)
else:
hval = imagehash.average_hash(img)
return hval
def main():
imgs = [img for img in os.listdir(DISTRACT_IMAGE_DIR)
if re.match('^(\d+)_', img)]
src_hash = hash_value(
os.path.join(DISTRACT_IMAGE_DIR, NO_AVAILABLE_IMG), htype='p')
for img in imgs:
target_hash = hash_value(
os.path.join(DISTRACT_IMAGE_DIR, img), htype='p')
if src_hash == target_hash:
os.remove(os.path.join(DISTRACT_IMAGE_DIR, img))
print('Delete:', img)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a new script to delete no available image file.<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import os
import re
import shutil
import imagehash
from PIL import Image
TRAIN_DIR = 'flickr_logos_27_dataset'
DISTRACT_IMAGE_DIR = os.path.join(TRAIN_DIR,
'flickr_logos_27_dataset_distractor_images')
NO_AVAILABLE_IMG = 'no_available.jpg'
def hash_value(img_fn, htype):
img = Image.open(img_fn)
if htype == 'a':
hval = imagehash.average_hash(img)
elif htype == 'p':
hval = imagehash.phash(img)
elif htype == 'd':
hval = imagehash.dhash(img)
elif htype == 'w':
hval = imagehash.whash(img)
else:
hval = imagehash.average_hash(img)
return hval
def main():
imgs = [img for img in os.listdir(DISTRACT_IMAGE_DIR)
if re.match('^(\d+)_', img)]
src_hash = hash_value(
os.path.join(DISTRACT_IMAGE_DIR, NO_AVAILABLE_IMG), htype='p')
for img in imgs:
target_hash = hash_value(
os.path.join(DISTRACT_IMAGE_DIR, img), htype='p')
if src_hash == target_hash:
os.remove(os.path.join(DISTRACT_IMAGE_DIR, img))
print('Delete:', img)
if __name__ == '__main__':
main()
|
Add a new script to delete no available image file.#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import os
import re
import shutil
import imagehash
from PIL import Image
TRAIN_DIR = 'flickr_logos_27_dataset'
DISTRACT_IMAGE_DIR = os.path.join(TRAIN_DIR,
'flickr_logos_27_dataset_distractor_images')
NO_AVAILABLE_IMG = 'no_available.jpg'
def hash_value(img_fn, htype):
img = Image.open(img_fn)
if htype == 'a':
hval = imagehash.average_hash(img)
elif htype == 'p':
hval = imagehash.phash(img)
elif htype == 'd':
hval = imagehash.dhash(img)
elif htype == 'w':
hval = imagehash.whash(img)
else:
hval = imagehash.average_hash(img)
return hval
def main():
imgs = [img for img in os.listdir(DISTRACT_IMAGE_DIR)
if re.match('^(\d+)_', img)]
src_hash = hash_value(
os.path.join(DISTRACT_IMAGE_DIR, NO_AVAILABLE_IMG), htype='p')
for img in imgs:
target_hash = hash_value(
os.path.join(DISTRACT_IMAGE_DIR, img), htype='p')
if src_hash == target_hash:
os.remove(os.path.join(DISTRACT_IMAGE_DIR, img))
print('Delete:', img)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a new script to delete no available image file.<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import os
import re
import shutil
import imagehash
from PIL import Image
TRAIN_DIR = 'flickr_logos_27_dataset'
DISTRACT_IMAGE_DIR = os.path.join(TRAIN_DIR,
'flickr_logos_27_dataset_distractor_images')
NO_AVAILABLE_IMG = 'no_available.jpg'
def hash_value(img_fn, htype):
img = Image.open(img_fn)
if htype == 'a':
hval = imagehash.average_hash(img)
elif htype == 'p':
hval = imagehash.phash(img)
elif htype == 'd':
hval = imagehash.dhash(img)
elif htype == 'w':
hval = imagehash.whash(img)
else:
hval = imagehash.average_hash(img)
return hval
def main():
imgs = [img for img in os.listdir(DISTRACT_IMAGE_DIR)
if re.match('^(\d+)_', img)]
src_hash = hash_value(
os.path.join(DISTRACT_IMAGE_DIR, NO_AVAILABLE_IMG), htype='p')
for img in imgs:
target_hash = hash_value(
os.path.join(DISTRACT_IMAGE_DIR, img), htype='p')
if src_hash == target_hash:
os.remove(os.path.join(DISTRACT_IMAGE_DIR, img))
print('Delete:', img)
if __name__ == '__main__':
main()
|
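The cleanup script above removes only exact perceptual-hash matches of the placeholder image. ImageHash objects also support subtraction, which yields the Hamming distance between two hashes, so a near-duplicate variant is a small change; the sketch below is an assumption about how that could look, and the threshold is not a tuned value.
# Treat images within a small Hamming distance of the placeholder as matches too.
HAMMING_THRESHOLD = 5  # arbitrary example value, not tuned

def is_no_available(src_hash, target_hash, threshold=HAMMING_THRESHOLD):
    # ImageHash subtraction returns the number of differing bits between hashes.
    return (src_hash - target_hash) <= threshold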
|
1e1d58c3a3d2134c98f42206b1dd5226d5e23c27
|
phasortoolbox/synchrophasor.py
|
phasortoolbox/synchrophasor.py
|
from collections import UserList
class Synchrophasor(UserList):
"""
time_tag is the time tag of the synchrophasor message
    arr_time is the unix time at which the last data frame for the synchrophasor was received
    perf_counter is used to check the performance
"""
def __init__(self, list_, time_tag, arr_time, perf_counter):
super(Synchrophasor, self).__init__(list_)
self.time = time_tag
self.arr_time = arr_time
self.perf_counter = perf_counter
|
Put Synchrophasor in a separate file
|
Put Synchrophasor in a separate file
|
Python
|
mit
|
sonusz/PhasorToolBox
|
Put Synchrophasor in a separate file
|
from collections import UserList
class Synchrophasor(UserList):
"""
time_tag is the time tag of the synchrophasor message
    arr_time is the unix time at which the last data frame for the synchrophasor was received
    perf_counter is used to check the performance
"""
def __init__(self, list_, time_tag, arr_time, perf_counter):
super(Synchrophasor, self).__init__(list_)
self.time = time_tag
self.arr_time = arr_time
self.perf_counter = perf_counter
|
<commit_before><commit_msg>Put Synchrophasor in a separate file<commit_after>
|
from collections import UserList
class Synchrophasor(UserList):
"""
time_tag is the time tag of the synchrophasor message
    arr_time is the unix time at which the last data frame for the synchrophasor was received
    perf_counter is used to check the performance
"""
def __init__(self, list_, time_tag, arr_time, perf_counter):
super(Synchrophasor, self).__init__(list_)
self.time = time_tag
self.arr_time = arr_time
self.perf_counter = perf_counter
|
Put Synchrophasor in a separate filefrom collections import UserList
class Synchrophasor(UserList):
"""
time_tag is the time tag of the synchrophasor message
    arr_time is the unix time at which the last data frame for the synchrophasor was received
    perf_counter is used to check the performance
"""
def __init__(self, list_, time_tag, arr_time, perf_counter):
super(Synchrophasor, self).__init__(list_)
self.time = time_tag
self.arr_time = arr_time
self.perf_counter = perf_counter
|
<commit_before><commit_msg>Put Synchrophasor in a separate file<commit_after>from collections import UserList
class Synchrophasor(UserList):
"""
time_tag is the time tag of the synchrophasor message
    arr_time is the unix time at which the last data frame for the synchrophasor was received
    perf_counter is used to check the performance
"""
def __init__(self, list_, time_tag, arr_time, perf_counter):
super(Synchrophasor, self).__init__(list_)
self.time = time_tag
self.arr_time = arr_time
self.perf_counter = perf_counter
|
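A minimal usage sketch, assuming the class above is importable: since it subclasses UserList, a Synchrophasor behaves like an ordinary list of frames while carrying the extra timing metadata. The frame placeholders below are assumptions.
import time

frames = ['pmu_1_data_frame', 'pmu_2_data_frame']  # placeholder frame objects
sync = Synchrophasor(frames, time_tag=1500000000.0,
                     arr_time=time.time(), perf_counter=time.perf_counter())
print(len(sync), sync[0])        # plain list behaviour
print(sync.time, sync.arr_time)  # attached timing metadata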
|
03f1f9558b717cd2d6b08db609eb8bd706ad641e
|
griddedspectra.py
|
griddedspectra.py
|
# -*- coding: utf-8 -*-
"""Class to create spectra spaced on a regular grid through the box"""
import numpy as np
import hdfsim
import spectra
class GriddedSpectra(spectra.Spectra):
"""Generate metal line spectra from simulation snapshot. Default parameters are BOSS DR9"""
def __init__(self,num, base, nspec=200, res = 90., cdir = None, savefile="gridded_spectra.hdf5", savedir=None):
#Load halos to push lines through them
f = hdfsim.get_file(num, base, 0)
self.box = f["Header"].attrs["BoxSize"]
f.close()
self.NumLos = nspec*nspec
#All through y axis
axis = np.ones(self.NumLos)
#Sightlines at random positions
#Re-seed for repeatability
np.random.seed(23)
cofm = self.get_cofm()
spectra.Spectra.__init__(self,num, base, cofm, axis, res, cdir, savefile=savefile,savedir=savedir,reload_file=True)
def get_cofm(self, num = None):
"""Find a bunch more sightlines: should be overriden by child classes"""
if num is None:
num = np.sqrt(self.NumLos)
cofm = np.empty([num*num, 3])
for nn in range(num):
for mm in range(num):
cofm[nn*num+mm] = self.box*np.array([nn, nn, mm])/(1.*num)
return cofm
|
Add script for generating spectra on a grid
|
Add script for generating spectra on a grid
|
Python
|
mit
|
sbird/fake_spectra,sbird/fake_spectra,sbird/fake_spectra
|
Add script for generating spectra on a grid
|
# -*- coding: utf-8 -*-
"""Class to create spectra spaced on a regular grid through the box"""
import numpy as np
import hdfsim
import spectra
class GriddedSpectra(spectra.Spectra):
"""Generate metal line spectra from simulation snapshot. Default parameters are BOSS DR9"""
def __init__(self,num, base, nspec=200, res = 90., cdir = None, savefile="gridded_spectra.hdf5", savedir=None):
#Load halos to push lines through them
f = hdfsim.get_file(num, base, 0)
self.box = f["Header"].attrs["BoxSize"]
f.close()
self.NumLos = nspec*nspec
#All through y axis
axis = np.ones(self.NumLos)
#Sightlines at random positions
#Re-seed for repeatability
np.random.seed(23)
cofm = self.get_cofm()
spectra.Spectra.__init__(self,num, base, cofm, axis, res, cdir, savefile=savefile,savedir=savedir,reload_file=True)
def get_cofm(self, num = None):
"""Find a bunch more sightlines: should be overriden by child classes"""
if num is None:
num = np.sqrt(self.NumLos)
cofm = np.empty([num*num, 3])
for nn in range(num):
for mm in range(num):
cofm[nn*num+mm] = self.box*np.array([nn, nn, mm])/(1.*num)
return cofm
|
<commit_before><commit_msg>Add script for generating spectra on a grid<commit_after>
|
# -*- coding: utf-8 -*-
"""Class to create spectra spaced on a regular grid through the box"""
import numpy as np
import hdfsim
import spectra
class GriddedSpectra(spectra.Spectra):
"""Generate metal line spectra from simulation snapshot. Default parameters are BOSS DR9"""
def __init__(self,num, base, nspec=200, res = 90., cdir = None, savefile="gridded_spectra.hdf5", savedir=None):
#Load halos to push lines through them
f = hdfsim.get_file(num, base, 0)
self.box = f["Header"].attrs["BoxSize"]
f.close()
self.NumLos = nspec*nspec
#All through y axis
axis = np.ones(self.NumLos)
#Sightlines at random positions
#Re-seed for repeatability
np.random.seed(23)
cofm = self.get_cofm()
spectra.Spectra.__init__(self,num, base, cofm, axis, res, cdir, savefile=savefile,savedir=savedir,reload_file=True)
def get_cofm(self, num = None):
"""Find a bunch more sightlines: should be overriden by child classes"""
if num is None:
num = np.sqrt(self.NumLos)
cofm = np.empty([num*num, 3])
for nn in range(num):
for mm in range(num):
cofm[nn*num+mm] = self.box*np.array([nn, nn, mm])/(1.*num)
return cofm
|
Add script for generating spectra on a grid# -*- coding: utf-8 -*-
"""Class to create spectra spaced on a regular grid through the box"""
import numpy as np
import hdfsim
import spectra
class GriddedSpectra(spectra.Spectra):
"""Generate metal line spectra from simulation snapshot. Default parameters are BOSS DR9"""
def __init__(self,num, base, nspec=200, res = 90., cdir = None, savefile="gridded_spectra.hdf5", savedir=None):
#Load halos to push lines through them
f = hdfsim.get_file(num, base, 0)
self.box = f["Header"].attrs["BoxSize"]
f.close()
self.NumLos = nspec*nspec
#All through y axis
axis = np.ones(self.NumLos)
#Sightlines at random positions
#Re-seed for repeatability
np.random.seed(23)
cofm = self.get_cofm()
spectra.Spectra.__init__(self,num, base, cofm, axis, res, cdir, savefile=savefile,savedir=savedir,reload_file=True)
def get_cofm(self, num = None):
"""Find a bunch more sightlines: should be overriden by child classes"""
if num is None:
num = np.sqrt(self.NumLos)
cofm = np.empty([num*num, 3])
for nn in range(num):
for mm in range(num):
cofm[nn*num+mm] = self.box*np.array([nn, nn, mm])/(1.*num)
return cofm
|
<commit_before><commit_msg>Add script for generating spectra on a grid<commit_after># -*- coding: utf-8 -*-
"""Class to create spectra spaced on a regular grid through the box"""
import numpy as np
import hdfsim
import spectra
class GriddedSpectra(spectra.Spectra):
"""Generate metal line spectra from simulation snapshot. Default parameters are BOSS DR9"""
def __init__(self,num, base, nspec=200, res = 90., cdir = None, savefile="gridded_spectra.hdf5", savedir=None):
#Load halos to push lines through them
f = hdfsim.get_file(num, base, 0)
self.box = f["Header"].attrs["BoxSize"]
f.close()
self.NumLos = nspec*nspec
#All through y axis
axis = np.ones(self.NumLos)
#Sightlines at random positions
#Re-seed for repeatability
np.random.seed(23)
cofm = self.get_cofm()
spectra.Spectra.__init__(self,num, base, cofm, axis, res, cdir, savefile=savefile,savedir=savedir,reload_file=True)
def get_cofm(self, num = None):
"""Find a bunch more sightlines: should be overriden by child classes"""
if num is None:
num = np.sqrt(self.NumLos)
cofm = np.empty([num*num, 3])
for nn in range(num):
for mm in range(num):
cofm[nn*num+mm] = self.box*np.array([nn, nn, mm])/(1.*num)
return cofm
|
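A small worked example of the grid that get_cofm lays out, reproduced stand-alone (independent of the simulation snapshot): for num=2 and a unit box the loop places sightlines at x=y in {0, 0.5} and z in {0, 0.5}.
import numpy as np

num, box = 2, 1.0
cofm = np.empty([num * num, 3])
for nn in range(num):
    for mm in range(num):
        cofm[nn * num + mm] = box * np.array([nn, nn, mm]) / (1. * num)
print(cofm)
# [[0.  0.  0. ]
#  [0.  0.  0.5]
#  [0.5 0.5 0. ]
#  [0.5 0.5 0.5]]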
|
4071277c8fe920e19293e46ac1fa937008418616
|
kqml/cl_json.py
|
kqml/cl_json.py
|
from .kqml_string import KQMLString
from .kqml_list import KQMLList
from .kqml_token import KQMLToken
from .kqml_exceptions import KQMLException
def parse_json(json_obj):
if isinstance(json_obj, list):
ret = KQMLList()
for elem in json_obj:
ret.append(parse_json(elem))
elif isinstance(json_obj, dict):
ret = KQMLList()
for key, val in json_obj.items():
ret.set(key, parse_json(val))
elif isinstance(json_obj, str):
ret = KQMLString(json_obj)
elif isinstance(json_obj, bool):
if json_obj:
ret = KQMLToken('T')
else:
ret = KQMLToken('NIL')
elif isinstance(json_obj, int) or isinstance(json_obj, float):
ret = KQMLString(str(json_obj))
elif json_obj is None:
return KQMLToken('NIL')
else:
raise KQMLException("Unexpected value %s of type %s."
% (json_obj, type(json_obj)))
return ret
|
Write function to parse cl-json.
|
Write function to parse cl-json.
|
Python
|
bsd-2-clause
|
bgyori/pykqml
|
Write function to parse cl-json.
|
from .kqml_string import KQMLString
from .kqml_list import KQMLList
from .kqml_token import KQMLToken
from .kqml_exceptions import KQMLException
def parse_json(json_obj):
if isinstance(json_obj, list):
ret = KQMLList()
for elem in json_obj:
ret.append(parse_json(elem))
elif isinstance(json_obj, dict):
ret = KQMLList()
for key, val in json_obj.items():
ret.set(key, parse_json(val))
elif isinstance(json_obj, str):
ret = KQMLString(json_obj)
elif isinstance(json_obj, bool):
if json_obj:
ret = KQMLToken('T')
else:
ret = KQMLToken('NIL')
elif isinstance(json_obj, int) or isinstance(json_obj, float):
ret = KQMLString(str(json_obj))
elif json_obj is None:
return KQMLToken('NIL')
else:
raise KQMLException("Unexpected value %s of type %s."
% (json_obj, type(json_obj)))
return ret
|
<commit_before><commit_msg>Write function to parse cl-json.<commit_after>
|
from .kqml_string import KQMLString
from .kqml_list import KQMLList
from .kqml_token import KQMLToken
from .kqml_exceptions import KQMLException
def parse_json(json_obj):
if isinstance(json_obj, list):
ret = KQMLList()
for elem in json_obj:
ret.append(parse_json(elem))
elif isinstance(json_obj, dict):
ret = KQMLList()
for key, val in json_obj.items():
ret.set(key, parse_json(val))
elif isinstance(json_obj, str):
ret = KQMLString(json_obj)
elif isinstance(json_obj, bool):
if json_obj:
ret = KQMLToken('T')
else:
ret = KQMLToken('NIL')
elif isinstance(json_obj, int) or isinstance(json_obj, float):
ret = KQMLString(str(json_obj))
elif json_obj is None:
return KQMLToken('NIL')
else:
raise KQMLException("Unexpected value %s of type %s."
% (json_obj, type(json_obj)))
return ret
|
Write function to parse cl-json.from .kqml_string import KQMLString
from .kqml_list import KQMLList
from .kqml_token import KQMLToken
from .kqml_exceptions import KQMLException
def parse_json(json_obj):
if isinstance(json_obj, list):
ret = KQMLList()
for elem in json_obj:
ret.append(parse_json(elem))
elif isinstance(json_obj, dict):
ret = KQMLList()
for key, val in json_obj.items():
ret.set(key, parse_json(val))
elif isinstance(json_obj, str):
ret = KQMLString(json_obj)
elif isinstance(json_obj, bool):
if json_obj:
ret = KQMLToken('T')
else:
ret = KQMLToken('NIL')
elif isinstance(json_obj, int) or isinstance(json_obj, float):
ret = KQMLString(str(json_obj))
elif json_obj is None:
return KQMLToken('NIL')
else:
raise KQMLException("Unexpected value %s of type %s."
% (json_obj, type(json_obj)))
return ret
|
<commit_before><commit_msg>Write function to parse cl-json.<commit_after>from .kqml_string import KQMLString
from .kqml_list import KQMLList
from .kqml_token import KQMLToken
from .kqml_exceptions import KQMLException
def parse_json(json_obj):
if isinstance(json_obj, list):
ret = KQMLList()
for elem in json_obj:
ret.append(parse_json(elem))
elif isinstance(json_obj, dict):
ret = KQMLList()
for key, val in json_obj.items():
ret.set(key, parse_json(val))
elif isinstance(json_obj, str):
ret = KQMLString(json_obj)
elif isinstance(json_obj, bool):
if json_obj:
ret = KQMLToken('T')
else:
ret = KQMLToken('NIL')
elif isinstance(json_obj, int) or isinstance(json_obj, float):
ret = KQMLString(str(json_obj))
elif json_obj is None:
return KQMLToken('NIL')
else:
raise KQMLException("Unexpected value %s of type %s."
% (json_obj, type(json_obj)))
return ret
|
|
05695f5b5f6ebac318b9ff3290d2be9834125307
|
test/unittests/util/test_platform.py
|
test/unittests/util/test_platform.py
|
from unittest import TestCase, mock
from mycroft.util import get_arch
class TestPlatform(TestCase):
@mock.patch('os.uname')
def test_get_arch(self, mock_uname):
mock_uname.return_value = ('Linux', 'Woodstock', '4.15.0-39-generic',
'Awesome test system Mark 7', 'x86_64')
self.assertEqual(get_arch(), 'x86_64')
|
Add simple test for get_platform
|
Add simple test for get_platform
|
Python
|
apache-2.0
|
MycroftAI/mycroft-core,forslund/mycroft-core,MycroftAI/mycroft-core,forslund/mycroft-core
|
Add simple test for get_platform
|
from unittest import TestCase, mock
from mycroft.util import get_arch
class TestPlatform(TestCase):
@mock.patch('os.uname')
def test_get_arch(self, mock_uname):
mock_uname.return_value = ('Linux', 'Woodstock', '4.15.0-39-generic',
'Awesome test system Mark 7', 'x86_64')
self.assertEqual(get_arch(), 'x86_64')
|
<commit_before><commit_msg>Add simple test for get_platform<commit_after>
|
from unittest import TestCase, mock
from mycroft.util import get_arch
class TestPlatform(TestCase):
@mock.patch('os.uname')
def test_get_arch(self, mock_uname):
mock_uname.return_value = ('Linux', 'Woodstock', '4.15.0-39-generic',
'Awesome test system Mark 7', 'x86_64')
self.assertEqual(get_arch(), 'x86_64')
|
Add simple test for get_platformfrom unittest import TestCase, mock
from mycroft.util import get_arch
class TestPlatform(TestCase):
@mock.patch('os.uname')
def test_get_arch(self, mock_uname):
mock_uname.return_value = ('Linux', 'Woodstock', '4.15.0-39-generic',
'Awesome test system Mark 7', 'x86_64')
self.assertEqual(get_arch(), 'x86_64')
|
<commit_before><commit_msg>Add simple test for get_platform<commit_after>from unittest import TestCase, mock
from mycroft.util import get_arch
class TestPlatform(TestCase):
@mock.patch('os.uname')
def test_get_arch(self, mock_uname):
mock_uname.return_value = ('Linux', 'Woodstock', '4.15.0-39-generic',
'Awesome test system Mark 7', 'x86_64')
self.assertEqual(get_arch(), 'x86_64')
|
|
7e9bd459b13efa191fb2293e675166e21de4ec28
|
setup/create_player_seasons.py
|
setup/create_player_seasons.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import concurrent.futures
from db.common import session_scope
from db.player import Player
from utils.player_data_retriever import PlayerDataRetriever
def create_player_seasons(simulation=False):
data_retriever = PlayerDataRetriever()
with session_scope() as session:
players = session.query(Player).all()[:25]
with concurrent.futures.ThreadPoolExecutor(max_workers=8) as threads:
future_tasks = {
threads.submit(
data_retriever.retrieve_player_seasons,
player.player_id, simulation
): player for player in players
}
for future in concurrent.futures.as_completed(future_tasks):
try:
plr_seasons = future.result()
print(len(plr_seasons))
except Exception as e:
print("Concurrent task generated an exception: %s" % e)
|
Add utility script for mass season data retrieval
|
Add utility script for mass season data retrieval
|
Python
|
mit
|
leaffan/pynhldb
|
Add utility script for mass season data retrieval
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import concurrent.futures
from db.common import session_scope
from db.player import Player
from utils.player_data_retriever import PlayerDataRetriever
def create_player_seasons(simulation=False):
data_retriever = PlayerDataRetriever()
with session_scope() as session:
players = session.query(Player).all()[:25]
with concurrent.futures.ThreadPoolExecutor(max_workers=8) as threads:
future_tasks = {
threads.submit(
data_retriever.retrieve_player_seasons,
player.player_id, simulation
): player for player in players
}
for future in concurrent.futures.as_completed(future_tasks):
try:
plr_seasons = future.result()
print(len(plr_seasons))
except Exception as e:
print("Concurrent task generated an exception: %s" % e)
|
<commit_before><commit_msg>Add utility script for mass season data retrieval<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import concurrent.futures
from db.common import session_scope
from db.player import Player
from utils.player_data_retriever import PlayerDataRetriever
def create_player_seasons(simulation=False):
data_retriever = PlayerDataRetriever()
with session_scope() as session:
players = session.query(Player).all()[:25]
with concurrent.futures.ThreadPoolExecutor(max_workers=8) as threads:
future_tasks = {
threads.submit(
data_retriever.retrieve_player_seasons,
player.player_id, simulation
): player for player in players
}
for future in concurrent.futures.as_completed(future_tasks):
try:
plr_seasons = future.result()
print(len(plr_seasons))
except Exception as e:
print("Concurrent task generated an exception: %s" % e)
|
Add utility script for mass season data retrieval#!/usr/bin/env python
# -*- coding: utf-8 -*-
import concurrent.futures
from db.common import session_scope
from db.player import Player
from utils.player_data_retriever import PlayerDataRetriever
def create_player_seasons(simulation=False):
data_retriever = PlayerDataRetriever()
with session_scope() as session:
players = session.query(Player).all()[:25]
with concurrent.futures.ThreadPoolExecutor(max_workers=8) as threads:
future_tasks = {
threads.submit(
data_retriever.retrieve_player_seasons,
player.player_id, simulation
): player for player in players
}
for future in concurrent.futures.as_completed(future_tasks):
try:
plr_seasons = future.result()
print(len(plr_seasons))
except Exception as e:
print("Concurrent task generated an exception: %s" % e)
|
<commit_before><commit_msg>Add utility script for mass season data retrieval<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import concurrent.futures
from db.common import session_scope
from db.player import Player
from utils.player_data_retriever import PlayerDataRetriever
def create_player_seasons(simulation=False):
data_retriever = PlayerDataRetriever()
with session_scope() as session:
players = session.query(Player).all()[:25]
with concurrent.futures.ThreadPoolExecutor(max_workers=8) as threads:
future_tasks = {
threads.submit(
data_retriever.retrieve_player_seasons,
player.player_id, simulation
): player for player in players
}
for future in concurrent.futures.as_completed(future_tasks):
try:
plr_seasons = future.result()
print(len(plr_seasons))
except Exception as e:
print("Concurrent task generated an exception: %s" % e)
|
|
540a13e49d5f08fc030c273c4b24b62feab117b4
|
crawler/management/commands/classify.py
|
crawler/management/commands/classify.py
|
from django.core.management.base import BaseCommand
from crawler.models import App
from crawler.tasks import AppClassifier
class Command(BaseCommand):
help = 'Process similar csv'
def handle(self, *args, **options):
classifier = AppClassifier()
apps = App.objects.all()[:100]
classifier.create_utility_matrix(apps)
|
Create a Custom Command to use the classifier
|
Create a Custom Command to use the classifier
|
Python
|
apache-2.0
|
bkosawa/admin-recommendation
|
Create a Custom Command to use the classifier
|
from django.core.management.base import BaseCommand
from crawler.models import App
from crawler.tasks import AppClassifier
class Command(BaseCommand):
help = 'Process similar csv'
def handle(self, *args, **options):
classifier = AppClassifier()
apps = App.objects.all()[:100]
classifier.create_utility_matrix(apps)
|
<commit_before><commit_msg>Create a Custom Command to use the classifier<commit_after>
|
from django.core.management.base import BaseCommand
from crawler.models import App
from crawler.tasks import AppClassifier
class Command(BaseCommand):
help = 'Process similar csv'
def handle(self, *args, **options):
classifier = AppClassifier()
apps = App.objects.all()[:100]
classifier.create_utility_matrix(apps)
|
Create a Custom Command to use the classifierfrom django.core.management.base import BaseCommand
from crawler.models import App
from crawler.tasks import AppClassifier
class Command(BaseCommand):
help = 'Process similar csv'
def handle(self, *args, **options):
classifier = AppClassifier()
apps = App.objects.all()[:100]
classifier.create_utility_matrix(apps)
|
<commit_before><commit_msg>Create a Custom Command to use the classifier<commit_after>from django.core.management.base import BaseCommand
from crawler.models import App
from crawler.tasks import AppClassifier
class Command(BaseCommand):
help = 'Process similar csv'
def handle(self, *args, **options):
classifier = AppClassifier()
apps = App.objects.all()[:100]
classifier.create_utility_matrix(apps)
|