Dataset schema (column name, type, and observed string-length range or number of distinct classes):

| column | type | lengths / classes |
|---|---|---|
| commit | string | lengths 40 to 40 |
| old_file | string | lengths 4 to 118 |
| new_file | string | lengths 4 to 118 |
| old_contents | string | lengths 0 to 2.94k |
| new_contents | string | lengths 1 to 4.43k |
| subject | string | lengths 15 to 444 |
| message | string | lengths 16 to 3.45k |
| lang | string | 1 class |
| license | string | 13 classes |
| repos | string | lengths 5 to 43.2k |
| prompt | string | lengths 17 to 4.58k |
| response | string | lengths 1 to 4.43k |
| prompt_tagged | string | lengths 58 to 4.62k |
| response_tagged | string | lengths 1 to 4.43k |
| text | string | lengths 132 to 7.29k |
| text_tagged | string | lengths 173 to 7.33k |
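A minimal sketch of loading and inspecting a dataset with the schema above via the Hugging Face `datasets` library; the dataset identifier and split name below are hypothetical placeholders, not taken from this page.

```python
# Sketch only: "org/commit-dataset" and the "train" split are placeholders
# for a dataset exposing the columns listed in the schema above.
from datasets import load_dataset

ds = load_dataset("org/commit-dataset", split="train")
print(ds.column_names)            # ['commit', 'old_file', 'new_file', ...]

row = ds[0]
print(row["commit"], row["new_file"])
print(row["subject"])
print(row["new_contents"][:200])  # first 200 characters of the new file contents
```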
3e51c57a8611a8ebfb4f2eb045510c50587bd781
|
api/radar_api/tests/test_users.py
|
api/radar_api/tests/test_users.py
|
import json
from radar_api.tests.fixtures import get_user
def test_serialization(app):
admin = get_user('admin')
client = app.test_client()
client.login(admin)
response = client.get('/users')
assert response.status_code == 200
data = json.loads(response.data)
for user in data['data']:
assert 'username' in user
assert 'password_hash' not in user
assert 'reset_password_token' not in user
|
Test password tokens not in response
|
Test password tokens not in response
|
Python
|
agpl-3.0
|
renalreg/radar,renalreg/radar,renalreg/radar,renalreg/radar
|
|
dfe3f7fd7775ce13a670e1d27beddba5c1254a4a
|
hyper/http20/hpack_structures.py
|
hyper/http20/hpack_structures.py
|
# -*- coding: utf-8 -*-
"""
hyper/http20/hpack_structures
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Contains data structures used in hyper's HPACK implementation.
"""
class Reference(object):
"""
The reference object is essentially an object that 'points to' another
object, not unlike a pointer in C or similar languages. This object is
distinct from the normal Python name because we can tell the difference
between a reference and the 'actual' object.
It behaves in the following ways:
- Two references to the same object evaluate equal.
- Two references to different objects evaluate not equal, even if those
objects themselves evaluate equal.
- Two references to the same object hash to the same value.
- Two references to different objects hash to different values.
The reference is distinct from and does not use weak references. A
reference may never point at an object that has been garbage collected.
This means that, to ensure objects get GC'd, any reference to them must
also go out of scope.
This object is _conceptually_ immutable, but the implementation doesn't
attempt to enforce that to avoid the overhead. Be warned that changing
the object being referenced after creation could lead to all sorts of weird
nonsense.
:param obj: The object being referenced.
"""
def __init__(self, obj):
self.obj = obj
def __eq__(self, other):
return (isinstance(other, Reference) and self.obj is other.obj)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return id(self.obj)
|
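The docstring above spells out how `Reference` equality and hashing behave; a minimal usage sketch (not part of the commit) assuming the `Reference` class defined above is in scope:

```python
# Sketch assuming the Reference class shown above is in scope.
a = ["x"]
b = ["x"]                        # equal to `a`, but a distinct object

r1, r2, r3 = Reference(a), Reference(a), Reference(b)

assert r1 == r2                  # same referenced object -> equal
assert hash(r1) == hash(r2)      # ...and same hash (the id of the object)
assert r1 != r3                  # equal objects, different identity -> not equal
assert len({r1, r2, r3}) == 2    # a set keeps one entry per referenced object
```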
Define the HPACK reference structure.
|
Define the HPACK reference structure.
|
Python
|
mit
|
irvind/hyper,fredthomsen/hyper,lawnmowerlatte/hyper,masaori335/hyper,fredthomsen/hyper,masaori335/hyper,plucury/hyper,Lukasa/hyper,irvind/hyper,jdecuyper/hyper,Lukasa/hyper,jdecuyper/hyper,lawnmowerlatte/hyper,plucury/hyper
|
|
78aaccb71fc64e52497abf0d0c768f3767a3d932
|
fellowms/migrations/0020_auto_20160602_1607.py
|
fellowms/migrations/0020_auto_20160602_1607.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-06-02 16:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('fellowms', '0019_auto_20160601_1512'),
]
operations = [
migrations.AlterField(
model_name='expense',
name='status',
field=models.CharField(choices=[('W', 'Not submitted yet'), ('S', 'Submitted (but not processed yet)'), ('P', 'Processing'), ('A', 'Approved (waiting reply from finances)'), ('F', 'Finished')], default='P', max_length=1),
),
]
|
Update expenses status on database
|
Update expenses status on database
|
Python
|
bsd-3-clause
|
softwaresaved/fat,softwaresaved/fat,softwaresaved/fat,softwaresaved/fat
|
|
9c70a5d65b1c06f62751dfb4fcdd4d6a60a5eb71
|
kivy/tests/test_widget_walk.py
|
kivy/tests/test_widget_walk.py
|
import unittest
class FileWidgetWalk(unittest.TestCase):
def test_walk_large_tree(self):
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.label import Label
from kivy.uix.widget import walk, walk_reverse
''' the tree
BoxLayout
BoxLayout
Label
10 labels
BoxLayout
10 labels
BoxLayout
Label
Label
'''
root = BoxLayout()
tree = [root]
box = BoxLayout()
tree.append(box)
root.add_widget(box)
label = Label()
tree.append(label)
root.add_widget(label)
for i in range(10):
tree.append(Label())
label.add_widget(tree[-1])
box = BoxLayout()
tree.append(box)
root.add_widget(box)
for i in range(10):
tree.append(Label())
box.add_widget(tree[-1])
box = BoxLayout()
tree.append(box)
root.add_widget(box)
tree.append(Label())
box.add_widget(tree[-1])
label = Label()
tree.append(label)
root.add_widget(label)
def rotate(l, n):
return l[n:] + l[:n]
for i in range(len(tree)):
rotated = rotate(tree, i) # shift list to start at i
walked = [n for n in walk(tree[i])] # walk starting with i
walked_reversed = [n for n in walk_reverse(tree[i])]
self.assertListEqual(rotated, walked)
self.assertListEqual(walked, list(reversed(walked_reversed)))
def test_walk_single(self):
from kivy.uix.label import Label
from kivy.uix.widget import walk, walk_reverse
label = Label()
self.assertListEqual([n for n in walk(label)], [label])
self.assertListEqual([n for n in walk_reverse(label)], [label])
|
Add unit tests for testing the widget tree iterators.
|
Add unit tests for testing the widget tree iterators.
|
Python
|
mit
|
eHealthAfrica/kivy,xpndlabs/kivy,vipulroxx/kivy,rafalo1333/kivy,CuriousLearner/kivy,Farkal/kivy,rafalo1333/kivy,Cheaterman/kivy,bionoid/kivy,bliz937/kivy,vipulroxx/kivy,habibmasuro/kivy,xiaoyanit/kivy,dirkjot/kivy,Farkal/kivy,rafalo1333/kivy,manashmndl/kivy,KeyWeeUsr/kivy,darkopevec/kivy,viralpandey/kivy,matham/kivy,cbenhagen/kivy,jegger/kivy,matham/kivy,JohnHowland/kivy,jehutting/kivy,edubrunaldi/kivy,aron-bordin/kivy,LogicalDash/kivy,matham/kivy,LogicalDash/kivy,andnovar/kivy,Farkal/kivy,kivy/kivy,iamutkarshtiwari/kivy,manashmndl/kivy,akshayaurora/kivy,xiaoyanit/kivy,arcticshores/kivy,viralpandey/kivy,autosportlabs/kivy,bhargav2408/kivy,jegger/kivy,bionoid/kivy,adamkh/kivy,adamkh/kivy,Shyam10/kivy,viralpandey/kivy,inclement/kivy,darkopevec/kivy,KeyWeeUsr/kivy,gonzafirewall/kivy,denys-duchier/kivy,rnixx/kivy,aron-bordin/kivy,Ramalus/kivy,bob-the-hamster/kivy,yoelk/kivy,edubrunaldi/kivy,thezawad/kivy,akshayaurora/kivy,vipulroxx/kivy,gonzafirewall/kivy,JohnHowland/kivy,ehealthafrica-ci/kivy,LogicalDash/kivy,bob-the-hamster/kivy,iamutkarshtiwari/kivy,manthansharma/kivy,vitorio/kivy,xiaoyanit/kivy,Cheaterman/kivy,ehealthafrica-ci/kivy,ehealthafrica-ci/kivy,ehealthafrica-ci/kivy,jffernandez/kivy,thezawad/kivy,Farkal/kivy,VinGarcia/kivy,youprofit/kivy,xpndlabs/kivy,matham/kivy,jkankiewicz/kivy,niavlys/kivy,Cheaterman/kivy,bob-the-hamster/kivy,vitorio/kivy,ernstp/kivy,niavlys/kivy,Ramalus/kivy,cbenhagen/kivy,kived/kivy,bhargav2408/kivy,ernstp/kivy,Shyam10/kivy,JohnHowland/kivy,autosportlabs/kivy,habibmasuro/kivy,rnixx/kivy,MiyamotoAkira/kivy,bionoid/kivy,ernstp/kivy,bliz937/kivy,janssen/kivy,Shyam10/kivy,yoelk/kivy,adamkh/kivy,inclement/kivy,bhargav2408/kivy,mSenyor/kivy,Ramalus/kivy,darkopevec/kivy,gonzafirewall/kivy,jffernandez/kivy,dirkjot/kivy,CuriousLearner/kivy,jegger/kivy,MiyamotoAkira/kivy,yoelk/kivy,Shyam10/kivy,autosportlabs/kivy,kived/kivy,jkankiewicz/kivy,arcticshores/kivy,tony/kivy,VinGarcia/kivy,denys-duchier/kivy,inclement/kivy,angryrancor/kivy,angryrancor/kivy,el-ethan/kivy,bionoid/kivy,CuriousLearner/kivy,youprofit/kivy,youprofit/kivy,janssen/kivy,angryrancor/kivy,ernstp/kivy,el-ethan/kivy,mSenyor/kivy,kived/kivy,vipulroxx/kivy,eHealthAfrica/kivy,JohnHowland/kivy,bob-the-hamster/kivy,dirkjot/kivy,eHealthAfrica/kivy,yoelk/kivy,tony/kivy,arlowhite/kivy,denys-duchier/kivy,KeyWeeUsr/kivy,arlowhite/kivy,niavlys/kivy,VinGarcia/kivy,KeyWeeUsr/kivy,el-ethan/kivy,andnovar/kivy,iamutkarshtiwari/kivy,jegger/kivy,vitorio/kivy,akshayaurora/kivy,jehutting/kivy,MiyamotoAkira/kivy,manthansharma/kivy,rnixx/kivy,andnovar/kivy,cbenhagen/kivy,xpndlabs/kivy,mSenyor/kivy,darkopevec/kivy,manthansharma/kivy,eHealthAfrica/kivy,arcticshores/kivy,tony/kivy,jehutting/kivy,aron-bordin/kivy,LogicalDash/kivy,edubrunaldi/kivy,janssen/kivy,manashmndl/kivy,habibmasuro/kivy,MiyamotoAkira/kivy,janssen/kivy,bliz937/kivy,thezawad/kivy,niavlys/kivy,Cheaterman/kivy,kivy/kivy,jkankiewicz/kivy,angryrancor/kivy,arcticshores/kivy,jffernandez/kivy,denys-duchier/kivy,gonzafirewall/kivy,kivy/kivy,aron-bordin/kivy,jffernandez/kivy,arlowhite/kivy,manthansharma/kivy,dirkjot/kivy,adamkh/kivy,jkankiewicz/kivy
|
|
3a4de870ebefd0e3e32b8c1b9facee6c98ce8b7f
|
ltk2to3.py
|
ltk2to3.py
|
import os
import shutil
import fnmatch
def get_files(patterns):
""" gets all files matching pattern from root
pattern supports any unix shell-style wildcards (not same as RE) """
cwd = os.getcwd()
if isinstance(patterns,str):
patterns = [patterns]
matched_files = []
for pattern in patterns:
path = os.path.abspath(pattern)
# print("looking at path "+str(path))
# check if pattern contains subdirectory
if os.path.exists(path):
if os.path.isdir(path):
for root, subdirs, files in os.walk(path):
split_path = root.split('/')
for file in files:
# print(os.path.join(root, file))
if fnmatch.fnmatch(file, '*.py'):
matched_files.append(os.path.join(root, file))
else:
matched_files.append(path)
else:
logger.info("File not found: "+pattern)
if len(matched_files) == 0:
return None
return matched_files
dir2 = 'python2/ltk'
files2 = get_files(dir2)
# Copy files from 2 to 3
for fpath2 in files2:
fpath3 = fpath2.replace('python2','python3')
shutil.copyfile(fpath2, fpath3)
# Comment and uncomment specified lines in Python 3 version
for fpath in files2:
fpath = fpath.replace('python2','python3')
with open(fpath, 'r+') as f:
lines = f.readlines()
f.seek(0)
f.truncate()
is_python3 = False
is_python2 = False
for line in lines:
if '# Python 3' in line:
is_python3 = True
elif is_python3:
if '# End Python 3' in line:
is_python3 = False
continue
line = line.replace('# ','')
elif '# Python 2' in line:
is_python2 = True
elif is_python2:
if '# End Python 2' in line:
is_python2 = False
continue
line = '# '+str(line)
f.write(line)
|
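The second loop in the script above relies on paired marker comments: lines inside a `# Python 3` / `# End Python 3` region have their leading `# ` stripped in the copy, while lines inside a `# Python 2` / `# End Python 2` region are commented out. A small illustrative sketch of such a region (hypothetical file content, not from the source):

```python
# Hypothetical excerpt from a file under python2/ltk/ showing the marker
# convention: in the python3/ copy produced by the script, the Python 3
# line below gets uncommented and the Python 2 line gets commented out.

# Python 3
# from io import StringIO
# End Python 3
# Python 2
from StringIO import StringIO
# End Python 2
```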
Convert python 2 version to python 3
|
Convert python 2 version to python 3
|
Python
|
mit
|
Lingotek/filesystem-connector,Lingotek/translation-utility,Lingotek/client,Lingotek/client,Lingotek/translation-utility,Lingotek/filesystem-connector
|
|
fa1223c661d60033b7d7aba2a27151d6ee18a299
|
tests/ci_checks/test_circle.py
|
tests/ci_checks/test_circle.py
|
import pytest
from semantic_release import ci_checks
from semantic_release.errors import CiVerificationError
def test_circle_should_pass_if_branch_is_master_and_no_pr(monkeypatch):
monkeypatch.setenv('CIRCLE_BRANCH', 'master')
monkeypatch.setenv('CI_PULL_REQUEST', '')
assert ci_checks.circle('master')
def test_circle_should_pass_if_branch_is_correct_and_no_pr(monkeypatch):
monkeypatch.setenv('CIRCLE_BRANCH', 'other-branch')
monkeypatch.setenv('CI_PULL_REQUEST', '')
assert ci_checks.circle('other-branch')
def test_circle_should_raise_ci_verification_error_for_wrong_branch(monkeypatch):
monkeypatch.setenv('CIRCLE_BRANCH', 'other-branch')
monkeypatch.setenv('CI_PULL_REQUEST', '')
with pytest.raises(CiVerificationError):
ci_checks.circle('master')
def test_circle_should_raise_ci_verification_error_for_pr(monkeypatch):
monkeypatch.setenv('CIRCLE_BRANCH', 'other-branch')
monkeypatch.setenv('CI_PULL_REQUEST', 'http://the-url-of-the-pr')
with pytest.raises(CiVerificationError):
ci_checks.circle('master')
|
Add tests for circle ci checks
|
test: Add tests for circle ci checks
|
Python
|
mit
|
relekang/python-semantic-release,wlonk/python-semantic-release,relekang/python-semantic-release,riddlesio/python-semantic-release
|
|
f29dab9a82b44fac483d71c432a40a0bb2ca51b1
|
examples/dbus_client.py
|
examples/dbus_client.py
|
import dbus
bus = dbus.SystemBus()
# This adds a signal match so that the client gets signals sent by Blivet1's
# ObjectManager. These signals are used to notify clients of changes to the
# managed objects (for blivet, this will be devices, formats, and actions).
bus.add_match_string("type='signal',sender='com.redhat.Blivet1',path_namespace='/com/redhat/Blivet1'")
blivet = bus.get_object('com.redhat.Blivet1', '/com/redhat/Blivet1/Blivet')
blivet.Reset()
object_manager = bus.get_object('com.redhat.Blivet1', '/com/redhat/Blivet1')
objects = object_manager.GetManagedObjects()
for object_path in blivet.ListDevices():
device = objects[object_path]['com.redhat.Blivet1.Device']
print(device['Name'], device['Type'], device['Size'], device['FormatType'])
|
Add the beginnings of an example client.
|
Add the beginnings of an example client.
|
Python
|
lgpl-2.1
|
rvykydal/blivet,AdamWill/blivet,jkonecny12/blivet,vpodzime/blivet,vojtechtrefny/blivet,vpodzime/blivet,vojtechtrefny/blivet,jkonecny12/blivet,AdamWill/blivet,rvykydal/blivet
|
|
ebf4d87390307dcf735c53f18a18f3466a4ee5e4
|
tools/standalonewavetrigger.py
|
tools/standalonewavetrigger.py
|
#!/usr/bin/env python
# Standard library imports
import argparse
import collections
import logging
import os
import time
# Additional library imports
import requests
# Named logger for this module
_logger = logging.getLogger(__name__)
# Parse the command line arguments
_parser = argparse.ArgumentParser('')
_parser.add_argument('-t', '--triggers', default='triggers', help='Folder containing trigger files')
_parser.add_argument('-r', '--rate', default=4.0, help='Poll rate in polls per second')
_parser.add_argument('-d', '--debug', action='store_true', help='Enables debug logging')
_args = _parser.parse_args()
# Configure the logging module
_logformat = '%(asctime)s : %(levelname)s : %(name)s : %(message)s'
_loglevel = logging.DEBUG if _args.debug else logging.INFO
logging.basicConfig(format=_logformat, level=_loglevel)
logging.getLogger('requests.packages.urllib3').setLevel(logging.WARNING)
# We use a session variable so that HTTP keep-alive is utilized, and
# also so we'll always remember to set the content type appropriately.
_session = requests.session()
_session.headers['Content-Type'] = 'application/json'
# Stores previous last access times for each file
# so they can be compared each time files are polled.
_atimes = collections.defaultdict(time.time)
# Poll the list of files forever
while True:
# Delay the appropriate amount of time between polls
time.sleep(1.0 / _args.rate)
# Grab a list of all fully-qualified wave file names in the trigger folder
files = (os.path.join(_args.triggers, f) for f in os.listdir(_args.triggers) if os.path.splitext(f)[1] == '.wav')
# Iterate over the list of files
for filename in files:
# If the last access time is newer than what was previous recorded then take
# action on that file. A small threshold is used to prevent "double bouncing".
if os.stat(filename).st_atime - _atimes[filename] > 1.0:
# Open the file and pull out the data
with open(filename, 'rb') as f:
req = f.read()
# Immediately store off the last accessed time
_atimes[filename] = os.stat(filename).st_atime
# Separate the components of the request
method, url, data = req[52:].splitlines(False)
# Attempt to send the request and log the results
_logger.debug('Sending {0} request to {1}'.format(method, url))
try:
response = _session.request(method, url, data=data)
_logger.debug('Received response with status code {0}'.format(response.status_code))
except requests.RequestException:
_logger.warning('Unable to contact {0}'.format(url))
|
Add standalone wave trigger tool.
|
Add standalone wave trigger tool.
|
Python
|
apache-2.0
|
lordjabez/light-maestro,lordjabez/light-maestro,lordjabez/light-maestro,lordjabez/light-maestro
|
|
a86852fe908bb0a44ef267a75b9446ddcaf03f6e
|
homeassistant/components/light/limitlessled.py
|
homeassistant/components/light/limitlessled.py
|
"""
homeassistant.components.light.limitlessled
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Support for LimitlessLED bulbs, also known as...
EasyBulb
AppLight
AppLamp
MiLight
LEDme
dekolight
iLight
"""
import random
import logging
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.const import STATE_ON, STATE_OFF, DEVICE_DEFAULT_NAME
from homeassistant.components.light import ATTR_BRIGHTNESS
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
try:
import ledcontroller
except ImportError:
_LOGGER.exception("Error while importing dependency ledcontroller.")
return
led = ledcontroller.LedController(config['host'])
lights = []
for i in range(1, 5):
if 'group_%d_name' % (i) in config:
lights.append(
LimitlessLED(
led,
i,
config['group_%d_name' % (i)],
STATE_OFF
)
)
add_devices_callback(lights)
class LimitlessLED(ToggleEntity):
def __init__(self, led, group, name, state, brightness=180):
self.led = led
self.group = group
        # LimitlessLEDs don't report state, we have to track it ourselves.
self.led.off(self.group)
self._name = name or DEVICE_DEFAULT_NAME
self._state = state
self._brightness = brightness
@property
def should_poll(self):
""" No polling needed for a demo light. """
return False
@property
def name(self):
""" Returns the name of the device if any. """
return self._name
@property
def state(self):
""" Returns the name of the device if any. """
return self._state
@property
def state_attributes(self):
""" Returns optional state attributes. """
if self.is_on:
return {
ATTR_BRIGHTNESS: self._brightness,
}
@property
def is_on(self):
""" True if device is on. """
return self._state == STATE_ON
def turn_on(self, **kwargs):
""" Turn the device on. """
self._state = STATE_ON
if ATTR_BRIGHTNESS in kwargs:
self._brightness = kwargs[ATTR_BRIGHTNESS]
self.led.set_brightness(self._brightness, self.group)
def turn_off(self, **kwargs):
""" Turn the device off. """
self._state = STATE_OFF
self.led.off(self.group)
|
Add basic support for LimitlessLED
|
Add basic support for LimitlessLED
|
Python
|
mit
|
Duoxilian/home-assistant,betrisey/home-assistant,CCOSTAN/home-assistant,open-homeautomation/home-assistant,Smart-Torvy/torvy-home-assistant,pschmitt/home-assistant,hexxter/home-assistant,srcLurker/home-assistant,postlund/home-assistant,alexmogavero/home-assistant,MungoRae/home-assistant,srcLurker/home-assistant,turbokongen/home-assistant,shaftoe/home-assistant,keerts/home-assistant,tchellomello/home-assistant,jamespcole/home-assistant,ewandor/home-assistant,jabesq/home-assistant,qedi-r/home-assistant,w1ll1am23/home-assistant,bdfoster/blumate,luxus/home-assistant,ma314smith/home-assistant,miniconfig/home-assistant,bencmbrook/home-assistant,alexmogavero/home-assistant,Duoxilian/home-assistant,FreekingDean/home-assistant,jamespcole/home-assistant,rohitranjan1991/home-assistant,jaharkes/home-assistant,alexmogavero/home-assistant,ewandor/home-assistant,Julian/home-assistant,JshWright/home-assistant,leppa/home-assistant,stefan-jonasson/home-assistant,tomduijf/home-assistant,justyns/home-assistant,sdague/home-assistant,sanmiguel/home-assistant,tmm1/home-assistant,sffjunkie/home-assistant,nugget/home-assistant,devdelay/home-assistant,molobrakos/home-assistant,sanmiguel/home-assistant,open-homeautomation/home-assistant,betrisey/home-assistant,oandrew/home-assistant,kyvinh/home-assistant,instantchow/home-assistant,mezz64/home-assistant,Duoxilian/home-assistant,Zac-HD/home-assistant,soldag/home-assistant,kyvinh/home-assistant,varunr047/homefile,sfam/home-assistant,open-homeautomation/home-assistant,eagleamon/home-assistant,hmronline/home-assistant,morphis/home-assistant,tboyce1/home-assistant,ct-23/home-assistant,EricRho/home-assistant,Nzaga/home-assistant,g12mcgov/home-assistant,aronsky/home-assistant,Julian/home-assistant,LinuxChristian/home-assistant,partofthething/home-assistant,Theb-1/home-assistant,keerts/home-assistant,aoakeson/home-assistant,happyleavesaoc/home-assistant,nnic/home-assistant,teodoc/home-assistant,emilhetty/home-assistant,mKeRix/home-assistant,PetePriority/home-assistant,Danielhiversen/home-assistant,MartinHjelmare/home-assistant,theolind/home-assistant,hmronline/home-assistant,Theb-1/home-assistant,emilhetty/home-assistant,jawilson/home-assistant,alanbowman/home-assistant,turbokongen/home-assistant,dorant/home-assistant,tmm1/home-assistant,nugget/home-assistant,nnic/home-assistant,pschmitt/home-assistant,jaharkes/home-assistant,varunr047/homefile,xifle/home-assistant,tboyce1/home-assistant,robbiet480/home-assistant,keerts/home-assistant,Danielhiversen/home-assistant,Cinntax/home-assistant,coteyr/home-assistant,qedi-r/home-assistant,alexkolar/home-assistant,ma314smith/home-assistant,varunr047/homefile,partofthething/home-assistant,sander76/home-assistant,leoc/home-assistant,mahendra-r/home-assistant,Zyell/home-assistant,devdelay/home-assistant,pottzer/home-assistant,oandrew/home-assistant,MartinHjelmare/home-assistant,PetePriority/home-assistant,MungoRae/home-assistant,JshWright/home-assistant,miniconfig/home-assistant,molobrakos/home-assistant,SEJeff/home-assistant,LinuxChristian/home-assistant,titilambert/home-assistant,stefan-jonasson/home-assistant,shaftoe/home-assistant,badele/home-assistant,sfam/home-assistant,luxus/home-assistant,aronsky/home-assistant,balloob/home-assistant,ma314smith/home-assistant,bencmbrook/home-assistant,deisi/home-assistant,open-homeautomation/home-assistant,tboyce1/home-assistant,kennedyshead/home-assistant,DavidLP/home-assistant,ErykB2000/home-assistant,mikaelboman/home-assistant,mikaelboman/home-assistant,vitorespindola/home-assistant,rohitranjan
1991/home-assistant,caiuspb/home-assistant,ewandor/home-assistant,jawilson/home-assistant,GenericStudent/home-assistant,srcLurker/home-assistant,luxus/home-assistant,HydrelioxGitHub/home-assistant,happyleavesaoc/home-assistant,ct-23/home-assistant,stefan-jonasson/home-assistant,xifle/home-assistant,keerts/home-assistant,miniconfig/home-assistant,badele/home-assistant,Smart-Torvy/torvy-home-assistant,coteyr/home-assistant,deisi/home-assistant,Zyell/home-assistant,Julian/home-assistant,tomduijf/home-assistant,miniconfig/home-assistant,tboyce021/home-assistant,hmronline/home-assistant,adrienbrault/home-assistant,aequitas/home-assistant,hexxter/home-assistant,Nzaga/home-assistant,oandrew/home-assistant,happyleavesaoc/home-assistant,theolind/home-assistant,mikaelboman/home-assistant,titilambert/home-assistant,bencmbrook/home-assistant,DavidLP/home-assistant,EricRho/home-assistant,Zyell/home-assistant,auduny/home-assistant,DavidLP/home-assistant,fbradyirl/home-assistant,aoakeson/home-assistant,adrienbrault/home-assistant,shaftoe/home-assistant,joopert/home-assistant,robjohnson189/home-assistant,xifle/home-assistant,shaftoe/home-assistant,soldag/home-assistant,lukas-hetzenecker/home-assistant,hmronline/home-assistant,HydrelioxGitHub/home-assistant,joopert/home-assistant,eagleamon/home-assistant,betrisey/home-assistant,rohitranjan1991/home-assistant,mahendra-r/home-assistant,deisi/home-assistant,jabesq/home-assistant,hexxter/home-assistant,jnewland/home-assistant,mKeRix/home-assistant,mahendra-r/home-assistant,emilhetty/home-assistant,Julian/home-assistant,instantchow/home-assistant,hmronline/home-assistant,bdfoster/blumate,alanbowman/home-assistant,alanbowman/home-assistant,robjohnson189/home-assistant,dorant/home-assistant,sanmiguel/home-assistant,balloob/home-assistant,sffjunkie/home-assistant,dmeulen/home-assistant,sdague/home-assistant,persandstrom/home-assistant,tinloaf/home-assistant,nkgilley/home-assistant,emilhetty/home-assistant,robbiet480/home-assistant,LinuxChristian/home-assistant,FreekingDean/home-assistant,sander76/home-assistant,xifle/home-assistant,emilhetty/home-assistant,MartinHjelmare/home-assistant,GenericStudent/home-assistant,balloob/home-assistant,leoc/home-assistant,w1ll1am23/home-assistant,caiuspb/home-assistant,maddox/home-assistant,MungoRae/home-assistant,philipbl/home-assistant,persandstrom/home-assistant,LinuxChristian/home-assistant,postlund/home-assistant,CCOSTAN/home-assistant,ErykB2000/home-assistant,ErykB2000/home-assistant,mezz64/home-assistant,dmeulen/home-assistant,kennedyshead/home-assistant,HydrelioxGitHub/home-assistant,nevercast/home-assistant,CCOSTAN/home-assistant,nkgilley/home-assistant,ct-23/home-assistant,alexkolar/home-assistant,justyns/home-assistant,aequitas/home-assistant,ct-23/home-assistant,florianholzapfel/home-assistant,toddeye/home-assistant,LinuxChristian/home-assistant,varunr047/homefile,bdfoster/blumate,alexkolar/home-assistant,pottzer/home-assistant,oandrew/home-assistant,mKeRix/home-assistant,Smart-Torvy/torvy-home-assistant,maddox/home-assistant,ct-23/home-assistant,EricRho/home-assistant,aoakeson/home-assistant,jaharkes/home-assistant,Zac-HD/home-assistant,Teagan42/home-assistant,dmeulen/home-assistant,sffjunkie/home-assistant,aequitas/home-assistant,bdfoster/blumate,betrisey/home-assistant,eagleamon/home-assistant,Nzaga/home-assistant,instantchow/home-assistant,PetePriority/home-assistant,vitorespindola/home-assistant,fbradyirl/home-assistant,fbradyirl/home-assistant,deisi/home-assistant,leoc/home-assistant,nugget/home-assistant,morphi
s/home-assistant,sffjunkie/home-assistant,bdfoster/blumate,MungoRae/home-assistant,jabesq/home-assistant,JshWright/home-assistant,g12mcgov/home-assistant,michaelarnauts/home-assistant,Zac-HD/home-assistant,stefan-jonasson/home-assistant,morphis/home-assistant,SEJeff/home-assistant,robjohnson189/home-assistant,home-assistant/home-assistant,mikaelboman/home-assistant,SEJeff/home-assistant,badele/home-assistant,tinloaf/home-assistant,caiuspb/home-assistant,auduny/home-assistant,MungoRae/home-assistant,Smart-Torvy/torvy-home-assistant,kyvinh/home-assistant,jnewland/home-assistant,mikaelboman/home-assistant,lukas-hetzenecker/home-assistant,Teagan42/home-assistant,eagleamon/home-assistant,jamespcole/home-assistant,tomduijf/home-assistant,srcLurker/home-assistant,happyleavesaoc/home-assistant,michaelarnauts/home-assistant,devdelay/home-assistant,florianholzapfel/home-assistant,pottzer/home-assistant,leoc/home-assistant,Theb-1/home-assistant,alexmogavero/home-assistant,sffjunkie/home-assistant,leppa/home-assistant,vitorespindola/home-assistant,hexxter/home-assistant,tinloaf/home-assistant,florianholzapfel/home-assistant,tboyce021/home-assistant,Cinntax/home-assistant,deisi/home-assistant,kyvinh/home-assistant,teodoc/home-assistant,sfam/home-assistant,philipbl/home-assistant,tmm1/home-assistant,dorant/home-assistant,philipbl/home-assistant,philipbl/home-assistant,tchellomello/home-assistant,morphis/home-assistant,coteyr/home-assistant,justyns/home-assistant,home-assistant/home-assistant,Duoxilian/home-assistant,nevercast/home-assistant,nevercast/home-assistant,molobrakos/home-assistant,JshWright/home-assistant,g12mcgov/home-assistant,mKeRix/home-assistant,jaharkes/home-assistant,tboyce1/home-assistant,auduny/home-assistant,florianholzapfel/home-assistant,michaelarnauts/home-assistant,dmeulen/home-assistant,persandstrom/home-assistant,teodoc/home-assistant,robjohnson189/home-assistant,Zac-HD/home-assistant,varunr047/homefile,theolind/home-assistant,ma314smith/home-assistant,nnic/home-assistant,maddox/home-assistant,jnewland/home-assistant,toddeye/home-assistant,devdelay/home-assistant
|
Add basic support for LimitlessLED
|
"""
homeassistant.components.light.limitlessled
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Support for LimitlessLED bulbs, also known as...
EasyBulb
AppLight
AppLamp
MiLight
LEDme
dekolight
iLight
"""
import random
import logging
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.const import STATE_ON, STATE_OFF, DEVICE_DEFAULT_NAME
from homeassistant.components.light import ATTR_BRIGHTNESS
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
try:
import ledcontroller
except ImportError:
_LOGGER.exception("Error while importing dependency ledcontroller.")
return
led = ledcontroller.LedController(config['host'])
lights = []
for i in range(1, 5):
if 'group_%d_name' % (i) in config:
lights.append(
LimitlessLED(
led,
i,
config['group_%d_name' % (i)],
STATE_OFF
)
)
add_devices_callback(lights)
class LimitlessLED(ToggleEntity):
def __init__(self, led, group, name, state, brightness=180):
self.led = led
self.group = group
        # LimitlessLEDs don't report state, we have to track it ourselves.
self.led.off(self.group)
self._name = name or DEVICE_DEFAULT_NAME
self._state = state
self._brightness = brightness
@property
def should_poll(self):
""" No polling needed for a demo light. """
return False
@property
def name(self):
""" Returns the name of the device if any. """
return self._name
@property
def state(self):
""" Returns the name of the device if any. """
return self._state
@property
def state_attributes(self):
""" Returns optional state attributes. """
if self.is_on:
return {
ATTR_BRIGHTNESS: self._brightness,
}
@property
def is_on(self):
""" True if device is on. """
return self._state == STATE_ON
def turn_on(self, **kwargs):
""" Turn the device on. """
self._state = STATE_ON
if ATTR_BRIGHTNESS in kwargs:
self._brightness = kwargs[ATTR_BRIGHTNESS]
self.led.set_brightness(self._brightness, self.group)
def turn_off(self, **kwargs):
""" Turn the device off. """
self._state = STATE_OFF
self.led.off(self.group)
|
<commit_before><commit_msg>Add basic support for LimitlessLED<commit_after>
|
"""
homeassistant.components.light.limitlessled
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Support for LimitlessLED bulbs, also known as...
EasyBulb
AppLight
AppLamp
MiLight
LEDme
dekolight
iLight
"""
import random
import logging
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.const import STATE_ON, STATE_OFF, DEVICE_DEFAULT_NAME
from homeassistant.components.light import ATTR_BRIGHTNESS
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
try:
import ledcontroller
except ImportError:
_LOGGER.exception("Error while importing dependency ledcontroller.")
return
led = ledcontroller.LedController(config['host'])
lights = []
for i in range(1, 5):
if 'group_%d_name' % (i) in config:
lights.append(
LimitlessLED(
led,
i,
config['group_%d_name' % (i)],
STATE_OFF
)
)
add_devices_callback(lights)
class LimitlessLED(ToggleEntity):
def __init__(self, led, group, name, state, brightness=180):
self.led = led
self.group = group
        # LimitlessLEDs don't report state, we have to track it ourselves.
self.led.off(self.group)
self._name = name or DEVICE_DEFAULT_NAME
self._state = state
self._brightness = brightness
@property
def should_poll(self):
""" No polling needed for a demo light. """
return False
@property
def name(self):
""" Returns the name of the device if any. """
return self._name
@property
def state(self):
""" Returns the name of the device if any. """
return self._state
@property
def state_attributes(self):
""" Returns optional state attributes. """
if self.is_on:
return {
ATTR_BRIGHTNESS: self._brightness,
}
@property
def is_on(self):
""" True if device is on. """
return self._state == STATE_ON
def turn_on(self, **kwargs):
""" Turn the device on. """
self._state = STATE_ON
if ATTR_BRIGHTNESS in kwargs:
self._brightness = kwargs[ATTR_BRIGHTNESS]
self.led.set_brightness(self._brightness, self.group)
def turn_off(self, **kwargs):
""" Turn the device off. """
self._state = STATE_OFF
self.led.off(self.group)
|
Add basic support for LimitlessLED"""
homeassistant.components.light.limitlessled
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Support for LimitlessLED bulbs, also known as...
EasyBulb
AppLight
AppLamp
MiLight
LEDme
dekolight
iLight
"""
import random
import logging
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.const import STATE_ON, STATE_OFF, DEVICE_DEFAULT_NAME
from homeassistant.components.light import ATTR_BRIGHTNESS
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
try:
import ledcontroller
except ImportError:
_LOGGER.exception("Error while importing dependency ledcontroller.")
return
led = ledcontroller.LedController(config['host'])
lights = []
for i in range(1, 5):
if 'group_%d_name' % (i) in config:
lights.append(
LimitlessLED(
led,
i,
config['group_%d_name' % (i)],
STATE_OFF
)
)
add_devices_callback(lights)
class LimitlessLED(ToggleEntity):
def __init__(self, led, group, name, state, brightness=180):
self.led = led
self.group = group
        # LimitlessLEDs don't report state, we have to track it ourselves.
self.led.off(self.group)
self._name = name or DEVICE_DEFAULT_NAME
self._state = state
self._brightness = brightness
@property
def should_poll(self):
""" No polling needed for a demo light. """
return False
@property
def name(self):
""" Returns the name of the device if any. """
return self._name
@property
def state(self):
""" Returns the name of the device if any. """
return self._state
@property
def state_attributes(self):
""" Returns optional state attributes. """
if self.is_on:
return {
ATTR_BRIGHTNESS: self._brightness,
}
@property
def is_on(self):
""" True if device is on. """
return self._state == STATE_ON
def turn_on(self, **kwargs):
""" Turn the device on. """
self._state = STATE_ON
if ATTR_BRIGHTNESS in kwargs:
self._brightness = kwargs[ATTR_BRIGHTNESS]
self.led.set_brightness(self._brightness, self.group)
def turn_off(self, **kwargs):
""" Turn the device off. """
self._state = STATE_OFF
self.led.off(self.group)
|
<commit_before><commit_msg>Add basic support for LimitlessLED<commit_after>"""
homeassistant.components.light.limitlessled
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Support for LimitlessLED bulbs, also known as...
EasyBulb
AppLight
AppLamp
MiLight
LEDme
dekolight
iLight
"""
import random
import logging
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.const import STATE_ON, STATE_OFF, DEVICE_DEFAULT_NAME
from homeassistant.components.light import ATTR_BRIGHTNESS
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
try:
import ledcontroller
except ImportError:
_LOGGER.exception("Error while importing dependency ledcontroller.")
return
led = ledcontroller.LedController(config['host'])
lights = []
for i in range(1, 5):
if 'group_%d_name' % (i) in config:
lights.append(
LimitlessLED(
led,
i,
config['group_%d_name' % (i)],
STATE_OFF
)
)
add_devices_callback(lights)
class LimitlessLED(ToggleEntity):
def __init__(self, led, group, name, state, brightness=180):
self.led = led
self.group = group
        # LimitlessLEDs don't report state, we have to track it ourselves.
self.led.off(self.group)
self._name = name or DEVICE_DEFAULT_NAME
self._state = state
self._brightness = brightness
@property
def should_poll(self):
""" No polling needed for a demo light. """
return False
@property
def name(self):
""" Returns the name of the device if any. """
return self._name
@property
def state(self):
""" Returns the name of the device if any. """
return self._state
@property
def state_attributes(self):
""" Returns optional state attributes. """
if self.is_on:
return {
ATTR_BRIGHTNESS: self._brightness,
}
@property
def is_on(self):
""" True if device is on. """
return self._state == STATE_ON
def turn_on(self, **kwargs):
""" Turn the device on. """
self._state = STATE_ON
if ATTR_BRIGHTNESS in kwargs:
self._brightness = kwargs[ATTR_BRIGHTNESS]
self.led.set_brightness(self._brightness, self.group)
def turn_off(self, **kwargs):
""" Turn the device off. """
self._state = STATE_OFF
self.led.off(self.group)
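For context, setup_platform above reads a bridge host plus up to four group names from the platform config. A purely illustrative config mapping along those lines (the host address and group names are made up) might look like:

example_config = {
    'host': '192.168.1.100',        # LimitlessLED wifi bridge address (example value)
    'group_1_name': 'Living Room',
    'group_2_name': 'Bedroom',
}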
|
|
37f286812bea7429bea67172a40d26ad435d6f67
|
test/examples/hole_in_square.py
|
test/examples/hole_in_square.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import pygmsh as pg
import numpy as np
def generate():
# Characteristic length
lcar = 1e-1
# Coordinates of lower-left and upper-right vertices of a square domain
xmin = 0.0
xmax = 5.0
ymin = 0.0
ymax = 5.0
# Vertices of a square hole
squareHoleCoordinates = np.array([[1, 1, 0],
[4, 1, 0],
[4, 4, 0],
[1, 4, 0]])
# Create geometric object
geom = pg.Geometry()
# Create square hole
squareHole = [geom.add_polygon_loop(squareHoleCoordinates, lcar)]
# Create square domain with square hole
geom.add_rectangle(xmin, xmax, ymin, ymax, 0.0, lcar, holes=squareHole)
# Return geo-file code
return geom.get_code()
if __name__ == '__main__':
print(generate())
|
Add test for 'holes' argument in add_polygon
|
Add test for 'holes' argument in add_polygon
|
Python
|
bsd-3-clause
|
nschloe/python4gmsh
|
Add test for 'holes' argument in add_polygon
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import pygmsh as pg
import numpy as np
def generate():
# Characteristic length
lcar = 1e-1
# Coordinates of lower-left and upper-right vertices of a square domain
xmin = 0.0
xmax = 5.0
ymin = 0.0
ymax = 5.0
# Vertices of a square hole
squareHoleCoordinates = np.array([[1, 1, 0],
[4, 1, 0],
[4, 4, 0],
[1, 4, 0]])
# Create geometric object
geom = pg.Geometry()
# Create square hole
squareHole = [geom.add_polygon_loop(squareHoleCoordinates, lcar)]
# Create square domain with square hole
geom.add_rectangle(xmin, xmax, ymin, ymax, 0.0, lcar, holes=squareHole)
# Return geo-file code
return geom.get_code()
if __name__ == '__main__':
print(generate())
|
<commit_before><commit_msg>Add test for 'holes' argument in add_polygon<commit_after>
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import pygmsh as pg
import numpy as np
def generate():
# Characteristic length
lcar = 1e-1
# Coordinates of lower-left and upper-right vertices of a square domain
xmin = 0.0
xmax = 5.0
ymin = 0.0
ymax = 5.0
# Vertices of a square hole
squareHoleCoordinates = np.array([[1, 1, 0],
[4, 1, 0],
[4, 4, 0],
[1, 4, 0]])
# Create geometric object
geom = pg.Geometry()
# Create square hole
squareHole = [geom.add_polygon_loop(squareHoleCoordinates, lcar)]
# Create square domain with square hole
geom.add_rectangle(xmin, xmax, ymin, ymax, 0.0, lcar, holes=squareHole)
# Return geo-file code
return geom.get_code()
if __name__ == '__main__':
print(generate())
|
Add test for 'holes' argument in add_polygon#!/usr/bin/python
# -*- coding: utf-8 -*-
import pygmsh as pg
import numpy as np
def generate():
# Characteristic length
lcar = 1e-1
# Coordinates of lower-left and upper-right vertices of a square domain
xmin = 0.0
xmax = 5.0
ymin = 0.0
ymax = 5.0
# Vertices of a square hole
squareHoleCoordinates = np.array([[1, 1, 0],
[4, 1, 0],
[4, 4, 0],
[1, 4, 0]])
# Create geometric object
geom = pg.Geometry()
# Create square hole
squareHole = [geom.add_polygon_loop(squareHoleCoordinates, lcar)]
# Create square domain with square hole
geom.add_rectangle(xmin, xmax, ymin, ymax, 0.0, lcar, holes=squareHole)
# Return geo-file code
return geom.get_code()
if __name__ == '__main__':
print(generate())
|
<commit_before><commit_msg>Add test for 'holes' argument in add_polygon<commit_after>#!/usr/bin/python
# -*- coding: utf-8 -*-
import pygmsh as pg
import numpy as np
def generate():
# Characteristic length
lcar = 1e-1
# Coordinates of lower-left and upper-right vertices of a square domain
xmin = 0.0
xmax = 5.0
ymin = 0.0
ymax = 5.0
# Vertices of a square hole
squareHoleCoordinates = np.array([[1, 1, 0],
[4, 1, 0],
[4, 4, 0],
[1, 4, 0]])
# Create geometric object
geom = pg.Geometry()
# Create square hole
squareHole = [geom.add_polygon_loop(squareHoleCoordinates, lcar)]
# Create square domain with square hole
geom.add_rectangle(xmin, xmax, ymin, ymax, 0.0, lcar, holes=squareHole)
# Return geo-file code
return geom.get_code()
if __name__ == '__main__':
print(generate())
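Assuming the module above is importable as hole_in_square, a small sketch of saving the generated geo-code to a file for inspection with gmsh (the output file name is arbitrary):

from hole_in_square import generate

# Write the generated Gmsh geo-code to disk for manual inspection
with open('hole_in_square.geo', 'w') as f:
    f.write(generate())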
|
|
db4bc200f9a48edf9e160c2134293df0313183a7
|
conditional_prefix.py
|
conditional_prefix.py
|
from cloudbot import hook
import re
@hook.sieve
def conditional_prefix(bot, event, plugin):
if plugin.type == 'command':
if event.chan in event.conn.config['prefix_blocked_channels']:
command_prefix = event.conn.config['command_prefix']
if not event.chan.lower() == event.nick.lower(): # private message, no command prefix
command_re = r'(?i)^(?:[{}])(\w+)(?:$|\s+)(.*)'.format(command_prefix, event.conn.nick)
if re.match(command_re, event.content):
return None
return event
|
Add conditional command prefix plugin
|
Add conditional command prefix plugin
|
Python
|
mit
|
Aaron1011/CloudBotPlugins
|
Add conditional command prefix plugin
|
from cloudbot import hook
import re
@hook.sieve
def conditional_prefix(bot, event, plugin):
if plugin.type == 'command':
if event.chan in event.conn.config['prefix_blocked_channels']:
command_prefix = event.conn.config['command_prefix']
if not event.chan.lower() == event.nick.lower(): # private message, no command prefix
command_re = r'(?i)^(?:[{}])(\w+)(?:$|\s+)(.*)'.format(command_prefix, event.conn.nick)
if re.match(command_re, event.content):
return None
return event
|
<commit_before><commit_msg>Add conditional command prefix plugin<commit_after>
|
from cloudbot import hook
import re
@hook.sieve
def conditional_prefix(bot, event, plugin):
if plugin.type == 'command':
if event.chan in event.conn.config['prefix_blocked_channels']:
command_prefix = event.conn.config['command_prefix']
if not event.chan.lower() == event.nick.lower(): # private message, no command prefix
command_re = r'(?i)^(?:[{}])(\w+)(?:$|\s+)(.*)'.format(command_prefix, event.conn.nick)
if re.match(command_re, event.content):
return None
return event
|
Add conditional command prefix pluginfrom cloudbot import hook
import re
@hook.sieve
def conditional_prefix(bot, event, plugin):
if plugin.type == 'command':
if event.chan in event.conn.config['prefix_blocked_channels']:
command_prefix = event.conn.config['command_prefix']
if not event.chan.lower() == event.nick.lower(): # private message, no command prefix
command_re = r'(?i)^(?:[{}])(\w+)(?:$|\s+)(.*)'.format(command_prefix, event.conn.nick)
if re.match(command_re, event.content):
return None
return event
|
<commit_before><commit_msg>Add conditional command prefix plugin<commit_after>from cloudbot import hook
import re
@hook.sieve
def conditional_prefix(bot, event, plugin):
if plugin.type == 'command':
if event.chan in event.conn.config['prefix_blocked_channels']:
command_prefix = event.conn.config['command_prefix']
if not event.chan.lower() == event.nick.lower(): # private message, no command prefix
command_re = r'(?i)^(?:[{}])(\w+)(?:$|\s+)(.*)'.format(command_prefix, event.conn.nick)
if re.match(command_re, event.content):
return None
return event
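The sieve above looks up both command_prefix and prefix_blocked_channels on the connection config; a purely illustrative config fragment (the channel names are invented) would be:

example_conn_config = {
    'command_prefix': '.',
    'prefix_blocked_channels': ['#announcements', '#logs'],
}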
|
|
b80e52ecf09f96e84625eb6fff9aa7a20059c0f8
|
test_single.py
|
test_single.py
|
import sys
import unittest
from toast.mpirunner import MPITestRunner
file = sys.argv[1]
loader = unittest.TestLoader()
runner = MPITestRunner(verbosity=2)
suite = loader.discover('tests', pattern='{}'.format(file), top_level_dir='.')
runner.run(suite)
|
Add new top level script to ease running of individual unittests.
|
Add new top level script to ease running of individual unittests.
|
Python
|
bsd-2-clause
|
tskisner/pytoast,tskisner/pytoast
|
Add new top level script to ease running of individual unittests.
|
import sys
import unittest
from toast.mpirunner import MPITestRunner
file = sys.argv[1]
loader = unittest.TestLoader()
runner = MPITestRunner(verbosity=2)
suite = loader.discover('tests', pattern='{}'.format(file), top_level_dir='.')
runner.run(suite)
|
<commit_before><commit_msg>Add new top level script to ease running of individual unittests.<commit_after>
|
import sys
import unittest
from toast.mpirunner import MPITestRunner
file = sys.argv[1]
loader = unittest.TestLoader()
runner = MPITestRunner(verbosity=2)
suite = loader.discover('tests', pattern='{}'.format(file), top_level_dir='.')
runner.run(suite)
|
Add new top level script to ease running of individual unittests.
import sys
import unittest
from toast.mpirunner import MPITestRunner
file = sys.argv[1]
loader = unittest.TestLoader()
runner = MPITestRunner(verbosity=2)
suite = loader.discover('tests', pattern='{}'.format(file), top_level_dir='.')
runner.run(suite)
|
<commit_before><commit_msg>Add new top level script to ease running of individual unittests.<commit_after>
import sys
import unittest
from toast.mpirunner import MPITestRunner
file = sys.argv[1]
loader = unittest.TestLoader()
runner = MPITestRunner(verbosity=2)
suite = loader.discover('tests', pattern='{}'.format(file), top_level_dir='.')
runner.run(suite)
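A minimal sketch (not part of the commit) of the same runner with a guard for a missing argument; the usage string and pattern are illustrative:

import sys
import unittest
from toast.mpirunner import MPITestRunner

if len(sys.argv) < 2:
    sys.exit('usage: python test_single.py <test_file_pattern>')

# Discover only the requested test file and run it under the MPI-aware runner
suite = unittest.TestLoader().discover('tests', pattern=sys.argv[1], top_level_dir='.')
MPITestRunner(verbosity=2).run(suite)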
|
|
2e6c7235c555799cc9dbb9d1fa7faeab4557ac13
|
db.py
|
db.py
|
import sqlite3
connection = sqlite3.connect('data.db')
class SavedRoll:
@staticmethod
def save(user, name, args):
pass
@staticmethod
def get(user, name):
pass
@staticmethod
def delete(user, name):
pass
|
Add stubby saved roll class
|
Add stubby saved roll class
|
Python
|
mit
|
foxscotch/foxrollbot
|
Add stubby saved roll class
|
import sqlite3
connection = sqlite3.connect('data.db')
class SavedRoll:
@staticmethod
def save(user, name, args):
pass
@staticmethod
def get(user, name):
pass
@staticmethod
def delete(user, name):
pass
|
<commit_before><commit_msg>Add stubby saved roll class<commit_after>
|
import sqlite3
connection = sqlite3.connect('data.db')
class SavedRoll:
@staticmethod
def save(user, name, args):
pass
@staticmethod
def get(user, name):
pass
@staticmethod
def delete(user, name):
pass
|
Add stubby saved roll classimport sqlite3
connection = sqlite3.connect('data.db')
class SavedRoll:
@staticmethod
def save(user, name, args):
pass
@staticmethod
def get(user, name):
pass
@staticmethod
def delete(user, name):
pass
|
<commit_before><commit_msg>Add stubby saved roll class<commit_after>import sqlite3
connection = sqlite3.connect('data.db')
class SavedRoll:
@staticmethod
def save(user, name, args):
pass
@staticmethod
def get(user, name):
pass
@staticmethod
def delete(user, name):
pass
|
|
4f08f057c7e4cc8230a996d853892ab3eef36065
|
rps.py
|
rps.py
|
from random import choice
class RPSGame:
shapes = ['rock', 'paper', 'scissors']
draws = [('rock', 'rock'), ('paper', 'paper'), ('scissors', 'scissors')]
first_wins = [('rock', 'scissors'), ('scissors', 'paper'), ('paper', 'rock')]
def _evaluate(self, player_move, computer_move):
if (player_move, computer_move) in RPSGame.draws:
return "Draw!"
elif (player_move, computer_move) in RPSGame.first_wins:
return "Player wins!"
else:
return "Computer wins!"
def play(self, rounds=1):
for i in range(rounds):
player_move = input("[rock,paper,scissors]: ")
computer_move = choice(RPSGame.shapes)
winner = self._evaluate(player_move, computer_move)
print(20 * "-")
print("You played: %s" % player_move)
print("Computer played: %s" % computer_move)
print(winner)
print(20 * "-")
if __name__ == '__main__':
game = RPSGame()
game.play(rounds=10)
|
Add simple terminal-based version of rock-paper-scissors.
|
Add simple terminal-based version of rock-paper-scissors.
|
Python
|
mit
|
kubkon/ee106-additional-material
|
Add simple terminal-based version of rock-paper-scissors.
|
from random import choice
class RPSGame:
shapes = ['rock', 'paper', 'scissors']
draws = [('rock', 'rock'), ('paper', 'paper'), ('scissors', 'scissors')]
first_wins = [('rock', 'scissors'), ('scissors', 'paper'), ('paper', 'rock')]
def _evaluate(self, player_move, computer_move):
if (player_move, computer_move) in RPSGame.draws:
return "Draw!"
elif (player_move, computer_move) in RPSGame.first_wins:
return "Player wins!"
else:
return "Computer wins!"
def play(self, rounds=1):
for i in range(rounds):
player_move = input("[rock,paper,scissors]: ")
computer_move = choice(RPSGame.shapes)
winner = self._evaluate(player_move, computer_move)
print(20 * "-")
print("You played: %s" % player_move)
print("Computer played: %s" % computer_move)
print(winner)
print(20 * "-")
if __name__ == '__main__':
game = RPSGame()
game.play(rounds=10)
|
<commit_before><commit_msg>Add simple terminal-based version of rock-paper-scissors.<commit_after>
|
from random import choice
class RPSGame:
shapes = ['rock', 'paper', 'scissors']
draws = [('rock', 'rock'), ('paper', 'paper'), ('scissors', 'scissors')]
first_wins = [('rock', 'scissors'), ('scissors', 'paper'), ('paper', 'rock')]
def _evaluate(self, player_move, computer_move):
if (player_move, computer_move) in RPSGame.draws:
return "Draw!"
elif (player_move, computer_move) in RPSGame.first_wins:
return "Player wins!"
else:
return "Computer wins!"
def play(self, rounds=1):
for i in range(rounds):
player_move = input("[rock,paper,scissors]: ")
computer_move = choice(RPSGame.shapes)
winner = self._evaluate(player_move, computer_move)
print(20 * "-")
print("You played: %s" % player_move)
print("Computer played: %s" % computer_move)
print(winner)
print(20 * "-")
if __name__ == '__main__':
game = RPSGame()
game.play(rounds=10)
|
Add simple terminal-based version of rock-paper-scissors.from random import choice
class RPSGame:
shapes = ['rock', 'paper', 'scissors']
draws = [('rock', 'rock'), ('paper', 'paper'), ('scissors', 'scissors')]
first_wins = [('rock', 'scissors'), ('scissors', 'paper'), ('paper', 'rock')]
def _evaluate(self, player_move, computer_move):
if (player_move, computer_move) in RPSGame.draws:
return "Draw!"
elif (player_move, computer_move) in RPSGame.first_wins:
return "Player wins!"
else:
return "Computer wins!"
def play(self, rounds=1):
for i in range(rounds):
player_move = input("[rock,paper,scissors]: ")
computer_move = choice(RPSGame.shapes)
winner = self._evaluate(player_move, computer_move)
print(20 * "-")
print("You played: %s" % player_move)
print("Computer played: %s" % computer_move)
print(winner)
print(20 * "-")
if __name__ == '__main__':
game = RPSGame()
game.play(rounds=10)
|
<commit_before><commit_msg>Add simple terminal-based version of rock-paper-scissors.<commit_after>from random import choice
class RPSGame:
shapes = ['rock', 'paper', 'scissors']
draws = [('rock', 'rock'), ('paper', 'paper'), ('scissors', 'scissors')]
first_wins = [('rock', 'scissors'), ('scissors', 'paper'), ('paper', 'rock')]
def _evaluate(self, player_move, computer_move):
if (player_move, computer_move) in RPSGame.draws:
return "Draw!"
elif (player_move, computer_move) in RPSGame.first_wins:
return "Player wins!"
else:
return "Computer wins!"
def play(self, rounds=1):
for i in range(rounds):
player_move = input("[rock,paper,scissors]: ")
computer_move = choice(RPSGame.shapes)
winner = self._evaluate(player_move, computer_move)
print(20 * "-")
print("You played: %s" % player_move)
print("Computer played: %s" % computer_move)
print(winner)
print(20 * "-")
if __name__ == '__main__':
game = RPSGame()
game.play(rounds=10)
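A quick illustrative check of the evaluation rules defined above, assuming the class is importable from rps:

from rps import RPSGame

game = RPSGame()
assert game._evaluate('rock', 'scissors') == "Player wins!"
assert game._evaluate('paper', 'paper') == "Draw!"
assert game._evaluate('rock', 'paper') == "Computer wins!"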
|
|
a193f1d9b1816f72661254bba69c2c4a1e2c1b30
|
tests/extensions/functional/tests/test_google_menu.py
|
tests/extensions/functional/tests/test_google_menu.py
|
"""
Google Menu tests
"""
from base import BaseTouchscreenTest
import time
from base import MAPS_URL, ZOOMED_IN_MAPS_URL, Pose
from base import screenshot_on_error, make_screenshot
import re
class TestGoogleMenu(BaseTouchscreenTest):
@screenshot_on_error
def test_google_menu_is_visible(self):
self.browser.get(MAPS_URL)
morefun = self.browser.find_element_by_id('morefun')
assert morefun.is_displayed() is True
items = self.browser.find_element_by_id('morefun_items')
assert items.is_displayed() is False
@screenshot_on_error
def test_google_items_are_visible_on_click(self):
self.browser.get(MAPS_URL)
morefun = self.browser.find_element_by_id('morefun')
morefun.click()
assert morefun.is_displayed() is True
items = self.browser.find_element_by_id('morefun_items')
assert items.is_displayed() is True
@screenshot_on_error
def test_clicking_doodle_item(self):
"Clicking on the doodle item should change the url to the doodles page"
self.browser.get(ZOOMED_IN_MAPS_URL)
time.sleep(5)
morefun = self.browser.find_element_by_id('morefun')
morefun.click()
items = self.browser.find_element_by_id('morefun_items')
li_items = items.find_elements_by_tag_name('li')
assert len(li_items) == 2
doodle = li_items[1]
doodle.click()
assert re.match(r'chrome-extension:\/\/[a-z]+\/pages\/doodles.html',
self.browser.current_url)
|
Add tests for google menu
|
Add tests for google menu
|
Python
|
apache-2.0
|
EndPointCorp/appctl,EndPointCorp/appctl
|
Add tests for google menu
|
"""
Google Menu tests
"""
from base import BaseTouchscreenTest
import time
from base import MAPS_URL, ZOOMED_IN_MAPS_URL, Pose
from base import screenshot_on_error, make_screenshot
import re
class TestGoogleMenu(BaseTouchscreenTest):
@screenshot_on_error
def test_google_menu_is_visible(self):
self.browser.get(MAPS_URL)
morefun = self.browser.find_element_by_id('morefun')
assert morefun.is_displayed() is True
items = self.browser.find_element_by_id('morefun_items')
assert items.is_displayed() is False
@screenshot_on_error
def test_google_items_are_visible_on_click(self):
self.browser.get(MAPS_URL)
morefun = self.browser.find_element_by_id('morefun')
morefun.click()
assert morefun.is_displayed() is True
items = self.browser.find_element_by_id('morefun_items')
assert items.is_displayed() is True
@screenshot_on_error
def test_clicking_doodle_item(self):
"Clicking on the doodle item should change the url to the doodles page"
self.browser.get(ZOOMED_IN_MAPS_URL)
time.sleep(5)
morefun = self.browser.find_element_by_id('morefun')
morefun.click()
items = self.browser.find_element_by_id('morefun_items')
li_items = items.find_elements_by_tag_name('li')
assert len(li_items) == 2
doodle = li_items[1]
doodle.click()
assert re.match(r'chrome-extension:\/\/[a-z]+\/pages\/doodles.html',
self.browser.current_url)
|
<commit_before><commit_msg>Add tests for google menu<commit_after>
|
"""
Google Menu tests
"""
from base import BaseTouchscreenTest
import time
from base import MAPS_URL, ZOOMED_IN_MAPS_URL, Pose
from base import screenshot_on_error, make_screenshot
import re
class TestGoogleMenu(BaseTouchscreenTest):
@screenshot_on_error
def test_google_menu_is_visible(self):
self.browser.get(MAPS_URL)
morefun = self.browser.find_element_by_id('morefun')
assert morefun.is_displayed() is True
items = self.browser.find_element_by_id('morefun_items')
assert items.is_displayed() is False
@screenshot_on_error
def test_google_items_are_visible_on_click(self):
self.browser.get(MAPS_URL)
morefun = self.browser.find_element_by_id('morefun')
morefun.click()
assert morefun.is_displayed() is True
items = self.browser.find_element_by_id('morefun_items')
assert items.is_displayed() is True
@screenshot_on_error
def test_clicking_doodle_item(self):
"Clicking on the doodle item should change the url to the doodles page"
self.browser.get(ZOOMED_IN_MAPS_URL)
time.sleep(5)
morefun = self.browser.find_element_by_id('morefun')
morefun.click()
items = self.browser.find_element_by_id('morefun_items')
li_items = items.find_elements_by_tag_name('li')
assert len(li_items) == 2
doodle = li_items[1]
doodle.click()
assert re.match(r'chrome-extension:\/\/[a-z]+\/pages\/doodles.html',
self.browser.current_url)
|
Add tests for google menu"""
Google Menu tests
"""
from base import BaseTouchscreenTest
import time
from base import MAPS_URL, ZOOMED_IN_MAPS_URL, Pose
from base import screenshot_on_error, make_screenshot
import re
class TestGoogleMenu(BaseTouchscreenTest):
@screenshot_on_error
def test_google_menu_is_visible(self):
self.browser.get(MAPS_URL)
morefun = self.browser.find_element_by_id('morefun')
assert morefun.is_displayed() is True
items = self.browser.find_element_by_id('morefun_items')
assert items.is_displayed() is False
@screenshot_on_error
def test_google_items_are_visible_on_click(self):
self.browser.get(MAPS_URL)
morefun = self.browser.find_element_by_id('morefun')
morefun.click()
assert morefun.is_displayed() is True
items = self.browser.find_element_by_id('morefun_items')
assert items.is_displayed() is True
@screenshot_on_error
def test_clicking_doodle_item(self):
"Clicking on the doodle item should change the url to the doodles page"
self.browser.get(ZOOMED_IN_MAPS_URL)
time.sleep(5)
morefun = self.browser.find_element_by_id('morefun')
morefun.click()
items = self.browser.find_element_by_id('morefun_items')
li_items = items.find_elements_by_tag_name('li')
assert len(li_items) == 2
doodle = li_items[1]
doodle.click()
assert re.match(r'chrome-extension:\/\/[a-z]+\/pages\/doodles.html',
self.browser.current_url)
|
<commit_before><commit_msg>Add tests for google menu<commit_after>"""
Google Menu tests
"""
from base import BaseTouchscreenTest
import time
from base import MAPS_URL, ZOOMED_IN_MAPS_URL, Pose
from base import screenshot_on_error, make_screenshot
import re
class TestGoogleMenu(BaseTouchscreenTest):
@screenshot_on_error
def test_google_menu_is_visible(self):
self.browser.get(MAPS_URL)
morefun = self.browser.find_element_by_id('morefun')
assert morefun.is_displayed() is True
items = self.browser.find_element_by_id('morefun_items')
assert items.is_displayed() is False
@screenshot_on_error
def test_google_items_are_visible_on_click(self):
self.browser.get(MAPS_URL)
morefun = self.browser.find_element_by_id('morefun')
morefun.click()
assert morefun.is_displayed() is True
items = self.browser.find_element_by_id('morefun_items')
assert items.is_displayed() is True
@screenshot_on_error
def test_clicking_doodle_item(self):
"Clicking on the doodle item should change the url to the doodles page"
self.browser.get(ZOOMED_IN_MAPS_URL)
time.sleep(5)
morefun = self.browser.find_element_by_id('morefun')
morefun.click()
items = self.browser.find_element_by_id('morefun_items')
li_items = items.find_elements_by_tag_name('li')
assert len(li_items) == 2
doodle = li_items[1]
doodle.click()
assert re.match(r'chrome-extension:\/\/[a-z]+\/pages\/doodles.html',
self.browser.current_url)
|
|
26df96a0c772c70013cc7a027022e84383ccaee2
|
utils/chunk-print-before-all.py
|
utils/chunk-print-before-all.py
|
#!/usr/bin/env python
# Given a -print-before-all -print-module-scope log from an opt invocation,
# chunk it into a series of individual IR files, one for each pass invocation.
# If the log ends with an obvious stack trace, try to split off a separate
# "crashinfo.txt" file leaving only the valid input IR in the last chunk.
# Files are written to current working directory.
import sys
basename = "chunk-"
chunk_id = 0
def print_chunk(lines):
global chunk_id
global basename
fname = basename + str(chunk_id) + ".ll"
chunk_id = chunk_id + 1
print "writing chunk " + fname + " (" + str(len(lines)) + " lines)"
with open(fname, "w") as f:
f.writelines(lines)
is_dump = False
cur = []
for line in sys.stdin:
if line.startswith("*** IR Dump Before ") and len(cur) != 0:
print_chunk(cur);
cur = []
cur.append("; " + line)
elif line.startswith("Stack dump:"):
print_chunk(cur);
cur = []
cur.append(line)
is_dump = True
else:
cur.append(line)
if is_dump:
print "writing crashinfo.txt (" + str(len(cur)) + " lines)"
with open("crashinfo.txt", "w") as f:
f.writelines(cur)
else:
print_chunk(cur);
|
Add a helper script for converting -print-before-all output into a file-based equivalent
|
[Util] Add a helper script for converting -print-before-all output into a file-based equivalent
Simple little utility which takes an opt logfile generated with "opt -print-before-all -print-module-scope -o /dev/null <args> 2>&1", and splits it into a series of individual "chunk-X.ll" files. The intended purpose is to help automate one step in failure reduction.
The imagined workflow is:
New crasher bug reported against clang or other frontend
Frontend run with -emit-llvm equivalent and manually confirmed that opt -O2 <emit.ll> crashes
Run this splitter script
Manually map pass name to invocation command (next on the to automate list)
Run bugpoint on last chunk file + manual command
I chose to dump every chunk rather than only the last since miscompile debugging frequently requires either manual step by step reduction, or cross feeding IR into different compiler versions. Not an immediate target, but there may be applications.
Differential Revision: https://reviews.llvm.org/D63461
git-svn-id: 0ff597fd157e6f4fc38580e8d64ab130330d2411@363884 91177308-0d34-0410-b5e6-96231b3b80d8
|
Python
|
apache-2.0
|
llvm-mirror/llvm,GPUOpen-Drivers/llvm,llvm-mirror/llvm,llvm-mirror/llvm,GPUOpen-Drivers/llvm,llvm-mirror/llvm,GPUOpen-Drivers/llvm,llvm-mirror/llvm,GPUOpen-Drivers/llvm,llvm-mirror/llvm,llvm-mirror/llvm,llvm-mirror/llvm,GPUOpen-Drivers/llvm,GPUOpen-Drivers/llvm,GPUOpen-Drivers/llvm,GPUOpen-Drivers/llvm,llvm-mirror/llvm
|
[Util] Add a helper script for converting -print-before-all output into a file-based equivalent
Simple little utility which takes an opt logfile generated with "opt -print-before-all -print-module-scope -o /dev/null <args> 2>&1", and splits it into a series of individual "chunk-X.ll" files. The intended purpose is to help automate one step in failure reduction.
The imagined workflow is:
New crasher bug reported against clang or other frontend
Frontend run with -emit-llvm equivalent and manually confirmed that opt -O2 <emit.ll> crashes
Run this splitter script
Manually map pass name to invocation command (next on the to automate list)
Run bugpoint on last chunk file + manual command
I chose to dump every chunk rather than only the last since miscompile debugging frequently requires either manual step by step reduction, or cross feeding IR into different compiler versions. Not an immediate target, but there may be applications.
Differential Revision: https://reviews.llvm.org/D63461
git-svn-id: 0ff597fd157e6f4fc38580e8d64ab130330d2411@363884 91177308-0d34-0410-b5e6-96231b3b80d8
|
#!/usr/bin/env python
# Given a -print-before-all -print-module-scope log from an opt invocation,
# chunk it into a series of individual IR files, one for each pass invocation.
# If the log ends with an obvious stack trace, try to split off a separate
# "crashinfo.txt" file leaving only the valid input IR in the last chunk.
# Files are written to current working directory.
import sys
basename = "chunk-"
chunk_id = 0
def print_chunk(lines):
global chunk_id
global basename
fname = basename + str(chunk_id) + ".ll"
chunk_id = chunk_id + 1
print "writing chunk " + fname + " (" + str(len(lines)) + " lines)"
with open(fname, "w") as f:
f.writelines(lines)
is_dump = False
cur = []
for line in sys.stdin:
if line.startswith("*** IR Dump Before ") and len(cur) != 0:
print_chunk(cur);
cur = []
cur.append("; " + line)
elif line.startswith("Stack dump:"):
print_chunk(cur);
cur = []
cur.append(line)
is_dump = True
else:
cur.append(line)
if is_dump:
print "writing crashinfo.txt (" + str(len(cur)) + " lines)"
with open("crashinfo.txt", "w") as f:
f.writelines(cur)
else:
print_chunk(cur);
|
<commit_before><commit_msg>[Util] Add a helper script for converting -print-before-all output into a file-based equivalent
Simple little utility which takes an opt logfile generated with "opt -print-before-all -print-module-scope -o /dev/null <args> 2>&1", and splits it into a series of individual "chunk-X.ll" files. The intended purpose is to help automate one step in failure reduction.
The imagined workflow is:
New crasher bug reported against clang or other frontend
Frontend run with -emit-llvm equivalent and manually confirmed that opt -O2 <emit.ll> crashes
Run this splitter script
Manually map pass name to invocation command (next on the to automate list)
Run bugpoint on last chunk file + manual command
I chose to dump every chunk rather than only the last since miscompile debugging frequently requires either manual step by step reduction, or cross feeding IR into different compiler versions. Not an immediate target, but there may be applications.
Differential Revision: https://reviews.llvm.org/D63461
git-svn-id: 0ff597fd157e6f4fc38580e8d64ab130330d2411@363884 91177308-0d34-0410-b5e6-96231b3b80d8<commit_after>
|
#!/usr/bin/env python
# Given a -print-before-all -print-module-scope log from an opt invocation,
# chunk it into a series of individual IR files, one for each pass invocation.
# If the log ends with an obvious stack trace, try to split off a separate
# "crashinfo.txt" file leaving only the valid input IR in the last chunk.
# Files are written to current working directory.
import sys
basename = "chunk-"
chunk_id = 0
def print_chunk(lines):
global chunk_id
global basename
fname = basename + str(chunk_id) + ".ll"
chunk_id = chunk_id + 1
print "writing chunk " + fname + " (" + str(len(lines)) + " lines)"
with open(fname, "w") as f:
f.writelines(lines)
is_dump = False
cur = []
for line in sys.stdin:
if line.startswith("*** IR Dump Before ") and len(cur) != 0:
print_chunk(cur);
cur = []
cur.append("; " + line)
elif line.startswith("Stack dump:"):
print_chunk(cur);
cur = []
cur.append(line)
is_dump = True
else:
cur.append(line)
if is_dump:
print "writing crashinfo.txt (" + str(len(cur)) + " lines)"
with open("crashinfo.txt", "w") as f:
f.writelines(cur)
else:
print_chunk(cur);
|
[Util] Add a helper script for converting -print-before-all output into a file-based equivalent
Simple little utility which takes an opt logfile generated with "opt -print-before-all -print-module-scope -o /dev/null <args> 2>&1", and splits it into a series of individual "chunk-X.ll" files. The intended purpose is to help automate one step in failure reduction.
The imagined workflow is:
New crasher bug reported against clang or other frontend
Frontend run with -emit-llvm equivalent and manually confirmed that opt -O2 <emit.ll> crashes
Run this splitter script
Manually map pass name to invocation command (next on the to automate list)
Run bugpoint on last chunk file + manual command
I chose to dump every chunk rather than only the last since miscompile debugging frequently requires either manual step by step reduction, or cross feeding IR into different compiler versions. Not an immediate target, but there may be applications.
Differential Revision: https://reviews.llvm.org/D63461
git-svn-id: 0ff597fd157e6f4fc38580e8d64ab130330d2411@363884 91177308-0d34-0410-b5e6-96231b3b80d8#!/usr/bin/env python
# Given a -print-before-all -print-module-scope log from an opt invocation,
# chunk it into a series of individual IR files, one for each pass invocation.
# If the log ends with an obvious stack trace, try to split off a separate
# "crashinfo.txt" file leaving only the valid input IR in the last chunk.
# Files are written to current working directory.
import sys
basename = "chunk-"
chunk_id = 0
def print_chunk(lines):
global chunk_id
global basename
fname = basename + str(chunk_id) + ".ll"
chunk_id = chunk_id + 1
print "writing chunk " + fname + " (" + str(len(lines)) + " lines)"
with open(fname, "w") as f:
f.writelines(lines)
is_dump = False
cur = []
for line in sys.stdin:
if line.startswith("*** IR Dump Before ") and len(cur) != 0:
print_chunk(cur);
cur = []
cur.append("; " + line)
elif line.startswith("Stack dump:"):
print_chunk(cur);
cur = []
cur.append(line)
is_dump = True
else:
cur.append(line)
if is_dump:
print "writing crashinfo.txt (" + str(len(cur)) + " lines)"
with open("crashinfo.txt", "w") as f:
f.writelines(cur)
else:
print_chunk(cur);
|
<commit_before><commit_msg>[Util] Add a helper script for converting -print-before-all output into a file-based equivalent
Simple little utility which takes an opt logfile generated with "opt -print-before-all -print-module-scope -o /dev/null <args> 2>&1", and splits it into a series of individual "chunk-X.ll" files. The intended purpose is to help automate one step in failure reduction.
The imagined workflow is:
New crasher bug reported against clang or other frontend
Frontend run with -emit-llvm equivalent and manually confirmed that opt -O2 <emit.ll> crashes
Run this splitter script
Manually map pass name to invocation command (next on the to automate list)
Run bugpoint on last chunk file + manual command
I chose to dump every chunk rather than only the last since miscompile debugging frequently requires either manual step by step reduction, or cross feeding IR into different compiler versions. Not an immediate target, but there may be applications.
Differential Revision: https://reviews.llvm.org/D63461
git-svn-id: 0ff597fd157e6f4fc38580e8d64ab130330d2411@363884 91177308-0d34-0410-b5e6-96231b3b80d8<commit_after>#!/usr/bin/env python
# Given a -print-before-all -print-module-scope log from an opt invocation,
# chunk it into a series of individual IR files, one for each pass invocation.
# If the log ends with an obvious stack trace, try to split off a separate
# "crashinfo.txt" file leaving only the valid input IR in the last chunk.
# Files are written to current working directory.
import sys
basename = "chunk-"
chunk_id = 0
def print_chunk(lines):
global chunk_id
global basename
fname = basename + str(chunk_id) + ".ll"
chunk_id = chunk_id + 1
print "writing chunk " + fname + " (" + str(len(lines)) + " lines)"
with open(fname, "w") as f:
f.writelines(lines)
is_dump = False
cur = []
for line in sys.stdin:
if line.startswith("*** IR Dump Before ") and len(cur) != 0:
print_chunk(cur);
cur = []
cur.append("; " + line)
elif line.startswith("Stack dump:"):
print_chunk(cur);
cur = []
cur.append(line)
is_dump = True
else:
cur.append(line)
if is_dump:
print "writing crashinfo.txt (" + str(len(cur)) + " lines)"
with open("crashinfo.txt", "w") as f:
f.writelines(cur)
else:
print_chunk(cur);
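A self-contained way to exercise the splitter above on a tiny synthetic log. The IR and pass names are fabricated, the script is assumed to be saved as chunk-print-before-all.py in the working directory, and a Python 2 interpreter is assumed to be available as python2 (the script uses print statements).

import subprocess

fake_log = (
    '*** IR Dump Before Some Pass ***\n'
    'define void @f() { ret void }\n'
    '*** IR Dump Before Another Pass ***\n'
    'define void @f() { ret void }\n'
    'Stack dump:\n'
    '0.  Program arguments: opt ...\n'
)
# Feed the synthetic log to the splitter on stdin; it should emit chunk-0.ll,
# chunk-1.ll and crashinfo.txt in the current directory.
subprocess.run(['python2', 'chunk-print-before-all.py'], input=fake_log.encode())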
|
|
45e86e49e845ef25df6e1db3bcb336809ffb5f5f
|
ipv6_Checker.py
|
ipv6_Checker.py
|
#!/usr/bin/python
#Copyright 2014 Quam Sodji
import subprocess
def getinfo(hardware): #Return network info on select interface
info = subprocess.check_output(["networksetup", "-getinfo", hardware])
return info
wireless = ["Airport", "Wi-Fi"] #The two types of interfaces that refer to wireless
list_network = subprocess.check_output(["networksetup", "-listallnetworkservices"])
list_network = list_network.split('\n')
for device in wireless:
if device in list_network:
response = getinfo(device)
response_check = response.split("\n")
if "IPv6: Off" not in response_check:
check = subprocess.check_output(["networksetup", "-setv6off", device])
Status = "Off"
else:
for setting in response_check:
if setting.startswith("IPv6:"):
if setting != "IPv6: Off":
Status = setting
else:
Status = "Off"
else:
Status = "No wireless interfaces configured"
continue
print "<result>%s</result>"%Status
|
Disable IPv6 on wireless (Extension Attribute for Casper)
|
Disable IPv6 on wireless (Extension Attribute for Casper)
|
Python
|
mit
|
killahquam/JAMF,killahquam/JAMF
|
Disable IPv6 on wireless (Extension Attribute for Casper)
|
#!/usr/bin/python
#Copyright 2014 Quam Sodji
import subprocess
def getinfo(hardware): #Return network info on select interface
info = subprocess.check_output(["networksetup", "-getinfo", hardware])
return info
wireless = ["Airport", "Wi-Fi"] #The two types of interfaces that refer to wireless
list_network = subprocess.check_output(["networksetup", "-listallnetworkservices"])
list_network = list_network.split('\n')
for device in wireless:
if device in list_network:
response = getinfo(device)
response_check = response.split("\n")
if "IPv6: Off" not in response_check:
check = subprocess.check_output(["networksetup", "-setv6off", device])
Status = "Off"
else:
for setting in response_check:
if setting.startswith("IPv6:"):
if setting != "IPv6: Off":
Status = setting
else:
Status = "Off"
else:
Status = "No wireless interfaces configured"
continue
print "<result>%s</result>"%Status
|
<commit_before><commit_msg>Disable IPv6 on wireless (Extension Attribute for Casper)<commit_after>
|
#!/usr/bin/python
#Copyright 2014 Quam Sodji
import subprocess
def getinfo(hardware): #Return network info on select interface
info = subprocess.check_output(["networksetup", "-getinfo", hardware])
return info
wireless = ["Airport", "Wi-Fi"] #The two types of interfaces that refer to wireless
list_network = subprocess.check_output(["networksetup", "-listallnetworkservices"])
list_network = list_network.split('\n')
for device in wireless:
if device in list_network:
response = getinfo(device)
response_check = response.split("\n")
if "IPv6: Off" not in response_check:
check = subprocess.check_output(["networksetup", "-setv6off", device])
Status = "Off"
else:
for setting in response_check:
if setting.startswith("IPv6:"):
if setting != "IPv6: Off":
Status = setting
else:
Status = "Off"
else:
Status = "No wireless interfaces configured"
continue
print "<result>%s</result>"%Status
|
Disable IPv6 on wireless (Extension Attribute for Casper)#!/usr/bin/python
#Copyright 2014 Quam Sodji
import subprocess
def getinfo(hardware): #Return network info on select interface
info = subprocess.check_output(["networksetup", "-getinfo", hardware])
return info
wireless = ["Airport", "Wi-Fi"] #The two types of interfaces that refer to wireless
list_network = subprocess.check_output(["networksetup", "-listallnetworkservices"])
list_network = list_network.split('\n')
for device in wireless:
if device in list_network:
response = getinfo(device)
response_check = response.split("\n")
if "IPv6: Off" not in response_check:
check = subprocess.check_output(["networksetup", "-setv6off", device])
Status = "Off"
else:
for setting in response_check:
if setting.startswith("IPv6:"):
if setting != "IPv6: Off":
Status = setting
else:
Status = "Off"
else:
Status = "No wireless interfaces configured"
continue
print "<result>%s</result>"%Status
|
<commit_before><commit_msg>Disable IPv6 on wireless (Extension Attribute for Casper)<commit_after>#!/usr/bin/python
#Copyright 2014 Quam Sodji
import subprocess
def getinfo(hardware): #Return network info on select interface
info = subprocess.check_output(["networksetup", "-getinfo", hardware])
return info
wireless = ["Airport", "Wi-Fi"] #The two types of interfaces that refer to wireless
list_network = subprocess.check_output(["networksetup", "-listallnetworkservices"])
list_network = list_network.split('\n')
for device in wireless:
if device in list_network:
response = getinfo(device)
response_check = response.split("\n")
if "IPv6: Off" not in response_check:
check = subprocess.check_output(["networksetup", "-setv6off", device])
Status = "Off"
else:
for setting in response_check:
if setting.startswith("IPv6:"):
if setting != "IPv6: Off":
Status = setting
else:
Status = "Off"
else:
Status = "No wireless interfaces configured"
continue
print "<result>%s</result>"%Status
|
|
0aaed7764d743afe46af503fe5938fa718fe3abc
|
teamworkApp/lib/dbCalls.py
|
teamworkApp/lib/dbCalls.py
|
# muddersOnRails()
# Sara McAllister November 5, 2017
# Last updated: 11-5-2017
# library for SQLite database calls for teamwork analysis app
import contextlib
import sqlite3
DB = 'db/development.sqlite3'
def connect(sqlite_file):
""" Make connection to an SQLite database file """
conn = sqlite3.connect(sqlite_file)
c = conn.cursor()
return conn, c
def close(conn):
""" Commit changes and close connection to the database """
conn.commit()
conn.close()
@contextlib.contextmanager
def dbconnect(sqlite_file=DB):
conn, cursor = connect(sqlite_file)
try:
yield cursor
finally:
close(conn)
def getAllStyles():
"""Get all style entries in db ordered based on entry in db"""
with dbconnect() as cursor:
scores = cursor.execute('SELECT * FROM styles').fetchall()
return scores
|
Set up contextmanager for db cals
|
Set up contextmanager for db cals
|
Python
|
mit
|
nathanljustin/teamwork-analysis,nathanljustin/teamwork-analysis,nathanljustin/teamwork-analysis,nathanljustin/teamwork-analysis
|
Set up contextmanager for db cals
|
# muddersOnRails()
# Sara McAllister November 5, 2017
# Last updated: 11-5-2017
# library for SQLite database calls for teamwork analysis app
import contextlib
import sqlite3
DB = 'db/development.sqlite3'
def connect(sqlite_file):
""" Make connection to an SQLite database file """
conn = sqlite3.connect(sqlite_file)
c = conn.cursor()
return conn, c
def close(conn):
""" Commit changes and close connection to the database """
conn.commit()
conn.close()
@contextlib.contextmanager
def dbconnect(sqlite_file=DB):
conn, cursor = connect(sqlite_file)
try:
yield cursor
finally:
close(conn)
def getAllStyles():
"""Get all style entries in db ordered based on entry in db"""
with dbconnect() as cursor:
scores = cursor.execute('SELECT * FROM styles').fetchall()
return scores
|
<commit_before><commit_msg>Set up contextmanager for db cals<commit_after>
|
# muddersOnRails()
# Sara McAllister November 5, 2017
# Last updated: 11-5-2017
# library for SQLite database calls for teamwork analysis app
import contextlib
import sqlite3
DB = 'db/development.sqlite3'
def connect(sqlite_file):
""" Make connection to an SQLite database file """
conn = sqlite3.connect(sqlite_file)
c = conn.cursor()
return conn, c
def close(conn):
""" Commit changes and close connection to the database """
conn.commit()
conn.close()
@contextlib.contextmanager
def dbconnect(sqlite_file=DB):
conn, cursor = connect(sqlite_file)
try:
yield cursor
finally:
close(conn)
def getAllStyles():
"""Get all style entries in db ordered based on entry in db"""
with dbconnect() as cursor:
scores = cursor.execute('SELECT * FROM styles').fetchall()
return scores
|
Set up contextmanager for db cals# muddersOnRails()
# Sara McAllister November 5, 2017
# Last updated: 11-5-2017
# library for SQLite database calls for teamwork analysis app
import contextlib
import sqlite3
DB = 'db/development.sqlite3'
def connect(sqlite_file):
""" Make connection to an SQLite database file """
conn = sqlite3.connect(sqlite_file)
c = conn.cursor()
return conn, c
def close(conn):
""" Commit changes and close connection to the database """
conn.commit()
conn.close()
@contextlib.contextmanager
def dbconnect(sqlite_file=DB):
conn, cursor = connect(sqlite_file)
try:
yield cursor
finally:
close(conn)
def getAllStyles():
"""Get all style entries in db ordered based on entry in db"""
with dbconnect() as cursor:
scores = cursor.execute('SELECT * FROM styles').fetchall()
return scores
|
<commit_before><commit_msg>Set up contextmanager for db cals<commit_after># muddersOnRails()
# Sara McAllister November 5, 2017
# Last updated: 11-5-2017
# library for SQLite database calls for teamwork analysis app
import contextlib
import sqlite3
DB = 'db/development.sqlite3'
def connect(sqlite_file):
""" Make connection to an SQLite database file """
conn = sqlite3.connect(sqlite_file)
c = conn.cursor()
return conn, c
def close(conn):
""" Commit changes and close connection to the database """
conn.commit()
conn.close()
@contextlib.contextmanager
def dbconnect(sqlite_file=DB):
conn, cursor = connect(sqlite_file)
try:
yield cursor
finally:
close(conn)
def getAllStyles():
"""Get all style entries in db ordered based on entry in db"""
with dbconnect() as cursor:
scores = cursor.execute('SELECT * FROM styles').fetchall()
return scores
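A possible usage sketch for the dbconnect() helper above; the INSERT statement and the styles(name) column are assumptions about the schema, and the import path is only inferred from the file location. Because close() commits before closing, the write is persisted without an explicit conn.commit().

from lib.dbCalls import dbconnect  # assumed import path for teamworkApp/lib/dbCalls.py

def add_style(name):
    # The context manager hands back a cursor and commits/closes on exit.
    with dbconnect() as cursor:
        cursor.execute('INSERT INTO styles (name) VALUES (?)', (name,))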
|
|
dac5f9e406f3c205d6ed212d4414ca55c94b8f15
|
tests/local/test_search.py
|
tests/local/test_search.py
|
from __future__ import unicode_literals
import unittest
from mopidy.local import search
from mopidy.models import Album, Track
class LocalLibrarySearchTest(unittest.TestCase):
def test_find_exact_with_album_query(self):
expected_tracks = [Track(album=Album(name='foo'))]
tracks = [Track(), Track(album=Album(name='bar'))] + expected_tracks
search_result = search.find_exact(tracks, {'album': ['foo']})
self.assertEqual(search_result.tracks, tuple(expected_tracks))
|
Add test for exact search with album query
|
Add test for exact search with album query
|
Python
|
apache-2.0
|
pacificIT/mopidy,hkariti/mopidy,bencevans/mopidy,jmarsik/mopidy,swak/mopidy,swak/mopidy,diandiankan/mopidy,vrs01/mopidy,dbrgn/mopidy,jodal/mopidy,tkem/mopidy,quartz55/mopidy,hkariti/mopidy,ali/mopidy,swak/mopidy,mokieyue/mopidy,mokieyue/mopidy,swak/mopidy,dbrgn/mopidy,SuperStarPL/mopidy,kingosticks/mopidy,tkem/mopidy,glogiotatidis/mopidy,SuperStarPL/mopidy,bacontext/mopidy,dbrgn/mopidy,kingosticks/mopidy,hkariti/mopidy,pacificIT/mopidy,rawdlite/mopidy,jcass77/mopidy,jodal/mopidy,kingosticks/mopidy,quartz55/mopidy,pacificIT/mopidy,jcass77/mopidy,diandiankan/mopidy,SuperStarPL/mopidy,ZenithDK/mopidy,mokieyue/mopidy,bencevans/mopidy,mopidy/mopidy,glogiotatidis/mopidy,ZenithDK/mopidy,adamcik/mopidy,vrs01/mopidy,jodal/mopidy,ali/mopidy,vrs01/mopidy,ali/mopidy,mopidy/mopidy,quartz55/mopidy,rawdlite/mopidy,bacontext/mopidy,bacontext/mopidy,diandiankan/mopidy,jmarsik/mopidy,dbrgn/mopidy,tkem/mopidy,jmarsik/mopidy,pacificIT/mopidy,mokieyue/mopidy,SuperStarPL/mopidy,bacontext/mopidy,diandiankan/mopidy,adamcik/mopidy,quartz55/mopidy,tkem/mopidy,vrs01/mopidy,ZenithDK/mopidy,glogiotatidis/mopidy,rawdlite/mopidy,ali/mopidy,hkariti/mopidy,jmarsik/mopidy,bencevans/mopidy,ZenithDK/mopidy,jcass77/mopidy,mopidy/mopidy,rawdlite/mopidy,glogiotatidis/mopidy,bencevans/mopidy,adamcik/mopidy
|
Add test for exact search with album query
|
from __future__ import unicode_literals
import unittest
from mopidy.local import search
from mopidy.models import Album, Track
class LocalLibrarySearchTest(unittest.TestCase):
def test_find_exact_with_album_query(self):
expected_tracks = [Track(album=Album(name='foo'))]
tracks = [Track(), Track(album=Album(name='bar'))] + expected_tracks
search_result = search.find_exact(tracks, {'album': ['foo']})
self.assertEqual(search_result.tracks, tuple(expected_tracks))
|
<commit_before><commit_msg>Add test for exact search with album query<commit_after>
|
from __future__ import unicode_literals
import unittest
from mopidy.local import search
from mopidy.models import Album, Track
class LocalLibrarySearchTest(unittest.TestCase):
def test_find_exact_with_album_query(self):
expected_tracks = [Track(album=Album(name='foo'))]
tracks = [Track(), Track(album=Album(name='bar'))] + expected_tracks
search_result = search.find_exact(tracks, {'album': ['foo']})
self.assertEqual(search_result.tracks, tuple(expected_tracks))
|
Add test for exact search with album queryfrom __future__ import unicode_literals
import unittest
from mopidy.local import search
from mopidy.models import Album, Track
class LocalLibrarySearchTest(unittest.TestCase):
def test_find_exact_with_album_query(self):
expected_tracks = [Track(album=Album(name='foo'))]
tracks = [Track(), Track(album=Album(name='bar'))] + expected_tracks
search_result = search.find_exact(tracks, {'album': ['foo']})
self.assertEqual(search_result.tracks, tuple(expected_tracks))
|
<commit_before><commit_msg>Add test for exact search with album query<commit_after>from __future__ import unicode_literals
import unittest
from mopidy.local import search
from mopidy.models import Album, Track
class LocalLibrarySearchTest(unittest.TestCase):
def test_find_exact_with_album_query(self):
expected_tracks = [Track(album=Album(name='foo'))]
tracks = [Track(), Track(album=Album(name='bar'))] + expected_tracks
search_result = search.find_exact(tracks, {'album': ['foo']})
self.assertEqual(search_result.tracks, tuple(expected_tracks))
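Conceptually, the exact album match being tested reduces to a plain filter over the track list; a rough standalone equivalent (a sketch, not mopidy code) is:

def find_tracks_with_album(tracks, album_name):
    # Keep only tracks whose album is set and whose album name matches exactly.
    return tuple(t for t in tracks if t.album is not None and t.album.name == album_name)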
|
|
0ca298f6706706637dccd4f27c56eed6e91c98ba
|
tests/runners.py
|
tests/runners.py
|
from spec import Spec
class Runner_(Spec):
class run:
def out_stream_defaults_to_sys_stdout(self):
"out_stream defaults to sys.stdout"
def err_stream_defaults_to_sys_stderr(self):
"err_stream defaults to sys.stderr"
def out_stream_can_be_overridden(self):
"out_stream can be overridden"
def err_stream_can_be_overridden(self):
"err_stream can be overridden"
|
import sys
from spec import Spec, trap, eq_
from invoke import Local, Context
from _utils import mock_subprocess
class Local_(Spec):
class run:
@trap
@mock_subprocess(out="sup")
def out_stream_defaults_to_sys_stdout(self):
"out_stream defaults to sys.stdout"
Local(Context()).run("command")
eq_(sys.stdout.getvalue(), "sup")
@trap
@mock_subprocess(err="sup")
def err_stream_defaults_to_sys_stderr(self):
"err_stream defaults to sys.stderr"
Local(Context()).run("command")
eq_(sys.stderr.getvalue(), "sup")
def out_stream_can_be_overridden(self):
"out_stream can be overridden"
def err_stream_can_be_overridden(self):
"err_stream can be overridden"
|
Rename new test class correctly and flesh out first passing tests
|
Rename new test class correctly and flesh out first passing tests
|
Python
|
bsd-2-clause
|
mattrobenolt/invoke,pyinvoke/invoke,mattrobenolt/invoke,pyinvoke/invoke,kejbaly2/invoke,frol/invoke,tyewang/invoke,pfmoore/invoke,mkusz/invoke,mkusz/invoke,pfmoore/invoke,singingwolfboy/invoke,kejbaly2/invoke,frol/invoke
|
from spec import Spec
class Runner_(Spec):
class run:
def out_stream_defaults_to_sys_stdout(self):
"out_stream defaults to sys.stdout"
def err_stream_defaults_to_sys_stderr(self):
"err_stream defaults to sys.stderr"
def out_stream_can_be_overridden(self):
"out_stream can be overridden"
def err_stream_can_be_overridden(self):
"err_stream can be overridden"
Rename new test class correctly and flesh out first passing tests
|
import sys
from spec import Spec, trap, eq_
from invoke import Local, Context
from _utils import mock_subprocess
class Local_(Spec):
class run:
@trap
@mock_subprocess(out="sup")
def out_stream_defaults_to_sys_stdout(self):
"out_stream defaults to sys.stdout"
Local(Context()).run("command")
eq_(sys.stdout.getvalue(), "sup")
@trap
@mock_subprocess(err="sup")
def err_stream_defaults_to_sys_stderr(self):
"err_stream defaults to sys.stderr"
Local(Context()).run("command")
eq_(sys.stderr.getvalue(), "sup")
def out_stream_can_be_overridden(self):
"out_stream can be overridden"
def err_stream_can_be_overridden(self):
"err_stream can be overridden"
|
<commit_before>from spec import Spec
class Runner_(Spec):
class run:
def out_stream_defaults_to_sys_stdout(self):
"out_stream defaults to sys.stdout"
def err_stream_defaults_to_sys_stderr(self):
"err_stream defaults to sys.stderr"
def out_stream_can_be_overridden(self):
"out_stream can be overridden"
def err_stream_can_be_overridden(self):
"err_stream can be overridden"
<commit_msg>Rename new test class correctly and flesh out first passing tests<commit_after>
|
import sys
from spec import Spec, trap, eq_
from invoke import Local, Context
from _utils import mock_subprocess
class Local_(Spec):
class run:
@trap
@mock_subprocess(out="sup")
def out_stream_defaults_to_sys_stdout(self):
"out_stream defaults to sys.stdout"
Local(Context()).run("command")
eq_(sys.stdout.getvalue(), "sup")
@trap
@mock_subprocess(err="sup")
def err_stream_defaults_to_sys_stderr(self):
"err_stream defaults to sys.stderr"
Local(Context()).run("command")
eq_(sys.stderr.getvalue(), "sup")
def out_stream_can_be_overridden(self):
"out_stream can be overridden"
def err_stream_can_be_overridden(self):
"err_stream can be overridden"
|
from spec import Spec
class Runner_(Spec):
class run:
def out_stream_defaults_to_sys_stdout(self):
"out_stream defaults to sys.stdout"
def err_stream_defaults_to_sys_stderr(self):
"err_stream defaults to sys.stderr"
def out_stream_can_be_overridden(self):
"out_stream can be overridden"
def err_stream_can_be_overridden(self):
"err_stream can be overridden"
Rename new test class correctly and flesh out first passing testsimport sys
from spec import Spec, trap, eq_
from invoke import Local, Context
from _utils import mock_subprocess
class Local_(Spec):
class run:
@trap
@mock_subprocess(out="sup")
def out_stream_defaults_to_sys_stdout(self):
"out_stream defaults to sys.stdout"
Local(Context()).run("command")
eq_(sys.stdout.getvalue(), "sup")
@trap
@mock_subprocess(err="sup")
def err_stream_defaults_to_sys_stderr(self):
"err_stream defaults to sys.stderr"
Local(Context()).run("command")
eq_(sys.stderr.getvalue(), "sup")
def out_stream_can_be_overridden(self):
"out_stream can be overridden"
def err_stream_can_be_overridden(self):
"err_stream can be overridden"
|
<commit_before>from spec import Spec
class Runner_(Spec):
class run:
def out_stream_defaults_to_sys_stdout(self):
"out_stream defaults to sys.stdout"
def err_stream_defaults_to_sys_stderr(self):
"err_stream defaults to sys.stderr"
def out_stream_can_be_overridden(self):
"out_stream can be overridden"
def err_stream_can_be_overridden(self):
"err_stream can be overridden"
<commit_msg>Rename new test class correctly and flesh out first passing tests<commit_after>import sys
from spec import Spec, trap, eq_
from invoke import Local, Context
from _utils import mock_subprocess
class Local_(Spec):
class run:
@trap
@mock_subprocess(out="sup")
def out_stream_defaults_to_sys_stdout(self):
"out_stream defaults to sys.stdout"
Local(Context()).run("command")
eq_(sys.stdout.getvalue(), "sup")
@trap
@mock_subprocess(err="sup")
def err_stream_defaults_to_sys_stderr(self):
"err_stream defaults to sys.stderr"
Local(Context()).run("command")
eq_(sys.stderr.getvalue(), "sup")
def out_stream_can_be_overridden(self):
"out_stream can be overridden"
def err_stream_can_be_overridden(self):
"err_stream can be overridden"
|
4ebfc2e6ffb21fd55ef1fc4f1fd836153b2da545
|
tests/unit/test_exceptions.py
|
tests/unit/test_exceptions.py
|
# coding: utf-8
import pytest
import responses
import kiteconnect.exceptions as ex
@responses.activate
def test_wrong_json_response(kiteconnect):
responses.add(
responses.GET,
"%s%s" % (kiteconnect.root, kiteconnect._routes["portfolio.positions"]),
body="{a:b}",
content_type="application/json"
)
with pytest.raises(ex.DataException) as exc:
positions = kiteconnect.positions()
assert exc.message == "Couldn't parse the JSON response "\
"received from the server: {a:b}"
@responses.activate
def test_wrong_content_type(kiteconnect):
rdf_data = "<rdf:Description rdf:about=''><rdfs:label>zerodha</rdfs:label></rdf:Description"
responses.add(
responses.GET,
"%s%s" % (kiteconnect.root, kiteconnect._routes["portfolio.positions"]),
body=rdf_data,
content_type="application/rdf+xml"
)
with pytest.raises(ex.DataException) as exc:
positions = kiteconnect.positions()
assert exc.message == "Unknown Content-Type ({content_type}) with response: ({content})".format(
content_type='application/rdf+xml',
content=rdf_data
)
@pytest.mark.parametrize("error_type,message", [
('PermissionException', 'oops! permission issue'),
('OrderException', 'oops! cannot place order'),
('InputException', 'missing or invalid params'),
('NetworkException', 'oopsy doopsy network issues damn!'),
('CustomException', 'this is an exception i just created')
])
@responses.activate
def test_native_exceptions(error_type, message, kiteconnect):
responses.add(
responses.GET,
"%s%s" % (kiteconnect.root, kiteconnect._routes["portfolio.positions"]),
body='{"error_type": "%s", "message": "%s"}' % (error_type, message),
content_type="application/json"
)
with pytest.raises(getattr(ex, error_type, ex.GeneralException)) as exc:
positions = kiteconnect.positions()
assert exc.message == message
|
Add tests for all exceptions
|
Add tests for all exceptions
|
Python
|
mit
|
rainmattertech/pykiteconnect
|
Add tests for all exceptions
|
# coding: utf-8
import pytest
import responses
import kiteconnect.exceptions as ex
@responses.activate
def test_wrong_json_response(kiteconnect):
responses.add(
responses.GET,
"%s%s" % (kiteconnect.root, kiteconnect._routes["portfolio.positions"]),
body="{a:b}",
content_type="application/json"
)
with pytest.raises(ex.DataException) as exc:
positions = kiteconnect.positions()
assert exc.message == "Couldn't parse the JSON response "\
"received from the server: {a:b}"
@responses.activate
def test_wrong_content_type(kiteconnect):
rdf_data = "<rdf:Description rdf:about=''><rdfs:label>zerodha</rdfs:label></rdf:Description"
responses.add(
responses.GET,
"%s%s" % (kiteconnect.root, kiteconnect._routes["portfolio.positions"]),
body=rdf_data,
content_type="application/rdf+xml"
)
with pytest.raises(ex.DataException) as exc:
positions = kiteconnect.positions()
assert exc.message == "Unknown Content-Type ({content_type}) with response: ({content})".format(
content_type='application/rdf+xml',
content=rdf_data
)
@pytest.mark.parametrize("error_type,message", [
('PermissionException', 'oops! permission issue'),
('OrderException', 'oops! cannot place order'),
('InputException', 'missing or invalid params'),
('NetworkException', 'oopsy doopsy network issues damn!'),
('CustomException', 'this is an exception i just created')
])
@responses.activate
def test_native_exceptions(error_type, message, kiteconnect):
responses.add(
responses.GET,
"%s%s" % (kiteconnect.root, kiteconnect._routes["portfolio.positions"]),
body='{"error_type": "%s", "message": "%s"}' % (error_type, message),
content_type="application/json"
)
with pytest.raises(getattr(ex, error_type, ex.GeneralException)) as exc:
positions = kiteconnect.positions()
assert exc.message == message
|
<commit_before><commit_msg>Add tests for all exceptions<commit_after>
|
# coding: utf-8
import pytest
import responses
import kiteconnect.exceptions as ex
@responses.activate
def test_wrong_json_response(kiteconnect):
responses.add(
responses.GET,
"%s%s" % (kiteconnect.root, kiteconnect._routes["portfolio.positions"]),
body="{a:b}",
content_type="application/json"
)
with pytest.raises(ex.DataException) as exc:
positions = kiteconnect.positions()
assert exc.message == "Couldn't parse the JSON response "\
"received from the server: {a:b}"
@responses.activate
def test_wrong_content_type(kiteconnect):
rdf_data = "<rdf:Description rdf:about=''><rdfs:label>zerodha</rdfs:label></rdf:Description"
responses.add(
responses.GET,
"%s%s" % (kiteconnect.root, kiteconnect._routes["portfolio.positions"]),
body=rdf_data,
content_type="application/rdf+xml"
)
with pytest.raises(ex.DataException) as exc:
positions = kiteconnect.positions()
assert exc.message == "Unknown Content-Type ({content_type}) with response: ({content})".format(
content_type='application/rdf+xml',
content=rdf_data
)
@pytest.mark.parametrize("error_type,message", [
('PermissionException', 'oops! permission issue'),
('OrderException', 'oops! cannot place order'),
('InputException', 'missing or invalid params'),
('NetworkException', 'oopsy doopsy network issues damn!'),
('CustomException', 'this is an exception i just created')
])
@responses.activate
def test_native_exceptions(error_type, message, kiteconnect):
responses.add(
responses.GET,
"%s%s" % (kiteconnect.root, kiteconnect._routes["portfolio.positions"]),
body='{"error_type": "%s", "message": "%s"}' % (error_type, message),
content_type="application/json"
)
with pytest.raises(getattr(ex, error_type, ex.GeneralException)) as exc:
positions = kiteconnect.positions()
assert exc.message == message
|
Add tests for all exceptions# coding: utf-8
import pytest
import responses
import kiteconnect.exceptions as ex
@responses.activate
def test_wrong_json_response(kiteconnect):
responses.add(
responses.GET,
"%s%s" % (kiteconnect.root, kiteconnect._routes["portfolio.positions"]),
body="{a:b}",
content_type="application/json"
)
with pytest.raises(ex.DataException) as exc:
positions = kiteconnect.positions()
assert exc.message == "Couldn't parse the JSON response "\
"received from the server: {a:b}"
@responses.activate
def test_wrong_content_type(kiteconnect):
rdf_data = "<rdf:Description rdf:about=''><rdfs:label>zerodha</rdfs:label></rdf:Description"
responses.add(
responses.GET,
"%s%s" % (kiteconnect.root, kiteconnect._routes["portfolio.positions"]),
body=rdf_data,
content_type="application/rdf+xml"
)
with pytest.raises(ex.DataException) as exc:
positions = kiteconnect.positions()
assert exc.message == "Unknown Content-Type ({content_type}) with response: ({content})".format(
content_type='application/rdf+xml',
content=rdf_data
)
@pytest.mark.parametrize("error_type,message", [
('PermissionException', 'oops! permission issue'),
('OrderException', 'oops! cannot place order'),
('InputException', 'missing or invalid params'),
('NetworkException', 'oopsy doopsy network issues damn!'),
('CustomException', 'this is an exception i just created')
])
@responses.activate
def test_native_exceptions(error_type, message, kiteconnect):
responses.add(
responses.GET,
"%s%s" % (kiteconnect.root, kiteconnect._routes["portfolio.positions"]),
body='{"error_type": "%s", "message": "%s"}' % (error_type, message),
content_type="application/json"
)
with pytest.raises(getattr(ex, error_type, ex.GeneralException)) as exc:
positions = kiteconnect.positions()
assert exc.message == message
|
<commit_before><commit_msg>Add tests for all exceptions<commit_after># coding: utf-8
import pytest
import responses
import kiteconnect.exceptions as ex
@responses.activate
def test_wrong_json_response(kiteconnect):
responses.add(
responses.GET,
"%s%s" % (kiteconnect.root, kiteconnect._routes["portfolio.positions"]),
body="{a:b}",
content_type="application/json"
)
with pytest.raises(ex.DataException) as exc:
positions = kiteconnect.positions()
assert exc.message == "Couldn't parse the JSON response "\
"received from the server: {a:b}"
@responses.activate
def test_wrong_content_type(kiteconnect):
rdf_data = "<rdf:Description rdf:about=''><rdfs:label>zerodha</rdfs:label></rdf:Description"
responses.add(
responses.GET,
"%s%s" % (kiteconnect.root, kiteconnect._routes["portfolio.positions"]),
body=rdf_data,
content_type="application/rdf+xml"
)
with pytest.raises(ex.DataException) as exc:
positions = kiteconnect.positions()
assert exc.message == "Unknown Content-Type ({content_type}) with response: ({content})".format(
content_type='application/rdf+xml',
content=rdf_data
)
@pytest.mark.parametrize("error_type,message", [
('PermissionException', 'oops! permission issue'),
('OrderException', 'oops! cannot place order'),
('InputException', 'missing or invalid params'),
('NetworkException', 'oopsy doopsy network issues damn!'),
('CustomException', 'this is an exception i just created')
])
@responses.activate
def test_native_exceptions(error_type, message, kiteconnect):
responses.add(
responses.GET,
"%s%s" % (kiteconnect.root, kiteconnect._routes["portfolio.positions"]),
body='{"error_type": "%s", "message": "%s"}' % (error_type, message),
content_type="application/json"
)
with pytest.raises(getattr(ex, error_type, ex.GeneralException)) as exc:
positions = kiteconnect.positions()
assert exc.message == message
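The responses-based mocking pattern used in these tests, shown in isolation with a made-up URL and payload (plain requests calls, no kiteconnect specifics):

import requests
import responses

@responses.activate
def test_success_payload():
    responses.add(
        responses.GET,
        "https://api.example.com/positions",      # placeholder endpoint
        json={"status": "success", "data": []},   # placeholder payload
        status=200,
    )
    resp = requests.get("https://api.example.com/positions")
    assert resp.json()["status"] == "success"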
|
|
f131cd221b2ce6fc144b2aa9882cb0ad1b116675
|
tests/views/test_dashboard.py
|
tests/views/test_dashboard.py
|
#!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dashboard view.
"""
__authors__ = [
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
]
import httplib
from tests.profile_utils import GSoCProfileHelper
from tests.test_utils import DjangoTestCase
# TODO: perhaps we should move this out?
from soc.modules.seeder.logic.seeder import logic as seeder_logic
class DashboardTest(DjangoTestCase):
"""Tests dashboard page.
"""
def setUp(self):
from soc.modules.gsoc.models.program import GSoCProgram
self.gsoc = seeder_logic.seed(GSoCProgram)
self.data = GSoCProfileHelper(self.gsoc)
def assertDashboardTemplatesUsed(self, response):
"""Asserts that all the templates from the dashboard were used.
"""
self.assertGSoCTemplatesUsed(response)
self.assertTemplateUsed(response, 'v2/modules/gsoc/dashboard/base.html')
def testDasbhoardNoRole(self):
url = '/gsoc/dashboard/' + self.gsoc.key().name()
response = self.client.get(url)
self.assertDashboardTemplatesUsed(response)
def testDashboardWithProfile(self):
self.data.createProfile()
url = '/gsoc/dashboard/' + self.gsoc.key().name()
response = self.client.get(url)
self.assertDashboardTemplatesUsed(response)
|
Add (failing) tests for the dashboard
|
Add (failing) tests for the dashboard
|
Python
|
apache-2.0
|
SRabbelier/Melange,SRabbelier/Melange,SRabbelier/Melange,SRabbelier/Melange,SRabbelier/Melange,SRabbelier/Melange,SRabbelier/Melange,SRabbelier/Melange,SRabbelier/Melange
|
Add (failing) tests for the dashboard
|
#!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dashboard view.
"""
__authors__ = [
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
]
import httplib
from tests.profile_utils import GSoCProfileHelper
from tests.test_utils import DjangoTestCase
# TODO: perhaps we should move this out?
from soc.modules.seeder.logic.seeder import logic as seeder_logic
class DashboardTest(DjangoTestCase):
"""Tests dashboard page.
"""
def setUp(self):
from soc.modules.gsoc.models.program import GSoCProgram
self.gsoc = seeder_logic.seed(GSoCProgram)
self.data = GSoCProfileHelper(self.gsoc)
def assertDashboardTemplatesUsed(self, response):
"""Asserts that all the templates from the dashboard were used.
"""
self.assertGSoCTemplatesUsed(response)
self.assertTemplateUsed(response, 'v2/modules/gsoc/dashboard/base.html')
def testDasbhoardNoRole(self):
url = '/gsoc/dashboard/' + self.gsoc.key().name()
response = self.client.get(url)
self.assertDashboardTemplatesUsed(response)
def testDashboardWithProfile(self):
self.data.createProfile()
url = '/gsoc/dashboard/' + self.gsoc.key().name()
response = self.client.get(url)
self.assertDashboardTemplatesUsed(response)
|
<commit_before><commit_msg>Add (failing) tests for the dashboard<commit_after>
|
#!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dashboard view.
"""
__authors__ = [
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
]
import httplib
from tests.profile_utils import GSoCProfileHelper
from tests.test_utils import DjangoTestCase
# TODO: perhaps we should move this out?
from soc.modules.seeder.logic.seeder import logic as seeder_logic
class DashboardTest(DjangoTestCase):
"""Tests dashboard page.
"""
def setUp(self):
from soc.modules.gsoc.models.program import GSoCProgram
self.gsoc = seeder_logic.seed(GSoCProgram)
self.data = GSoCProfileHelper(self.gsoc)
def assertDashboardTemplatesUsed(self, response):
"""Asserts that all the templates from the dashboard were used.
"""
self.assertGSoCTemplatesUsed(response)
self.assertTemplateUsed(response, 'v2/modules/gsoc/dashboard/base.html')
def testDasbhoardNoRole(self):
url = '/gsoc/dashboard/' + self.gsoc.key().name()
response = self.client.get(url)
self.assertDashboardTemplatesUsed(response)
def testDashboardWithProfile(self):
self.data.createProfile()
url = '/gsoc/dashboard/' + self.gsoc.key().name()
response = self.client.get(url)
self.assertDashboardTemplatesUsed(response)
|
Add (failing) tests for the dashboard#!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dashboard view.
"""
__authors__ = [
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
]
import httplib
from tests.profile_utils import GSoCProfileHelper
from tests.test_utils import DjangoTestCase
# TODO: perhaps we should move this out?
from soc.modules.seeder.logic.seeder import logic as seeder_logic
class DashboardTest(DjangoTestCase):
"""Tests dashboard page.
"""
def setUp(self):
from soc.modules.gsoc.models.program import GSoCProgram
self.gsoc = seeder_logic.seed(GSoCProgram)
self.data = GSoCProfileHelper(self.gsoc)
def assertDashboardTemplatesUsed(self, response):
"""Asserts that all the templates from the dashboard were used.
"""
self.assertGSoCTemplatesUsed(response)
self.assertTemplateUsed(response, 'v2/modules/gsoc/dashboard/base.html')
def testDasbhoardNoRole(self):
url = '/gsoc/dashboard/' + self.gsoc.key().name()
response = self.client.get(url)
self.assertDashboardTemplatesUsed(response)
def testDashboardWithProfile(self):
self.data.createProfile()
url = '/gsoc/dashboard/' + self.gsoc.key().name()
response = self.client.get(url)
self.assertDashboardTemplatesUsed(response)
|
<commit_before><commit_msg>Add (failing) tests for the dashboard<commit_after>#!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dashboard view.
"""
__authors__ = [
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
]
import httplib
from tests.profile_utils import GSoCProfileHelper
from tests.test_utils import DjangoTestCase
# TODO: perhaps we should move this out?
from soc.modules.seeder.logic.seeder import logic as seeder_logic
class DashboardTest(DjangoTestCase):
"""Tests dashboard page.
"""
def setUp(self):
from soc.modules.gsoc.models.program import GSoCProgram
self.gsoc = seeder_logic.seed(GSoCProgram)
self.data = GSoCProfileHelper(self.gsoc)
def assertDashboardTemplatesUsed(self, response):
"""Asserts that all the templates from the dashboard were used.
"""
self.assertGSoCTemplatesUsed(response)
self.assertTemplateUsed(response, 'v2/modules/gsoc/dashboard/base.html')
def testDasbhoardNoRole(self):
url = '/gsoc/dashboard/' + self.gsoc.key().name()
response = self.client.get(url)
self.assertDashboardTemplatesUsed(response)
def testDashboardWithProfile(self):
self.data.createProfile()
url = '/gsoc/dashboard/' + self.gsoc.key().name()
response = self.client.get(url)
self.assertDashboardTemplatesUsed(response)
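A natural follow-up, sketched here as a hypothetical extra method for the class above rather than part of the original commit, is to assert the status code as well; httplib is already imported in the module.

    def testDashboardResponseOk(self):
        # Hypothetical extra check: the dashboard should render successfully.
        self.data.createProfile()
        url = '/gsoc/dashboard/' + self.gsoc.key().name()
        response = self.client.get(url)
        self.assertEqual(response.status_code, httplib.OK)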
|
|
226c2f36b9cc8257ce99bd15648be4aba2ccb606
|
cla_public/apps/checker/utils.py
|
cla_public/apps/checker/utils.py
|
from cla_public.apps.checker.constants import PASSPORTED_BENEFITS, \
NASS_BENEFITS
def passported(benefits):
return bool(set(benefits).intersection(PASSPORTED_BENEFITS))
def nass(benefits):
return bool(set(benefits).intersection(NASS_BENEFITS))
|
Move utility functions for checking passported benefits into separate module
|
Move utility functions for checking passported benefits into separate module
|
Python
|
mit
|
ministryofjustice/cla_public,ministryofjustice/cla_public,ministryofjustice/cla_public,ministryofjustice/cla_public
|
Move utility functions for checking passported benefits into separate module
|
from cla_public.apps.checker.constants import PASSPORTED_BENEFITS, \
NASS_BENEFITS
def passported(benefits):
return bool(set(benefits).intersection(PASSPORTED_BENEFITS))
def nass(benefits):
return bool(set(benefits).intersection(NASS_BENEFITS))
|
<commit_before><commit_msg>Move utility functions for checking passported benefits into separate module<commit_after>
|
from cla_public.apps.checker.constants import PASSPORTED_BENEFITS, \
NASS_BENEFITS
def passported(benefits):
return bool(set(benefits).intersection(PASSPORTED_BENEFITS))
def nass(benefits):
return bool(set(benefits).intersection(NASS_BENEFITS))
|
Move utility functions for checking passported benefits into separate modulefrom cla_public.apps.checker.constants import PASSPORTED_BENEFITS, \
NASS_BENEFITS
def passported(benefits):
return bool(set(benefits).intersection(PASSPORTED_BENEFITS))
def nass(benefits):
return bool(set(benefits).intersection(NASS_BENEFITS))
|
<commit_before><commit_msg>Move utility functions for checking passported benefits into separate module<commit_after>from cla_public.apps.checker.constants import PASSPORTED_BENEFITS, \
NASS_BENEFITS
def passported(benefits):
return bool(set(benefits).intersection(PASSPORTED_BENEFITS))
def nass(benefits):
return bool(set(benefits).intersection(NASS_BENEFITS))
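Both helpers reduce to a set-intersection check; a self-contained illustration with invented benefit codes (the real PASSPORTED_BENEFITS values are not shown in this record):

PASSPORTED = {'income_support', 'guarantee_credit'}

def is_passported(benefits):
    # True if any selected benefit appears in the passporting set.
    return bool(set(benefits) & PASSPORTED)

assert is_passported(['income_support', 'child_benefit'])
assert not is_passported(['child_benefit'])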
|
|
b2dd561322f6f277f470eae425028412a209da93
|
morenines/repository.py
|
morenines/repository.py
|
import os
from morenines import output
from morenines import util
from morenines.index import Index
from morenines.ignores import Ignores
NAMES = {
'repo_dir': '.morenines',
'index': 'index',
'ignore': 'ignore',
}
class Repository(object):
def __init__(self):
self.path = None
self.index = None
self.ignores = None
def open(self, path):
repo_dir_path = find_repo(path)
if not repo_dir_path:
output.error("Cannot find repository in '{}' or any parent dir".format(path))
util.abort()
self.path = repo_dir_path
self.index = Index.read(os.path.join(self.path, NAMES['index']))
self.ignores = Ignores.read(os.path.join(self.path, NAMES['ignore']))
def find_repo(start_path):
if start_path == '/':
return None
path = os.path.join(start_path, NAMES['repo_dir'])
if os.path.isdir(path):
return path
parent = os.path.split(start_path)[0]
return find_repo(parent)
|
Add Repository class and module
|
Add Repository class and module
|
Python
|
mit
|
mcgid/morenines,mcgid/morenines
|
Add Repository class and module
|
import os
from morenines import output
from morenines import util
from morenines.index import Index
from morenines.ignores import Ignores
NAMES = {
'repo_dir': '.morenines',
'index': 'index',
'ignore': 'ignore',
}
class Repository(object):
def __init__(self):
self.path = None
self.index = None
self.ignores = None
def open(self, path):
repo_dir_path = find_repo(path)
if not repo_dir_path:
output.error("Cannot find repository in '{}' or any parent dir".format(path))
util.abort()
self.path = repo_dir_path
self.index = Index.read(os.path.join(self.path, NAMES['index']))
self.ignores = Ignores.read(os.path.join(self.path, NAMES['ignore']))
def find_repo(start_path):
if start_path == '/':
return None
path = os.path.join(start_path, NAMES['repo_dir'])
if os.path.isdir(path):
return path
parent = os.path.split(start_path)[0]
return find_repo(parent)
|
<commit_before><commit_msg>Add Repository class and module<commit_after>
|
import os
from morenines import output
from morenines import util
from morenines.index import Index
from morenines.ignores import Ignores
NAMES = {
'repo_dir': '.morenines',
'index': 'index',
'ignore': 'ignore',
}
class Repository(object):
def __init__(self):
self.path = None
self.index = None
self.ignores = None
def open(self, path):
repo_dir_path = find_repo(path)
if not repo_dir_path:
output.error("Cannot find repository in '{}' or any parent dir".format(path))
util.abort()
self.path = repo_dir_path
self.index = Index.read(os.path.join(self.path, NAMES['index']))
self.ignores = Ignores.read(os.path.join(self.path, NAMES['ignore']))
def find_repo(start_path):
if start_path == '/':
return None
path = os.path.join(start_path, NAMES['repo_dir'])
if os.path.isdir(path):
return path
parent = os.path.split(start_path)[0]
return find_repo(parent)
|
Add Repository class and moduleimport os
from morenines import output
from morenines import util
from morenines.index import Index
from morenines.ignores import Ignores
NAMES = {
'repo_dir': '.morenines',
'index': 'index',
'ignore': 'ignore',
}
class Repository(object):
def __init__(self):
self.path = None
self.index = None
self.ignores = None
def open(self, path):
repo_dir_path = find_repo(path)
if not repo_dir_path:
output.error("Cannot find repository in '{}' or any parent dir".format(path))
util.abort()
self.path = repo_dir_path
self.index = Index.read(os.path.join(self.path, NAMES['index']))
self.ignores = Ignores.read(os.path.join(self.path, NAMES['ignore']))
def find_repo(start_path):
if start_path == '/':
return None
path = os.path.join(start_path, NAMES['repo_dir'])
if os.path.isdir(path):
return path
parent = os.path.split(start_path)[0]
return find_repo(parent)
|
<commit_before><commit_msg>Add Repository class and module<commit_after>import os
from morenines import output
from morenines import util
from morenines.index import Index
from morenines.ignores import Ignores
NAMES = {
'repo_dir': '.morenines',
'index': 'index',
'ignore': 'ignore',
}
class Repository(object):
def __init__(self):
self.path = None
self.index = None
self.ignores = None
def open(self, path):
repo_dir_path = find_repo(path)
if not repo_dir_path:
output.error("Cannot find repository in '{}' or any parent dir".format(path))
util.abort()
self.path = repo_dir_path
self.index = Index.read(os.path.join(self.path, NAMES['index']))
self.ignores = Ignores.read(os.path.join(self.path, NAMES['ignore']))
def find_repo(start_path):
if start_path == '/':
return None
path = os.path.join(start_path, NAMES['repo_dir'])
if os.path.isdir(path):
return path
parent = os.path.split(start_path)[0]
return find_repo(parent)
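An iterative equivalent of the recursive find_repo() walk-up, keeping the same '/' stopping condition and the '.morenines' directory name; a sketch only, behaviour is otherwise unchanged.

import os

def find_repo_iterative(start_path):
    path = start_path
    while path != '/':
        candidate = os.path.join(path, '.morenines')  # NAMES['repo_dir']
        if os.path.isdir(candidate):
            return candidate
        path = os.path.split(path)[0]
    return None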
|
|
af2a0a851be91931f96a7e9d44a1e8c460d70918
|
web/main/migrations/0052_migrate_casebook_dates.py
|
web/main/migrations/0052_migrate_casebook_dates.py
|
# Generated by Django 2.2.10 on 2020-04-14 11:38
from django.db import migrations
from main.models import Casebook
def copy_old_dates(app, schema):
update_cbs = []
for casebook in Casebook.objects.select_related('old_casebook').all():
if casebook.old_casebook:
casebook.created_at = casebook.old_casebook.created_at
update_cbs.append(casebook)
Casebook.objects.bulk_update(update_cbs, ['created_at'])
class Migration(migrations.Migration):
dependencies = [
('main', '0051_auto_20200407_1714'),
]
operations = [
migrations.RunPython(copy_old_dates, migrations.RunPython.noop)
]
|
Migrate creation date to new casebooks
|
Migrate creation date to new casebooks
|
Python
|
agpl-3.0
|
harvard-lil/h2o,harvard-lil/h2o,harvard-lil/h2o,harvard-lil/h2o
|
Migrate creation date to new casebooks
|
# Generated by Django 2.2.10 on 2020-04-14 11:38
from django.db import migrations
from main.models import Casebook
def copy_old_dates(app, schema):
update_cbs = []
for casebook in Casebook.objects.select_related('old_casebook').all():
if casebook.old_casebook:
casebook.created_at = casebook.old_casebook.created_at
update_cbs.append(casebook)
Casebook.objects.bulk_update(update_cbs, ['created_at'])
class Migration(migrations.Migration):
dependencies = [
('main', '0051_auto_20200407_1714'),
]
operations = [
migrations.RunPython(copy_old_dates, migrations.RunPython.noop)
]
|
<commit_before><commit_msg>Migrate creation date to new casebooks<commit_after>
|
# Generated by Django 2.2.10 on 2020-04-14 11:38
from django.db import migrations
from main.models import Casebook
def copy_old_dates(app, schema):
update_cbs = []
for casebook in Casebook.objects.select_related('old_casebook').all():
if casebook.old_casebook:
casebook.created_at = casebook.old_casebook.created_at
update_cbs.append(casebook)
Casebook.objects.bulk_update(update_cbs, ['created_at'])
class Migration(migrations.Migration):
dependencies = [
('main', '0051_auto_20200407_1714'),
]
operations = [
migrations.RunPython(copy_old_dates, migrations.RunPython.noop)
]
|
Migrate creation date to new casebooks# Generated by Django 2.2.10 on 2020-04-14 11:38
from django.db import migrations
from main.models import Casebook
def copy_old_dates(app, schema):
update_cbs = []
for casebook in Casebook.objects.select_related('old_casebook').all():
if casebook.old_casebook:
casebook.created_at = casebook.old_casebook.created_at
update_cbs.append(casebook)
Casebook.objects.bulk_update(update_cbs, ['created_at'])
class Migration(migrations.Migration):
dependencies = [
('main', '0051_auto_20200407_1714'),
]
operations = [
migrations.RunPython(copy_old_dates, migrations.RunPython.noop)
]
|
<commit_before><commit_msg>Migrate creation date to new casebooks<commit_after># Generated by Django 2.2.10 on 2020-04-14 11:38
from django.db import migrations
from main.models import Casebook
def copy_old_dates(app, schema):
update_cbs = []
for casebook in Casebook.objects.select_related('old_casebook').all():
if casebook.old_casebook:
casebook.created_at = casebook.old_casebook.created_at
update_cbs.append(casebook)
Casebook.objects.bulk_update(update_cbs, ['created_at'])
class Migration(migrations.Migration):
dependencies = [
('main', '0051_auto_20200407_1714'),
]
operations = [
migrations.RunPython(copy_old_dates, migrations.RunPython.noop)
]
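If the casebook table were large, the final bulk_update call in copy_old_dates() could be batched; batch_size is a standard argument in Django 2.2, and 500 is only an illustrative value.

Casebook.objects.bulk_update(update_cbs, ['created_at'], batch_size=500)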
|
|
0cad5e1673069d0fb8f2abb4eb6b062e3461fb70
|
scipy/linalg/tests/test_build.py
|
scipy/linalg/tests/test_build.py
|
from subprocess import call, PIPE, Popen
import sys
import re
import numpy as np
from numpy.testing import TestCase, dec
from scipy.linalg import flapack
# XXX: this is copied from numpy trunk. Can be removed when we will depend on
# numpy 1.3
class FindDependenciesLdd:
def __init__(self):
self.cmd = ['ldd']
try:
st = call(self.cmd, stdout=PIPE, stderr=PIPE)
except OSError:
raise RuntimeError("command %s cannot be run" % self.cmd)
def get_dependencies(self, file):
p = Popen(self.cmd + [file], stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
if not (p.returncode == 0):
raise RuntimeError("Failed to check dependencies for %s" % libfile)
return stdout
def grep_dependencies(self, file, deps):
stdout = self.get_dependencies(file)
rdeps = dict([(dep, re.compile(dep)) for dep in deps])
founds = []
for l in stdout.splitlines():
for k, v in rdeps.items():
if v.search(l):
founds.append(k)
return founds
class TestF77Mismatch(TestCase):
@dec.skipif(not(sys.platform[:5] == 'linux'),
"Skipping fortran compiler mismatch on non Linux platform")
def test_lapack(self):
f = FindDependenciesLdd()
deps = f.grep_dependencies(flapack.__file__,
['libg2c', 'libgfortran'])
self.failIf(len(deps) > 1,
"""Both g77 and gfortran runtimes linked in lapack_lite ! This is likely to
cause random crashes and wrong results. See numpy INSTALL.txt for more
information.""")
|
Add fortran ABI mismatch test for scipy.linalg.
|
Add fortran ABI mismatch test for scipy.linalg.
|
Python
|
bsd-3-clause
|
mikebenfield/scipy,ChanderG/scipy,chatcannon/scipy,juliantaylor/scipy,newemailjdm/scipy,sonnyhu/scipy,WarrenWeckesser/scipy,mhogg/scipy,arokem/scipy,newemailjdm/scipy,FRidh/scipy,Eric89GXL/scipy,mgaitan/scipy,pschella/scipy,larsmans/scipy,anntzer/scipy,pizzathief/scipy,chatcannon/scipy,mortonjt/scipy,vigna/scipy,sriki18/scipy,scipy/scipy,sriki18/scipy,haudren/scipy,jamestwebber/scipy,niknow/scipy,tylerjereddy/scipy,maciejkula/scipy,ogrisel/scipy,fernand/scipy,josephcslater/scipy,ales-erjavec/scipy,josephcslater/scipy,dominicelse/scipy,ChanderG/scipy,ilayn/scipy,jor-/scipy,ilayn/scipy,aman-iitj/scipy,FRidh/scipy,vberaudi/scipy,larsmans/scipy,mortada/scipy,lukauskas/scipy,lukauskas/scipy,haudren/scipy,aman-iitj/scipy,futurulus/scipy,raoulbq/scipy,vanpact/scipy,zaxliu/scipy,scipy/scipy,e-q/scipy,vberaudi/scipy,matthewalbani/scipy,haudren/scipy,petebachant/scipy,dominicelse/scipy,nmayorov/scipy,maciejkula/scipy,person142/scipy,njwilson23/scipy,maciejkula/scipy,josephcslater/scipy,lhilt/scipy,ogrisel/scipy,felipebetancur/scipy,gertingold/scipy,niknow/scipy,aeklant/scipy,woodscn/scipy,rgommers/scipy,felipebetancur/scipy,newemailjdm/scipy,nvoron23/scipy,fredrikw/scipy,mdhaber/scipy,FRidh/scipy,njwilson23/scipy,Newman101/scipy,trankmichael/scipy,pnedunuri/scipy,maciejkula/scipy,mhogg/scipy,Shaswat27/scipy,anielsen001/scipy,grlee77/scipy,sonnyhu/scipy,mikebenfield/scipy,Srisai85/scipy,pyramania/scipy,bkendzior/scipy,jsilter/scipy,aarchiba/scipy,piyush0609/scipy,zxsted/scipy,hainm/scipy,trankmichael/scipy,person142/scipy,WarrenWeckesser/scipy,andyfaff/scipy,Srisai85/scipy,sonnyhu/scipy,person142/scipy,cpaulik/scipy,njwilson23/scipy,maniteja123/scipy,befelix/scipy,WillieMaddox/scipy,matthewalbani/scipy,zaxliu/scipy,trankmichael/scipy,petebachant/scipy,jjhelmus/scipy,argriffing/scipy,grlee77/scipy,argriffing/scipy,minhlongdo/scipy,pyramania/scipy,fredrikw/scipy,jseabold/scipy,rmcgibbo/scipy,jonycgn/scipy,vhaasteren/scipy,tylerjereddy/scipy,mdhaber/scipy,dch312/scipy,andyfaff/scipy,gertingold/scipy,Newman101/scipy,WarrenWeckesser/scipy,rmcgibbo/scipy,Shaswat27/scipy,vhaasteren/scipy,anntzer/scipy,haudren/scipy,jjhelmus/scipy,perimosocordiae/scipy,fernand/scipy,ndchorley/scipy,WillieMaddox/scipy,lukauskas/scipy,newemailjdm/scipy,aman-iitj/scipy,bkendzior/scipy,endolith/scipy,futurulus/scipy,Kamp9/scipy,mdhaber/scipy,larsmans/scipy,pizzathief/scipy,mortonjt/scipy,jamestwebber/scipy,teoliphant/scipy,zerothi/scipy,matthewalbani/scipy,richardotis/scipy,jseabold/scipy,ales-erjavec/scipy,mortada/scipy,arokem/scipy,kalvdans/scipy,jonycgn/scipy,aeklant/scipy,juliantaylor/scipy,arokem/scipy,giorgiop/scipy,jonycgn/scipy,arokem/scipy,Kamp9/scipy,fernand/scipy,Dapid/scipy,gertingold/scipy,bkendzior/scipy,nvoron23/scipy,Dapid/scipy,jsilter/scipy,vigna/scipy,jseabold/scipy,vhaasteren/scipy,pbrod/scipy,bkendzior/scipy,juliantaylor/scipy,mikebenfield/scipy,vigna/scipy,raoulbq/scipy,njwilson23/scipy,arokem/scipy,gfyoung/scipy,lukauskas/scipy,befelix/scipy,chatcannon/scipy,scipy/scipy,hainm/scipy,mortada/scipy,vanpact/scipy,mingwpy/scipy,scipy/scipy,ndchorley/scipy,zaxliu/scipy,cpaulik/scipy,vanpact/scipy,ilayn/scipy,haudren/scipy,mgaitan/scipy,pbrod/scipy,pbrod/scipy,Gillu13/scipy,rmcgibbo/scipy,vberaudi/scipy,fernand/scipy,lhilt/scipy,mgaitan/scipy,josephcslater/scipy,surhudm/scipy,mortada/scipy,Eric89GXL/scipy,aman-iitj/scipy,fernand/scipy,behzadnouri/scipy,WarrenWeckesser/scipy,maciejkula/scipy,ogrisel/scipy,kalvdans/scipy,futurulus/scipy,richardotis/scipy,ilayn/scipy,Dapid/scipy,anielsen001/scipy,juliantaylor/scipy,w
itcxc/scipy,felipebetancur/scipy,Eric89GXL/scipy,FRidh/scipy,tylerjereddy/scipy,ales-erjavec/scipy,minhlongdo/scipy,newemailjdm/scipy,endolith/scipy,endolith/scipy,nvoron23/scipy,petebachant/scipy,kleskjr/scipy,larsmans/scipy,kalvdans/scipy,fredrikw/scipy,minhlongdo/scipy,efiring/scipy,larsmans/scipy,sriki18/scipy,Newman101/scipy,pnedunuri/scipy,zaxliu/scipy,pbrod/scipy,lhilt/scipy,Dapid/scipy,cpaulik/scipy,felipebetancur/scipy,argriffing/scipy,mgaitan/scipy,Eric89GXL/scipy,teoliphant/scipy,gertingold/scipy,efiring/scipy,mingwpy/scipy,aeklant/scipy,jseabold/scipy,mortada/scipy,surhudm/scipy,mortonjt/scipy,nonhermitian/scipy,ilayn/scipy,gef756/scipy,chatcannon/scipy,jonycgn/scipy,fredrikw/scipy,jsilter/scipy,aarchiba/scipy,jseabold/scipy,Newman101/scipy,apbard/scipy,vberaudi/scipy,richardotis/scipy,maniteja123/scipy,josephcslater/scipy,gdooper/scipy,rgommers/scipy,piyush0609/scipy,andim/scipy,surhudm/scipy,sargas/scipy,aarchiba/scipy,efiring/scipy,nmayorov/scipy,cpaulik/scipy,matthewalbani/scipy,Shaswat27/scipy,nmayorov/scipy,person142/scipy,mhogg/scipy,gef756/scipy,felipebetancur/scipy,raoulbq/scipy,mgaitan/scipy,Stefan-Endres/scipy,Stefan-Endres/scipy,behzadnouri/scipy,mortonjt/scipy,hainm/scipy,ChanderG/scipy,Gillu13/scipy,fredrikw/scipy,zerothi/scipy,Kamp9/scipy,vhaasteren/scipy,witcxc/scipy,woodscn/scipy,Stefan-Endres/scipy,teoliphant/scipy,vanpact/scipy,Kamp9/scipy,rgommers/scipy,sauliusl/scipy,endolith/scipy,e-q/scipy,sargas/scipy,petebachant/scipy,jakevdp/scipy,surhudm/scipy,rgommers/scipy,kleskjr/scipy,jonycgn/scipy,anntzer/scipy,nvoron23/scipy,aeklant/scipy,hainm/scipy,jonycgn/scipy,e-q/scipy,zxsted/scipy,behzadnouri/scipy,richardotis/scipy,sargas/scipy,lukauskas/scipy,teoliphant/scipy,woodscn/scipy,grlee77/scipy,sriki18/scipy,gef756/scipy,scipy/scipy,pnedunuri/scipy,gef756/scipy,aarchiba/scipy,gfyoung/scipy,zxsted/scipy,ortylp/scipy,ortylp/scipy,zaxliu/scipy,efiring/scipy,gdooper/scipy,matthew-brett/scipy,futurulus/scipy,minhlongdo/scipy,Gillu13/scipy,nonhermitian/scipy,vberaudi/scipy,raoulbq/scipy,maniteja123/scipy,sargas/scipy,fernand/scipy,andim/scipy,felipebetancur/scipy,perimosocordiae/scipy,matthew-brett/scipy,sonnyhu/scipy,rmcgibbo/scipy,pyramania/scipy,giorgiop/scipy,dominicelse/scipy,apbard/scipy,nonhermitian/scipy,niknow/scipy,ortylp/scipy,WillieMaddox/scipy,pizzathief/scipy,mtrbean/scipy,witcxc/scipy,argriffing/scipy,apbard/scipy,matthew-brett/scipy,sauliusl/scipy,mhogg/scipy,Stefan-Endres/scipy,mdhaber/scipy,WarrenWeckesser/scipy,behzadnouri/scipy,endolith/scipy,grlee77/scipy,raoulbq/scipy,niknow/scipy,befelix/scipy,mingwpy/scipy,zxsted/scipy,FRidh/scipy,mingwpy/scipy,anielsen001/scipy,dominicelse/scipy,e-q/scipy,jor-/scipy,ndchorley/scipy,andyfaff/scipy,Shaswat27/scipy,sonnyhu/scipy,lhilt/scipy,sriki18/scipy,pizzathief/scipy,andim/scipy,WarrenWeckesser/scipy,pyramania/scipy,anielsen001/scipy,mhogg/scipy,sauliusl/scipy,sauliusl/scipy,sargas/scipy,anielsen001/scipy,kalvdans/scipy,mingwpy/scipy,aarchiba/scipy,Srisai85/scipy,minhlongdo/scipy,anntzer/scipy,rmcgibbo/scipy,chatcannon/scipy,rmcgibbo/scipy,e-q/scipy,ortylp/scipy,mdhaber/scipy,mgaitan/scipy,zxsted/scipy,ortylp/scipy,futurulus/scipy,niknow/scipy,argriffing/scipy,andim/scipy,hainm/scipy,person142/scipy,giorgiop/scipy,mtrbean/scipy,surhudm/scipy,Gillu13/scipy,pnedunuri/scipy,jakevdp/scipy,mtrbean/scipy,gef756/scipy,tylerjereddy/scipy,FRidh/scipy,juliantaylor/scipy,mtrbean/scipy,mortonjt/scipy,vhaasteren/scipy,trankmichael/scipy,nvoron23/scipy,Eric89GXL/scipy,jseabold/scipy,haudren/scipy,zerothi/scipy,Gillu13/scipy
,pschella/scipy,vigna/scipy,mdhaber/scipy,lhilt/scipy,ales-erjavec/scipy,ales-erjavec/scipy,mortada/scipy,nonhermitian/scipy,jamestwebber/scipy,andyfaff/scipy,matthew-brett/scipy,cpaulik/scipy,andyfaff/scipy,vanpact/scipy,argriffing/scipy,zxsted/scipy,gfyoung/scipy,dch312/scipy,Newman101/scipy,aeklant/scipy,sriki18/scipy,nvoron23/scipy,hainm/scipy,anntzer/scipy,perimosocordiae/scipy,behzadnouri/scipy,sonnyhu/scipy,gdooper/scipy,Stefan-Endres/scipy,woodscn/scipy,apbard/scipy,jjhelmus/scipy,futurulus/scipy,WillieMaddox/scipy,njwilson23/scipy,befelix/scipy,efiring/scipy,kalvdans/scipy,kleskjr/scipy,dominicelse/scipy,befelix/scipy,Gillu13/scipy,surhudm/scipy,ortylp/scipy,teoliphant/scipy,sauliusl/scipy,Dapid/scipy,jakevdp/scipy,mtrbean/scipy,kleskjr/scipy,Srisai85/scipy,piyush0609/scipy,jamestwebber/scipy,Srisai85/scipy,Stefan-Endres/scipy,andyfaff/scipy,ales-erjavec/scipy,ChanderG/scipy,niknow/scipy,bkendzior/scipy,pbrod/scipy,richardotis/scipy,ndchorley/scipy,scipy/scipy,ChanderG/scipy,pnedunuri/scipy,jor-/scipy,jsilter/scipy,andim/scipy,ogrisel/scipy,jor-/scipy,giorgiop/scipy,maniteja123/scipy,mikebenfield/scipy,dch312/scipy,dch312/scipy,Srisai85/scipy,lukauskas/scipy,matthewalbani/scipy,zerothi/scipy,mikebenfield/scipy,maniteja123/scipy,gdooper/scipy,ndchorley/scipy,kleskjr/scipy,vhaasteren/scipy,nmayorov/scipy,mortonjt/scipy,richardotis/scipy,pschella/scipy,larsmans/scipy,vberaudi/scipy,jsilter/scipy,minhlongdo/scipy,perimosocordiae/scipy,pschella/scipy,nmayorov/scipy,Eric89GXL/scipy,Shaswat27/scipy,trankmichael/scipy,vanpact/scipy,nonhermitian/scipy,jjhelmus/scipy,andim/scipy,anielsen001/scipy,piyush0609/scipy,behzadnouri/scipy,Dapid/scipy,zerothi/scipy,ogrisel/scipy,jakevdp/scipy,apbard/scipy,tylerjereddy/scipy,mtrbean/scipy,woodscn/scipy,fredrikw/scipy,petebachant/scipy,woodscn/scipy,jor-/scipy,gertingold/scipy,ilayn/scipy,matthew-brett/scipy,ChanderG/scipy,giorgiop/scipy,perimosocordiae/scipy,jjhelmus/scipy,kleskjr/scipy,witcxc/scipy,zerothi/scipy,gfyoung/scipy,Newman101/scipy,sauliusl/scipy,endolith/scipy,raoulbq/scipy,perimosocordiae/scipy,ndchorley/scipy,mhogg/scipy,anntzer/scipy,piyush0609/scipy,aman-iitj/scipy,mingwpy/scipy,pizzathief/scipy,gef756/scipy,jakevdp/scipy,chatcannon/scipy,pyramania/scipy,dch312/scipy,njwilson23/scipy,witcxc/scipy,Shaswat27/scipy,cpaulik/scipy,gdooper/scipy,trankmichael/scipy,pbrod/scipy,pnedunuri/scipy,pschella/scipy,vigna/scipy,rgommers/scipy,WillieMaddox/scipy,piyush0609/scipy,gfyoung/scipy,WillieMaddox/scipy,giorgiop/scipy,aman-iitj/scipy,maniteja123/scipy,Kamp9/scipy,grlee77/scipy,jamestwebber/scipy,petebachant/scipy,zaxliu/scipy,efiring/scipy,Kamp9/scipy,newemailjdm/scipy
|
Add fortran ABI mismatch test for scipy.linalg.
|
from subprocess import call, PIPE, Popen
import sys
import re
import numpy as np
from numpy.testing import TestCase, dec
from scipy.linalg import flapack
# XXX: this is copied from numpy trunk. Can be removed when we will depend on
# numpy 1.3
class FindDependenciesLdd:
def __init__(self):
self.cmd = ['ldd']
try:
st = call(self.cmd, stdout=PIPE, stderr=PIPE)
except OSError:
raise RuntimeError("command %s cannot be run" % self.cmd)
def get_dependencies(self, file):
p = Popen(self.cmd + [file], stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
if not (p.returncode == 0):
raise RuntimeError("Failed to check dependencies for %s" % libfile)
return stdout
def grep_dependencies(self, file, deps):
stdout = self.get_dependencies(file)
rdeps = dict([(dep, re.compile(dep)) for dep in deps])
founds = []
for l in stdout.splitlines():
for k, v in rdeps.items():
if v.search(l):
founds.append(k)
return founds
class TestF77Mismatch(TestCase):
@dec.skipif(not(sys.platform[:5] == 'linux'),
"Skipping fortran compiler mismatch on non Linux platform")
def test_lapack(self):
f = FindDependenciesLdd()
deps = f.grep_dependencies(flapack.__file__,
['libg2c', 'libgfortran'])
self.failIf(len(deps) > 1,
"""Both g77 and gfortran runtimes linked in lapack_lite ! This is likely to
cause random crashes and wrong results. See numpy INSTALL.txt for more
information.""")
|
<commit_before><commit_msg>Add fortran ABI mismatch test for scipy.linalg.<commit_after>
|
from subprocess import call, PIPE, Popen
import sys
import re
import numpy as np
from numpy.testing import TestCase, dec
from scipy.linalg import flapack
# XXX: this is copied from numpy trunk. Can be removed when we will depend on
# numpy 1.3
class FindDependenciesLdd:
def __init__(self):
self.cmd = ['ldd']
try:
st = call(self.cmd, stdout=PIPE, stderr=PIPE)
except OSError:
raise RuntimeError("command %s cannot be run" % self.cmd)
def get_dependencies(self, file):
p = Popen(self.cmd + [file], stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
if not (p.returncode == 0):
raise RuntimeError("Failed to check dependencies for %s" % libfile)
return stdout
def grep_dependencies(self, file, deps):
stdout = self.get_dependencies(file)
rdeps = dict([(dep, re.compile(dep)) for dep in deps])
founds = []
for l in stdout.splitlines():
for k, v in rdeps.items():
if v.search(l):
founds.append(k)
return founds
class TestF77Mismatch(TestCase):
@dec.skipif(not(sys.platform[:5] == 'linux'),
"Skipping fortran compiler mismatch on non Linux platform")
def test_lapack(self):
f = FindDependenciesLdd()
deps = f.grep_dependencies(flapack.__file__,
['libg2c', 'libgfortran'])
self.failIf(len(deps) > 1,
"""Both g77 and gfortran runtimes linked in lapack_lite ! This is likely to
cause random crashes and wrong results. See numpy INSTALL.txt for more
information.""")
|
Add fortran ABI mismatch test for scipy.linalg.from subprocess import call, PIPE, Popen
import sys
import re
import numpy as np
from numpy.testing import TestCase, dec
from scipy.linalg import flapack
# XXX: this is copied from numpy trunk. Can be removed when we depend on
# numpy 1.3
class FindDependenciesLdd:
def __init__(self):
self.cmd = ['ldd']
try:
st = call(self.cmd, stdout=PIPE, stderr=PIPE)
except OSError:
raise RuntimeError("command %s cannot be run" % self.cmd)
def get_dependencies(self, file):
p = Popen(self.cmd + [file], stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
if not (p.returncode == 0):
raise RuntimeError("Failed to check dependencies for %s" % libfile)
return stdout
def grep_dependencies(self, file, deps):
stdout = self.get_dependencies(file)
rdeps = dict([(dep, re.compile(dep)) for dep in deps])
founds = []
for l in stdout.splitlines():
for k, v in rdeps.items():
if v.search(l):
founds.append(k)
return founds
class TestF77Mismatch(TestCase):
@dec.skipif(not(sys.platform[:5] == 'linux'),
"Skipping fortran compiler mismatch on non Linux platform")
def test_lapack(self):
f = FindDependenciesLdd()
deps = f.grep_dependencies(flapack.__file__,
['libg2c', 'libgfortran'])
self.failIf(len(deps) > 1,
"""Both g77 and gfortran runtimes linked in lapack_lite ! This is likely to
cause random crashes and wrong results. See numpy INSTALL.txt for more
information.""")
|
<commit_before><commit_msg>Add fortran ABI mismatch test for scipy.linalg.<commit_after>from subprocess import call, PIPE, Popen
import sys
import re
import numpy as np
from numpy.testing import TestCase, dec
from scipy.linalg import flapack
# XXX: this is copied from numpy trunk. Can be removed when we depend on
# numpy 1.3
class FindDependenciesLdd:
def __init__(self):
self.cmd = ['ldd']
try:
st = call(self.cmd, stdout=PIPE, stderr=PIPE)
except OSError:
raise RuntimeError("command %s cannot be run" % self.cmd)
def get_dependencies(self, file):
p = Popen(self.cmd + [file], stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
if not (p.returncode == 0):
raise RuntimeError("Failed to check dependencies for %s" % libfile)
return stdout
def grep_dependencies(self, file, deps):
stdout = self.get_dependencies(file)
rdeps = dict([(dep, re.compile(dep)) for dep in deps])
founds = []
for l in stdout.splitlines():
for k, v in rdeps.items():
if v.search(l):
founds.append(k)
return founds
class TestF77Mismatch(TestCase):
@dec.skipif(not(sys.platform[:5] == 'linux'),
"Skipping fortran compiler mismatch on non Linux platform")
def test_lapack(self):
f = FindDependenciesLdd()
deps = f.grep_dependencies(flapack.__file__,
['libg2c', 'libgfortran'])
self.failIf(len(deps) > 1,
"""Both g77 and gfortran runtimes linked in lapack_lite ! This is likely to
cause random crashes and wrong results. See numpy INSTALL.txt for more
information.""")
|
|
e61dbf66d6f73e4999a5ff9f732a8df0637fdbf2
|
server/models.py
|
server/models.py
|
from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
app = Flask(__name__)  # app instance required by the config and db below
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/test.db'
db = SQLAlchemy(app)
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(80), unique=True)
email = db.Column(db.String(120), unique=True)
def __init__(self, username, email):
self.username = username
self.email = email
def __repr__(self):
return '<User %r>' % self.username
|
Add an example of SQLalchemy model
|
Add an example of SQLalchemy model
|
Python
|
bsd-3-clause
|
raspberrywhite/raspberrywhite,raspberrywhite/raspberrywhite,raspberrywhite/raspberrywhite,raspberrywhite/raspberrywhite
|
Add an example of SQLalchemy model
|
from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
app = Flask(__name__)  # app instance required by the config and db below
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/test.db'
db = SQLAlchemy(app)
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(80), unique=True)
email = db.Column(db.String(120), unique=True)
def __init__(self, username, email):
self.username = username
self.email = email
def __repr__(self):
return '<User %r>' % self.username
|
<commit_before><commit_msg>Add an example of SQLalchemy model<commit_after>
|
from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
app = Flask(__name__)  # app instance required by the config and db below
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/test.db'
db = SQLAlchemy(app)
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(80), unique=True)
email = db.Column(db.String(120), unique=True)
def __init__(self, username, email):
self.username = username
self.email = email
def __repr__(self):
return '<User %r>' % self.username
|
Add an example of SQLalchemy modelfrom flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
app = Flask(__name__)  # app instance required by the config and db below
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/test.db'
db = SQLAlchemy(app)
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(80), unique=True)
email = db.Column(db.String(120), unique=True)
def __init__(self, username, email):
self.username = username
self.email = email
def __repr__(self):
return '<User %r>' % self.username
|
<commit_before><commit_msg>Add an example of SQLalchemy model<commit_after>from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
app = Flask(__name__)  # app instance required by the config and db below
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/test.db'
db = SQLAlchemy(app)
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(80), unique=True)
email = db.Column(db.String(120), unique=True)
def __init__(self, username, email):
self.username = username
self.email = email
def __repr__(self):
return '<User %r>' % self.username
|
|
b01c602f156b5a72db1ea4f27989aa5b1afdada8
|
src/behavior/features/terrain.py
|
src/behavior/features/terrain.py
|
from lettuce import *
import requests
TARGET_URL='http://localhost:8080'
tenantList = [ "511", "615", "634", "515" ]
@before.each_scenario
def cleanContext(feature):
for tenant in tenantList:
url = TARGET_URL + '/pap/v1/' + tenant
r = requests.delete(url)
|
ADD Cleaning before each test
|
ADD Cleaning before each test
|
Python
|
apache-2.0
|
telefonicaid/fiware-keypass,telefonicaid/fiware-keypass,telefonicaid/fiware-keypass
|
ADD Cleaning before each test
|
from lettuce import *
import requests
TARGET_URL='http://localhost:8080'
tenantList = [ "511", "615", "634", "515" ]
@before.each_scenario
def cleanContext(feature):
for tenant in tenantList:
url = TARGET_URL + '/pap/v1/' + tenant
r = requests.delete(url)
|
<commit_before><commit_msg>ADD Cleaning before each test<commit_after>
|
from lettuce import *
import requests
TARGET_URL='http://localhost:8080'
tenantList = [ "511", "615", "634", "515" ]
@before.each_scenario
def cleanContext(feature):
for tenant in tenantList:
url = TARGET_URL + '/pap/v1/' + tenant
r = requests.delete(url)
|
ADD Cleaning before each testfrom lettuce import *
import requests
TARGET_URL='http://localhost:8080'
tenantList = [ "511", "615", "634", "515" ]
@before.each_scenario
def cleanContext(feature):
for tenant in tenantList:
url = TARGET_URL + '/pap/v1/' + tenant
r = requests.delete(url)
|
<commit_before><commit_msg>ADD Cleaning before each test<commit_after>from lettuce import *
import requests
TARGET_URL='http://localhost:8080'
tenantList = [ "511", "615", "634", "515" ]
@before.each_scenario
def cleanContext(feature):
for tenant in tenantList:
url = TARGET_URL + '/pap/v1/' + tenant
r = requests.delete(url)
|
|
1ece8c8640214d69a224f94f1b1ac93ec53d7699
|
chunsabot/modules/images.py
|
chunsabot/modules/images.py
|
from chunsabot.botlogic import brain
@brain.route("@image")
def add_image_description(msg, extras):
attachment = extras['attachment']
if not attachment:
return None
return "asdf"
|
Add image processing system (dummy)
|
Add image processing system (dummy)
|
Python
|
mit
|
susemeee/Chunsabot-framework
|
Add image processing system (dummy)
|
from chunsabot.botlogic import brain
@brain.route("@image")
def add_image_description(msg, extras):
attachment = extras['attachment']
if not attachment:
return None
return "asdf"
|
<commit_before><commit_msg>Add image processing system (dummy)<commit_after>
|
from chunsabot.botlogic import brain
@brain.route("@image")
def add_image_description(msg, extras):
attachment = extras['attachment']
if not attachment:
return None
return "asdf"
|
Add image processing system (dummy)from chunsabot.botlogic import brain
@brain.route("@image")
def add_image_description(msg, extras):
attachment = extras['attachment']
if not attachment:
return None
return "asdf"
|
<commit_before><commit_msg>Add image processing system (dummy)<commit_after>from chunsabot.botlogic import brain
@brain.route("@image")
def add_image_description(msg, extras):
attachment = extras['attachment']
if not attachment:
return None
return "asdf"
|
|
6274ee8d776c829998dfaa56cb419d1263242a48
|
Algorithms/Sort_Algorithms/Topological_Sort/TopologicalSort.py
|
Algorithms/Sort_Algorithms/Topological_Sort/TopologicalSort.py
|
'''
Topological sort.
Taken from :
http://stackoverflow.com/questions/15038876/topological-sort-python
'''
from collections import defaultdict
from itertools import takewhile, count
def sort_topologically(graph):
levels_by_name = {}
names_by_level = defaultdict(set)
def walk_depth_first(name):
if name in levels_by_name:
return levels_by_name[name]
children = graph.get(name, None)
level = 0 if not children else (1 + max(walk_depth_first(lname) for lname in children))
levels_by_name[name] = level
names_by_level[level].add(name)
return level
for name in graph:
walk_depth_first(name)
return list(takewhile(lambda x: x is not None, (names_by_level.get(i, None) for i in count())))
graph = {
1: [2, 3],
2: [4, 5, 6],
3: [4,6],
4: [5,6],
5: [6],
6: []
}
print(sort_topologically(graph))
|
Add topological sorting in Python
|
Add topological sorting in Python
Issue #57
|
Python
|
mit
|
salman-bhai/DS-Algo-Handbook,salman-bhai/DS-Algo-Handbook,salman-bhai/DS-Algo-Handbook,salman-bhai/DS-Algo-Handbook
|
Add topological sorting in Python
Issue #57
|
'''
Topological sort.
Taken from :
http://stackoverflow.com/questions/15038876/topological-sort-python
'''
from collections import defaultdict
from itertools import takewhile, count
def sort_topologically(graph):
levels_by_name = {}
names_by_level = defaultdict(set)
def walk_depth_first(name):
if name in levels_by_name:
return levels_by_name[name]
children = graph.get(name, None)
level = 0 if not children else (1 + max(walk_depth_first(lname) for lname in children))
levels_by_name[name] = level
names_by_level[level].add(name)
return level
for name in graph:
walk_depth_first(name)
return list(takewhile(lambda x: x is not None, (names_by_level.get(i, None) for i in count())))
graph = {
1: [2, 3],
2: [4, 5, 6],
3: [4,6],
4: [5,6],
5: [6],
6: []
}
print(sort_topologically(graph))
|
<commit_before><commit_msg>Add topological sorting in Python
Issue #57<commit_after>
|
'''
Topological sort.
Taken from :
http://stackoverflow.com/questions/15038876/topological-sort-python
'''
from collections import defaultdict
from itertools import takewhile, count
def sort_topologically(graph):
levels_by_name = {}
names_by_level = defaultdict(set)
def walk_depth_first(name):
if name in levels_by_name:
return levels_by_name[name]
children = graph.get(name, None)
level = 0 if not children else (1 + max(walk_depth_first(lname) for lname in children))
levels_by_name[name] = level
names_by_level[level].add(name)
return level
for name in graph:
walk_depth_first(name)
return list(takewhile(lambda x: x is not None, (names_by_level.get(i, None) for i in count())))
graph = {
1: [2, 3],
2: [4, 5, 6],
3: [4,6],
4: [5,6],
5: [6],
6: []
}
print(sort_topologically(graph))
|
Add topological sorting in Python
Issue #57'''
Topological sort.
Taken from :
http://stackoverflow.com/questions/15038876/topological-sort-python
'''
from collections import defaultdict
from itertools import takewhile, count
def sort_topologically(graph):
levels_by_name = {}
names_by_level = defaultdict(set)
def walk_depth_first(name):
if name in levels_by_name:
return levels_by_name[name]
children = graph.get(name, None)
level = 0 if not children else (1 + max(walk_depth_first(lname) for lname in children))
levels_by_name[name] = level
names_by_level[level].add(name)
return level
for name in graph:
walk_depth_first(name)
return list(takewhile(lambda x: x is not None, (names_by_level.get(i, None) for i in count())))
graph = {
1: [2, 3],
2: [4, 5, 6],
3: [4,6],
4: [5,6],
5: [6],
6: []
}
print(sort_topologically(graph))
|
<commit_before><commit_msg>Add topological sorting in Python
Issue #57<commit_after>'''
Topological sort.
Taken from :
http://stackoverflow.com/questions/15038876/topological-sort-python
'''
from collections import defaultdict
from itertools import takewhile, count
def sort_topologically(graph):
levels_by_name = {}
names_by_level = defaultdict(set)
def walk_depth_first(name):
if name in levels_by_name:
return levels_by_name[name]
children = graph.get(name, None)
level = 0 if not children else (1 + max(walk_depth_first(lname) for lname in children))
levels_by_name[name] = level
names_by_level[level].add(name)
return level
for name in graph:
walk_depth_first(name)
return list(takewhile(lambda x: x is not None, (names_by_level.get(i, None) for i in count())))
graph = {
1: [2, 3],
2: [4, 5, 6],
3: [4,6],
4: [5,6],
5: [6],
6: []
}
print(sort_topologically(graph))
|
|
3d1cef9e56d7fac8a1b89861b7443e4ca660e4a8
|
nova/ipv6/api.py
|
nova/ipv6/api.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 Openstack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import flags
from nova import utils
FLAGS = flags.FLAGS
flags.DEFINE_string('ipv6_backend',
'rfc2462',
'Backend to use for IPv6 generation')
def reset_backend():
global IMPL
IMPL = utils.LazyPluggable(FLAGS['ipv6_backend'],
rfc2462='nova.ipv6.rfc2462',
account_identifier='nova.ipv6.account_identifier')
def to_global(prefix, mac, project_id):
return IMPL.to_global(prefix, mac, project_id)
def to_mac(ipv6_address):
return IMPL.to_mac(ipv6_address)
reset_backend()
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 Openstack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import flags
from nova import utils
FLAGS = flags.FLAGS
flags.DEFINE_string('ipv6_backend',
'rfc2462',
'Backend to use for IPv6 generation')
def reset_backend():
global IMPL
IMPL = utils.LazyPluggable(FLAGS['ipv6_backend'],
rfc2462='nova.ipv6.rfc2462',
account_identifier='nova.ipv6.account_identifier')
def to_global(prefix, mac, project_id):
return IMPL.to_global(prefix, mac, project_id)
def to_mac(ipv6_address):
return IMPL.to_mac(ipv6_address)
reset_backend()
|
Reduce indentation to avoid PEP8 failures
|
Reduce indentation to avoid PEP8 failures
|
Python
|
apache-2.0
|
vmturbo/nova,fnordahl/nova,cloudbau/nova,CEG-FYP-OpenStack/scheduler,vladikr/nova_drafts,yrobla/nova,bigswitch/nova,eharney/nova,KarimAllah/nova,Stavitsky/nova,TwinkleChawla/nova,zzicewind/nova,klmitch/nova,rickerc/nova_audit,belmiromoreira/nova,cloudbase/nova-virtualbox,luogangyi/bcec-nova,Yusuke1987/openstack_template,eneabio/nova,SUSE-Cloud/nova,nikesh-mahalka/nova,fajoy/nova,petrutlucian94/nova_dev,gooddata/openstack-nova,NewpTone/stacklab-nova,Juniper/nova,NoBodyCam/TftpPxeBootBareMetal,sebrandon1/nova,shail2810/nova,usc-isi/extra-specs,orbitfp7/nova,usc-isi/nova,yatinkumbhare/openstack-nova,spring-week-topos/nova-week,yrobla/nova,usc-isi/nova,joker946/nova,usc-isi/extra-specs,apporc/nova,tanglei528/nova,klmitch/nova,cernops/nova,varunarya10/nova_test_latest,salv-orlando/MyRepo,blueboxgroup/nova,BeyondTheClouds/nova,watonyweng/nova,akash1808/nova_test_latest,angdraug/nova,dims/nova,sebrandon1/nova,sridevikoushik31/nova,bgxavier/nova,superstack/nova,ntt-sic/nova,gspilio/nova,NewpTone/stacklab-nova,viggates/nova,adelina-t/nova,maheshp/novatest,varunarya10/nova_test_latest,CiscoSystems/nova,orbitfp7/nova,fajoy/nova,usc-isi/nova,silenceli/nova,psiwczak/openstack,akash1808/nova,mgagne/nova,fnordahl/nova,luogangyi/bcec-nova,NoBodyCam/TftpPxeBootBareMetal,redhat-openstack/nova,NeCTAR-RC/nova,cloudbase/nova,virtualopensystems/nova,mikalstill/nova,cloudbase/nova,CEG-FYP-OpenStack/scheduler,scripnichenko/nova,superstack/nova,DirectXMan12/nova-hacking,edulramirez/nova,houshengbo/nova_vmware_compute_driver,Yuriy-Leonov/nova,Stavitsky/nova,usc-isi/extra-specs,double12gzh/nova,eneabio/nova,plumgrid/plumgrid-nova,imsplitbit/nova,mmnelemane/nova,zhimin711/nova,eonpatapon/nova,ted-gould/nova,aristanetworks/arista-ovs-nova,akash1808/nova_test_latest,aristanetworks/arista-ovs-nova,sileht/deb-openstack-nova,yatinkumbhare/openstack-nova,TieWei/nova,raildo/nova,shootstar/novatest,KarimAllah/nova,Metaswitch/calico-nova,maheshp/novatest,jianghuaw/nova,josephsuh/extra-specs,adelina-t/nova,Juniper/nova,yosshy/nova,whitepages/nova,affo/nova,yrobla/nova,silenceli/nova,bigswitch/nova,whitepages/nova,tudorvio/nova,openstack/nova,phenoxim/nova,CloudServer/nova,joker946/nova,mikalstill/nova,maoy/zknova,bgxavier/nova,citrix-openstack-build/nova,aristanetworks/arista-ovs-nova,dims/nova,gooddata/openstack-nova,devendermishrajio/nova_test_latest,thomasem/nova,NewpTone/stacklab-nova,qwefi/nova,dstroppa/openstack-smartos-nova-grizzly,MountainWei/nova,eayunstack/nova,maoy/zknova,zzicewind/nova,rajalokan/nova,CCI-MOC/nova,mahak/nova,Yusuke1987/openstack_template,saleemjaveds/https-github.com-openstack-nova,kimjaejoong/nova,cloudbase/nova-virtualbox,zhimin711/nova,Juniper/nova,Tehsmash/nova,Francis-Liu/animated-broccoli,phenoxim/nova,JioCloud/nova_test_latest,rahulunair/nova,citrix-openstack-build/nova,badock/nova,petrutlucian94/nova,Triv90/Nova,JianyuWang/nova,sridevikoushik31/nova,apporc/nova,dawnpower/nova,psiwczak/openstack,JioCloud/nova,rajalokan/nova,hanlind/nova,DirectXMan12/nova-hacking,vladikr/nova_drafts,tangfeixiong/nova,eonpatapon/nova,mahak/nova,sileht/deb-openstack-nova,JioCloud/nova_test_latest,leilihh/nova,alexandrucoman/vbox-nova-driver,savi-dev/nova,barnsnake351/nova,jeffrey4l/nova,gspilio/nova,Triv90/Nova,viggates/nova,j-carpentier/nova,klmitch/nova,mikalstill/nova,thomasem/nova,cyx1231st/nova,scripnichenko/nova,josephsuh/extra-specs,sileht/deb-openstack-nova,MountainWei/nova,CCI-MOC/nova,badock/nova,imsplitbit/nova,Metaswitch/calico-nova,leilihh/novaha,russellb/nova,klmitch/nova,shootstar/novatest,JioCloud
/nova,rajalokan/nova,ted-gould/nova,BeyondTheClouds/nova,kimjaejoong/nova,rajalokan/nova,rrader/nova-docker-plugin,psiwczak/openstack,CiscoSystems/nova,bclau/nova,watonyweng/nova,maoy/zknova,alvarolopez/nova,devoid/nova,cernops/nova,qwefi/nova,josephsuh/extra-specs,rahulunair/nova,tanglei528/nova,ruslanloman/nova,eneabio/nova,nikesh-mahalka/nova,SUSE-Cloud/nova,vmturbo/nova,jeffrey4l/nova,takeshineshiro/nova,salv-orlando/MyRepo,paulmathews/nova,tianweizhang/nova,dawnpower/nova,devoid/nova,houshengbo/nova_vmware_compute_driver,NoBodyCam/TftpPxeBootBareMetal,jianghuaw/nova,tealover/nova,superstack/nova,rrader/nova-docker-plugin,KarimAllah/nova,felixma/nova,russellb/nova,NeCTAR-RC/nova,berrange/nova,maelnor/nova,mmnelemane/nova,j-carpentier/nova,eharney/nova,sacharya/nova,alvarolopez/nova,LoHChina/nova,spring-week-topos/nova-week,tealover/nova,virtualopensystems/nova,berrange/nova,edulramirez/nova,russellb/nova,plumgrid/plumgrid-nova,raildo/nova,petrutlucian94/nova_dev,projectcalico/calico-nova,ewindisch/nova,vmturbo/nova,DirectXMan12/nova-hacking,felixma/nova,gspilio/nova,double12gzh/nova,devendermishrajio/nova,jianghuaw/nova,Brocade-OpenSource/OpenStack-DNRM-Nova,sebrandon1/nova,akash1808/nova,saleemjaveds/https-github.com-openstack-nova,cloudbau/nova,blueboxgroup/nova,maelnor/nova,rickerc/nova_audit,CloudServer/nova,tangfeixiong/nova,eayunstack/nova,mgagne/nova,Yuriy-Leonov/nova,shahar-stratoscale/nova,hanlind/nova,TieWei/nova,rahulunair/nova,dstroppa/openstack-smartos-nova-grizzly,alaski/nova,mandeepdhami/nova,Juniper/nova,angdraug/nova,jianghuaw/nova,LoHChina/nova,sridevikoushik31/nova,paulmathews/nova,sridevikoushik31/openstack,iuliat/nova,cloudbase/nova,paulmathews/nova,belmiromoreira/nova,JianyuWang/nova,OpenAcademy-OpenStack/nova-scheduler,Francis-Liu/animated-broccoli,bclau/nova,fajoy/nova,openstack/nova,ntt-sic/nova,ewindisch/nova,leilihh/nova,petrutlucian94/nova,sacharya/nova,sridevikoushik31/nova,openstack/nova,redhat-openstack/nova,OpenAcademy-OpenStack/nova-scheduler,devendermishrajio/nova,savi-dev/nova,TwinkleChawla/nova,sridevikoushik31/openstack,sridevikoushik31/openstack,barnsnake351/nova,alaski/nova,BeyondTheClouds/nova,cernops/nova,isyippee/nova,zaina/nova,noironetworks/nova,Triv90/Nova,zaina/nova,tianweizhang/nova,shail2810/nova,mandeepdhami/nova,yosshy/nova,noironetworks/nova,vmturbo/nova,leilihh/novaha,projectcalico/calico-nova,maheshp/novatest,houshengbo/nova_vmware_compute_driver,hanlind/nova,iuliat/nova,devendermishrajio/nova_test_latest,alexandrucoman/vbox-nova-driver,isyippee/nova,takeshineshiro/nova,dstroppa/openstack-smartos-nova-grizzly,tudorvio/nova,salv-orlando/MyRepo,mahak/nova,gooddata/openstack-nova,Tehsmash/nova,ruslanloman/nova,affo/nova,savi-dev/nova,shahar-stratoscale/nova,cyx1231st/nova,gooddata/openstack-nova,Brocade-OpenSource/OpenStack-DNRM-Nova
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 Openstack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import flags
from nova import utils
FLAGS = flags.FLAGS
flags.DEFINE_string('ipv6_backend',
'rfc2462',
'Backend to use for IPv6 generation')
def reset_backend():
global IMPL
IMPL = utils.LazyPluggable(FLAGS['ipv6_backend'],
rfc2462='nova.ipv6.rfc2462',
account_identifier='nova.ipv6.account_identifier')
def to_global(prefix, mac, project_id):
return IMPL.to_global(prefix, mac, project_id)
def to_mac(ipv6_address):
return IMPL.to_mac(ipv6_address)
reset_backend()
Reduce indentation to avoid PEP8 failures
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 Openstack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import flags
from nova import utils
FLAGS = flags.FLAGS
flags.DEFINE_string('ipv6_backend',
'rfc2462',
'Backend to use for IPv6 generation')
def reset_backend():
global IMPL
IMPL = utils.LazyPluggable(FLAGS['ipv6_backend'],
rfc2462='nova.ipv6.rfc2462',
account_identifier='nova.ipv6.account_identifier')
def to_global(prefix, mac, project_id):
return IMPL.to_global(prefix, mac, project_id)
def to_mac(ipv6_address):
return IMPL.to_mac(ipv6_address)
reset_backend()
|
<commit_before># vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 Openstack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import flags
from nova import utils
FLAGS = flags.FLAGS
flags.DEFINE_string('ipv6_backend',
'rfc2462',
'Backend to use for IPv6 generation')
def reset_backend():
global IMPL
IMPL = utils.LazyPluggable(FLAGS['ipv6_backend'],
rfc2462='nova.ipv6.rfc2462',
account_identifier='nova.ipv6.account_identifier')
def to_global(prefix, mac, project_id):
return IMPL.to_global(prefix, mac, project_id)
def to_mac(ipv6_address):
return IMPL.to_mac(ipv6_address)
reset_backend()
<commit_msg>Reduce indentation to avoid PEP8 failures<commit_after>
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 Openstack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import flags
from nova import utils
FLAGS = flags.FLAGS
flags.DEFINE_string('ipv6_backend',
'rfc2462',
'Backend to use for IPv6 generation')
def reset_backend():
global IMPL
IMPL = utils.LazyPluggable(FLAGS['ipv6_backend'],
rfc2462='nova.ipv6.rfc2462',
account_identifier='nova.ipv6.account_identifier')
def to_global(prefix, mac, project_id):
return IMPL.to_global(prefix, mac, project_id)
def to_mac(ipv6_address):
return IMPL.to_mac(ipv6_address)
reset_backend()
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 Openstack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import flags
from nova import utils
FLAGS = flags.FLAGS
flags.DEFINE_string('ipv6_backend',
'rfc2462',
'Backend to use for IPv6 generation')
def reset_backend():
global IMPL
IMPL = utils.LazyPluggable(FLAGS['ipv6_backend'],
rfc2462='nova.ipv6.rfc2462',
account_identifier='nova.ipv6.account_identifier')
def to_global(prefix, mac, project_id):
return IMPL.to_global(prefix, mac, project_id)
def to_mac(ipv6_address):
return IMPL.to_mac(ipv6_address)
reset_backend()
Reduce indentation to avoid PEP8 failures# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 Openstack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import flags
from nova import utils
FLAGS = flags.FLAGS
flags.DEFINE_string('ipv6_backend',
'rfc2462',
'Backend to use for IPv6 generation')
def reset_backend():
global IMPL
IMPL = utils.LazyPluggable(FLAGS['ipv6_backend'],
rfc2462='nova.ipv6.rfc2462',
account_identifier='nova.ipv6.account_identifier')
def to_global(prefix, mac, project_id):
return IMPL.to_global(prefix, mac, project_id)
def to_mac(ipv6_address):
return IMPL.to_mac(ipv6_address)
reset_backend()
|
<commit_before># vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 Openstack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import flags
from nova import utils
FLAGS = flags.FLAGS
flags.DEFINE_string('ipv6_backend',
'rfc2462',
'Backend to use for IPv6 generation')
def reset_backend():
global IMPL
IMPL = utils.LazyPluggable(FLAGS['ipv6_backend'],
rfc2462='nova.ipv6.rfc2462',
account_identifier='nova.ipv6.account_identifier')
def to_global(prefix, mac, project_id):
return IMPL.to_global(prefix, mac, project_id)
def to_mac(ipv6_address):
return IMPL.to_mac(ipv6_address)
reset_backend()
<commit_msg>Reduce indentation to avoid PEP8 failures<commit_after># vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 Openstack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import flags
from nova import utils
FLAGS = flags.FLAGS
flags.DEFINE_string('ipv6_backend',
'rfc2462',
'Backend to use for IPv6 generation')
def reset_backend():
global IMPL
IMPL = utils.LazyPluggable(FLAGS['ipv6_backend'],
rfc2462='nova.ipv6.rfc2462',
account_identifier='nova.ipv6.account_identifier')
def to_global(prefix, mac, project_id):
return IMPL.to_global(prefix, mac, project_id)
def to_mac(ipv6_address):
return IMPL.to_mac(ipv6_address)
reset_backend()
|
72678c437f1b1110fb8a14c78dcdd4c3c8b64157
|
rtm.py
|
rtm.py
|
import time
from slackclient import SlackClient
token = 'kekmao'
sc = SlackClient(token)
team_join_event = 'team_join'
def send_welcome_message(user):
user_id = user['id']
response = sc.api_call('im.open', user=user_id)
try:
dm_channel_id = response['channel']['id']
except (KeyError, ValueError):
print('Shite happened')
return
sc.rtm_send_message(dm_channel_id, 'welcome to devup')
def main():
if sc.rtm_connect():
while True:
for event in sc.rtm_read():
if event.get('type') == team_join_event and (
event['user']['is_bot'] is False):
send_welcome_message(user=event['user'])
time.sleep(1)
else:
print ("Connection Failed, invalid token?")
if __name__ == '__main__':
main()
|
Add initial version of bot script
|
Add initial version of bot script
|
Python
|
mit
|
devupin/allie
|
Add initial version of bot script
|
import time
from slackclient import SlackClient
token = 'kekmao'
sc = SlackClient(token)
team_join_event = 'team_join'
def send_welcome_message(user):
user_id = user['id']
response = sc.api_call('im.open', user=user_id)
try:
dm_channel_id = response['channel']['id']
except (KeyError, ValueError):
print('Shite happened')
return
sc.rtm_send_message(dm_channel_id, 'welcome to devup')
def main():
if sc.rtm_connect():
while True:
for event in sc.rtm_read():
if event.get('type') == team_join_event and (
event['user']['is_bot'] is False):
send_welcome_message(user=event['user'])
time.sleep(1)
else:
print ("Connection Failed, invalid token?")
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add initial version of bot script<commit_after>
|
import time
from slackclient import SlackClient
token = 'kekmao'
sc = SlackClient(token)
team_join_event = 'team_join'
def send_welcome_message(user):
user_id = user['id']
response = sc.api_call('im.open', user=user_id)
try:
dm_channel_id = response['channel']['id']
except (KeyError, ValueError):
print('Shite happened')
return
sc.rtm_send_message(dm_channel_id, 'welcome to devup')
def main():
if sc.rtm_connect():
while True:
for event in sc.rtm_read():
if event.get('type') == team_join_event and (
event['user']['is_bot'] is False):
send_welcome_message(user=event['user'])
time.sleep(1)
else:
print ("Connection Failed, invalid token?")
if __name__ == '__main__':
main()
|
Add initial version of bot scriptimport time
from slackclient import SlackClient
token = 'kekmao'
sc = SlackClient(token)
team_join_event = 'team_join'
def send_welcome_message(user):
user_id = user['id']
response = sc.api_call('im.open', user=user_id)
try:
dm_channel_id = response['channel']['id']
except (KeyError, ValueError):
print('Shite happened')
return
sc.rtm_send_message(dm_channel_id, 'welcome to devup')
def main():
if sc.rtm_connect():
while True:
for event in sc.rtm_read():
if event.get('type') == team_join_event and (
event['user']['is_bot'] is False):
send_welcome_message(user=event['user'])
time.sleep(1)
else:
print ("Connection Failed, invalid token?")
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add initial version of bot script<commit_after>import time
from slackclient import SlackClient
token = 'kekmao'
sc = SlackClient(token)
team_join_event = 'team_join'
def send_welcome_message(user):
user_id = user['id']
response = sc.api_call('im.open', user=user_id)
try:
dm_channel_id = response['channel']['id']
except (KeyError, ValueError):
print('Shite happened')
return
sc.rtm_send_message(dm_channel_id, 'welcome to devup')
def main():
if sc.rtm_connect():
while True:
for event in sc.rtm_read():
if event.get('type') == team_join_event and (
event['user']['is_bot'] is False):
send_welcome_message(user=event['user'])
time.sleep(1)
else:
print ("Connection Failed, invalid token?")
if __name__ == '__main__':
main()
|
|
41631175c7aae124f7504f068d9c2f8cf1c9e617
|
plugins/configuration/configurationtype/configuration_error.py
|
plugins/configuration/configurationtype/configuration_error.py
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
#This software is distributed under the Creative Commons license (CC0) version 1.0. A copy of this license should have been distributed with this software.
#The license can also be read online: <https://creativecommons.org/publicdomain/zero/1.0/>. If this online license differs from the license provided with this software, the license provided with this software should be applied.
"""
Defines a class of exceptions used to denote a false state of being for
configuration.
"""
class ConfigurationError(Exception):
"""
This exception denotes that something went wrong in the configuration.
It is mostly a marker class, but also provides the type of configuration in
which something went wrong.
"""
def __init__(self, message, configuration_type):
"""
Creates a new ConfigurationError.
:param message: The message describing the error that occurred.
:param configuration_type: The configuration type with which the error
occurred.
"""
#Prepend the configuration type before the error message.
super(ConfigurationError, self).__init__("{configuration_type}: {message}".format(configuration_type=configuration_type, message=message))
self.configuration_type = configuration_type #Also store it here for debugging purposes.
|
Add exception to describe errors in configuration processing
|
Add exception to describe errors in configuration processing
Mostly just a marker, but with some extra debug information.
|
Python
|
cc0-1.0
|
Ghostkeeper/Luna
|
Add exception to describe errors in configuration processing
Mostly just a marker, but with some extra debug information.
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
#This software is distributed under the Creative Commons license (CC0) version 1.0. A copy of this license should have been distributed with this software.
#The license can also be read online: <https://creativecommons.org/publicdomain/zero/1.0/>. If this online license differs from the license provided with this software, the license provided with this software should be applied.
"""
Defines a class of exceptions used to denote a false state of being for
configuration.
"""
class ConfigurationError(Exception):
"""
This exception denotes that something went wrong in the configuration.
It is mostly a marker class, but also provides the type of configuration in
which something went wrong.
"""
def __init__(self, message, configuration_type):
"""
Creates a new ConfigurationError.
:param message: The message describing the error that occurred.
:param configuration_type: The configuration type with which the error
occurred.
"""
#Prepend the configuration type before the error message.
super(ConfigurationError, self).__init__("{configuration_type}: {message}".format(configuration_type=configuration_type, message=message))
self.configuration_type = configuration_type #Also store it here for debugging purposes.
|
<commit_before><commit_msg>Add exception to describe errors in configuration processing
Mostly just a marker, but with some extra debug information.<commit_after>
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
#This software is distributed under the Creative Commons license (CC0) version 1.0. A copy of this license should have been distributed with this software.
#The license can also be read online: <https://creativecommons.org/publicdomain/zero/1.0/>. If this online license differs from the license provided with this software, the license provided with this software should be applied.
"""
Defines a class of exceptions used to denote a false state of being for
configuration.
"""
class ConfigurationError(Exception):
"""
This exception denotes that something went wrong in the configuration.
It is mostly a marker class, but also provides the type of configuration in
which something went wrong.
"""
def __init__(self, message, configuration_type):
"""
Creates a new ConfigurationError.
:param message: The message describing the error that occurred.
:param configuration_type: The configuration type with which the error
occurred.
"""
#Prepend the configuration type before the error message.
super(ConfigurationError, self).__init__("{configuration_type}: {message}".format(configuration_type=configuration_type, message=message))
self.configuration_type = configuration_type #Also store it here for debugging purposes.
|
Add exception to describe errors in configuration processing
Mostly just a marker, but with some extra debug information.#!/usr/bin/env python
#-*- coding: utf-8 -*-
#This software is distributed under the Creative Commons license (CC0) version 1.0. A copy of this license should have been distributed with this software.
#The license can also be read online: <https://creativecommons.org/publicdomain/zero/1.0/>. If this online license differs from the license provided with this software, the license provided with this software should be applied.
"""
Defines a class of exceptions used to denote a false state of being for
configuration.
"""
class ConfigurationError(Exception):
"""
This exception denotes that something went wrong in the configuration.
It is mostly a marker class, but also provides the type of configuration in
which something went wrong.
"""
def __init__(self, message, configuration_type):
"""
Creates a new ConfigurationError.
:param message: The message describing the error that occurred.
:param configuration_type: The configuration type with which the error
occurred.
"""
#Prepend the configuration type before the error message.
super(ConfigurationError, self).__init__("{configuration_type}: {message}".format(configuration_type=configuration_type, message=message))
self.configuration_type = configuration_type #Also store it here for debugging purposes.
|
<commit_before><commit_msg>Add exception to describe errors in configuration processing
Mostly just a marker, but with some extra debug information.<commit_after>#!/usr/bin/env python
#-*- coding: utf-8 -*-
#This software is distributed under the Creative Commons license (CC0) version 1.0. A copy of this license should have been distributed with this software.
#The license can also be read online: <https://creativecommons.org/publicdomain/zero/1.0/>. If this online license differs from the license provided with this software, the license provided with this software should be applied.
"""
Defines a class of exceptions used to denote a false state of being for
configuration.
"""
class ConfigurationError(Exception):
"""
This exception denotes that something went wrong in the configuration.
It is mostly a marker class, but also provides the type of configuration in
which something went wrong.
"""
def __init__(self, message, configuration_type):
"""
Creates a new ConfigurationError.
:param message: The message describing the error that occurred.
:param configuration_type: The configuration type with which the error
occurred.
"""
#Prepend the configuration type before the error message.
super(ConfigurationError, self).__init__("{configuration_type}: {message}".format(configuration_type=configuration_type, message=message))
self.configuration_type = configuration_type #Also store it here for debugging purposes.
|
|
c7987bde28992ef0ae8cae9fca500730b2fcea15
|
flexget/plugins/urlrewrite_eztv.py
|
flexget/plugins/urlrewrite_eztv.py
|
from __future__ import unicode_literals, division, absolute_import
import re
import logging
from urlparse import urlparse, urlunparse
from requests import RequestException
from flexget import plugin
from flexget.event import event
from flexget.plugins.plugin_urlrewriting import UrlRewritingError
from flexget.utils import requests
from flexget.utils.soup import get_soup
log = logging.getLogger('eztv')
EZTV_MIRRORS = [
('http', 'eztv.it'),
('https', 'eztv-proxy.net'),
('http', 'eztv.come.in')]
class UrlRewriteEztv(object):
"""Eztv url rewriter."""
def url_rewritable(self, task, entry):
return urlparse(entry['url']).netloc == 'eztv.it'
def url_rewrite(self, task, entry):
url = entry['url']
for (scheme, netloc) in EZTV_MIRRORS:
try:
_, _, path, params, query, fragment = urlparse(url)
url = urlunparse((scheme, netloc, path, params, query, fragment))
page = requests.get(url).content
except RequestException as e:
log.debug('Eztv mirror `%s` seems to be down', url)
continue
break
if not page:
raise UrlRewritingError('No mirrors found for url %s' % entry['url'])
log.debug('Eztv mirror `%s` chosen', url)
try:
soup = get_soup(page)
mirrors = soup.find('a', attrs={'class': re.compile(r'download_\d')})
if not mirrors:
raise UrlRewritingError('Unable to locate download link from url %s'
% url)
entry['urls'] = [m.get('href') for m in mirrors]
entry['url'] = mirrors[0].get('href')
except Exception as e:
raise UrlRewritingError(e)
@event('plugin.register')
def register_plugin():
plugin.register(UrlRewriteEztv, 'eztv', groups=['urlrewriter'], api_ver=2)
|
Add url rewriter for eztv
|
Add url rewriter for eztv
|
Python
|
mit
|
jawilson/Flexget,crawln45/Flexget,ZefQ/Flexget,crawln45/Flexget,crawln45/Flexget,grrr2/Flexget,poulpito/Flexget,tobinjt/Flexget,ianstalk/Flexget,dsemi/Flexget,Flexget/Flexget,Flexget/Flexget,OmgOhnoes/Flexget,qk4l/Flexget,jacobmetrick/Flexget,JorisDeRieck/Flexget,ZefQ/Flexget,tsnoam/Flexget,Danfocus/Flexget,antivirtel/Flexget,voriux/Flexget,thalamus/Flexget,tobinjt/Flexget,thalamus/Flexget,ianstalk/Flexget,antivirtel/Flexget,spencerjanssen/Flexget,xfouloux/Flexget,qvazzler/Flexget,Pretagonist/Flexget,LynxyssCZ/Flexget,malkavi/Flexget,offbyone/Flexget,X-dark/Flexget,vfrc2/Flexget,xfouloux/Flexget,spencerjanssen/Flexget,JorisDeRieck/Flexget,thalamus/Flexget,patsissons/Flexget,ianstalk/Flexget,tsnoam/Flexget,OmgOhnoes/Flexget,cvium/Flexget,lildadou/Flexget,antivirtel/Flexget,crawln45/Flexget,lildadou/Flexget,qvazzler/Flexget,jacobmetrick/Flexget,Flexget/Flexget,qvazzler/Flexget,ibrahimkarahan/Flexget,cvium/Flexget,drwyrm/Flexget,offbyone/Flexget,sean797/Flexget,dsemi/Flexget,camon/Flexget,jawilson/Flexget,lildadou/Flexget,oxc/Flexget,gazpachoking/Flexget,jacobmetrick/Flexget,malkavi/Flexget,grrr2/Flexget,cvium/Flexget,jawilson/Flexget,malkavi/Flexget,patsissons/Flexget,Pretagonist/Flexget,tvcsantos/Flexget,camon/Flexget,sean797/Flexget,poulpito/Flexget,ZefQ/Flexget,dsemi/Flexget,spencerjanssen/Flexget,jawilson/Flexget,voriux/Flexget,LynxyssCZ/Flexget,OmgOhnoes/Flexget,qk4l/Flexget,ratoaq2/Flexget,tobinjt/Flexget,X-dark/Flexget,drwyrm/Flexget,offbyone/Flexget,JorisDeRieck/Flexget,ratoaq2/Flexget,oxc/Flexget,ibrahimkarahan/Flexget,tvcsantos/Flexget,vfrc2/Flexget,poulpito/Flexget,Pretagonist/Flexget,ibrahimkarahan/Flexget,qk4l/Flexget,ratoaq2/Flexget,patsissons/Flexget,gazpachoking/Flexget,tsnoam/Flexget,v17al/Flexget,oxc/Flexget,v17al/Flexget,X-dark/Flexget,tarzasai/Flexget,tarzasai/Flexget,LynxyssCZ/Flexget,JorisDeRieck/Flexget,LynxyssCZ/Flexget,sean797/Flexget,xfouloux/Flexget,malkavi/Flexget,Danfocus/Flexget,Danfocus/Flexget,Danfocus/Flexget,v17al/Flexget,grrr2/Flexget,tarzasai/Flexget,tobinjt/Flexget,vfrc2/Flexget,drwyrm/Flexget,Flexget/Flexget
|
Add url rewriter for eztv
|
from __future__ import unicode_literals, division, absolute_import
import re
import logging
from urlparse import urlparse, urlunparse
from requests import RequestException
from flexget import plugin
from flexget.event import event
from flexget.plugins.plugin_urlrewriting import UrlRewritingError
from flexget.utils import requests
from flexget.utils.soup import get_soup
log = logging.getLogger('eztv')
EZTV_MIRRORS = [
('http', 'eztv.it'),
('https', 'eztv-proxy.net'),
('http', 'eztv.come.in')]
class UrlRewriteEztv(object):
"""Eztv url rewriter."""
def url_rewritable(self, task, entry):
return urlparse(entry['url']).netloc == 'eztv.it'
def url_rewrite(self, task, entry):
        url = entry['url']
        page = None  # keep the later check valid even if every mirror fails
for (scheme, netloc) in EZTV_MIRRORS:
try:
_, _, path, params, query, fragment = urlparse(url)
url = urlunparse((scheme, netloc, path, params, query, fragment))
page = requests.get(url).content
except RequestException as e:
log.debug('Eztv mirror `%s` seems to be down', url)
continue
break
if not page:
raise UrlRewritingError('No mirrors found for url %s' % entry['url'])
log.debug('Eztv mirror `%s` chosen', url)
try:
soup = get_soup(page)
            mirrors = soup.find_all('a', attrs={'class': re.compile(r'download_\d')})
if not mirrors:
raise UrlRewritingError('Unable to locate download link from url %s'
% url)
entry['urls'] = [m.get('href') for m in mirrors]
entry['url'] = mirrors[0].get('href')
except Exception as e:
raise UrlRewritingError(e)
@event('plugin.register')
def register_plugin():
plugin.register(UrlRewriteEztv, 'eztv', groups=['urlrewriter'], api_ver=2)
|
<commit_before><commit_msg>Add url rewriter for eztv<commit_after>
|
from __future__ import unicode_literals, division, absolute_import
import re
import logging
from urlparse import urlparse, urlunparse
from requests import RequestException
from flexget import plugin
from flexget.event import event
from flexget.plugins.plugin_urlrewriting import UrlRewritingError
from flexget.utils import requests
from flexget.utils.soup import get_soup
log = logging.getLogger('eztv')
EZTV_MIRRORS = [
('http', 'eztv.it'),
('https', 'eztv-proxy.net'),
('http', 'eztv.come.in')]
class UrlRewriteEztv(object):
"""Eztv url rewriter."""
def url_rewritable(self, task, entry):
return urlparse(entry['url']).netloc == 'eztv.it'
def url_rewrite(self, task, entry):
        url = entry['url']
        page = None  # keep the later check valid even if every mirror fails
for (scheme, netloc) in EZTV_MIRRORS:
try:
_, _, path, params, query, fragment = urlparse(url)
url = urlunparse((scheme, netloc, path, params, query, fragment))
page = requests.get(url).content
except RequestException as e:
log.debug('Eztv mirror `%s` seems to be down', url)
continue
break
if not page:
raise UrlRewritingError('No mirrors found for url %s' % entry['url'])
log.debug('Eztv mirror `%s` chosen', url)
try:
soup = get_soup(page)
            mirrors = soup.find_all('a', attrs={'class': re.compile(r'download_\d')})
if not mirrors:
raise UrlRewritingError('Unable to locate download link from url %s'
% url)
entry['urls'] = [m.get('href') for m in mirrors]
entry['url'] = mirrors[0].get('href')
except Exception as e:
raise UrlRewritingError(e)
@event('plugin.register')
def register_plugin():
plugin.register(UrlRewriteEztv, 'eztv', groups=['urlrewriter'], api_ver=2)
|
Add url rewriter for eztvfrom __future__ import unicode_literals, division, absolute_import
import re
import logging
from urlparse import urlparse, urlunparse
from requests import RequestException
from flexget import plugin
from flexget.event import event
from flexget.plugins.plugin_urlrewriting import UrlRewritingError
from flexget.utils import requests
from flexget.utils.soup import get_soup
log = logging.getLogger('eztv')
EZTV_MIRRORS = [
('http', 'eztv.it'),
('https', 'eztv-proxy.net'),
('http', 'eztv.come.in')]
class UrlRewriteEztv(object):
"""Eztv url rewriter."""
def url_rewritable(self, task, entry):
return urlparse(entry['url']).netloc == 'eztv.it'
def url_rewrite(self, task, entry):
        url = entry['url']
        page = None  # keep the later check valid even if every mirror fails
for (scheme, netloc) in EZTV_MIRRORS:
try:
_, _, path, params, query, fragment = urlparse(url)
url = urlunparse((scheme, netloc, path, params, query, fragment))
page = requests.get(url).content
except RequestException as e:
log.debug('Eztv mirror `%s` seems to be down', url)
continue
break
if not page:
raise UrlRewritingError('No mirrors found for url %s' % entry['url'])
log.debug('Eztv mirror `%s` chosen', url)
try:
soup = get_soup(page)
            mirrors = soup.find_all('a', attrs={'class': re.compile(r'download_\d')})
if not mirrors:
raise UrlRewritingError('Unable to locate download link from url %s'
% url)
entry['urls'] = [m.get('href') for m in mirrors]
entry['url'] = mirrors[0].get('href')
except Exception as e:
raise UrlRewritingError(e)
@event('plugin.register')
def register_plugin():
plugin.register(UrlRewriteEztv, 'eztv', groups=['urlrewriter'], api_ver=2)
|
<commit_before><commit_msg>Add url rewriter for eztv<commit_after>from __future__ import unicode_literals, division, absolute_import
import re
import logging
from urlparse import urlparse, urlunparse
from requests import RequestException
from flexget import plugin
from flexget.event import event
from flexget.plugins.plugin_urlrewriting import UrlRewritingError
from flexget.utils import requests
from flexget.utils.soup import get_soup
log = logging.getLogger('eztv')
EZTV_MIRRORS = [
('http', 'eztv.it'),
('https', 'eztv-proxy.net'),
('http', 'eztv.come.in')]
class UrlRewriteEztv(object):
"""Eztv url rewriter."""
def url_rewritable(self, task, entry):
return urlparse(entry['url']).netloc == 'eztv.it'
def url_rewrite(self, task, entry):
        url = entry['url']
        page = None  # keep the later check valid even if every mirror fails
for (scheme, netloc) in EZTV_MIRRORS:
try:
_, _, path, params, query, fragment = urlparse(url)
url = urlunparse((scheme, netloc, path, params, query, fragment))
page = requests.get(url).content
except RequestException as e:
log.debug('Eztv mirror `%s` seems to be down', url)
continue
break
if not page:
raise UrlRewritingError('No mirrors found for url %s' % entry['url'])
log.debug('Eztv mirror `%s` chosen', url)
try:
soup = get_soup(page)
            mirrors = soup.find_all('a', attrs={'class': re.compile(r'download_\d')})
if not mirrors:
raise UrlRewritingError('Unable to locate download link from url %s'
% url)
entry['urls'] = [m.get('href') for m in mirrors]
entry['url'] = mirrors[0].get('href')
except Exception as e:
raise UrlRewritingError(e)
@event('plugin.register')
def register_plugin():
plugin.register(UrlRewriteEztv, 'eztv', groups=['urlrewriter'], api_ver=2)
|
|
d3f556b6d7da2c67fc9dcc6b7d73a0d1b76d278c
|
tests/functional_tests/test_valid_recipes.py
|
tests/functional_tests/test_valid_recipes.py
|
import os
import pytest
from conda_verify import utils
from conda_verify.exceptions import RecipeError
from conda_verify.verify import Verify
@pytest.fixture
def recipe_dir():
return os.path.join(os.path.dirname(__file__), 'test-recipes')
@pytest.fixture
def verifier():
recipe_verifier = Verify()
return recipe_verifier
def test_valid_recipe_named_icu(recipe_dir, verifier):
recipe = os.path.join(recipe_dir, 'icu')
metadata = utils.render_metadata(recipe, None)
try:
verifier.verify_recipe(pedantic=False, rendered_meta=metadata,
recipe_dir=recipe)
except RecipeError as error:
pytest.fail(error)
def test_valid_test_file(recipe_dir, verifier):
recipe = os.path.join(recipe_dir, 'valid_test_file')
metadata = utils.render_metadata(recipe, None)
try:
verifier.verify_recipe(pedantic=False, rendered_meta=metadata,
recipe_dir=recipe)
except RecipeError as error:
pytest.fail(error)
|
Add tests for valid recipes
|
Add tests for valid recipes
|
Python
|
bsd-3-clause
|
mandeep/conda-verify
|
Add tests for valid recipes
|
import os
import pytest
from conda_verify import utils
from conda_verify.exceptions import RecipeError
from conda_verify.verify import Verify
@pytest.fixture
def recipe_dir():
return os.path.join(os.path.dirname(__file__), 'test-recipes')
@pytest.fixture
def verifier():
recipe_verifier = Verify()
return recipe_verifier
def test_valid_recipe_named_icu(recipe_dir, verifier):
recipe = os.path.join(recipe_dir, 'icu')
metadata = utils.render_metadata(recipe, None)
try:
verifier.verify_recipe(pedantic=False, rendered_meta=metadata,
recipe_dir=recipe)
except RecipeError as error:
pytest.fail(error)
def test_valid_test_file(recipe_dir, verifier):
recipe = os.path.join(recipe_dir, 'valid_test_file')
metadata = utils.render_metadata(recipe, None)
try:
verifier.verify_recipe(pedantic=False, rendered_meta=metadata,
recipe_dir=recipe)
except RecipeError as error:
pytest.fail(error)
|
<commit_before><commit_msg>Add tests for valid recipes<commit_after>
|
import os
import pytest
from conda_verify import utils
from conda_verify.exceptions import RecipeError
from conda_verify.verify import Verify
@pytest.fixture
def recipe_dir():
return os.path.join(os.path.dirname(__file__), 'test-recipes')
@pytest.fixture
def verifier():
recipe_verifier = Verify()
return recipe_verifier
def test_valid_recipe_named_icu(recipe_dir, verifier):
recipe = os.path.join(recipe_dir, 'icu')
metadata = utils.render_metadata(recipe, None)
try:
verifier.verify_recipe(pedantic=False, rendered_meta=metadata,
recipe_dir=recipe)
except RecipeError as error:
pytest.fail(error)
def test_valid_test_file(recipe_dir, verifier):
recipe = os.path.join(recipe_dir, 'valid_test_file')
metadata = utils.render_metadata(recipe, None)
try:
verifier.verify_recipe(pedantic=False, rendered_meta=metadata,
recipe_dir=recipe)
except RecipeError as error:
pytest.fail(error)
|
Add tests for valid recipesimport os
import pytest
from conda_verify import utils
from conda_verify.exceptions import RecipeError
from conda_verify.verify import Verify
@pytest.fixture
def recipe_dir():
return os.path.join(os.path.dirname(__file__), 'test-recipes')
@pytest.fixture
def verifier():
recipe_verifier = Verify()
return recipe_verifier
def test_valid_recipe_named_icu(recipe_dir, verifier):
recipe = os.path.join(recipe_dir, 'icu')
metadata = utils.render_metadata(recipe, None)
try:
verifier.verify_recipe(pedantic=False, rendered_meta=metadata,
recipe_dir=recipe)
except RecipeError as error:
pytest.fail(error)
def test_valid_test_file(recipe_dir, verifier):
recipe = os.path.join(recipe_dir, 'valid_test_file')
metadata = utils.render_metadata(recipe, None)
try:
verifier.verify_recipe(pedantic=False, rendered_meta=metadata,
recipe_dir=recipe)
except RecipeError as error:
pytest.fail(error)
|
<commit_before><commit_msg>Add tests for valid recipes<commit_after>import os
import pytest
from conda_verify import utils
from conda_verify.exceptions import RecipeError
from conda_verify.verify import Verify
@pytest.fixture
def recipe_dir():
return os.path.join(os.path.dirname(__file__), 'test-recipes')
@pytest.fixture
def verifier():
recipe_verifier = Verify()
return recipe_verifier
def test_valid_recipe_named_icu(recipe_dir, verifier):
recipe = os.path.join(recipe_dir, 'icu')
metadata = utils.render_metadata(recipe, None)
try:
verifier.verify_recipe(pedantic=False, rendered_meta=metadata,
recipe_dir=recipe)
except RecipeError as error:
pytest.fail(error)
def test_valid_test_file(recipe_dir, verifier):
recipe = os.path.join(recipe_dir, 'valid_test_file')
metadata = utils.render_metadata(recipe, None)
try:
verifier.verify_recipe(pedantic=False, rendered_meta=metadata,
recipe_dir=recipe)
except RecipeError as error:
pytest.fail(error)
|
|
58c604a8574ade75aecbd80314004a9539e80c84
|
plugins/volunteers.py
|
plugins/volunteers.py
|
__commands__ = '''
?wv [name] - congratulates people on their public spirited gesture
'''
def plugin(bot):
bot.hear(r'^\?wv$', "Well volunteered!")
bot.hear(r'^\?wv\s(.+)$', lambda response: "Well volunteered %s!" % response.match.group(1).strip())
|
Add ?wv command for encouraging action
|
Add ?wv command for encouraging action
Reviewed-by: Zi Li
|
Python
|
mit
|
smarkets/hal
|
Add ?wv command for encouraging action
Reviewed-by: Zi Li
|
__commands__ = '''
?wv [name] - congratulates people on their public spirited gesture
'''
def plugin(bot):
bot.hear(r'^\?wv$', "Well volunteered!")
bot.hear(r'^\?wv\s(.+)$', lambda response: "Well volunteered %s!" % response.match.group(1).strip())
|
<commit_before><commit_msg>Add ?wv command for encouraging action
Reviewed-by: Zi Li<commit_after>
|
__commands__ = '''
?wv [name] - congratulates people on their public spirited gesture
'''
def plugin(bot):
bot.hear(r'^\?wv$', "Well volunteered!")
bot.hear(r'^\?wv\s(.+)$', lambda response: "Well volunteered %s!" % response.match.group(1).strip())
|
Add ?wv command for encouraging action
Reviewed-by: Zi Li__commands__ = '''
?wv [name] - congratulates people on their public spirited gesture
'''
def plugin(bot):
bot.hear(r'^\?wv$', "Well volunteered!")
bot.hear(r'^\?wv\s(.+)$', lambda response: "Well volunteered %s!" % response.match.group(1).strip())
|
<commit_before><commit_msg>Add ?wv command for encouraging action
Reviewed-by: Zi Li<commit_after>__commands__ = '''
?wv [name] - congratulates people on their public spirited gesture
'''
def plugin(bot):
bot.hear(r'^\?wv$', "Well volunteered!")
bot.hear(r'^\?wv\s(.+)$', lambda response: "Well volunteered %s!" % response.match.group(1).strip())
|
|
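As a minimal illustration of how the second pattern captures the name (standalone, using only Python's re module; the bot framework itself is not needed):

import re

pattern = re.compile(r'^\?wv\s(.+)$')
match = pattern.match('?wv Alice')
if match:
    # The captured group is the volunteer's name, whitespace-stripped
    print("Well volunteered %s!" % match.group(1).strip())  # -> Well volunteered Alice!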
40ad674ae170347ed69b19434241438bb09e473d
|
app/decorators.py
|
app/decorators.py
|
from functools import wraps
from flask import redirect, session, url_for
def login_required(f):
@wraps(f)
def wrapper(*args, **kwargs):
if session.get('logged_in', False):
return f(*args, **kwargs)
return redirect(url_for('public.login'))
return wrapper
|
Define decorator for requiring login
|
Define decorator for requiring login
|
Python
|
apache-2.0
|
ueg1990/customer-info,ueg1990/customer-info
|
Define decorator for requiring login
|
from functools import wraps
from flask import redirect, session, url_for
def login_required(f):
@wraps(f)
def wrapper(*args, **kwargs):
if session.get('logged_in', False):
return f(*args, **kwargs)
return redirect(url_for('public.login'))
return wrapper
|
<commit_before><commit_msg>Define decorator for requiring login<commit_after>
|
from functools import wraps
from flask import redirect, session, url_for
def login_required(f):
@wraps(f)
def wrapper(*args, **kwargs):
if session.get('logged_in', False):
return f(*args, **kwargs)
return redirect(url_for('public.login'))
return wrapper
|
Define decorator for requiring loginfrom functools import wraps
from flask import redirect, session, url_for
def login_required(f):
@wraps(f)
def wrapper(*args, **kwargs):
if session.get('logged_in', False):
return f(*args, **kwargs)
return redirect(url_for('public.login'))
return wrapper
|
<commit_before><commit_msg>Define decorator for requiring login<commit_after>from functools import wraps
from flask import redirect, session, url_for
def login_required(f):
@wraps(f)
def wrapper(*args, **kwargs):
if session.get('logged_in', False):
return f(*args, **kwargs)
return redirect(url_for('public.login'))
return wrapper
|
|
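A minimal usage sketch of the decorator guarding a Flask view; the import path, the secret key, and the existence of a 'public.login' endpoint are assumptions here, not part of the original module:

from flask import Flask
from app.decorators import login_required  # assumed import path for the module above

app = Flask(__name__)
app.secret_key = 'dev'  # hypothetical key; sessions require one

@app.route('/dashboard')
@login_required  # redirects to 'public.login' unless session['logged_in'] is truthy
def dashboard():
    return 'customer data'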
722b11eab90c6d532ea96209f7632e17181c0b3e
|
tests/test_inpoly.py
|
tests/test_inpoly.py
|
import unittest
import pcl
import numpy as np
from patty_registration.conversions import loadLas, loadCsvPolygon
from numpy.testing import assert_array_equal, assert_array_almost_equal
from matplotlib import path
class TestInPoly(unittest.TestCase):
def testInPoly(self):
fileLas = 'data/footprints/162.las'
filePoly = 'data/footprints/162.las_footprint.csv'
pc = loadLas(fileLas)
footprint = loadCsvPolygon(filePoly)
pc2 = points_in_poly(pc, footprint)
def point_in_poly(point, polyPath):
return polyPath.contains_point(point[:2])
def points_in_poly(pc, poly):
polyPath = path.Path(poly[:,:2])
points = np.asarray(pc)
return np.array([ point for point in points if point_in_poly(point+pc.offset, polyPath) ])
|
Test if points are in footprint
|
Test if points are in footprint
|
Python
|
apache-2.0
|
NLeSC/PattyAnalytics
|
Test if points are in footprint
|
import unittest
import pcl
import numpy as np
from patty_registration.conversions import loadLas, loadCsvPolygon
from numpy.testing import assert_array_equal, assert_array_almost_equal
from matplotlib import path
class TestInPoly(unittest.TestCase):
def testInPoly(self):
fileLas = 'data/footprints/162.las'
filePoly = 'data/footprints/162.las_footprint.csv'
pc = loadLas(fileLas)
footprint = loadCsvPolygon(filePoly)
pc2 = points_in_poly(pc, footprint)
def point_in_poly(point, polyPath):
return polyPath.contains_point(point[:2])
def points_in_poly(pc, poly):
polyPath = path.Path(poly[:,:2])
points = np.asarray(pc)
return np.array([ point for point in points if point_in_poly(point+pc.offset, polyPath) ])
|
<commit_before><commit_msg>Test if points are in footprint<commit_after>
|
import unittest
import pcl
import numpy as np
from patty_registration.conversions import loadLas, loadCsvPolygon
from numpy.testing import assert_array_equal, assert_array_almost_equal
from matplotlib import path
class TestInPoly(unittest.TestCase):
def testInPoly(self):
fileLas = 'data/footprints/162.las'
filePoly = 'data/footprints/162.las_footprint.csv'
pc = loadLas(fileLas)
footprint = loadCsvPolygon(filePoly)
pc2 = points_in_poly(pc, footprint)
def point_in_poly(point, polyPath):
return polyPath.contains_point(point[:2])
def points_in_poly(pc, poly):
polyPath = path.Path(poly[:,:2])
points = np.asarray(pc)
return np.array([ point for point in points if point_in_poly(point+pc.offset, polyPath) ])
|
Test if points are in footprintimport unittest
import pcl
import numpy as np
from patty_registration.conversions import loadLas, loadCsvPolygon
from numpy.testing import assert_array_equal, assert_array_almost_equal
from matplotlib import path
class TestInPoly(unittest.TestCase):
def testInPoly(self):
fileLas = 'data/footprints/162.las'
filePoly = 'data/footprints/162.las_footprint.csv'
pc = loadLas(fileLas)
footprint = loadCsvPolygon(filePoly)
pc2 = points_in_poly(pc, footprint)
def point_in_poly(point, polyPath):
return polyPath.contains_point(point[:2])
def points_in_poly(pc, poly):
polyPath = path.Path(poly[:,:2])
points = np.asarray(pc)
return np.array([ point for point in points if point_in_poly(point+pc.offset, polyPath) ])
|
<commit_before><commit_msg>Test if points are in footprint<commit_after>import unittest
import pcl
import numpy as np
from patty_registration.conversions import loadLas, loadCsvPolygon
from numpy.testing import assert_array_equal, assert_array_almost_equal
from matplotlib import path
class TestInPoly(unittest.TestCase):
def testInPoly(self):
fileLas = 'data/footprints/162.las'
filePoly = 'data/footprints/162.las_footprint.csv'
pc = loadLas(fileLas)
footprint = loadCsvPolygon(filePoly)
pc2 = points_in_poly(pc, footprint)
def point_in_poly(point, polyPath):
return polyPath.contains_point(point[:2])
def points_in_poly(pc, poly):
polyPath = path.Path(poly[:,:2])
points = np.asarray(pc)
return np.array([ point for point in points if point_in_poly(point+pc.offset, polyPath) ])
|
|
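The test leans on matplotlib.path for the point-in-polygon check; a small self-contained sketch of the same idea, with a synthetic unit-square footprint standing in for the LAS data:

import numpy as np
from matplotlib import path

# Unit square footprint and a few candidate points
footprint = path.Path(np.array([[0, 0], [1, 0], [1, 1], [0, 1]]))
points = np.array([[0.5, 0.5], [2.0, 2.0], [0.1, 0.9]])

inside = footprint.contains_points(points)
print(points[inside])  # only the points that fall inside the square remain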
7115d25c57404a42bc29513eb514073747d876ce
|
src/rez/utils/platform_mapped.py
|
src/rez/utils/platform_mapped.py
|
import re
def platform_mapped(func):
"""
Decorates functions for lookups within a config.platform_map dictionary.
The first level key is mapped to the func.__name__ of the decorated function.
Regular expressions are used on the second level key, values.
Note that there is no guaranteed order within the dictionary evaluation. Only the first matching
regular expression is being used.
For example:
config.platform_map = {
"os": {
r"Scientific Linux-(.*)": r"Scientific-\1", # Scientific Linux-x.x -> Scientific-x.x
r"Ubuntu-14.\d": r"Ubuntu-14, # Any Ubuntu-14.x -> Ubuntu-14
},
"arch": {
"x86_64": "64bit", # Maps both x86_64 and amd64 -> 64bit
"amd64": "64bit",
},
}
"""
def inner(*args, **kwargs):
# Since platform is being used within config lazy import config to prevent circular dependencies
from rez.config import config
# Original result
result = func(*args, **kwargs)
# The function name is used as primary key
if func.__name__ in config.platform_map:
for key, value in config.platform_map[func.__name__].iteritems():
result, changes = re.subn(key, value, result)
if changes > 0:
break
return result
return result
return inner
|
Add platform_map to remap Platform.os and arch based on config
|
Add platform_map to remap Platform.os and arch based on config
|
Python
|
apache-2.0
|
nerdvegas/rez,instinct-vfx/rez,nerdvegas/rez,instinct-vfx/rez
|
Add platform_map to remap Platform.os and arch based on config
|
import re
def platform_mapped(func):
"""
Decorates functions for lookups within a config.platform_map dictionary.
The first level key is mapped to the func.__name__ of the decorated function.
Regular expressions are used on the second level key, values.
Note that there is no guaranteed order within the dictionary evaluation. Only the first matching
regular expression is being used.
For example:
config.platform_map = {
"os": {
r"Scientific Linux-(.*)": r"Scientific-\1", # Scientific Linux-x.x -> Scientific-x.x
r"Ubuntu-14.\d": r"Ubuntu-14, # Any Ubuntu-14.x -> Ubuntu-14
},
"arch": {
"x86_64": "64bit", # Maps both x86_64 and amd64 -> 64bit
"amd64": "64bit",
},
}
"""
def inner(*args, **kwargs):
# Since platform is being used within config lazy import config to prevent circular dependencies
from rez.config import config
# Original result
result = func(*args, **kwargs)
# The function name is used as primary key
if func.__name__ in config.platform_map:
for key, value in config.platform_map[func.__name__].iteritems():
result, changes = re.subn(key, value, result)
if changes > 0:
break
return result
return result
return inner
|
<commit_before><commit_msg>Add platform_map to remap Platform.os and arch based on config<commit_after>
|
import re
def platform_mapped(func):
"""
Decorates functions for lookups within a config.platform_map dictionary.
The first level key is mapped to the func.__name__ of the decorated function.
Regular expressions are used on the second level key, values.
Note that there is no guaranteed order within the dictionary evaluation. Only the first matching
regular expression is being used.
For example:
config.platform_map = {
"os": {
r"Scientific Linux-(.*)": r"Scientific-\1", # Scientific Linux-x.x -> Scientific-x.x
r"Ubuntu-14.\d": r"Ubuntu-14, # Any Ubuntu-14.x -> Ubuntu-14
},
"arch": {
"x86_64": "64bit", # Maps both x86_64 and amd64 -> 64bit
"amd64": "64bit",
},
}
"""
def inner(*args, **kwargs):
# Since platform is being used within config lazy import config to prevent circular dependencies
from rez.config import config
# Original result
result = func(*args, **kwargs)
# The function name is used as primary key
if func.__name__ in config.platform_map:
for key, value in config.platform_map[func.__name__].iteritems():
result, changes = re.subn(key, value, result)
if changes > 0:
break
return result
return result
return inner
|
Add platform_map to remap Platform.os and arch based on configimport re
def platform_mapped(func):
"""
Decorates functions for lookups within a config.platform_map dictionary.
The first level key is mapped to the func.__name__ of the decorated function.
Regular expressions are used on the second level key, values.
Note that there is no guaranteed order within the dictionary evaluation. Only the first matching
regular expression is being used.
For example:
config.platform_map = {
"os": {
r"Scientific Linux-(.*)": r"Scientific-\1", # Scientific Linux-x.x -> Scientific-x.x
r"Ubuntu-14.\d": r"Ubuntu-14, # Any Ubuntu-14.x -> Ubuntu-14
},
"arch": {
"x86_64": "64bit", # Maps both x86_64 and amd64 -> 64bit
"amd64": "64bit",
},
}
"""
def inner(*args, **kwargs):
# Since platform is being used within config lazy import config to prevent circular dependencies
from rez.config import config
# Original result
result = func(*args, **kwargs)
# The function name is used as primary key
if func.__name__ in config.platform_map:
for key, value in config.platform_map[func.__name__].iteritems():
result, changes = re.subn(key, value, result)
if changes > 0:
break
return result
return result
return inner
|
<commit_before><commit_msg>Add platform_map to remap Platform.os and arch based on config<commit_after>import re
def platform_mapped(func):
"""
Decorates functions for lookups within a config.platform_map dictionary.
The first level key is mapped to the func.__name__ of the decorated function.
Regular expressions are used on the second level key, values.
Note that there is no guaranteed order within the dictionary evaluation. Only the first matching
regular expression is being used.
For example:
config.platform_map = {
"os": {
r"Scientific Linux-(.*)": r"Scientific-\1", # Scientific Linux-x.x -> Scientific-x.x
r"Ubuntu-14.\d": r"Ubuntu-14, # Any Ubuntu-14.x -> Ubuntu-14
},
"arch": {
"x86_64": "64bit", # Maps both x86_64 and amd64 -> 64bit
"amd64": "64bit",
},
}
"""
def inner(*args, **kwargs):
# Since platform is being used within config lazy import config to prevent circular dependencies
from rez.config import config
# Original result
result = func(*args, **kwargs)
# The function name is used as primary key
if func.__name__ in config.platform_map:
for key, value in config.platform_map[func.__name__].iteritems():
result, changes = re.subn(key, value, result)
if changes > 0:
break
return result
return result
return inner
|
|
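To make the remapping concrete, here is a small standalone sketch (independent of rez.config) of the regex substitution the decorator performs, reusing the mapping shown in the docstring:

import re

platform_map = {
    "os": {
        r"Scientific Linux-(.*)": r"Scientific-\1",
        r"Ubuntu-14.\d": r"Ubuntu-14",
    },
}

def remap(kind, value):
    # Apply the first matching regex for this key, mirroring the decorator's loop
    for pattern, replacement in platform_map.get(kind, {}).items():
        value, changes = re.subn(pattern, replacement, value)
        if changes > 0:
            break
    return value

print(remap("os", "Scientific Linux-7.2"))  # -> Scientific-7.2
print(remap("os", "Ubuntu-14.4"))           # -> Ubuntu-14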
8a1448ed3bd426d11f6222d63f77604ec132b2da
|
examples/signed_url_auth.py
|
examples/signed_url_auth.py
|
# Copyright 2016 Catalyst IT Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from zaqarclient.queues.v2 import client
URL = 'http://localhost:8888'
def create_post_delete(queue_name, messages):
"""Presigned queue example
Creates a queue, posts messages to it and finally deletes it with
``signed-url`` auth strategy enabled on Zaqar server side.
:params queue_name: The name of the queue
:type queue_name: `six.text_type`
:params messages: Messages to post.
:type messages: list
"""
conf = {'auth_opts':
{'backend': 'signed-url',
'options': {'signature': '',
'expires': '',
'methods': ['GET', 'PATCH', 'POST', 'PUT'],
'paths': ['/v2/queues/beijing/claims'],
'os_project_id': '2887aabf368046a3bb0070f1c0413470'}
}
}
cli = client.Client(URL, conf=conf)
queue = cli.queue(queue_name)
queue.post(messages)
for msg in queue.messages(echo=True):
print(msg.body)
msg.delete()
if __name__ == '__main__':
messages = [{'body': {'id': idx}, 'ttl': 360}
for idx in range(20)]
create_post_delete('beijing', messages)
|
Add an example for pre signed URL
|
Add an example for pre signed URL
Change-Id: I6abb998cee022e5cf3e83df4600dc027bc537d32
|
Python
|
apache-2.0
|
openstack/python-zaqarclient
|
Add an example for pre signed URL
Change-Id: I6abb998cee022e5cf3e83df4600dc027bc537d32
|
# Copyright 2016 Catalyst IT Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from zaqarclient.queues.v2 import client
URL = 'http://localhost:8888'
def create_post_delete(queue_name, messages):
"""Presigned queue example
Creates a queue, posts messages to it and finally deletes it with
``signed-url`` auth strategy enabled on Zaqar server side.
:params queue_name: The name of the queue
:type queue_name: `six.text_type`
:params messages: Messages to post.
:type messages: list
"""
conf = {'auth_opts':
{'backend': 'signed-url',
'options': {'signature': '',
'expires': '',
'methods': ['GET', 'PATCH', 'POST', 'PUT'],
'paths': ['/v2/queues/beijing/claims'],
'os_project_id': '2887aabf368046a3bb0070f1c0413470'}
}
}
cli = client.Client(URL, conf=conf)
queue = cli.queue(queue_name)
queue.post(messages)
for msg in queue.messages(echo=True):
print(msg.body)
msg.delete()
if __name__ == '__main__':
messages = [{'body': {'id': idx}, 'ttl': 360}
for idx in range(20)]
create_post_delete('beijing', messages)
|
<commit_before><commit_msg>Add an example for pre signed URL
Change-Id: I6abb998cee022e5cf3e83df4600dc027bc537d32<commit_after>
|
# Copyright 2016 Catalyst IT Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from zaqarclient.queues.v2 import client
URL = 'http://localhost:8888'
def create_post_delete(queue_name, messages):
"""Presigned queue example
Creates a queue, posts messages to it and finally deletes it with
``signed-url`` auth strategy enabled on Zaqar server side.
:params queue_name: The name of the queue
:type queue_name: `six.text_type`
:params messages: Messages to post.
:type messages: list
"""
conf = {'auth_opts':
{'backend': 'signed-url',
'options': {'signature': '',
'expires': '',
'methods': ['GET', 'PATCH', 'POST', 'PUT'],
'paths': ['/v2/queues/beijing/claims'],
'os_project_id': '2887aabf368046a3bb0070f1c0413470'}
}
}
cli = client.Client(URL, conf=conf)
queue = cli.queue(queue_name)
queue.post(messages)
for msg in queue.messages(echo=True):
print(msg.body)
msg.delete()
if __name__ == '__main__':
messages = [{'body': {'id': idx}, 'ttl': 360}
for idx in range(20)]
create_post_delete('beijing', messages)
|
Add an example for pre signed URL
Change-Id: I6abb998cee022e5cf3e83df4600dc027bc537d32# Copyright 2016 Catalyst IT Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from zaqarclient.queues.v2 import client
URL = 'http://localhost:8888'
def create_post_delete(queue_name, messages):
"""Presigned queue example
Creates a queue, posts messages to it and finally deletes it with
``signed-url`` auth strategy enabled on Zaqar server side.
:params queue_name: The name of the queue
:type queue_name: `six.text_type`
:params messages: Messages to post.
:type messages: list
"""
conf = {'auth_opts':
{'backend': 'signed-url',
'options': {'signature': '',
'expires': '',
'methods': ['GET', 'PATCH', 'POST', 'PUT'],
'paths': ['/v2/queues/beijing/claims'],
'os_project_id': '2887aabf368046a3bb0070f1c0413470'}
}
}
cli = client.Client(URL, conf=conf)
queue = cli.queue(queue_name)
queue.post(messages)
for msg in queue.messages(echo=True):
print(msg.body)
msg.delete()
if __name__ == '__main__':
messages = [{'body': {'id': idx}, 'ttl': 360}
for idx in range(20)]
create_post_delete('beijing', messages)
|
<commit_before><commit_msg>Add an example for pre signed URL
Change-Id: I6abb998cee022e5cf3e83df4600dc027bc537d32<commit_after># Copyright 2016 Catalyst IT Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from zaqarclient.queues.v2 import client
URL = 'http://localhost:8888'
def create_post_delete(queue_name, messages):
"""Presigned queue example
Creates a queue, posts messages to it and finally deletes it with
``signed-url`` auth strategy enabled on Zaqar server side.
:params queue_name: The name of the queue
:type queue_name: `six.text_type`
:params messages: Messages to post.
:type messages: list
"""
conf = {'auth_opts':
{'backend': 'signed-url',
'options': {'signature': '',
'expires': '',
'methods': ['GET', 'PATCH', 'POST', 'PUT'],
'paths': ['/v2/queues/beijing/claims'],
'os_project_id': '2887aabf368046a3bb0070f1c0413470'}
}
}
cli = client.Client(URL, conf=conf)
queue = cli.queue(queue_name)
queue.post(messages)
for msg in queue.messages(echo=True):
print(msg.body)
msg.delete()
if __name__ == '__main__':
messages = [{'body': {'id': idx}, 'ttl': 360}
for idx in range(20)]
create_post_delete('beijing', messages)
|
|
59f37975bb06edd38bbcdea6f0ea031f079ba2c3
|
lib/hawaiibuildbot/common/utils.py
|
lib/hawaiibuildbot/common/utils.py
|
#
# This file is part of Hawaii.
#
# Copyright (C) 2015 Pier Luigi Fiorini <pierluigi.fiorini@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
def loadYaml(fileName):
from yaml import load
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
stream = open(fileName, "r")
return load(stream, Loader=Loader)
|
Add an utility function to load YAML
|
common: Add an utility function to load YAML
|
Python
|
agpl-3.0
|
hawaii-desktop/builder,hawaii-desktop/builder,hawaii-desktop/builder,hawaii-desktop/builder,hawaii-desktop/builder
|
common: Add an utility function to load YAML
|
#
# This file is part of Hawaii.
#
# Copyright (C) 2015 Pier Luigi Fiorini <pierluigi.fiorini@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
def loadYaml(fileName):
from yaml import load
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
stream = open(fileName, "r")
return load(stream, Loader=Loader)
|
<commit_before><commit_msg>common: Add an utility function to load YAML<commit_after>
|
#
# This file is part of Hawaii.
#
# Copyright (C) 2015 Pier Luigi Fiorini <pierluigi.fiorini@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
def loadYaml(fileName):
from yaml import load
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
stream = open(fileName, "r")
return load(stream, Loader=Loader)
|
common: Add an utility function to load YAML#
# This file is part of Hawaii.
#
# Copyright (C) 2015 Pier Luigi Fiorini <pierluigi.fiorini@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
def loadYaml(fileName):
from yaml import load
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
stream = open(fileName, "r")
return load(stream, Loader=Loader)
|
<commit_before><commit_msg>common: Add an utility function to load YAML<commit_after>#
# This file is part of Hawaii.
#
# Copyright (C) 2015 Pier Luigi Fiorini <pierluigi.fiorini@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
def loadYaml(fileName):
from yaml import load
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
stream = open(fileName, "r")
return load(stream, Loader=Loader)
|
|
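A short usage sketch for the helper above, writing a throwaway YAML file and reading it back; only PyYAML is assumed, and the file contents are invented for illustration:

import os
import tempfile

yaml_text = "builder:\n  name: hawaii\n  jobs: 4\n"
fd, path = tempfile.mkstemp(suffix=".yml")
with os.fdopen(fd, "w") as f:
    f.write(yaml_text)

data = loadYaml(path)          # {'builder': {'name': 'hawaii', 'jobs': 4}}
print(data["builder"]["jobs"])  # 4
os.remove(path)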
62b2c69482d36a7afcdb732dd70a037d2513ba51
|
bh_sshcmd.py
|
bh_sshcmd.py
|
import paramiko # pip install paramiko
import os
def ssh_command(ip, user, command):
# you can run this script as
# SSH_PRIV_KEY=[your private key path] python bh_sshcmd.py
key = paramiko.RSAKey.from_private_key_file(os.getenv('SSH_PRIV_KEY'))
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
print "[==>connecting]"
client.connect(ip, username=user, pkey=key)
ssh_session = client.get_transport().open_session()
if ssh_session.active:
ssh_session.exec_command(command)
print(ssh_session.recv(1024))
return
ssh_command('52.35.195.113', 'ubuntu', 'id')
|
Add script to execute a simple command in a remote server over SSH
|
Add script to execute a simple command in a remote server over SSH
|
Python
|
mit
|
inakidelamadrid/bhp_exercises
|
Add script to execute a simple command in a remote server over SSH
|
import paramiko # pip install paramiko
import os
def ssh_command(ip, user, command):
# you can run this script as
# SSH_PRIV_KEY=[your private key path] python bh_sshcmd.py
key = paramiko.RSAKey.from_private_key_file(os.getenv('SSH_PRIV_KEY'))
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
print "[==>connecting]"
client.connect(ip, username=user, pkey=key)
ssh_session = client.get_transport().open_session()
if ssh_session.active:
ssh_session.exec_command(command)
print(ssh_session.recv(1024))
return
ssh_command('52.35.195.113', 'ubuntu', 'id')
|
<commit_before><commit_msg>Add script to execute a simple command in a remote server over SSH<commit_after>
|
import paramiko # pip install paramiko
import os
def ssh_command(ip, user, command):
# you can run this script as
# SSH_PRIV_KEY=[your private key path] python bh_sshcmd.py
key = paramiko.RSAKey.from_private_key_file(os.getenv('SSH_PRIV_KEY'))
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
print "[==>connecting]"
client.connect(ip, username=user, pkey=key)
ssh_session = client.get_transport().open_session()
if ssh_session.active:
ssh_session.exec_command(command)
print(ssh_session.recv(1024))
return
ssh_command('52.35.195.113', 'ubuntu', 'id')
|
Add script to execute a simple command in a remote server over SSHimport paramiko # pip install paramiko
import os
def ssh_command(ip, user, command):
# you can run this script as
# SSH_PRIV_KEY=[your private key path] python bh_sshcmd.py
key = paramiko.RSAKey.from_private_key_file(os.getenv('SSH_PRIV_KEY'))
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
print "[==>connecting]"
client.connect(ip, username=user, pkey=key)
ssh_session = client.get_transport().open_session()
if ssh_session.active:
ssh_session.exec_command(command)
print(ssh_session.recv(1024))
return
ssh_command('52.35.195.113', 'ubuntu', 'id')
|
<commit_before><commit_msg>Add script to execute a simple command in a remote server over SSH<commit_after>import paramiko # pip install paramiko
import os
def ssh_command(ip, user, command):
# you can run this script as
# SSH_PRIV_KEY=[your private key path] python bh_sshcmd.py
key = paramiko.RSAKey.from_private_key_file(os.getenv('SSH_PRIV_KEY'))
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
print "[==>connecting]"
client.connect(ip, username=user, pkey=key)
ssh_session = client.get_transport().open_session()
if ssh_session.active:
ssh_session.exec_command(command)
print(ssh_session.recv(1024))
return
ssh_command('52.35.195.113', 'ubuntu', 'id')
|
|
7c6077e107f40a3fcc3e1414f26071ceab0e0cf6
|
taiga/projects/notifications/migrations/0006_auto_20151103_0954.py
|
taiga/projects/notifications/migrations/0006_auto_20151103_0954.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('notifications', '0005_auto_20151005_1357'),
]
operations = [
migrations.AlterField(
model_name='notifypolicy',
name='notify_level',
field=models.SmallIntegerField(choices=[(1, 'Involved'), (2, 'All'), (3, 'None')]),
),
]
|
Create missing migration in taiga.projects.notifications
|
Create missing migration in taiga.projects.notifications
|
Python
|
agpl-3.0
|
Rademade/taiga-back,xdevelsistemas/taiga-back-community,dayatz/taiga-back,Rademade/taiga-back,dayatz/taiga-back,Rademade/taiga-back,Rademade/taiga-back,gam-phon/taiga-back,taigaio/taiga-back,taigaio/taiga-back,taigaio/taiga-back,gam-phon/taiga-back,dayatz/taiga-back,xdevelsistemas/taiga-back-community,gam-phon/taiga-back,xdevelsistemas/taiga-back-community,gam-phon/taiga-back,Rademade/taiga-back
|
Create missing migration in taiga.projects.notifications
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('notifications', '0005_auto_20151005_1357'),
]
operations = [
migrations.AlterField(
model_name='notifypolicy',
name='notify_level',
field=models.SmallIntegerField(choices=[(1, 'Involved'), (2, 'All'), (3, 'None')]),
),
]
|
<commit_before><commit_msg>Create missing migration in taiga.projects.notifications<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('notifications', '0005_auto_20151005_1357'),
]
operations = [
migrations.AlterField(
model_name='notifypolicy',
name='notify_level',
field=models.SmallIntegerField(choices=[(1, 'Involved'), (2, 'All'), (3, 'None')]),
),
]
|
Create missing migration in taiga.projects.notifications# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('notifications', '0005_auto_20151005_1357'),
]
operations = [
migrations.AlterField(
model_name='notifypolicy',
name='notify_level',
field=models.SmallIntegerField(choices=[(1, 'Involved'), (2, 'All'), (3, 'None')]),
),
]
|
<commit_before><commit_msg>Create missing migration in taiga.projects.notifications<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('notifications', '0005_auto_20151005_1357'),
]
operations = [
migrations.AlterField(
model_name='notifypolicy',
name='notify_level',
field=models.SmallIntegerField(choices=[(1, 'Involved'), (2, 'All'), (3, 'None')]),
),
]
|
|
3b29a94a7009c0b652e8eca0b175bb97250e1b33
|
feature_extraction/extraction.py
|
feature_extraction/extraction.py
|
import numpy as np
"""
Given an image as a Numpy array and a set of measurement objects
implementing a compute method returning a feature vector, return a combined
feature vector.
"""
def extract_features(image, measurements):
# TODO(liam): parallelize multiple measurements on an image by using Celery
return np.ravel([m.compute(image) for m in measurements])
|
Add an extract_features(image, measurements) function returning a feature vector
|
Add an extract_features(image, measurements) function returning a feature vector
|
Python
|
apache-2.0
|
widoptimization-willett/feature-extraction
|
Add an extract_features(image, measurements) function returning a feature vector
|
import numpy as np
"""
Given an image as a Numpy array and a set of measurement objects
implementing a compute method returning a feature vector, return a combined
feature vector.
"""
def extract_features(image, measurements):
# TODO(liam): parallelize multiple measurements on an image by using Celery
return np.ravel([m.compute(image) for m in measurements])
|
<commit_before><commit_msg>Add an extract_features(image, measurements) function returning a feature vector<commit_after>
|
import numpy as np
"""
Given an image as a Numpy array and a set of measurement objects
implementing a compute method returning a feature vector, return a combined
feature vector.
"""
def extract_features(image, measurements):
# TODO(liam): parallelize multiple measurements on an image by using Celery
return np.ravel([m.compute(image) for m in measurements])
|
Add an extract_features(image, measurements) function returning a feature vectorimport numpy as np
"""
Given an image as a Numpy array and a set of measurement objects
implementing a compute method returning a feature vector, return a combined
feature vector.
"""
def extract_features(image, measurements):
# TODO(liam): parallelize multiple measurements on an image by using Celery
return np.ravel([m.compute(image) for m in measurements])
|
<commit_before><commit_msg>Add an extract_features(image, measurements) function returning a feature vector<commit_after>import numpy as np
"""
Given an image as a Numpy array and a set of measurement objects
implementing a compute method returning a feature vector, return a combined
feature vector.
"""
def extract_features(image, measurements):
# TODO(liam): parallelize multiple measurements on an image by using Celery
return np.ravel([m.compute(image) for m in measurements])
|
|
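A minimal sketch of what a measurement object looks like and how the combined vector comes out; the two classes here are purely illustrative stand-ins, not part of the package:

import numpy as np

class MeanStd(object):
    """Illustrative measurement: mean and standard deviation of the image."""
    def compute(self, image):
        return [np.mean(image), np.std(image)]

class Extent(object):
    """Illustrative measurement: image height and width."""
    def compute(self, image):
        return list(image.shape[:2])

image = np.zeros((32, 48))
features = extract_features(image, [MeanStd(), Extent()])
print(features)  # [ 0.  0. 32. 48.] -- one flat vector

Note that np.ravel only yields a flat numeric vector when the per-measurement outputs stack cleanly; ragged outputs would be left as an object array.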
d11491d30a2fb418dd40bf7e97d4d35cc84d6f3f
|
pyjokes/chuck.py
|
pyjokes/chuck.py
|
# -*- coding: utf-8 -*-
import json
try:
from urllib2 import urlopen
except:
from urllib.request import urlopen
def get_chuck_nerd_jokes():
url = 'http://api.icndb.com/jokes/random?limitTo=[nerdy]'
response = urlopen(url).read().decode('utf-8')
data = json.loads(response)
d = data['value']
return d['joke']
if __name__ == '__main__':
print(get_chuck_nerd_jokes())
|
Move Chuck database query function to another file
|
Move Chuck database query function to another file
|
Python
|
bsd-3-clause
|
trojjer/pyjokes,gmarkall/pyjokes,Wren6991/pyjokes,bennuttall/pyjokes,borjaayerdi/pyjokes,ElectronicsGeek/pyjokes,pyjokes/pyjokes,martinohanlon/pyjokes,birdsarah/pyjokes
|
Move Chuck database query function to another file
|
# -*- coding: utf-8 -*-
import json
try:
from urllib2 import urlopen
except:
from urllib.request import urlopen
def get_chuck_nerd_jokes():
url = 'http://api.icndb.com/jokes/random?limitTo=[nerdy]'
response = urlopen(url).read().decode('utf-8')
data = json.loads(response)
d = data['value']
return d['joke']
if __name__ == '__main__':
print(get_chuck_nerd_jokes())
|
<commit_before><commit_msg>Move Chuck database query function to another file<commit_after>
|
# -*- coding: utf-8 -*-
import json
try:
from urllib2 import urlopen
except:
from urllib.request import urlopen
def get_chuck_nerd_jokes():
url = 'http://api.icndb.com/jokes/random?limitTo=[nerdy]'
response = urlopen(url).read().decode('utf-8')
data = json.loads(response)
d = data['value']
return d['joke']
if __name__ == '__main__':
print(get_chuck_nerd_jokes())
|
Move Chuck database query function to another file# -*- coding: utf-8 -*-
import json
try:
from urllib2 import urlopen
except:
from urllib.request import urlopen
def get_chuck_nerd_jokes():
url = 'http://api.icndb.com/jokes/random?limitTo=[nerdy]'
response = urlopen(url).read().decode('utf-8')
data = json.loads(response)
d = data['value']
return d['joke']
if __name__ == '__main__':
print(get_chuck_nerd_jokes())
|
<commit_before><commit_msg>Move Chuck database query function to another file<commit_after># -*- coding: utf-8 -*-
import json
try:
from urllib2 import urlopen
except:
from urllib.request import urlopen
def get_chuck_nerd_jokes():
url = 'http://api.icndb.com/jokes/random?limitTo=[nerdy]'
response = urlopen(url).read().decode('utf-8')
data = json.loads(response)
d = data['value']
return d['joke']
if __name__ == '__main__':
print(get_chuck_nerd_jokes())
|
|
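The function only relies on the 'value' -> 'joke' nesting of the ICNDb payload; an offline sketch of that parsing step, with a made-up response body standing in for the HTTP call:

import json

# Hypothetical payload shaped like the response handled above
response = '{"type": "success", "value": {"id": 1, "joke": "Nerdy joke text."}}'
data = json.loads(response)
print(data['value']['joke'])  # -> Nerdy joke text.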
2f268173e25bee5d671583bb905829e0ffd4f631
|
match/management/commands/reset-matches.py
|
match/management/commands/reset-matches.py
|
from django.core.management.base import BaseCommand, CommandError
from match.models import Match
import sys
class Command(BaseCommand):
help = 'Reset all match data'
def handle(self, *args, **options):
Match.objects.all().delete()
|
Add management command to clear all matches mostly useful with heroku
|
Add management command to clear all matches
mostly useful with heroku
|
Python
|
mit
|
maxf/address-matcher,maxf/address-matcher,maxf/address-matcher,maxf/address-matcher
|
Add management command to clear all matches
mostly useful with heroku
|
from django.core.management.base import BaseCommand, CommandError
from match.models import Match
import sys
class Command(BaseCommand):
help = 'Reset all match data'
def handle(self, *args, **options):
Match.objects.all().delete()
|
<commit_before><commit_msg>Add management command to clear all matches
mostly useful with heroku<commit_after>
|
from django.core.management.base import BaseCommand, CommandError
from match.models import Match
import sys
class Command(BaseCommand):
help = 'Reset all match data'
def handle(self, *args, **options):
Match.objects.all().delete()
|
Add management command to clear all matches
mostly useful with herokufrom django.core.management.base import BaseCommand, CommandError
from match.models import Match
import sys
class Command(BaseCommand):
help = 'Reset all match data'
def handle(self, *args, **options):
Match.objects.all().delete()
|
<commit_before><commit_msg>Add management command to clear all matches
mostly useful with heroku<commit_after>from django.core.management.base import BaseCommand, CommandError
from match.models import Match
import sys
class Command(BaseCommand):
help = 'Reset all match data'
def handle(self, *args, **options):
Match.objects.all().delete()
|
|
8c4833dbf9f4ae32afbfbe6a3cb8e4630abc3d25
|
test/requests/test_login_local.py
|
test/requests/test_login_local.py
|
import requests
from wqflask import user_manager
from parametrized_test import ParametrizedTest
class TestLoginLocal(ParametrizedTest):
def setUp(self):
super(TestLoginLocal, self).setUp()
self.login_url = self.gn2_url +"/n/login"
data = {
"es_connection": self.es,
"email_address": "test@user.com",
"full_name": "Test User",
"organization": "Test Organisation",
"password": "test_password",
"password_confirm": "test_password"
}
user_manager.basic_info = lambda : { "basic_info": "basic" }
user_manager.RegisterUser(data)
def testLoginNonRegisteredUser(self):
data = {
"email_address": "non@existent.email",
"password": "doesitmatter?"
}
result = requests.post(self.login_url, data=data)
self.assertEqual(result.url, self.login_url, "")
def testLoginWithRegisteredUserBothRememberMeAndImportCollectionsFalse(self):
data = {
"email_address": "test@user.com",
"password": "test_password"
}
result = requests.post(self.login_url, data=data)
print("THE COOKIES? ", result.cookies)
self.assertEqual(
result.url
, self.gn2_url+"/?import_collections=false"
, "Login should have been successful")
def main(gn2, es):
import unittest
suite = unittest.TestSuite()
suite.addTest(TestLoginLocal(methodName="testLoginNonRegisteredUser", gn2_url=gn2, es_url=es))
suite.addTest(TestLoginLocal(methodName="testLoginWithRegisteredUserBothRememberMeAndImportCollectionsFalse", gn2_url=gn2, es_url=es))
runner = unittest.TextTestRunner()
runner.run(suite)
if __name__ == "__main__":
import sys
if len(sys.argv) < 3:
raise Exception("Required arguments missing")
else:
main(sys.argv[1], sys.argv[2])
|
Add test for local login
|
Add test for local login
* Add an integration test to test that the login process for users
registered locally to genenetwork2 works as expected.
|
Python
|
agpl-3.0
|
DannyArends/genenetwork2,DannyArends/genenetwork2,pjotrp/genenetwork2,pjotrp/genenetwork2,zsloan/genenetwork2,pjotrp/genenetwork2,DannyArends/genenetwork2,genenetwork/genenetwork2,DannyArends/genenetwork2,zsloan/genenetwork2,DannyArends/genenetwork2,genenetwork/genenetwork2,pjotrp/genenetwork2,genenetwork/genenetwork2,DannyArends/genenetwork2,genenetwork/genenetwork2,zsloan/genenetwork2,pjotrp/genenetwork2,zsloan/genenetwork2
|
Add test for local login
* Add an integration test to test that the login process for users
registered locally to genenetwork2 works as expected.
|
import requests
from wqflask import user_manager
from parametrized_test import ParametrizedTest
class TestLoginLocal(ParametrizedTest):
def setUp(self):
super(TestLoginLocal, self).setUp()
self.login_url = self.gn2_url +"/n/login"
data = {
"es_connection": self.es,
"email_address": "test@user.com",
"full_name": "Test User",
"organization": "Test Organisation",
"password": "test_password",
"password_confirm": "test_password"
}
user_manager.basic_info = lambda : { "basic_info": "basic" }
user_manager.RegisterUser(data)
def testLoginNonRegisteredUser(self):
data = {
"email_address": "non@existent.email",
"password": "doesitmatter?"
}
result = requests.post(self.login_url, data=data)
self.assertEqual(result.url, self.login_url, "")
def testLoginWithRegisteredUserBothRememberMeAndImportCollectionsFalse(self):
data = {
"email_address": "test@user.com",
"password": "test_password"
}
result = requests.post(self.login_url, data=data)
print("THE COOKIES? ", result.cookies)
self.assertEqual(
result.url
, self.gn2_url+"/?import_collections=false"
, "Login should have been successful")
def main(gn2, es):
import unittest
suite = unittest.TestSuite()
suite.addTest(TestLoginLocal(methodName="testLoginNonRegisteredUser", gn2_url=gn2, es_url=es))
suite.addTest(TestLoginLocal(methodName="testLoginWithRegisteredUserBothRememberMeAndImportCollectionsFalse", gn2_url=gn2, es_url=es))
runner = unittest.TextTestRunner()
runner.run(suite)
if __name__ == "__main__":
import sys
if len(sys.argv) < 3:
raise Exception("Required arguments missing")
else:
main(sys.argv[1], sys.argv[2])
|
<commit_before><commit_msg>Add test for local login
* Add an integration test to test that the login process for users
registered locally to genenetwork2 works as expected.<commit_after>
|
import requests
from wqflask import user_manager
from parametrized_test import ParametrizedTest
class TestLoginLocal(ParametrizedTest):
def setUp(self):
super(TestLoginLocal, self).setUp()
self.login_url = self.gn2_url +"/n/login"
data = {
"es_connection": self.es,
"email_address": "test@user.com",
"full_name": "Test User",
"organization": "Test Organisation",
"password": "test_password",
"password_confirm": "test_password"
}
user_manager.basic_info = lambda : { "basic_info": "basic" }
user_manager.RegisterUser(data)
def testLoginNonRegisteredUser(self):
data = {
"email_address": "non@existent.email",
"password": "doesitmatter?"
}
result = requests.post(self.login_url, data=data)
self.assertEqual(result.url, self.login_url, "")
def testLoginWithRegisteredUserBothRememberMeAndImportCollectionsFalse(self):
data = {
"email_address": "test@user.com",
"password": "test_password"
}
result = requests.post(self.login_url, data=data)
print("THE COOKIES? ", result.cookies)
self.assertEqual(
result.url
, self.gn2_url+"/?import_collections=false"
, "Login should have been successful")
def main(gn2, es):
import unittest
suite = unittest.TestSuite()
suite.addTest(TestLoginLocal(methodName="testLoginNonRegisteredUser", gn2_url=gn2, es_url=es))
suite.addTest(TestLoginLocal(methodName="testLoginWithRegisteredUserBothRememberMeAndImportCollectionsFalse", gn2_url=gn2, es_url=es))
runner = unittest.TextTestRunner()
runner.run(suite)
if __name__ == "__main__":
import sys
if len(sys.argv) < 3:
raise Exception("Required arguments missing")
else:
main(sys.argv[1], sys.argv[2])
|
Add test for local login
* Add an integration test to test that the login process for users
registered locally to genenetwork2 works as expected.import requests
from wqflask import user_manager
from parametrized_test import ParametrizedTest
class TestLoginLocal(ParametrizedTest):
def setUp(self):
super(TestLoginLocal, self).setUp()
self.login_url = self.gn2_url +"/n/login"
data = {
"es_connection": self.es,
"email_address": "test@user.com",
"full_name": "Test User",
"organization": "Test Organisation",
"password": "test_password",
"password_confirm": "test_password"
}
user_manager.basic_info = lambda : { "basic_info": "basic" }
user_manager.RegisterUser(data)
def testLoginNonRegisteredUser(self):
data = {
"email_address": "non@existent.email",
"password": "doesitmatter?"
}
result = requests.post(self.login_url, data=data)
self.assertEqual(result.url, self.login_url, "")
def testLoginWithRegisteredUserBothRememberMeAndImportCollectionsFalse(self):
data = {
"email_address": "test@user.com",
"password": "test_password"
}
result = requests.post(self.login_url, data=data)
print("THE COOKIES? ", result.cookies)
self.assertEqual(
result.url
, self.gn2_url+"/?import_collections=false"
, "Login should have been successful")
def main(gn2, es):
import unittest
suite = unittest.TestSuite()
suite.addTest(TestLoginLocal(methodName="testLoginNonRegisteredUser", gn2_url=gn2, es_url=es))
suite.addTest(TestLoginLocal(methodName="testLoginWithRegisteredUserBothRememberMeAndImportCollectionsFalse", gn2_url=gn2, es_url=es))
runner = unittest.TextTestRunner()
runner.run(suite)
if __name__ == "__main__":
import sys
if len(sys.argv) < 3:
raise Exception("Required arguments missing")
else:
main(sys.argv[1], sys.argv[2])
|
<commit_before><commit_msg>Add test for local login
* Add an integration test to test that the login process for users
registered locally to genenetwork2 works as expected.<commit_after>import requests
from wqflask import user_manager
from parametrized_test import ParametrizedTest
class TestLoginLocal(ParametrizedTest):
def setUp(self):
super(TestLoginLocal, self).setUp()
self.login_url = self.gn2_url +"/n/login"
data = {
"es_connection": self.es,
"email_address": "test@user.com",
"full_name": "Test User",
"organization": "Test Organisation",
"password": "test_password",
"password_confirm": "test_password"
}
user_manager.basic_info = lambda : { "basic_info": "basic" }
user_manager.RegisterUser(data)
def testLoginNonRegisteredUser(self):
data = {
"email_address": "non@existent.email",
"password": "doesitmatter?"
}
result = requests.post(self.login_url, data=data)
self.assertEqual(result.url, self.login_url, "")
def testLoginWithRegisteredUserBothRememberMeAndImportCollectionsFalse(self):
data = {
"email_address": "test@user.com",
"password": "test_password"
}
result = requests.post(self.login_url, data=data)
print("THE COOKIES? ", result.cookies)
self.assertEqual(
result.url
, self.gn2_url+"/?import_collections=false"
, "Login should have been successful")
def main(gn2, es):
import unittest
suite = unittest.TestSuite()
suite.addTest(TestLoginLocal(methodName="testLoginNonRegisteredUser", gn2_url=gn2, es_url=es))
suite.addTest(TestLoginLocal(methodName="testLoginWithRegisteredUserBothRememberMeAndImportCollectionsFalse", gn2_url=gn2, es_url=es))
runner = unittest.TextTestRunner()
runner.run(suite)
if __name__ == "__main__":
import sys
if len(sys.argv) < 3:
raise Exception("Required arguments missing")
else:
main(sys.argv[1], sys.argv[2])
|
|
5af36bbe29a8a7a7418fc535c5647c9be511f0b4
|
scripts/userCounts.py
|
scripts/userCounts.py
|
"""
Script to write user counts for each region to CSV.
"""
import twitterproj
def main():
db = twitterproj.connect()
filenames = ['grids/counties.user_counts.bot_filtered.csv',
'grids/states.user_counts.bot_filtered.csv',
'grids/squares.user_counts.bot_filtered.csv']
funcs = ['counties', 'states', 'squares']
for func, filename in zip(funcs, filenames):
# The attribute we want is twitterproj.hashtag_counts__{gridtype}
regions = getattr(twitterproj, 'hashtag_counts__' + func)()
lines = ["# count"]
for region in regions:
lines.append(str(region['user_count']))
with open(filename, 'w') as f:
f.write('\n'.join(lines))
if __name__ == '__main__':
main()
|
Add script to write user counts to csv.
|
Add script to write user counts to csv.
|
Python
|
unlicense
|
chebee7i/twitter,chebee7i/twitter,chebee7i/twitter
|
Add script to write user counts to csv.
|
"""
Script to write user counts for each region to CSV.
"""
import twitterproj
def main():
db = twitterproj.connect()
filenames = ['grids/counties.user_counts.bot_filtered.csv',
'grids/states.user_counts.bot_filtered.csv',
'grids/squares.user_counts.bot_filtered.csv']
funcs = ['counties', 'states', 'squares']
for func, filename in zip(funcs, filenames):
# The attribute we want is twitterproj.hashtag_counts__{gridtype}
regions = getattr(twitterproj, 'hashtag_counts__' + func)()
lines = ["# count"]
for region in regions:
lines.append(str(region['user_count']))
with open(filename, 'w') as f:
f.write('\n'.join(lines))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to write user counts to csv.<commit_after>
|
"""
Script to write user counts for each region to CSV.
"""
import twitterproj
def main():
db = twitterproj.connect()
filenames = ['grids/counties.user_counts.bot_filtered.csv',
'grids/states.user_counts.bot_filtered.csv',
'grids/squares.user_counts.bot_filtered.csv']
funcs = ['counties', 'states', 'squares']
for func, filename in zip(funcs, filenames):
# The attribute we want is twitterproj.hashtag_counts__{gridtype}
regions = getattr(twitterproj, 'hashtag_counts__' + func)()
lines = ["# count"]
for region in regions:
lines.append(str(region['user_count']))
with open(filename, 'w') as f:
f.write('\n'.join(lines))
if __name__ == '__main__':
main()
|
Add script to write user counts to csv."""
Script to write user counts for each region to CSV.
"""
import twitterproj
def main():
db = twitterproj.connect()
filenames = ['grids/counties.user_counts.bot_filtered.csv',
'grids/states.user_counts.bot_filtered.csv',
'grids/squares.user_counts.bot_filtered.csv']
funcs = ['counties', 'states', 'squares']
for func, filename in zip(funcs, filenames):
# The attribute we want is twitterproj.hashtag_counts__{gridtype}
regions = getattr(twitterproj, 'hashtag_counts__' + func)()
lines = ["# count"]
for region in regions:
lines.append(str(region['user_count']))
with open(filename, 'w') as f:
f.write('\n'.join(lines))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to write user counts to csv.<commit_after>"""
Script to write user counts for each region to CSV.
"""
import twitterproj
def main():
db = twitterproj.connect()
filenames = ['grids/counties.user_counts.bot_filtered.csv',
'grids/states.user_counts.bot_filtered.csv',
'grids/squares.user_counts.bot_filtered.csv']
funcs = ['counties', 'states', 'squares']
for func, filename in zip(funcs, filenames):
# The attribute we want is twitterproj.hashtag_counts__{gridtype}
regions = getattr(twitterproj, 'hashtag_counts__' + func)()
lines = ["# count"]
for region in regions:
lines.append(str(region['user_count']))
with open(filename, 'w') as f:
f.write('\n'.join(lines))
if __name__ == '__main__':
main()
|
|
0c18bb0993be77059aa75015cc5433eaacbe8999
|
rfc.py
|
rfc.py
|
import pydoc
import sys
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
def get_rfc(rfc):
url = "http://www.ietf.org/rfc/rfc{0}.txt".format(rfc)
f = urlopen(url)
data = f.read()
if isinstance(data, bytes):
data = data.decode('utf-8')
return data
def render_rfc(rfc):
pydoc.pager(get_rfc(rfc))
if __name__ == "__main__":
render_rfc(sys.argv[1])
|
Add barebones RFC downloader and renderer.
|
Add barebones RFC downloader and renderer.
|
Python
|
lgpl-2.1
|
StefanKopieczek/rfc
|
Add barebones RFC downloader and renderer.
|
import pydoc
import sys
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
def get_rfc(rfc):
url = "http://www.ietf.org/rfc/rfc{0}.txt".format(rfc)
f = urlopen(url)
data = f.read()
if isinstance(data, bytes):
data = data.decode('utf-8')
return data
def render_rfc(rfc):
pydoc.pager(get_rfc(rfc))
if __name__ == "__main__":
render_rfc(sys.argv[1])
|
<commit_before><commit_msg>Add barebones RFC downloader and renderer.<commit_after>
|
import pydoc
import sys
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
def get_rfc(rfc):
url = "http://www.ietf.org/rfc/rfc{0}.txt".format(rfc)
f = urlopen(url)
data = f.read()
if isinstance(data, bytes):
data = data.decode('utf-8')
return data
def render_rfc(rfc):
pydoc.pager(get_rfc(rfc))
if __name__ == "__main__":
render_rfc(sys.argv[1])
|
Add barebones RFC downloader and renderer.import pydoc
import sys
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
def get_rfc(rfc):
url = "http://www.ietf.org/rfc/rfc{0}.txt".format(rfc)
f = urlopen(url)
data = f.read()
if isinstance(data, bytes):
data = data.decode('utf-8')
return data
def render_rfc(rfc):
pydoc.pager(get_rfc(rfc))
if __name__ == "__main__":
render_rfc(sys.argv[1])
|
<commit_before><commit_msg>Add barebones RFC downloader and renderer.<commit_after>import pydoc
import sys
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
def get_rfc(rfc):
url = "http://www.ietf.org/rfc/rfc{0}.txt".format(rfc)
f = urlopen(url)
data = f.read()
if isinstance(data, bytes):
data = data.decode('utf-8')
return data
def render_rfc(rfc):
pydoc.pager(get_rfc(rfc))
if __name__ == "__main__":
render_rfc(sys.argv[1])
|
|
681c21a5fbf3bc713468e33bb10dfa9bf6d62850
|
corehq/apps/users/migrations/0004_rm_role_id_from_admins.py
|
corehq/apps/users/migrations/0004_rm_role_id_from_admins.py
|
from django.db import migrations
from corehq.apps.es import UserES
from corehq.apps.users.models import WebUser
from corehq.util.couch import DocUpdate, iter_update
from corehq.util.django_migrations import skip_on_fresh_install
from corehq.util.log import with_progress_bar
@skip_on_fresh_install
def fix_users(apps, schema_editor):
user_ids = with_progress_bar(_get_admins_with_roles())
iter_update(WebUser.get_db(), _remove_role, user_ids, verbose=True)
def _get_admins_with_roles():
# domain_memberships isn't a nested mapping in ES, so this only checks that
# they have a domain membership that's an admin, and one with a role_id,
# not that it's both on the same membership
return (UserES()
.web_users()
.term('domain_memberships.is_admin', True)
.non_null('domain_memberships.role_id')
.get_ids())
def _remove_role(user_doc):
changed = False
for dm in user_doc['domain_memberships']:
if dm['is_admin'] and dm['role_id']:
dm['role_id'] = None
changed = True
if changed:
return DocUpdate(user_doc)
class Migration(migrations.Migration):
dependencies = [
('users', '0003_roles_permissions_update'),
]
operations = [
migrations.RunPython(fix_users, reverse_code=migrations.RunPython.noop, elidable=True)
]
|
Add migration to fix admin users with roles
|
Add migration to fix admin users with roles
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
|
Add migration to fix admin users with roles
|
from django.db import migrations
from corehq.apps.es import UserES
from corehq.apps.users.models import WebUser
from corehq.util.couch import DocUpdate, iter_update
from corehq.util.django_migrations import skip_on_fresh_install
from corehq.util.log import with_progress_bar
@skip_on_fresh_install
def fix_users(apps, schema_editor):
user_ids = with_progress_bar(_get_admins_with_roles())
iter_update(WebUser.get_db(), _remove_role, user_ids, verbose=True)
def _get_admins_with_roles():
# domain_memberships isn't a nested mapping in ES, so this only checks that
# they have a domain membership that's an admin, and one with a role_id,
# not that it's both on the same membership
return (UserES()
.web_users()
.term('domain_memberships.is_admin', True)
.non_null('domain_memberships.role_id')
.get_ids())
def _remove_role(user_doc):
changed = False
for dm in user_doc['domain_memberships']:
if dm['is_admin'] and dm['role_id']:
dm['role_id'] = None
changed = True
if changed:
return DocUpdate(user_doc)
class Migration(migrations.Migration):
dependencies = [
('users', '0003_roles_permissions_update'),
]
operations = [
migrations.RunPython(fix_users, reverse_code=migrations.RunPython.noop, elidable=True)
]
|
<commit_before><commit_msg>Add migration to fix admin users with roles<commit_after>
|
from django.db import migrations
from corehq.apps.es import UserES
from corehq.apps.users.models import WebUser
from corehq.util.couch import DocUpdate, iter_update
from corehq.util.django_migrations import skip_on_fresh_install
from corehq.util.log import with_progress_bar
@skip_on_fresh_install
def fix_users(apps, schema_editor):
user_ids = with_progress_bar(_get_admins_with_roles())
iter_update(WebUser.get_db(), _remove_role, user_ids, verbose=True)
def _get_admins_with_roles():
# domain_memberships isn't a nested mapping in ES, so this only checks that
# they have a domain membership that's an admin, and one with a role_id,
# not that it's both on the same membership
return (UserES()
.web_users()
.term('domain_memberships.is_admin', True)
.non_null('domain_memberships.role_id')
.get_ids())
def _remove_role(user_doc):
changed = False
for dm in user_doc['domain_memberships']:
if dm['is_admin'] and dm['role_id']:
dm['role_id'] = None
changed = True
if changed:
return DocUpdate(user_doc)
class Migration(migrations.Migration):
dependencies = [
('users', '0003_roles_permissions_update'),
]
operations = [
migrations.RunPython(fix_users, reverse_code=migrations.RunPython.noop, elidable=True)
]
|
Add migration to fix admin users with rolesfrom django.db import migrations
from corehq.apps.es import UserES
from corehq.apps.users.models import WebUser
from corehq.util.couch import DocUpdate, iter_update
from corehq.util.django_migrations import skip_on_fresh_install
from corehq.util.log import with_progress_bar
@skip_on_fresh_install
def fix_users(apps, schema_editor):
user_ids = with_progress_bar(_get_admins_with_roles())
iter_update(WebUser.get_db(), _remove_role, user_ids, verbose=True)
def _get_admins_with_roles():
# domain_memberships isn't a nested mapping in ES, so this only checks that
# they have a domain membership that's an admin, and one with a role_id,
# not that it's both on the same membership
return (UserES()
.web_users()
.term('domain_memberships.is_admin', True)
.non_null('domain_memberships.role_id')
.get_ids())
def _remove_role(user_doc):
changed = False
for dm in user_doc['domain_memberships']:
if dm['is_admin'] and dm['role_id']:
dm['role_id'] = None
changed = True
if changed:
return DocUpdate(user_doc)
class Migration(migrations.Migration):
dependencies = [
('users', '0003_roles_permissions_update'),
]
operations = [
migrations.RunPython(fix_users, reverse_code=migrations.RunPython.noop, elidable=True)
]
|
<commit_before><commit_msg>Add migration to fix admin users with roles<commit_after>from django.db import migrations
from corehq.apps.es import UserES
from corehq.apps.users.models import WebUser
from corehq.util.couch import DocUpdate, iter_update
from corehq.util.django_migrations import skip_on_fresh_install
from corehq.util.log import with_progress_bar
@skip_on_fresh_install
def fix_users(apps, schema_editor):
user_ids = with_progress_bar(_get_admins_with_roles())
iter_update(WebUser.get_db(), _remove_role, user_ids, verbose=True)
def _get_admins_with_roles():
# domain_memberships isn't a nested mapping in ES, so this only checks that
# they have a domain membership that's an admin, and one with a role_id,
# not that it's both on the same membership
return (UserES()
.web_users()
.term('domain_memberships.is_admin', True)
.non_null('domain_memberships.role_id')
.get_ids())
def _remove_role(user_doc):
changed = False
for dm in user_doc['domain_memberships']:
if dm['is_admin'] and dm['role_id']:
dm['role_id'] = None
changed = True
if changed:
return DocUpdate(user_doc)
class Migration(migrations.Migration):
dependencies = [
('users', '0003_roles_permissions_update'),
]
operations = [
migrations.RunPython(fix_users, reverse_code=migrations.RunPython.noop, elidable=True)
]
|
|
3134e22eb5da9bd7104c199f788288e0fc823db2
|
app.py
|
app.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import bottle
from bottle import route, run, template, request, response
import os
import uuid
@route('/')
def get_simple_form():
"""
Returns simple images upload form
:return:
"""
return ('<form action="/imgs" method="post" enctype="multipart/form-data">\n'
' Input: <input name="input" type="file">\n'
' Style: <input name="style" type="file">\n'
' <input value="Upload" type="submit">\n'
'</form>')
@route('/imgs', method='POST')
def upload_imgs():
"""
Upload input & style images and return id
:return:
"""
# get files
input_img = request.files.get('input')
style_img = request.files.get('style')
if not check_img_png(input_img) or not check_img_png(style_img):
return 'File extension not allowed.'
# assign uuid
id = uuid.uuid4()
input_up_path = id.get_hex() + input_img.filename
style_up_path = id.get_hex() + style_img.filename
input_img.save(input_up_path)
style_img.save(style_up_path)
return template('Uploaded images. ID is "{{id}}".', id=id.get_hex())
def check_img_png(image):
"""
Check whether
:param image:
:return:
"""
name, ext = os.path.split(image.filename)
return ext not in ('png', 'jpeg', 'jpg')
@route('/statuses/<id>')
def show_status(id=''):
return
if __name__ == '__main__':
run(debug=True)
|
Add basic endpoints powered by bottle
|
Add basic endpoints powered by bottle
|
Python
|
mit
|
ciela/chainer-gogh
|
Add basic endpoints powered by bottle
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import bottle
from bottle import route, run, template, request, response
import os
import uuid
@route('/')
def get_simple_form():
"""
Returns simple images upload form
:return:
"""
return ('<form action="/imgs" method="post" enctype="multipart/form-data">\n'
' Input: <input name="input" type="file">\n'
' Style: <input name="style" type="file">\n'
' <input value="Upload" type="submit">\n'
'</form>')
@route('/imgs', method='POST')
def upload_imgs():
"""
Upload input & style images and return id
:return:
"""
# get files
input_img = request.files.get('input')
style_img = request.files.get('style')
if not check_img_png(input_img) or not check_img_png(style_img):
return 'File extension not allowed.'
# assign uuid
id = uuid.uuid4()
input_up_path = id.get_hex() + input_img.filename
style_up_path = id.get_hex() + style_img.filename
input_img.save(input_up_path)
style_img.save(style_up_path)
return template('Uploaded images. ID is "{{id}}".', id=id.get_hex())
def check_img_png(image):
"""
Check whether
:param image:
:return:
"""
name, ext = os.path.split(image.filename)
return ext not in ('png', 'jpeg', 'jpg')
@route('/statuses/<id>')
def show_status(id=''):
return
if __name__ == '__main__':
run(debug=True)
|
<commit_before><commit_msg>Add basic endpoints powered by bottle<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import bottle
from bottle import route, run, template, request, response
import os
import uuid
@route('/')
def get_simple_form():
"""
Returns simple images upload form
:return:
"""
return ('<form action="/imgs" method="post" enctype="multipart/form-data">\n'
' Input: <input name="input" type="file">\n'
' Style: <input name="style" type="file">\n'
' <input value="Upload" type="submit">\n'
'</form>')
@route('/imgs', method='POST')
def upload_imgs():
"""
Upload input & style images and return id
:return:
"""
# get files
input_img = request.files.get('input')
style_img = request.files.get('style')
if not check_img_png(input_img) or not check_img_png(style_img):
return 'File extension not allowed.'
# assign uuid
id = uuid.uuid4()
input_up_path = id.get_hex() + input_img.filename
style_up_path = id.get_hex() + style_img.filename
input_img.save(input_up_path)
style_img.save(style_up_path)
return template('Uploaded images. ID is "{{id}}".', id=id.get_hex())
def check_img_png(image):
"""
Check whether
:param image:
:return:
"""
name, ext = os.path.split(image.filename)
return ext not in ('png', 'jpeg', 'jpg')
@route('/statuses/<id>')
def show_status(id=''):
return
if __name__ == '__main__':
run(debug=True)
|
Add basic endpoints powered by bottle#!/usr/bin/env python
# -*- coding: utf-8 -*-
import bottle
from bottle import route, run, template, request, response
import os
import uuid
@route('/')
def get_simple_form():
"""
Returns simple images upload form
:return:
"""
return ('<form action="/imgs" method="post" enctype="multipart/form-data">\n'
' Input: <input name="input" type="file">\n'
' Style: <input name="style" type="file">\n'
' <input value="Upload" type="submit">\n'
'</form>')
@route('/imgs', method='POST')
def upload_imgs():
"""
Upload input & style images and return id
:return:
"""
# get files
input_img = request.files.get('input')
style_img = request.files.get('style')
if not check_img_png(input_img) or not check_img_png(style_img):
return 'File extension not allowed.'
# assign uuid
id = uuid.uuid4()
input_up_path = id.get_hex() + input_img.filename
style_up_path = id.get_hex() + style_img.filename
input_img.save(input_up_path)
style_img.save(style_up_path)
return template('Uploaded images. ID is "{{id}}".', id=id.get_hex())
def check_img_png(image):
"""
Check whether
:param image:
:return:
"""
name, ext = os.path.split(image.filename)
return ext not in ('png', 'jpeg', 'jpg')
@route('/statuses/<id>')
def show_status(id=''):
return
if __name__ == '__main__':
run(debug=True)
|
<commit_before><commit_msg>Add basic endpoints powered by bottle<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import bottle
from bottle import route, run, template, request, response
import os
import uuid
@route('/')
def get_simple_form():
"""
Returns simple images upload form
:return:
"""
return ('<form action="/imgs" method="post" enctype="multipart/form-data">\n'
' Input: <input name="input" type="file">\n'
' Style: <input name="style" type="file">\n'
' <input value="Upload" type="submit">\n'
'</form>')
@route('/imgs', method='POST')
def upload_imgs():
"""
Upload input & style images and return id
:return:
"""
# get files
input_img = request.files.get('input')
style_img = request.files.get('style')
if not check_img_png(input_img) or not check_img_png(style_img):
return 'File extension not allowed.'
# assign uuid
id = uuid.uuid4()
input_up_path = id.get_hex() + input_img.filename
style_up_path = id.get_hex() + style_img.filename
input_img.save(input_up_path)
style_img.save(style_up_path)
return template('Uploaded images. ID is "{{id}}".', id=id.get_hex())
def check_img_png(image):
"""
Check whether
:param image:
:return:
"""
name, ext = os.path.split(image.filename)
return ext not in ('png', 'jpeg', 'jpg')
@route('/statuses/<id>')
def show_status(id=''):
return
if __name__ == '__main__':
run(debug=True)
|
|
07442bd7ddd07635002493bafb6ac16a24fd5d82
|
hls.py
|
hls.py
|
var http = require('http');
var fs = require('fs');
var url = require('url');
var path = require('path');
var zlib = require('zlib');
PORT = 8000;
http.createServer(function (req, res) {
var uri = url.parse(req.url).pathname;
if (uri == '/player.html') {
res.writeHead(200, { 'Content-Type': 'text/html' });
res.write('<html><head><title>HLS Player fed by node.js' +
'</title></head><body>');
res.write('<video src="http://' + req.socket.localAddress +
':' + PORT + '/out.M3U8" controls autoplay></body></html>');
res.end();
return;
}
var filename = path.join("./", uri);
fs.exists(filename, function (exists) {
if (!exists) {
console.log('file not found: ' + filename);
res.writeHead(404, { 'Content-Type': 'text/plain' });
res.write('file not found: %s\n', filename);
res.end();
} else {
console.log('sending file: ' + filename);
switch (path.extname(uri)) {
case '.M3U8':
fs.readFile(filename, function (err, contents) {
if (err) {
res.writeHead(500);
res.end();
} else if (contents) {
res.writeHead(200,
{'Content-Type':
'application/vnd.apple.mpegurl'});
var ae = req.headers['accept-encoding'];
if (ae.match(/\bgzip\b/)) {
zlib.gzip(contents, function (err, zip) {
if (err) throw err;
res.writeHead(200,
{'content-encoding': 'gzip'});
res.end(zip);
});
} else {
res.end(contents, 'utf-8');
}
} else {
console.log('emptly playlist');
res.writeHead(500);
res.end();
}
});
break;
case '.ts':
res.writeHead(200, { 'Content-Type':
'video/MP2T' });
var stream = fs.createReadStream(filename,
{ bufferSize: 64 * 1024 });
stream.pipe(res);
break;
default:
console.log('unknown file type: ' +
path.extname(uri));
res.writeHead(500);
res.end();
}
}
});
}).listen(PORT);
|
Add script for http live streaming
|
Add script for http live streaming
|
Python
|
mit
|
voidabhi/node-scripts,voidabhi/node-scripts,voidabhi/node-scripts
|
Add script for http live streaming
|
var http = require('http');
var fs = require('fs');
var url = require('url');
var path = require('path');
var zlib = require('zlib');
PORT = 8000;
http.createServer(function (req, res) {
var uri = url.parse(req.url).pathname;
if (uri == '/player.html') {
res.writeHead(200, { 'Content-Type': 'text/html' });
res.write('<html><head><title>HLS Player fed by node.js' +
'</title></head><body>');
res.write('<video src="http://' + req.socket.localAddress +
':' + PORT + '/out.M3U8" controls autoplay></body></html>');
res.end();
return;
}
var filename = path.join("./", uri);
fs.exists(filename, function (exists) {
if (!exists) {
console.log('file not found: ' + filename);
res.writeHead(404, { 'Content-Type': 'text/plain' });
res.write('file not found: %s\n', filename);
res.end();
} else {
console.log('sending file: ' + filename);
switch (path.extname(uri)) {
case '.M3U8':
fs.readFile(filename, function (err, contents) {
if (err) {
res.writeHead(500);
res.end();
} else if (contents) {
res.writeHead(200,
{'Content-Type':
'application/vnd.apple.mpegurl'});
var ae = req.headers['accept-encoding'];
if (ae.match(/\bgzip\b/)) {
zlib.gzip(contents, function (err, zip) {
if (err) throw err;
res.writeHead(200,
{'content-encoding': 'gzip'});
res.end(zip);
});
} else {
res.end(contents, 'utf-8');
}
} else {
console.log('emptly playlist');
res.writeHead(500);
res.end();
}
});
break;
case '.ts':
res.writeHead(200, { 'Content-Type':
'video/MP2T' });
var stream = fs.createReadStream(filename,
{ bufferSize: 64 * 1024 });
stream.pipe(res);
break;
default:
console.log('unknown file type: ' +
path.extname(uri));
res.writeHead(500);
res.end();
}
}
});
}).listen(PORT);
|
<commit_before><commit_msg>Add script for http live streaming<commit_after>
|
var http = require('http');
var fs = require('fs');
var url = require('url');
var path = require('path');
var zlib = require('zlib');
PORT = 8000;
http.createServer(function (req, res) {
var uri = url.parse(req.url).pathname;
if (uri == '/player.html') {
res.writeHead(200, { 'Content-Type': 'text/html' });
res.write('<html><head><title>HLS Player fed by node.js' +
'</title></head><body>');
res.write('<video src="http://' + req.socket.localAddress +
':' + PORT + '/out.M3U8" controls autoplay></body></html>');
res.end();
return;
}
var filename = path.join("./", uri);
fs.exists(filename, function (exists) {
if (!exists) {
console.log('file not found: ' + filename);
res.writeHead(404, { 'Content-Type': 'text/plain' });
res.write('file not found: %s\n', filename);
res.end();
} else {
console.log('sending file: ' + filename);
switch (path.extname(uri)) {
case '.M3U8':
fs.readFile(filename, function (err, contents) {
if (err) {
res.writeHead(500);
res.end();
} else if (contents) {
res.writeHead(200,
{'Content-Type':
'application/vnd.apple.mpegurl'});
var ae = req.headers['accept-encoding'];
if (ae.match(/\bgzip\b/)) {
zlib.gzip(contents, function (err, zip) {
if (err) throw err;
res.writeHead(200,
{'content-encoding': 'gzip'});
res.end(zip);
});
} else {
res.end(contents, 'utf-8');
}
} else {
console.log('emptly playlist');
res.writeHead(500);
res.end();
}
});
break;
case '.ts':
res.writeHead(200, { 'Content-Type':
'video/MP2T' });
var stream = fs.createReadStream(filename,
{ bufferSize: 64 * 1024 });
stream.pipe(res);
break;
default:
console.log('unknown file type: ' +
path.extname(uri));
res.writeHead(500);
res.end();
}
}
});
}).listen(PORT);
|
Add script for http live streamingvar http = require('http');
var fs = require('fs');
var url = require('url');
var path = require('path');
var zlib = require('zlib');
PORT = 8000;
http.createServer(function (req, res) {
var uri = url.parse(req.url).pathname;
if (uri == '/player.html') {
res.writeHead(200, { 'Content-Type': 'text/html' });
res.write('<html><head><title>HLS Player fed by node.js' +
'</title></head><body>');
res.write('<video src="http://' + req.socket.localAddress +
':' + PORT + '/out.M3U8" controls autoplay></body></html>');
res.end();
return;
}
var filename = path.join("./", uri);
fs.exists(filename, function (exists) {
if (!exists) {
console.log('file not found: ' + filename);
res.writeHead(404, { 'Content-Type': 'text/plain' });
res.write('file not found: %s\n', filename);
res.end();
} else {
console.log('sending file: ' + filename);
switch (path.extname(uri)) {
case '.M3U8':
fs.readFile(filename, function (err, contents) {
if (err) {
res.writeHead(500);
res.end();
} else if (contents) {
res.writeHead(200,
{'Content-Type':
'application/vnd.apple.mpegurl'});
var ae = req.headers['accept-encoding'];
if (ae.match(/\bgzip\b/)) {
zlib.gzip(contents, function (err, zip) {
if (err) throw err;
res.writeHead(200,
{'content-encoding': 'gzip'});
res.end(zip);
});
} else {
res.end(contents, 'utf-8');
}
} else {
console.log('emptly playlist');
res.writeHead(500);
res.end();
}
});
break;
case '.ts':
res.writeHead(200, { 'Content-Type':
'video/MP2T' });
var stream = fs.createReadStream(filename,
{ bufferSize: 64 * 1024 });
stream.pipe(res);
break;
default:
console.log('unknown file type: ' +
path.extname(uri));
res.writeHead(500);
res.end();
}
}
});
}).listen(PORT);
|
<commit_before><commit_msg>Add script for http live streaming<commit_after>var http = require('http');
var fs = require('fs');
var url = require('url');
var path = require('path');
var zlib = require('zlib');
PORT = 8000;
http.createServer(function (req, res) {
var uri = url.parse(req.url).pathname;
if (uri == '/player.html') {
res.writeHead(200, { 'Content-Type': 'text/html' });
res.write('<html><head><title>HLS Player fed by node.js' +
'</title></head><body>');
res.write('<video src="http://' + req.socket.localAddress +
':' + PORT + '/out.M3U8" controls autoplay></body></html>');
res.end();
return;
}
var filename = path.join("./", uri);
fs.exists(filename, function (exists) {
if (!exists) {
console.log('file not found: ' + filename);
res.writeHead(404, { 'Content-Type': 'text/plain' });
res.write('file not found: %s\n', filename);
res.end();
} else {
console.log('sending file: ' + filename);
switch (path.extname(uri)) {
case '.M3U8':
fs.readFile(filename, function (err, contents) {
if (err) {
res.writeHead(500);
res.end();
} else if (contents) {
res.writeHead(200,
{'Content-Type':
'application/vnd.apple.mpegurl'});
var ae = req.headers['accept-encoding'];
if (ae.match(/\bgzip\b/)) {
zlib.gzip(contents, function (err, zip) {
if (err) throw err;
res.writeHead(200,
{'content-encoding': 'gzip'});
res.end(zip);
});
} else {
res.end(contents, 'utf-8');
}
} else {
console.log('emptly playlist');
res.writeHead(500);
res.end();
}
});
break;
case '.ts':
res.writeHead(200, { 'Content-Type':
'video/MP2T' });
var stream = fs.createReadStream(filename,
{ bufferSize: 64 * 1024 });
stream.pipe(res);
break;
default:
console.log('unknown file type: ' +
path.extname(uri));
res.writeHead(500);
res.end();
}
}
});
}).listen(PORT);
|
|
03f46b0d6867bcb8a88e53b26089705cb1667bbd
|
tools/create_from_sample_texts.py
|
tools/create_from_sample_texts.py
|
#!/usr/bin/env python
import teetime
import os
def main():
with open('samples/sample-texts.txt') as fh:
for line in fh:
print line.strip()
path = teetime.create_typography(line.strip(), colors=False)
os.rename(path, os.path.join('samples', os.path.basename(path)))
if __name__ == '__main__':
main()
|
Add script to generate images from all samples
|
Add script to generate images from all samples
|
Python
|
mit
|
megacool/teetime
|
Add script to generate images from all samples
|
#!/usr/bin/env python
import teetime
import os
def main():
with open('samples/sample-texts.txt') as fh:
for line in fh:
print line.strip()
path = teetime.create_typography(line.strip(), colors=False)
os.rename(path, os.path.join('samples', os.path.basename(path)))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to generate images from all samples<commit_after>
|
#!/usr/bin/env python
import teetime
import os
def main():
with open('samples/sample-texts.txt') as fh:
for line in fh:
print line.strip()
path = teetime.create_typography(line.strip(), colors=False)
os.rename(path, os.path.join('samples', os.path.basename(path)))
if __name__ == '__main__':
main()
|
Add script to generate images from all samples#!/usr/bin/env python
import teetime
import os
def main():
with open('samples/sample-texts.txt') as fh:
for line in fh:
print line.strip()
path = teetime.create_typography(line.strip(), colors=False)
os.rename(path, os.path.join('samples', os.path.basename(path)))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to generate images from all samples<commit_after>#!/usr/bin/env python
import teetime
import os
def main():
with open('samples/sample-texts.txt') as fh:
for line in fh:
print line.strip()
path = teetime.create_typography(line.strip(), colors=False)
os.rename(path, os.path.join('samples', os.path.basename(path)))
if __name__ == '__main__':
main()
|
|
9ba0ff62572dcfd7912c9b58091b59844f8e1753
|
results/sccg-table.py
|
results/sccg-table.py
|
import os
import sys
import pandas as pd
p4_data = "helmholtz-results/helmholtz_conv-d-4.csv"
p5_data = "helmholtz-results/helmholtz_conv-d-5.csv"
p6_data = "helmholtz-results/helmholtz_conv-d-6.csv"
p7_data = "helmholtz-results/helmholtz_conv-d-7.csv"
data_set = [p4_data, p5_data, p6_data, p7_data]
for data in data_set:
if not os.path.exists(data):
print("Cannot find data file '%s'" % data)
sys.exit(1)
table = r"""\begin{tabular}{| l | c | c | c |}
\hline
\multicolumn{4}{|c|}{$H^1$ Helmholtz} \\
\hline
\multirow{2}{*}{$k$} & mesh &
\multicolumn{2}{|c|}{$\norm{p-p_h}_{L^2(\Omega)} \leq \mathcal{O}(h^{k+1})$} \\
\cline{2-4}
& $r$ & $L^2$-error & rate \\
"""
lformat = r"""& {mesh: d} & {L2Errors:.3e} & {ConvRates} \\
"""
def rate(s):
if s == '---':
return s
else:
return "{s:.3f}".format(s=float(s))
for data in data_set:
df = pd.read_csv(data)
df = df.sort_values("Mesh")
degree = df.Degree.values[0]
table += r"""
\hline
\multirow{6}{*}{%d}
""" % degree
for k in df.Mesh:
sliced = df.loc[lambda x: x.Mesh == k]
table += lformat.format(mesh=k,
L2Errors=sliced.L2Errors.values[0],
ConvRates=rate(sliced.ConvRates.values[0]),
Reductions=sliced.ResidualReductions.values[0])
table += r"""\hline
\end{tabular}
"""
print(table)
|
Add script for Helmholtz rates
|
Add script for Helmholtz rates
|
Python
|
mit
|
thomasgibson/tabula-rasa
|
Add script for Helmholtz rates
|
import os
import sys
import pandas as pd
p4_data = "helmholtz-results/helmholtz_conv-d-4.csv"
p5_data = "helmholtz-results/helmholtz_conv-d-5.csv"
p6_data = "helmholtz-results/helmholtz_conv-d-6.csv"
p7_data = "helmholtz-results/helmholtz_conv-d-7.csv"
data_set = [p4_data, p5_data, p6_data, p7_data]
for data in data_set:
if not os.path.exists(data):
print("Cannot find data file '%s'" % data)
sys.exit(1)
table = r"""\begin{tabular}{| l | c | c | c |}
\hline
\multicolumn{4}{|c|}{$H^1$ Helmholtz} \\
\hline
\multirow{2}{*}{$k$} & mesh &
\multicolumn{2}{|c|}{$\norm{p-p_h}_{L^2(\Omega)} \leq \mathcal{O}(h^{k+1})$} \\
\cline{2-4}
& $r$ & $L^2$-error & rate \\
"""
lformat = r"""& {mesh: d} & {L2Errors:.3e} & {ConvRates} \\
"""
def rate(s):
if s == '---':
return s
else:
return "{s:.3f}".format(s=float(s))
for data in data_set:
df = pd.read_csv(data)
df = df.sort_values("Mesh")
degree = df.Degree.values[0]
table += r"""
\hline
\multirow{6}{*}{%d}
""" % degree
for k in df.Mesh:
sliced = df.loc[lambda x: x.Mesh == k]
table += lformat.format(mesh=k,
L2Errors=sliced.L2Errors.values[0],
ConvRates=rate(sliced.ConvRates.values[0]),
Reductions=sliced.ResidualReductions.values[0])
table += r"""\hline
\end{tabular}
"""
print(table)
|
<commit_before><commit_msg>Add script for Helmholtz rates<commit_after>
|
import os
import sys
import pandas as pd
p4_data = "helmholtz-results/helmholtz_conv-d-4.csv"
p5_data = "helmholtz-results/helmholtz_conv-d-5.csv"
p6_data = "helmholtz-results/helmholtz_conv-d-6.csv"
p7_data = "helmholtz-results/helmholtz_conv-d-7.csv"
data_set = [p4_data, p5_data, p6_data, p7_data]
for data in data_set:
if not os.path.exists(data):
print("Cannot find data file '%s'" % data)
sys.exit(1)
table = r"""\begin{tabular}{| l | c | c | c |}
\hline
\multicolumn{4}{|c|}{$H^1$ Helmholtz} \\
\hline
\multirow{2}{*}{$k$} & mesh &
\multicolumn{2}{|c|}{$\norm{p-p_h}_{L^2(\Omega)} \leq \mathcal{O}(h^{k+1})$} \\
\cline{2-4}
& $r$ & $L^2$-error & rate \\
"""
lformat = r"""& {mesh: d} & {L2Errors:.3e} & {ConvRates} \\
"""
def rate(s):
if s == '---':
return s
else:
return "{s:.3f}".format(s=float(s))
for data in data_set:
df = pd.read_csv(data)
df = df.sort_values("Mesh")
degree = df.Degree.values[0]
table += r"""
\hline
\multirow{6}{*}{%d}
""" % degree
for k in df.Mesh:
sliced = df.loc[lambda x: x.Mesh == k]
table += lformat.format(mesh=k,
L2Errors=sliced.L2Errors.values[0],
ConvRates=rate(sliced.ConvRates.values[0]),
Reductions=sliced.ResidualReductions.values[0])
table += r"""\hline
\end{tabular}
"""
print(table)
|
Add script for Helmholtz ratesimport os
import sys
import pandas as pd
p4_data = "helmholtz-results/helmholtz_conv-d-4.csv"
p5_data = "helmholtz-results/helmholtz_conv-d-5.csv"
p6_data = "helmholtz-results/helmholtz_conv-d-6.csv"
p7_data = "helmholtz-results/helmholtz_conv-d-7.csv"
data_set = [p4_data, p5_data, p6_data, p7_data]
for data in data_set:
if not os.path.exists(data):
print("Cannot find data file '%s'" % data)
sys.exit(1)
table = r"""\begin{tabular}{| l | c | c | c |}
\hline
\multicolumn{4}{|c|}{$H^1$ Helmholtz} \\
\hline
\multirow{2}{*}{$k$} & mesh &
\multicolumn{2}{|c|}{$\norm{p-p_h}_{L^2(\Omega)} \leq \mathcal{O}(h^{k+1})$} \\
\cline{2-4}
& $r$ & $L^2$-error & rate \\
"""
lformat = r"""& {mesh: d} & {L2Errors:.3e} & {ConvRates} \\
"""
def rate(s):
if s == '---':
return s
else:
return "{s:.3f}".format(s=float(s))
for data in data_set:
df = pd.read_csv(data)
df = df.sort_values("Mesh")
degree = df.Degree.values[0]
table += r"""
\hline
\multirow{6}{*}{%d}
""" % degree
for k in df.Mesh:
sliced = df.loc[lambda x: x.Mesh == k]
table += lformat.format(mesh=k,
L2Errors=sliced.L2Errors.values[0],
ConvRates=rate(sliced.ConvRates.values[0]),
Reductions=sliced.ResidualReductions.values[0])
table += r"""\hline
\end{tabular}
"""
print(table)
|
<commit_before><commit_msg>Add script for Helmholtz rates<commit_after>import os
import sys
import pandas as pd
p4_data = "helmholtz-results/helmholtz_conv-d-4.csv"
p5_data = "helmholtz-results/helmholtz_conv-d-5.csv"
p6_data = "helmholtz-results/helmholtz_conv-d-6.csv"
p7_data = "helmholtz-results/helmholtz_conv-d-7.csv"
data_set = [p4_data, p5_data, p6_data, p7_data]
for data in data_set:
if not os.path.exists(data):
print("Cannot find data file '%s'" % data)
sys.exit(1)
table = r"""\begin{tabular}{| l | c | c | c |}
\hline
\multicolumn{4}{|c|}{$H^1$ Helmholtz} \\
\hline
\multirow{2}{*}{$k$} & mesh &
\multicolumn{2}{|c|}{$\norm{p-p_h}_{L^2(\Omega)} \leq \mathcal{O}(h^{k+1})$} \\
\cline{2-4}
& $r$ & $L^2$-error & rate \\
"""
lformat = r"""& {mesh: d} & {L2Errors:.3e} & {ConvRates} \\
"""
def rate(s):
if s == '---':
return s
else:
return "{s:.3f}".format(s=float(s))
for data in data_set:
df = pd.read_csv(data)
df = df.sort_values("Mesh")
degree = df.Degree.values[0]
table += r"""
\hline
\multirow{6}{*}{%d}
""" % degree
for k in df.Mesh:
sliced = df.loc[lambda x: x.Mesh == k]
table += lformat.format(mesh=k,
L2Errors=sliced.L2Errors.values[0],
ConvRates=rate(sliced.ConvRates.values[0]),
Reductions=sliced.ResidualReductions.values[0])
table += r"""\hline
\end{tabular}
"""
print(table)
|
|
5d1da267791456f6c5e386d6e7204d02371c2eb2
|
readthedocs/rtd_tests/tests/test_gold.py
|
readthedocs/rtd_tests/tests/test_gold.py
|
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
from django_dynamic_fixture import get
from django_dynamic_fixture import new
from readthedocs.gold.models import GoldUser, LEVEL_CHOICES
from readthedocs.projects.models import Project
def create_user(username, password):
user = new(User, username=username)
user.set_password(password)
user.save()
return user
class GoldViewTests(TestCase):
def setUp(self):
self.user = create_user(username='owner', password='test')
self.project = get(Project, slug='test')
self.golduser = get(GoldUser, user=self.user, level=LEVEL_CHOICES[0][0])
self.client.login(username='owner', password='test')
def test_adding_projects(self):
self.assertEqual(self.golduser.projects.count(), 0)
resp = self.client.post(reverse('gold_projects'), data={'project': 'test'})
self.assertEqual(self.golduser.projects.count(), 1)
self.assertEqual(resp.status_code, 302)
def test_too_many_projects(self):
self.project2 = get(Project, slug='test2')
self.assertEqual(self.golduser.projects.count(), 0)
resp = self.client.post(reverse('gold_projects'), data={'project': self.project.slug})
self.assertEqual(self.golduser.projects.count(), 1)
self.assertEqual(resp.status_code, 302)
resp = self.client.post(reverse('gold_projects'), data={'project': self.project2.slug})
self.assertFormError(
resp, form='form', field=None, errors='You already have the max number of supported projects.'
)
self.assertEqual(resp.status_code, 200)
self.assertEqual(self.golduser.projects.count(), 1)
def test_remove_project(self):
self.assertEqual(self.golduser.projects.count(), 0)
self.client.post(reverse('gold_projects'), data={'project': self.project.slug})
self.assertEqual(self.golduser.projects.count(), 1)
self.client.post(
reverse('gold_projects_remove', args=[self.project.slug]),
)
self.assertEqual(self.golduser.projects.count(), 0)
|
Add tests for gold projects
|
Add tests for gold projects
|
Python
|
mit
|
emawind84/readthedocs.org,kenshinthebattosai/readthedocs.org,Tazer/readthedocs.org,SteveViss/readthedocs.org,atsuyim/readthedocs.org,clarkperkins/readthedocs.org,safwanrahman/readthedocs.org,kenwang76/readthedocs.org,SteveViss/readthedocs.org,sunnyzwh/readthedocs.org,mhils/readthedocs.org,attakei/readthedocs-oauth,rtfd/readthedocs.org,wanghaven/readthedocs.org,hach-que/readthedocs.org,stevepiercy/readthedocs.org,clarkperkins/readthedocs.org,CedarLogic/readthedocs.org,stevepiercy/readthedocs.org,gjtorikian/readthedocs.org,wijerasa/readthedocs.org,davidfischer/readthedocs.org,emawind84/readthedocs.org,pombredanne/readthedocs.org,sunnyzwh/readthedocs.org,espdev/readthedocs.org,clarkperkins/readthedocs.org,atsuyim/readthedocs.org,GovReady/readthedocs.org,fujita-shintaro/readthedocs.org,wanghaven/readthedocs.org,LukasBoersma/readthedocs.org,istresearch/readthedocs.org,soulshake/readthedocs.org,michaelmcandrew/readthedocs.org,attakei/readthedocs-oauth,sunnyzwh/readthedocs.org,sid-kap/readthedocs.org,hach-que/readthedocs.org,mhils/readthedocs.org,atsuyim/readthedocs.org,espdev/readthedocs.org,stevepiercy/readthedocs.org,CedarLogic/readthedocs.org,techtonik/readthedocs.org,sunnyzwh/readthedocs.org,titiushko/readthedocs.org,michaelmcandrew/readthedocs.org,attakei/readthedocs-oauth,tddv/readthedocs.org,gjtorikian/readthedocs.org,soulshake/readthedocs.org,kenshinthebattosai/readthedocs.org,kenshinthebattosai/readthedocs.org,rtfd/readthedocs.org,davidfischer/readthedocs.org,fujita-shintaro/readthedocs.org,mhils/readthedocs.org,safwanrahman/readthedocs.org,titiushko/readthedocs.org,rtfd/readthedocs.org,michaelmcandrew/readthedocs.org,laplaceliu/readthedocs.org,royalwang/readthedocs.org,tddv/readthedocs.org,mhils/readthedocs.org,michaelmcandrew/readthedocs.org,singingwolfboy/readthedocs.org,laplaceliu/readthedocs.org,GovReady/readthedocs.org,VishvajitP/readthedocs.org,kenwang76/readthedocs.org,wijerasa/readthedocs.org,istresearch/readthedocs.org,tddv/readthedocs.org,sid-kap/readthedocs.org,kenshinthebattosai/readthedocs.org,hach-que/readthedocs.org,safwanrahman/readthedocs.org,espdev/readthedocs.org,titiushko/readthedocs.org,emawind84/readthedocs.org,SteveViss/readthedocs.org,techtonik/readthedocs.org,kenwang76/readthedocs.org,fujita-shintaro/readthedocs.org,gjtorikian/readthedocs.org,LukasBoersma/readthedocs.org,LukasBoersma/readthedocs.org,davidfischer/readthedocs.org,pombredanne/readthedocs.org,singingwolfboy/readthedocs.org,GovReady/readthedocs.org,titiushko/readthedocs.org,singingwolfboy/readthedocs.org,sid-kap/readthedocs.org,singingwolfboy/readthedocs.org,soulshake/readthedocs.org,CedarLogic/readthedocs.org,CedarLogic/readthedocs.org,istresearch/readthedocs.org,espdev/readthedocs.org,hach-que/readthedocs.org,LukasBoersma/readthedocs.org,clarkperkins/readthedocs.org,emawind84/readthedocs.org,pombredanne/readthedocs.org,wanghaven/readthedocs.org,Tazer/readthedocs.org,VishvajitP/readthedocs.org,VishvajitP/readthedocs.org,fujita-shintaro/readthedocs.org,istresearch/readthedocs.org,soulshake/readthedocs.org,stevepiercy/readthedocs.org,Tazer/readthedocs.org,techtonik/readthedocs.org,wijerasa/readthedocs.org,wanghaven/readthedocs.org,kenwang76/readthedocs.org,royalwang/readthedocs.org,techtonik/readthedocs.org,davidfischer/readthedocs.org,espdev/readthedocs.org,atsuyim/readthedocs.org,rtfd/readthedocs.org,gjtorikian/readthedocs.org,safwanrahman/readthedocs.org,sid-kap/readthedocs.org,laplaceliu/readthedocs.org,royalwang/readthedocs.org,SteveViss/readthedocs.org,GovReady/readthedocs.org,laplaceliu/readthe
docs.org,wijerasa/readthedocs.org,Tazer/readthedocs.org,VishvajitP/readthedocs.org,royalwang/readthedocs.org,attakei/readthedocs-oauth
|
Add tests for gold projects
|
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
from django_dynamic_fixture import get
from django_dynamic_fixture import new
from readthedocs.gold.models import GoldUser, LEVEL_CHOICES
from readthedocs.projects.models import Project
def create_user(username, password):
user = new(User, username=username)
user.set_password(password)
user.save()
return user
class GoldViewTests(TestCase):
def setUp(self):
self.user = create_user(username='owner', password='test')
self.project = get(Project, slug='test')
self.golduser = get(GoldUser, user=self.user, level=LEVEL_CHOICES[0][0])
self.client.login(username='owner', password='test')
def test_adding_projects(self):
self.assertEqual(self.golduser.projects.count(), 0)
resp = self.client.post(reverse('gold_projects'), data={'project': 'test'})
self.assertEqual(self.golduser.projects.count(), 1)
self.assertEqual(resp.status_code, 302)
def test_too_many_projects(self):
self.project2 = get(Project, slug='test2')
self.assertEqual(self.golduser.projects.count(), 0)
resp = self.client.post(reverse('gold_projects'), data={'project': self.project.slug})
self.assertEqual(self.golduser.projects.count(), 1)
self.assertEqual(resp.status_code, 302)
resp = self.client.post(reverse('gold_projects'), data={'project': self.project2.slug})
self.assertFormError(
resp, form='form', field=None, errors='You already have the max number of supported projects.'
)
self.assertEqual(resp.status_code, 200)
self.assertEqual(self.golduser.projects.count(), 1)
def test_remove_project(self):
self.assertEqual(self.golduser.projects.count(), 0)
self.client.post(reverse('gold_projects'), data={'project': self.project.slug})
self.assertEqual(self.golduser.projects.count(), 1)
self.client.post(
reverse('gold_projects_remove', args=[self.project.slug]),
)
self.assertEqual(self.golduser.projects.count(), 0)
|
<commit_before><commit_msg>Add tests for gold projects<commit_after>
|
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
from django_dynamic_fixture import get
from django_dynamic_fixture import new
from readthedocs.gold.models import GoldUser, LEVEL_CHOICES
from readthedocs.projects.models import Project
def create_user(username, password):
user = new(User, username=username)
user.set_password(password)
user.save()
return user
class GoldViewTests(TestCase):
def setUp(self):
self.user = create_user(username='owner', password='test')
self.project = get(Project, slug='test')
self.golduser = get(GoldUser, user=self.user, level=LEVEL_CHOICES[0][0])
self.client.login(username='owner', password='test')
def test_adding_projects(self):
self.assertEqual(self.golduser.projects.count(), 0)
resp = self.client.post(reverse('gold_projects'), data={'project': 'test'})
self.assertEqual(self.golduser.projects.count(), 1)
self.assertEqual(resp.status_code, 302)
def test_too_many_projects(self):
self.project2 = get(Project, slug='test2')
self.assertEqual(self.golduser.projects.count(), 0)
resp = self.client.post(reverse('gold_projects'), data={'project': self.project.slug})
self.assertEqual(self.golduser.projects.count(), 1)
self.assertEqual(resp.status_code, 302)
resp = self.client.post(reverse('gold_projects'), data={'project': self.project2.slug})
self.assertFormError(
resp, form='form', field=None, errors='You already have the max number of supported projects.'
)
self.assertEqual(resp.status_code, 200)
self.assertEqual(self.golduser.projects.count(), 1)
def test_remove_project(self):
self.assertEqual(self.golduser.projects.count(), 0)
self.client.post(reverse('gold_projects'), data={'project': self.project.slug})
self.assertEqual(self.golduser.projects.count(), 1)
self.client.post(
reverse('gold_projects_remove', args=[self.project.slug]),
)
self.assertEqual(self.golduser.projects.count(), 0)
|
Add tests for gold projectsfrom django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
from django_dynamic_fixture import get
from django_dynamic_fixture import new
from readthedocs.gold.models import GoldUser, LEVEL_CHOICES
from readthedocs.projects.models import Project
def create_user(username, password):
user = new(User, username=username)
user.set_password(password)
user.save()
return user
class GoldViewTests(TestCase):
def setUp(self):
self.user = create_user(username='owner', password='test')
self.project = get(Project, slug='test')
self.golduser = get(GoldUser, user=self.user, level=LEVEL_CHOICES[0][0])
self.client.login(username='owner', password='test')
def test_adding_projects(self):
self.assertEqual(self.golduser.projects.count(), 0)
resp = self.client.post(reverse('gold_projects'), data={'project': 'test'})
self.assertEqual(self.golduser.projects.count(), 1)
self.assertEqual(resp.status_code, 302)
def test_too_many_projects(self):
self.project2 = get(Project, slug='test2')
self.assertEqual(self.golduser.projects.count(), 0)
resp = self.client.post(reverse('gold_projects'), data={'project': self.project.slug})
self.assertEqual(self.golduser.projects.count(), 1)
self.assertEqual(resp.status_code, 302)
resp = self.client.post(reverse('gold_projects'), data={'project': self.project2.slug})
self.assertFormError(
resp, form='form', field=None, errors='You already have the max number of supported projects.'
)
self.assertEqual(resp.status_code, 200)
self.assertEqual(self.golduser.projects.count(), 1)
def test_remove_project(self):
self.assertEqual(self.golduser.projects.count(), 0)
self.client.post(reverse('gold_projects'), data={'project': self.project.slug})
self.assertEqual(self.golduser.projects.count(), 1)
self.client.post(
reverse('gold_projects_remove', args=[self.project.slug]),
)
self.assertEqual(self.golduser.projects.count(), 0)
|
<commit_before><commit_msg>Add tests for gold projects<commit_after>from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
from django_dynamic_fixture import get
from django_dynamic_fixture import new
from readthedocs.gold.models import GoldUser, LEVEL_CHOICES
from readthedocs.projects.models import Project
def create_user(username, password):
user = new(User, username=username)
user.set_password(password)
user.save()
return user
class GoldViewTests(TestCase):
def setUp(self):
self.user = create_user(username='owner', password='test')
self.project = get(Project, slug='test')
self.golduser = get(GoldUser, user=self.user, level=LEVEL_CHOICES[0][0])
self.client.login(username='owner', password='test')
def test_adding_projects(self):
self.assertEqual(self.golduser.projects.count(), 0)
resp = self.client.post(reverse('gold_projects'), data={'project': 'test'})
self.assertEqual(self.golduser.projects.count(), 1)
self.assertEqual(resp.status_code, 302)
def test_too_many_projects(self):
self.project2 = get(Project, slug='test2')
self.assertEqual(self.golduser.projects.count(), 0)
resp = self.client.post(reverse('gold_projects'), data={'project': self.project.slug})
self.assertEqual(self.golduser.projects.count(), 1)
self.assertEqual(resp.status_code, 302)
resp = self.client.post(reverse('gold_projects'), data={'project': self.project2.slug})
self.assertFormError(
resp, form='form', field=None, errors='You already have the max number of supported projects.'
)
self.assertEqual(resp.status_code, 200)
self.assertEqual(self.golduser.projects.count(), 1)
def test_remove_project(self):
self.assertEqual(self.golduser.projects.count(), 0)
self.client.post(reverse('gold_projects'), data={'project': self.project.slug})
self.assertEqual(self.golduser.projects.count(), 1)
self.client.post(
reverse('gold_projects_remove', args=[self.project.slug]),
)
self.assertEqual(self.golduser.projects.count(), 0)
|
|
00c8c165e3f9a136a8950ca1fb0f2d9ade6731d6
|
pybtex/tests/bibtex_parser_test.py
|
pybtex/tests/bibtex_parser_test.py
|
from pybtex.database import BibliographyData
from pybtex.core import Entry
from pybtex.database.input.bibtex import Parser
from cStringIO import StringIO
test_data = [
(
'''
''',
BibliographyData(),
),
(
'''@ARTICLE{
test,
title={Polluted
with {DDT}.
},
}''',
BibliographyData({u'test': Entry('article', {u'title': 'Polluted with {DDT}.'})}),
),
]
def _test(bibtex_input, correct_result):
parser = Parser(encoding='UTF-8')
parser.parse_stream(StringIO(bibtex_input))
result = parser.data
assert result == correct_result
def test_bibtex_parser():
for bibtex_input, correct_result in test_data:
_test(bibtex_input, correct_result)
|
Add a regression test for whitespace normalization in the BibTeX parser.
|
Add a regression test for whitespace normalization in the BibTeX parser.
|
Python
|
mit
|
chbrown/pybtex,andreas-h/pybtex,andreas-h/pybtex,chbrown/pybtex
|
Add a regression test for whitespace normalization in the BibTeX parser.
|
from pybtex.database import BibliographyData
from pybtex.core import Entry
from pybtex.database.input.bibtex import Parser
from cStringIO import StringIO
test_data = [
(
'''
''',
BibliographyData(),
),
(
'''@ARTICLE{
test,
title={Polluted
with {DDT}.
},
}''',
BibliographyData({u'test': Entry('article', {u'title': 'Polluted with {DDT}.'})}),
),
]
def _test(bibtex_input, correct_result):
parser = Parser(encoding='UTF-8')
parser.parse_stream(StringIO(bibtex_input))
result = parser.data
assert result == correct_result
def test_bibtex_parser():
for bibtex_input, correct_result in test_data:
_test(bibtex_input, correct_result)
|
<commit_before><commit_msg>Add a regression test for whitespace normalization in the BibTeX parser.<commit_after>
|
from pybtex.database import BibliographyData
from pybtex.core import Entry
from pybtex.database.input.bibtex import Parser
from cStringIO import StringIO
test_data = [
(
'''
''',
BibliographyData(),
),
(
'''@ARTICLE{
test,
title={Polluted
with {DDT}.
},
}''',
BibliographyData({u'test': Entry('article', {u'title': 'Polluted with {DDT}.'})}),
),
]
def _test(bibtex_input, correct_result):
parser = Parser(encoding='UTF-8')
parser.parse_stream(StringIO(bibtex_input))
result = parser.data
assert result == correct_result
def test_bibtex_parser():
for bibtex_input, correct_result in test_data:
_test(bibtex_input, correct_result)
|
Add a regression test for whitespace normalization in the BibTeX parser.from pybtex.database import BibliographyData
from pybtex.core import Entry
from pybtex.database.input.bibtex import Parser
from cStringIO import StringIO
test_data = [
(
'''
''',
BibliographyData(),
),
(
'''@ARTICLE{
test,
title={Polluted
with {DDT}.
},
}''',
BibliographyData({u'test': Entry('article', {u'title': 'Polluted with {DDT}.'})}),
),
]
def _test(bibtex_input, correct_result):
parser = Parser(encoding='UTF-8')
parser.parse_stream(StringIO(bibtex_input))
result = parser.data
assert result == correct_result
def test_bibtex_parser():
for bibtex_input, correct_result in test_data:
_test(bibtex_input, correct_result)
|
<commit_before><commit_msg>Add a regression test for whitespace normalization in the BibTeX parser.<commit_after>from pybtex.database import BibliographyData
from pybtex.core import Entry
from pybtex.database.input.bibtex import Parser
from cStringIO import StringIO
test_data = [
(
'''
''',
BibliographyData(),
),
(
'''@ARTICLE{
test,
title={Polluted
with {DDT}.
},
}''',
BibliographyData({u'test': Entry('article', {u'title': 'Polluted with {DDT}.'})}),
),
]
def _test(bibtex_input, correct_result):
parser = Parser(encoding='UTF-8')
parser.parse_stream(StringIO(bibtex_input))
result = parser.data
assert result == correct_result
def test_bibtex_parser():
for bibtex_input, correct_result in test_data:
_test(bibtex_input, correct_result)
|
|
1e82283cc85b2eb449969849d23c4ffa2c090426
|
scripts/directory_batch_convert.py
|
scripts/directory_batch_convert.py
|
import os
import sys
import re
from pathlib import Path
import argparse
from convert2netcdf4 import parseandconvert
parser = argparse.ArgumentParser(description='Recursively batch convert Vaisala old-binary format to NetCDF files. Keeps directory structure.')
parser.add_argument('--from', dest='fromdir', help='Input directory', required=True)
parser.add_argument('--to', dest='todir', help='Output directory. Created if not exists. Files will be overwritten.', required=True)
EXTENSION_REGEX = r'.*\.edt$|.*\.[0-9]{2}e$'
def main():
args = parser.parse_args()
from_dir = Path(args.fromdir)
to_dir = Path(args.todir)
for dirpath, dirnames, files in os.walk(from_dir.as_posix()):
for name in files:
#if name.lower().endswith(extension):
if re.match(EXTENSION_REGEX, name.lower(), re.M|re.I):
input_file = os.path.join(dirpath, name)
input_path = Path(input_file)
diff = input_path.relative_to(from_dir)
output_path = to_dir.joinpath(diff)
extension = output_path.suffix
output_file = output_path.as_posix()
output_file = output_file.replace(extension, '.nc')
if not output_path.parent.exists():
output_path.parent.mkdir(parents=True, exist_ok=True)
print(output_file)
parseandconvert(input_file, output_file)
if __name__ == '__main__':
main()
sys.exit(0)
|
Add script to batch convert a directory recursively
|
Add script to batch convert a directory recursively
|
Python
|
mit
|
kinow/pccora
|
Add script to batch convert a directory recursively
|
import os
import sys
import re
from pathlib import Path
import argparse
from convert2netcdf4 import parseandconvert
parser = argparse.ArgumentParser(description='Recursively batch convert Vaisala old-binary format to NetCDF files. Keeps directory structure.')
parser.add_argument('--from', dest='fromdir', help='Input directory', required=True)
parser.add_argument('--to', dest='todir', help='Output directory. Created if not exists. Files will be overwritten.', required=True)
EXTENSION_REGEX = r'.*\.edt$|.*\.[0-9]{2}e$'
def main():
args = parser.parse_args()
from_dir = Path(args.fromdir)
to_dir = Path(args.todir)
for dirpath, dirnames, files in os.walk(from_dir.as_posix()):
for name in files:
#if name.lower().endswith(extension):
if re.match(EXTENSION_REGEX, name.lower(), re.M|re.I):
input_file = os.path.join(dirpath, name)
input_path = Path(input_file)
diff = input_path.relative_to(from_dir)
output_path = to_dir.joinpath(diff)
extension = output_path.suffix
output_file = output_path.as_posix()
output_file = output_file.replace(extension, '.nc')
if not output_path.parent.exists():
output_path.parent.mkdir(parents=True, exist_ok=True)
print(output_file)
parseandconvert(input_file, output_file)
if __name__ == '__main__':
main()
sys.exit(0)
|
<commit_before><commit_msg>Add script to batch convert a directory recursively<commit_after>
|
import os
import sys
import re
from pathlib import Path
import argparse
from convert2netcdf4 import parseandconvert
parser = argparse.ArgumentParser(description='Recursively batch convert Vaisala old-binary format to NetCDF files. Keeps directory structure.')
parser.add_argument('--from', dest='fromdir', help='Input directory', required=True)
parser.add_argument('--to', dest='todir', help='Output directory. Created if not exists. Files will be overwritten.', required=True)
EXTENSION_REGEX = r'.*\.edt$|.*\.[0-9]{2}e$'
def main():
args = parser.parse_args()
from_dir = Path(args.fromdir)
to_dir = Path(args.todir)
for dirpath, dirnames, files in os.walk(from_dir.as_posix()):
for name in files:
#if name.lower().endswith(extension):
if re.match(EXTENSION_REGEX, name.lower(), re.M|re.I):
input_file = os.path.join(dirpath, name)
input_path = Path(input_file)
diff = input_path.relative_to(from_dir)
output_path = to_dir.joinpath(diff)
extension = output_path.suffix
output_file = output_path.as_posix()
output_file = output_file.replace(extension, '.nc')
if not output_path.parent.exists():
output_path.parent.mkdir(parents=True, exist_ok=True)
print(output_file)
parseandconvert(input_file, output_file)
if __name__ == '__main__':
main()
sys.exit(0)
|
Add script to batch convert a directory recursivelyimport os
import sys
import re
from pathlib import Path
import argparse
from convert2netcdf4 import parseandconvert
parser = argparse.ArgumentParser(description='Recursively batch convert Vaisala old-binary format to NetCDF files. Keeps directory structure.')
parser.add_argument('--from', dest='fromdir', help='Input directory', required=True)
parser.add_argument('--to', dest='todir', help='Output directory. Created if not exists. Files will be overwritten.', required=True)
EXTENSION_REGEX = r'.*\.edt$|.*\.[0-9]{2}e$'
def main():
args = parser.parse_args()
from_dir = Path(args.fromdir)
to_dir = Path(args.todir)
for dirpath, dirnames, files in os.walk(from_dir.as_posix()):
for name in files:
#if name.lower().endswith(extension):
if re.match(EXTENSION_REGEX, name.lower(), re.M|re.I):
input_file = os.path.join(dirpath, name)
input_path = Path(input_file)
diff = input_path.relative_to(from_dir)
output_path = to_dir.joinpath(diff)
extension = output_path.suffix
output_file = output_path.as_posix()
output_file = output_file.replace(extension, '.nc')
if not output_path.parent.exists():
output_path.parent.mkdir(parents=True, exist_ok=True)
print(output_file)
parseandconvert(input_file, output_file)
if __name__ == '__main__':
main()
sys.exit(0)
|
<commit_before><commit_msg>Add script to batch convert a directory recursively<commit_after>import os
import sys
import re
from pathlib import Path
import argparse
from convert2netcdf4 import parseandconvert
parser = argparse.ArgumentParser(description='Recursively batch convert Vaisala old-binary format to NetCDF files. Keeps directory structure.')
parser.add_argument('--from', dest='fromdir', help='Input directory', required=True)
parser.add_argument('--to', dest='todir', help='Output directory. Created if not exists. Files will be overwritten.', required=True)
EXTENSION_REGEX = r'.*\.edt$|.*\.[0-9]{2}e$'
def main():
args = parser.parse_args()
from_dir = Path(args.fromdir)
to_dir = Path(args.todir)
for dirpath, dirnames, files in os.walk(from_dir.as_posix()):
for name in files:
#if name.lower().endswith(extension):
if re.match(EXTENSION_REGEX, name.lower(), re.M|re.I):
input_file = os.path.join(dirpath, name)
input_path = Path(input_file)
diff = input_path.relative_to(from_dir)
output_path = to_dir.joinpath(diff)
extension = output_path.suffix
output_file = output_path.as_posix()
output_file = output_file.replace(extension, '.nc')
if not output_path.parent.exists():
output_path.parent.mkdir(parents=True, exist_ok=True)
print(output_file)
parseandconvert(input_file, output_file)
if __name__ == '__main__':
main()
sys.exit(0)
|
|
860f8224bf8ef2f1553a17842d1389491f43bfa5
|
wagtail/tests/migrations/0008_auto_20141113_2125.py
|
wagtail/tests/migrations/0008_auto_20141113_2125.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('tests', '0007_registerdecorator'),
]
operations = [
migrations.AlterField(
model_name='pagechoosermodel',
name='page',
field=models.ForeignKey(help_text='help text', to='wagtailcore.Page'),
preserve_default=True,
),
migrations.AlterField(
model_name='snippetchoosermodel',
name='advert',
field=models.ForeignKey(help_text='help text', to='tests.Advert'),
preserve_default=True,
),
]
|
Add missing migration for wagtail.tests
|
Add missing migration for wagtail.tests
It only appears in Python 3.x for some reason.
|
Python
|
bsd-3-clause
|
inonit/wagtail,janusnic/wagtail,chrxr/wagtail,FlipperPA/wagtail,nrsimha/wagtail,benjaoming/wagtail,jordij/wagtail,taedori81/wagtail,takeshineshiro/wagtail,mephizzle/wagtail,hanpama/wagtail,darith27/wagtail,gogobook/wagtail,gasman/wagtail,wagtail/wagtail,timorieber/wagtail,rsalmaso/wagtail,FlipperPA/wagtail,mjec/wagtail,nimasmi/wagtail,mikedingjan/wagtail,kurtrwall/wagtail,marctc/wagtail,benjaoming/wagtail,chrxr/wagtail,marctc/wagtail,mixxorz/wagtail,rv816/wagtail,taedori81/wagtail,Klaudit/wagtail,gogobook/wagtail,kaedroho/wagtail,nutztherookie/wagtail,kurtw/wagtail,mikedingjan/wagtail,iansprice/wagtail,Tivix/wagtail,jordij/wagtail,wagtail/wagtail,janusnic/wagtail,benjaoming/wagtail,nimasmi/wagtail,tangentlabs/wagtail,quru/wagtail,quru/wagtail,nealtodd/wagtail,taedori81/wagtail,davecranwell/wagtail,jorge-marques/wagtail,torchbox/wagtail,iansprice/wagtail,takeflight/wagtail,zerolab/wagtail,bjesus/wagtail,mayapurmedia/wagtail,darith27/wagtail,darith27/wagtail,kaedroho/wagtail,janusnic/wagtail,chimeno/wagtail,hamsterbacke23/wagtail,stevenewey/wagtail,Pennebaker/wagtail,kurtw/wagtail,bjesus/wagtail,Tivix/wagtail,hamsterbacke23/wagtail,thenewguy/wagtail,FlipperPA/wagtail,mikedingjan/wagtail,iansprice/wagtail,WQuanfeng/wagtail,kaedroho/wagtail,taedori81/wagtail,mephizzle/wagtail,inonit/wagtail,WQuanfeng/wagtail,mjec/wagtail,mjec/wagtail,chimeno/wagtail,JoshBarr/wagtail,hanpama/wagtail,rv816/wagtail,nilnvoid/wagtail,mayapurmedia/wagtail,zerolab/wagtail,thenewguy/wagtail,nealtodd/wagtail,chrxr/wagtail,mikedingjan/wagtail,marctc/wagtail,stevenewey/wagtail,nilnvoid/wagtail,kurtrwall/wagtail,jnns/wagtail,JoshBarr/wagtail,hanpama/wagtail,rjsproxy/wagtail,timorieber/wagtail,kurtrwall/wagtail,Pennebaker/wagtail,dresiu/wagtail,nrsimha/wagtail,chrxr/wagtail,jnns/wagtail,zerolab/wagtail,jordij/wagtail,torchbox/wagtail,Toshakins/wagtail,benjaoming/wagtail,serzans/wagtail,mayapurmedia/wagtail,gogobook/wagtail,jorge-marques/wagtail,zerolab/wagtail,nutztherookie/wagtail,rsalmaso/wagtail,dresiu/wagtail,jorge-marques/wagtail,Pennebaker/wagtail,takeflight/wagtail,wagtail/wagtail,chimeno/wagtail,nrsimha/wagtail,inonit/wagtail,nealtodd/wagtail,Toshakins/wagtail,rv816/wagtail,m-sanders/wagtail,torchbox/wagtail,stevenewey/wagtail,gogobook/wagtail,Klaudit/wagtail,jorge-marques/wagtail,nutztherookie/wagtail,hanpama/wagtail,kaedroho/wagtail,mjec/wagtail,tangentlabs/wagtail,darith27/wagtail,nimasmi/wagtail,dresiu/wagtail,JoshBarr/wagtail,WQuanfeng/wagtail,timorieber/wagtail,m-sanders/wagtail,FlipperPA/wagtail,hamsterbacke23/wagtail,rjsproxy/wagtail,takeflight/wagtail,iansprice/wagtail,takeshineshiro/wagtail,tangentlabs/wagtail,marctc/wagtail,mixxorz/wagtail,jnns/wagtail,inonit/wagtail,WQuanfeng/wagtail,rsalmaso/wagtail,gasman/wagtail,Toshakins/wagtail,thenewguy/wagtail,zerolab/wagtail,iho/wagtail,dresiu/wagtail,Toshakins/wagtail,Pennebaker/wagtail,torchbox/wagtail,serzans/wagtail,tangentlabs/wagtail,gasman/wagtail,stevenewey/wagtail,chimeno/wagtail,kurtw/wagtail,rv816/wagtail,wagtail/wagtail,rjsproxy/wagtail,nilnvoid/wagtail,Tivix/wagtail,KimGlazebrook/wagtail-experiment,m-sanders/wagtail,bjesus/wagtail,Klaudit/wagtail,rjsproxy/wagtail,iho/wagtail,davecranwell/wagtail,thenewguy/wagtail,takeshineshiro/wagtail,thenewguy/wagtail,Klaudit/wagtail,bjesus/wagtail,dresiu/wagtail,chimeno/wagtail,jnns/wagtail,jorge-marques/wagtail,gasman/wagtail,JoshBarr/wagtail,serzans/wagtail,hamsterbacke23/wagtail,quru/wagtail,nilnvoid/wagtail,taedori81/wagtail,gasman/wagtail,iho/wagtail,iho/wagtail,kurtw/wagtail,janusnic/wagtail,timorieber/wagta
il,serzans/wagtail,rsalmaso/wagtail,mephizzle/wagtail,takeshineshiro/wagtail,Tivix/wagtail,nrsimha/wagtail,wagtail/wagtail,rsalmaso/wagtail,takeflight/wagtail,kurtrwall/wagtail,KimGlazebrook/wagtail-experiment,KimGlazebrook/wagtail-experiment,davecranwell/wagtail,mixxorz/wagtail,mephizzle/wagtail,quru/wagtail,jordij/wagtail,nealtodd/wagtail,nutztherookie/wagtail,mixxorz/wagtail,nimasmi/wagtail,kaedroho/wagtail,mixxorz/wagtail,mayapurmedia/wagtail,KimGlazebrook/wagtail-experiment,m-sanders/wagtail,davecranwell/wagtail
|
Add missing migration for wagtail.tests
It only appears in Python 3.x for some reason.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('tests', '0007_registerdecorator'),
]
operations = [
migrations.AlterField(
model_name='pagechoosermodel',
name='page',
field=models.ForeignKey(help_text='help text', to='wagtailcore.Page'),
preserve_default=True,
),
migrations.AlterField(
model_name='snippetchoosermodel',
name='advert',
field=models.ForeignKey(help_text='help text', to='tests.Advert'),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add missing migration for wagtail.tests
It only appears in Python 3.x for some reason.<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('tests', '0007_registerdecorator'),
]
operations = [
migrations.AlterField(
model_name='pagechoosermodel',
name='page',
field=models.ForeignKey(help_text='help text', to='wagtailcore.Page'),
preserve_default=True,
),
migrations.AlterField(
model_name='snippetchoosermodel',
name='advert',
field=models.ForeignKey(help_text='help text', to='tests.Advert'),
preserve_default=True,
),
]
|
Add missing migration for wagtail.tests
It only appears in Python 3.x for some reason.# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('tests', '0007_registerdecorator'),
]
operations = [
migrations.AlterField(
model_name='pagechoosermodel',
name='page',
field=models.ForeignKey(help_text='help text', to='wagtailcore.Page'),
preserve_default=True,
),
migrations.AlterField(
model_name='snippetchoosermodel',
name='advert',
field=models.ForeignKey(help_text='help text', to='tests.Advert'),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add missing migration for wagtail.tests
It only appears in Python 3.x for some reason.<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('tests', '0007_registerdecorator'),
]
operations = [
migrations.AlterField(
model_name='pagechoosermodel',
name='page',
field=models.ForeignKey(help_text='help text', to='wagtailcore.Page'),
preserve_default=True,
),
migrations.AlterField(
model_name='snippetchoosermodel',
name='advert',
field=models.ForeignKey(help_text='help text', to='tests.Advert'),
preserve_default=True,
),
]
|
|
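As a sketch of what this migration records, the model fields in wagtail.tests would end up carrying the new help_text roughly as below (model and field names are taken from the migration; the surrounding class definitions are assumptions):
from django.db import models
class PageChooserModel(models.Model):
    # help_text addition captured by the AlterField operations above
    page = models.ForeignKey('wagtailcore.Page', help_text='help text')
class SnippetChooserModel(models.Model):
    advert = models.ForeignKey('tests.Advert', help_text='help text')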
d8ad74b80f214ca313d01533ea6f15082cbb3af2
|
srttools/core/tests/test_calibration.py
|
srttools/core/tests/test_calibration.py
|
from ..calibration import CalibratorTable
from ..read_config import read_config
from ..scan import list_scans
import numpy as np
import matplotlib.pyplot as plt
import unittest
from astropy.table import Table
from ..imager import ScanSet
import os
import glob
class Test2_Calibration(unittest.TestCase):
@classmethod
def setup_class(klass):
import os
global DEBUG_MODE
DEBUG_MODE = True
klass.curdir = os.path.dirname(__file__)
klass.datadir = os.path.join(klass.curdir, 'data')
klass.config_file = \
os.path.abspath(os.path.join(klass.datadir, "calibrators.ini"))
klass.config = read_config(klass.config_file)
def step_1_calibrate(self):
"""Simple calibration from scans."""
scan_list = \
list_scans(self.config['datadir'],
self.config['calibrator_directories'])
scan_list.sort()
caltable = CalibratorTable()
caltable.from_scans(scan_list)
caltable.update()
caltable.write(os.path.join(self.datadir, 'calibrators.hdf5'),
path="config", overwrite=True)
def step_999_cleanup(self):
"""Clean up the mess."""
os.unlink(os.path.join(self.datadir, 'calibrators.hdf5'))
for d in self.config['list_of_directories']:
hfiles = glob.glob(os.path.join(self.config['datadir'], d, '*.hdf5'))
print(hfiles)
for h in hfiles:
os.unlink(h)
def test_all(self):
self.step_1_calibrate()
self.step_999_cleanup()
|
Add tests for calibration procedure (draft)
|
Add tests for calibration procedure (draft)
|
Python
|
bsd-3-clause
|
matteobachetti/srt-single-dish-tools
|
Add tests for calibration procedure (draft)
|
from ..calibration import CalibratorTable
from ..read_config import read_config
from ..scan import list_scans
import numpy as np
import matplotlib.pyplot as plt
import unittest
from astropy.table import Table
from ..imager import ScanSet
import os
import glob
class Test2_Calibration(unittest.TestCase):
@classmethod
def setup_class(klass):
import os
global DEBUG_MODE
DEBUG_MODE = True
klass.curdir = os.path.dirname(__file__)
klass.datadir = os.path.join(klass.curdir, 'data')
klass.config_file = \
os.path.abspath(os.path.join(klass.datadir, "calibrators.ini"))
klass.config = read_config(klass.config_file)
def step_1_calibrate(self):
"""Simple calibration from scans."""
scan_list = \
list_scans(self.config['datadir'],
self.config['calibrator_directories'])
scan_list.sort()
caltable = CalibratorTable()
caltable.from_scans(scan_list)
caltable.update()
caltable.write(os.path.join(self.datadir, 'calibrators.hdf5'),
path="config", overwrite=True)
def step_999_cleanup(self):
"""Clean up the mess."""
os.unlink(os.path.join(self.datadir, 'calibrators.hdf5'))
for d in self.config['list_of_directories']:
hfiles = glob.glob(os.path.join(self.config['datadir'], d, '*.hdf5'))
print(hfiles)
for h in hfiles:
os.unlink(h)
def test_all(self):
self.step_1_calibrate()
self.step_999_cleanup()
|
<commit_before><commit_msg>Add tests for calibration procedure (draft)<commit_after>
|
from ..calibration import CalibratorTable
from ..read_config import read_config
from ..scan import list_scans
import numpy as np
import matplotlib.pyplot as plt
import unittest
from astropy.table import Table
from ..imager import ScanSet
import os
import glob
class Test2_Calibration(unittest.TestCase):
@classmethod
def setup_class(klass):
import os
global DEBUG_MODE
DEBUG_MODE = True
klass.curdir = os.path.dirname(__file__)
klass.datadir = os.path.join(klass.curdir, 'data')
klass.config_file = \
os.path.abspath(os.path.join(klass.datadir, "calibrators.ini"))
klass.config = read_config(klass.config_file)
def step_1_calibrate(self):
"""Simple calibration from scans."""
scan_list = \
list_scans(self.config['datadir'],
self.config['calibrator_directories'])
scan_list.sort()
caltable = CalibratorTable()
caltable.from_scans(scan_list)
caltable.update()
caltable.write(os.path.join(self.datadir, 'calibrators.hdf5'),
path="config", overwrite=True)
def step_999_cleanup(self):
"""Clean up the mess."""
os.unlink(os.path.join(self.datadir, 'calibrators.hdf5'))
for d in self.config['list_of_directories']:
hfiles = glob.glob(os.path.join(self.config['datadir'], d, '*.hdf5'))
print(hfiles)
for h in hfiles:
os.unlink(h)
def test_all(self):
self.step_1_calibrate()
self.step_999_cleanup()
|
Add tests for calibration procedure (draft)from ..calibration import CalibratorTable
from ..read_config import read_config
from ..scan import list_scans
import numpy as np
import matplotlib.pyplot as plt
import unittest
from astropy.table import Table
from ..imager import ScanSet
import os
import glob
class Test2_Calibration(unittest.TestCase):
@classmethod
def setup_class(klass):
import os
global DEBUG_MODE
DEBUG_MODE = True
klass.curdir = os.path.dirname(__file__)
klass.datadir = os.path.join(klass.curdir, 'data')
klass.config_file = \
os.path.abspath(os.path.join(klass.datadir, "calibrators.ini"))
klass.config = read_config(klass.config_file)
def step_1_calibrate(self):
"""Simple calibration from scans."""
scan_list = \
list_scans(self.config['datadir'],
self.config['calibrator_directories'])
scan_list.sort()
caltable = CalibratorTable()
caltable.from_scans(scan_list)
caltable.update()
caltable.write(os.path.join(self.datadir, 'calibrators.hdf5'),
path="config", overwrite=True)
def step_999_cleanup(self):
"""Clean up the mess."""
os.unlink(os.path.join(self.datadir, 'calibrators.hdf5'))
for d in self.config['list_of_directories']:
hfiles = glob.glob(os.path.join(self.config['datadir'], d, '*.hdf5'))
print(hfiles)
for h in hfiles:
os.unlink(h)
def test_all(self):
self.step_1_calibrate()
self.step_999_cleanup()
|
<commit_before><commit_msg>Add tests for calibration procedure (draft)<commit_after>from ..calibration import CalibratorTable
from ..read_config import read_config
from ..scan import list_scans
import numpy as np
import matplotlib.pyplot as plt
import unittest
from astropy.table import Table
from ..imager import ScanSet
import os
import glob
class Test2_Calibration(unittest.TestCase):
@classmethod
def setup_class(klass):
import os
global DEBUG_MODE
DEBUG_MODE = True
klass.curdir = os.path.dirname(__file__)
klass.datadir = os.path.join(klass.curdir, 'data')
klass.config_file = \
os.path.abspath(os.path.join(klass.datadir, "calibrators.ini"))
klass.config = read_config(klass.config_file)
def step_1_calibrate(self):
"""Simple calibration from scans."""
scan_list = \
list_scans(self.config['datadir'],
self.config['calibrator_directories'])
scan_list.sort()
caltable = CalibratorTable()
caltable.from_scans(scan_list)
caltable.update()
caltable.write(os.path.join(self.datadir, 'calibrators.hdf5'),
path="config", overwrite=True)
def step_999_cleanup(self):
"""Clean up the mess."""
os.unlink(os.path.join(self.datadir, 'calibrators.hdf5'))
for d in self.config['list_of_directories']:
hfiles = glob.glob(os.path.join(self.config['datadir'], d, '*.hdf5'))
print(hfiles)
for h in hfiles:
os.unlink(h)
def test_all(self):
self.step_1_calibrate()
self.step_999_cleanup()
|
|
e39290b71299843eff858fb51543b99a06178a1d
|
ice40/picorv32_benchmark.py
|
ice40/picorv32_benchmark.py
|
#!/usr/bin/env python3
import os, sys, threading
from os import path
import subprocess
import re
num_runs = 8
if not path.exists("picorv32.json"):
os.remove("picorv32.json")
subprocess.run(["wget", "https://raw.githubusercontent.com/cliffordwolf/picorv32/master/picorv32.v"], check=True)
subprocess.run(["yosys", "-q", "-p", "synth_ice40 -json picorv32.json -top top", "picorv32.v", "picorv32_top.v"], check=True)
fmax = {}
if not path.exists("picorv32_work"):
os.mkdir("picorv32_work")
threads = []
for i in range(num_runs):
def runner(run):
ascfile = "picorv32_work/picorv32_s{}.asc".format(run)
if path.exists(ascfile):
os.remove(ascfile)
result = subprocess.run(["../nextpnr-ice40", "--hx8k", "--seed", str(run), "--json", "picorv32.json", "--asc", ascfile], stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL)
if result.returncode != 0:
print("Run {} failed!".format(run))
else:
icetime_res = subprocess.check_output(["icetime", "-d", "hx8k", ascfile])
fmax_m = re.search(r'\(([0-9.]+) MHz\)', icetime_res.decode('utf-8'))
fmax[run] = float(fmax_m.group(1))
threads.append(threading.Thread(target=runner, args=[i+1]))
for t in threads: t.start()
for t in threads: t.join()
fmax_min = min(fmax.values())
fmax_max = max(fmax.values())
fmax_avg = sum(fmax.values()) / len(fmax)
print("{}/{} runs passed".format(len(fmax), num_runs))
print("icetime: min = {} MHz, avg = {} MHz, max = {} MHz".format(fmax_min, fmax_avg, fmax_max))
|
Add a simple 8x benchmark script
|
Add a simple 8x benchmark script
Signed-off-by: David Shah <4be9b043912c80de45ffb490ebd07e45bc0fcd34@gmail.com>
|
Python
|
isc
|
YosysHQ/nextpnr,SymbiFlow/nextpnr,SymbiFlow/nextpnr,YosysHQ/nextpnr,YosysHQ/nextpnr,YosysHQ/nextpnr,SymbiFlow/nextpnr,SymbiFlow/nextpnr
|
Add a simple 8x benchmark script
Signed-off-by: David Shah <4be9b043912c80de45ffb490ebd07e45bc0fcd34@gmail.com>
|
#!/usr/bin/env python3
import os, sys, threading
from os import path
import subprocess
import re
num_runs = 8
if not path.exists("picorv32.json"):
os.remove("picorv32.json")
subprocess.run(["wget", "https://raw.githubusercontent.com/cliffordwolf/picorv32/master/picorv32.v"], check=True)
subprocess.run(["yosys", "-q", "-p", "synth_ice40 -json picorv32.json -top top", "picorv32.v", "picorv32_top.v"], check=True)
fmax = {}
if not path.exists("picorv32_work"):
os.mkdir("picorv32_work")
threads = []
for i in range(num_runs):
def runner(run):
ascfile = "picorv32_work/picorv32_s{}.asc".format(run)
if path.exists(ascfile):
os.remove(ascfile)
result = subprocess.run(["../nextpnr-ice40", "--hx8k", "--seed", str(run), "--json", "picorv32.json", "--asc", ascfile], stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL)
if result.returncode != 0:
print("Run {} failed!".format(run))
else:
icetime_res = subprocess.check_output(["icetime", "-d", "hx8k", ascfile])
fmax_m = re.search(r'\(([0-9.]+) MHz\)', icetime_res.decode('utf-8'))
fmax[run] = float(fmax_m.group(1))
threads.append(threading.Thread(target=runner, args=[i+1]))
for t in threads: t.start()
for t in threads: t.join()
fmax_min = min(fmax.values())
fmax_max = max(fmax.values())
fmax_avg = sum(fmax.values()) / len(fmax)
print("{}/{} runs passed".format(len(fmax), num_runs))
print("icetime: min = {} MHz, avg = {} MHz, max = {} MHz".format(fmax_min, fmax_avg, fmax_max))
|
<commit_before><commit_msg>Add a simple 8x benchmark script
Signed-off-by: David Shah <4be9b043912c80de45ffb490ebd07e45bc0fcd34@gmail.com><commit_after>
|
#!/usr/bin/env python3
import os, sys, threading
from os import path
import subprocess
import re
num_runs = 8
if not path.exists("picorv32.json"):
os.remove("picorv32.json")
subprocess.run(["wget", "https://raw.githubusercontent.com/cliffordwolf/picorv32/master/picorv32.v"], check=True)
subprocess.run(["yosys", "-q", "-p", "synth_ice40 -json picorv32.json -top top", "picorv32.v", "picorv32_top.v"], check=True)
fmax = {}
if not path.exists("picorv32_work"):
os.mkdir("picorv32_work")
threads = []
for i in range(num_runs):
def runner(run):
ascfile = "picorv32_work/picorv32_s{}.asc".format(run)
if path.exists(ascfile):
os.remove(ascfile)
result = subprocess.run(["../nextpnr-ice40", "--hx8k", "--seed", str(run), "--json", "picorv32.json", "--asc", ascfile], stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL)
if result.returncode != 0:
print("Run {} failed!".format(run))
else:
icetime_res = subprocess.check_output(["icetime", "-d", "hx8k", ascfile])
fmax_m = re.search(r'\(([0-9.]+) MHz\)', icetime_res.decode('utf-8'))
fmax[run] = float(fmax_m.group(1))
threads.append(threading.Thread(target=runner, args=[i+1]))
for t in threads: t.start()
for t in threads: t.join()
fmax_min = min(fmax.values())
fmax_max = max(fmax.values())
fmax_avg = sum(fmax.values()) / len(fmax)
print("{}/{} runs passed".format(len(fmax), num_runs))
print("icetime: min = {} MHz, avg = {} MHz, max = {} MHz".format(fmax_min, fmax_avg, fmax_max))
|
Add a simple 8x benchmark script
Signed-off-by: David Shah <4be9b043912c80de45ffb490ebd07e45bc0fcd34@gmail.com>#!/usr/bin/env python3
import os, sys, threading
from os import path
import subprocess
import re
num_runs = 8
if not path.exists("picorv32.json"):
os.remove("picorv32.json")
subprocess.run(["wget", "https://raw.githubusercontent.com/cliffordwolf/picorv32/master/picorv32.v"], check=True)
subprocess.run(["yosys", "-q", "-p", "synth_ice40 -json picorv32.json -top top", "picorv32.v", "picorv32_top.v"], check=True)
fmax = {}
if not path.exists("picorv32_work"):
os.mkdir("picorv32_work")
threads = []
for i in range(num_runs):
def runner(run):
ascfile = "picorv32_work/picorv32_s{}.asc".format(run)
if path.exists(ascfile):
os.remove(ascfile)
result = subprocess.run(["../nextpnr-ice40", "--hx8k", "--seed", str(run), "--json", "picorv32.json", "--asc", ascfile], stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL)
if result.returncode != 0:
print("Run {} failed!".format(run))
else:
icetime_res = subprocess.check_output(["icetime", "-d", "hx8k", ascfile])
fmax_m = re.search(r'\(([0-9.]+) MHz\)', icetime_res.decode('utf-8'))
fmax[run] = float(fmax_m.group(1))
threads.append(threading.Thread(target=runner, args=[i+1]))
for t in threads: t.start()
for t in threads: t.join()
fmax_min = min(fmax.values())
fmax_max = max(fmax.values())
fmax_avg = sum(fmax.values()) / len(fmax)
print("{}/{} runs passed".format(len(fmax), num_runs))
print("icetime: min = {} MHz, avg = {} MHz, max = {} MHz".format(fmax_min, fmax_avg, fmax_max))
|
<commit_before><commit_msg>Add a simple 8x benchmark script
Signed-off-by: David Shah <4be9b043912c80de45ffb490ebd07e45bc0fcd34@gmail.com><commit_after>#!/usr/bin/env python3
import os, sys, threading
from os import path
import subprocess
import re
num_runs = 8
if not path.exists("picorv32.json"):
os.remove("picorv32.json")
subprocess.run(["wget", "https://raw.githubusercontent.com/cliffordwolf/picorv32/master/picorv32.v"], check=True)
subprocess.run(["yosys", "-q", "-p", "synth_ice40 -json picorv32.json -top top", "picorv32.v", "picorv32_top.v"], check=True)
fmax = {}
if not path.exists("picorv32_work"):
os.mkdir("picorv32_work")
threads = []
for i in range(num_runs):
def runner(run):
ascfile = "picorv32_work/picorv32_s{}.asc".format(run)
if path.exists(ascfile):
os.remove(ascfile)
result = subprocess.run(["../nextpnr-ice40", "--hx8k", "--seed", str(run), "--json", "picorv32.json", "--asc", ascfile], stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL)
if result.returncode != 0:
print("Run {} failed!".format(run))
else:
icetime_res = subprocess.check_output(["icetime", "-d", "hx8k", ascfile])
fmax_m = re.search(r'\(([0-9.]+) MHz\)', icetime_res.decode('utf-8'))
fmax[run] = float(fmax_m.group(1))
threads.append(threading.Thread(target=runner, args=[i+1]))
for t in threads: t.start()
for t in threads: t.join()
fmax_min = min(fmax.values())
fmax_max = max(fmax.values())
fmax_avg = sum(fmax.values()) / len(fmax)
print("{}/{} runs passed".format(len(fmax), num_runs))
print("icetime: min = {} MHz, avg = {} MHz, max = {} MHz".format(fmax_min, fmax_avg, fmax_max))
|
|
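A small sketch of the fmax parsing used in the benchmark above, run against a made-up icetime report line (the surrounding report text is an assumption; only the "(... MHz)" part matters to the regex):
import re
# Hypothetical icetime output; icetime_res is bytes in the script above.
icetime_res = b"Total path delay: 20.32 ns (49.21 MHz)"
fmax_m = re.search(r'\(([0-9.]+) MHz\)', icetime_res.decode('utf-8'))
print(float(fmax_m.group(1)))  # 49.21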
0af447c0371bd157c03fc5097ac8c0e0a5873ff7
|
examples/satellites_analyze.py
|
examples/satellites_analyze.py
|
assert __name__ == '__main__'
import bayeslite.bql as bql
import bayeslite.core as core
import bayeslite.parse as parse
import crosscat.LocalEngine as localengine
import getopt
import sys
# XXX This is wrong -- should be part of bayesdb proper. But it, and
# copypasta of it, will do for now until internals are restructured
# well enough for bdb.execute to work.
def bql_exec(bdb, string):
import sys
print >>sys.stderr, '--> %s' % (string.strip(),)
phrases = parse.parse_bql_string(string)
phrase = phrases.next()
done = None
try:
phrases.next()
done = False
except StopIteration:
done = True
if done is not True:
raise ValueError('>1 phrase: %s' % (string,))
return bql.execute_phrase(bdb, phrase)
def usage():
print >>sys.stderr, 'Usage: %s [-hv] [-i <iter>] [-m <models>]' % \
(sys.argv[0])
iterations = None
modelnos = None
try:
opts, args = getopt.getopt(sys.argv[1:], '?hi:m:', [])
except getopt.GetoptError as e:
print str(e)
usage()
if 0 < len(args):
usage()
for o, a in opts:
if o in ('-h', '-?'):
usage()
sys.exit()
elif o == '-i':
iterations = int(a)
elif o == '-m':
modelnos = a
else:
assert False, 'bad option %s' % (o,)
bdb = core.BayesDB(localengine.LocalEngine(seed=0), pathname='satellites.bdb')
bql_exec(bdb, "create btable if not exists satellites" +
" from 'satellites.utf8.csv'")
bql_exec(bdb, 'initialize 10 models if not exists for satellites')
if iterations is not None:
modelspec = 'models %s' % (modelnos,) if modelnos is not None else ''
bql_exec(bdb, 'analyze satellites %s for %d iterations wait' %
(modelspec, iterations))
|
Add temporary satellites analysis example.
|
Add temporary satellites analysis example.
|
Python
|
apache-2.0
|
probcomp/bayeslite,probcomp/bayeslite
|
Add temporary satellites analysis example.
|
assert __name__ == '__main__'
import bayeslite.bql as bql
import bayeslite.core as core
import bayeslite.parse as parse
import crosscat.LocalEngine as localengine
import getopt
import sys
# XXX This is wrong -- should be part of bayesdb proper. But it, and
# copypasta of it, will do for now until internals are restructured
# well enough for bdb.execute to work.
def bql_exec(bdb, string):
import sys
print >>sys.stderr, '--> %s' % (string.strip(),)
phrases = parse.parse_bql_string(string)
phrase = phrases.next()
done = None
try:
phrases.next()
done = False
except StopIteration:
done = True
if done is not True:
raise ValueError('>1 phrase: %s' % (string,))
return bql.execute_phrase(bdb, phrase)
def usage():
print >>sys.stderr, 'Usage: %s [-hv] [-i <iter>] [-m <models>]' % \
(sys.argv[0])
iterations = None
modelnos = None
try:
opts, args = getopt.getopt(sys.argv[1:], '?hi:m:', [])
except getopt.GetoptError as e:
print str(e)
usage()
if 0 < len(args):
usage()
for o, a in opts:
if o in ('-h', '-?'):
usage()
sys.exit()
elif o == '-i':
iterations = int(a)
elif o == '-m':
modelnos = a
else:
assert False, 'bad option %s' % (o,)
bdb = core.BayesDB(localengine.LocalEngine(seed=0), pathname='satellites.bdb')
bql_exec(bdb, "create btable if not exists satellites" +
" from 'satellites.utf8.csv'")
bql_exec(bdb, 'initialize 10 models if not exists for satellites')
if iterations is not None:
modelspec = 'models %s' % (modelnos,) if modelnos is not None else ''
bql_exec(bdb, 'analyze satellites %s for %d iterations wait' %
(modelspec, iterations))
|
<commit_before><commit_msg>Add temporary satellites analysis example.<commit_after>
|
assert __name__ == '__main__'
import bayeslite.bql as bql
import bayeslite.core as core
import bayeslite.parse as parse
import crosscat.LocalEngine as localengine
import getopt
import sys
# XXX This is wrong -- should be part of bayesdb proper. But it, and
# copypasta of it, will do for now until internals are restructured
# well enough for bdb.execute to work.
def bql_exec(bdb, string):
import sys
print >>sys.stderr, '--> %s' % (string.strip(),)
phrases = parse.parse_bql_string(string)
phrase = phrases.next()
done = None
try:
phrases.next()
done = False
except StopIteration:
done = True
if done is not True:
raise ValueError('>1 phrase: %s' % (string,))
return bql.execute_phrase(bdb, phrase)
def usage():
print >>sys.stderr, 'Usage: %s [-hv] [-i <iter>] [-m <models>]' % \
(sys.argv[0])
iterations = None
modelnos = None
try:
opts, args = getopt.getopt(sys.argv[1:], '?hi:m:', [])
except getopt.GetoptError as e:
print str(e)
usage()
if 0 < len(args):
usage()
for o, a in opts:
if o in ('-h', '-?'):
usage()
sys.exit()
elif o == '-i':
iterations = int(a)
elif o == '-m':
modelnos = a
else:
assert False, 'bad option %s' % (o,)
bdb = core.BayesDB(localengine.LocalEngine(seed=0), pathname='satellites.bdb')
bql_exec(bdb, "create btable if not exists satellites" +
" from 'satellites.utf8.csv'")
bql_exec(bdb, 'initialize 10 models if not exists for satellites')
if iterations is not None:
modelspec = 'models %s' % (modelnos,) if modelnos is not None else ''
bql_exec(bdb, 'analyze satellites %s for %d iterations wait' %
(modelspec, iterations))
|
Add temporary satellites analysis example.assert __name__ == '__main__'
import bayeslite.bql as bql
import bayeslite.core as core
import bayeslite.parse as parse
import crosscat.LocalEngine as localengine
import getopt
import sys
# XXX This is wrong -- should be part of bayesdb proper. But it, and
# copypasta of it, will do for now until internals are restructured
# well enough for bdb.execute to work.
def bql_exec(bdb, string):
import sys
print >>sys.stderr, '--> %s' % (string.strip(),)
phrases = parse.parse_bql_string(string)
phrase = phrases.next()
done = None
try:
phrases.next()
done = False
except StopIteration:
done = True
if done is not True:
raise ValueError('>1 phrase: %s' % (string,))
return bql.execute_phrase(bdb, phrase)
def usage():
print >>sys.stderr, 'Usage: %s [-hv] [-i <iter>] [-m <models>]' % \
(sys.argv[0])
iterations = None
modelnos = None
try:
opts, args = getopt.getopt(sys.argv[1:], '?hi:m:', [])
except getopt.GetoptError as e:
print str(e)
usage()
if 0 < len(args):
usage()
for o, a in opts:
if o in ('-h', '-?'):
usage()
sys.exit()
elif o == '-i':
iterations = int(a)
elif o == '-m':
modelnos = a
else:
assert False, 'bad option %s' % (o,)
bdb = core.BayesDB(localengine.LocalEngine(seed=0), pathname='satellites.bdb')
bql_exec(bdb, "create btable if not exists satellites" +
" from 'satellites.utf8.csv'")
bql_exec(bdb, 'initialize 10 models if not exists for satellites')
if iterations is not None:
modelspec = 'models %s' % (modelnos,) if modelnos is not None else ''
bql_exec(bdb, 'analyze satellites %s for %d iterations wait' %
(modelspec, iterations))
|
<commit_before><commit_msg>Add temporary satellites analysis example.<commit_after>assert __name__ == '__main__'
import bayeslite.bql as bql
import bayeslite.core as core
import bayeslite.parse as parse
import crosscat.LocalEngine as localengine
import getopt
import sys
# XXX This is wrong -- should be part of bayesdb proper. But it, and
# copypasta of it, will do for now until internals are restructured
# well enough for bdb.execute to work.
def bql_exec(bdb, string):
import sys
print >>sys.stderr, '--> %s' % (string.strip(),)
phrases = parse.parse_bql_string(string)
phrase = phrases.next()
done = None
try:
phrases.next()
done = False
except StopIteration:
done = True
if done is not True:
raise ValueError('>1 phrase: %s' % (string,))
return bql.execute_phrase(bdb, phrase)
def usage():
print >>sys.stderr, 'Usage: %s [-hv] [-i <iter>] [-m <models>]' % \
(sys.argv[0])
iterations = None
modelnos = None
try:
opts, args = getopt.getopt(sys.argv[1:], '?hi:m:', [])
except getopt.GetoptError as e:
print str(e)
usage()
if 0 < len(args):
usage()
for o, a in opts:
if o in ('-h', '-?'):
usage()
sys.exit()
elif o == '-i':
iterations = int(a)
elif o == '-m':
modelnos = a
else:
assert False, 'bad option %s' % (o,)
bdb = core.BayesDB(localengine.LocalEngine(seed=0), pathname='satellites.bdb')
bql_exec(bdb, "create btable if not exists satellites" +
" from 'satellites.utf8.csv'")
bql_exec(bdb, 'initialize 10 models if not exists for satellites')
if iterations is not None:
modelspec = 'models %s' % (modelnos,) if modelnos is not None else ''
bql_exec(bdb, 'analyze satellites %s for %d iterations wait' %
(modelspec, iterations))
|
|
069a5758b16624ac2b547ede44123b64c89baf96
|
map_ytids_to_ka_urls.py
|
map_ytids_to_ka_urls.py
|
#!/usr/bin/env python3
from kapi import *
from utils import *
import argparse, sys
import time
def read_cmd():
"""Function for reading command line options."""
desc = "Program for mapping YouTube IDs to KA URLs to Crowdin WYSIWYG editor."
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('input_file',metavar='INPUT_FILE', help='Text file containing YouTube IDs and possibly filenames.')
parser.add_argument('-s','--subject', dest='subject', default='root', help='Print full tree for a given domain/subject.')
return parser.parse_args()
def print_children_titles(content_tree):
for child in content_tree["children"]:
pprint(child['title'])
def print_dict_without_children(dictionary):
for k in dictionary.keys():
if k != 'children':
print(k, dictionary[k])
if __name__ == "__main__":
opts = read_cmd()
infile = opts.input_file
subject_title = opts.subject
# List ytids may also contain filenames
ytids = []
# Reading file with YT id's
with open(infile, "r") as f:
for line in f:
y = line.split()
if len(y) != 0:
ytids.append(y[0])
else:
ytids.append(None)
tree = load_ka_tree('video')
if subject_title == 'root':
subtree = tree
else:
subtree = find_ka_topic(tree, subject_title)
videos = []
for ytid in ytids:
if ytid is not None:
v = find_video_by_youtube_id(subtree, ytid)
if v:
videos.append(find_video_by_youtube_id(subtree, ytid) )
else:
videos.append(ytid)
for v in videos:
try:
print(v['ka_url'].replace('www', 'translate'))
except:
print(v)
|
Add simple script mapping YTID to KA URLs.
|
Add simple script mapping YTID to KA URLs.
|
Python
|
mit
|
danielhollas/AmaraUpload,danielhollas/AmaraUpload
|
Add simple script mapping YTID to KA URLs.
|
#!/usr/bin/env python3
from kapi import *
from utils import *
import argparse, sys
import time
def read_cmd():
"""Function for reading command line options."""
desc = "Program for mapping YouTube IDs to KA URLs to Crowdin WYSIWYG editor."
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('input_file',metavar='INPUT_FILE', help='Text file containing YouTube IDs and possibly filenames.')
parser.add_argument('-s','--subject', dest='subject', default='root', help='Print full tree for a given domain/subject.')
return parser.parse_args()
def print_children_titles(content_tree):
for child in content_tree["children"]:
pprint(child['title'])
def print_dict_without_children(dictionary):
for k in dictionary.keys():
if k != 'children':
print(k, dictionary[k])
if __name__ == "__main__":
opts = read_cmd()
infile = opts.input_file
subject_title = opts.subject
# List ytids may also contain filenames
ytids = []
# Reading file with YT id's
with open(infile, "r") as f:
for line in f:
y = line.split()
if len(y) != 0:
ytids.append(y[0])
else:
ytids.append(None)
tree = load_ka_tree('video')
if subject_title == 'root':
subtree = tree
else:
subtree = find_ka_topic(tree, subject_title)
videos = []
for ytid in ytids:
if ytid is not None:
v = find_video_by_youtube_id(subtree, ytid)
if v:
videos.append(find_video_by_youtube_id(subtree, ytid) )
else:
videos.append(ytid)
for v in videos:
try:
print(v['ka_url'].replace('www', 'translate'))
except:
print(v)
|
<commit_before><commit_msg>Add simple script mapping YTID to KA URLs.<commit_after>
|
#!/usr/bin/env python3
from kapi import *
from utils import *
import argparse, sys
import time
def read_cmd():
"""Function for reading command line options."""
desc = "Program for mapping YouTube IDs to KA URLs to Crowdin WYSIWYG editor."
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('input_file',metavar='INPUT_FILE', help='Text file containing YouTube IDs and possibly filenames.')
parser.add_argument('-s','--subject', dest='subject', default='root', help='Print full tree for a given domain/subject.')
return parser.parse_args()
def print_children_titles(content_tree):
for child in content_tree["children"]:
pprint(child['title'])
def print_dict_without_children(dictionary):
for k in dictionary.keys():
if k != 'children':
print(k, dictionary[k])
if __name__ == "__main__":
opts = read_cmd()
infile = opts.input_file
subject_title = opts.subject
# List ytids may also contain filenames
ytids = []
# Reading file with YT id's
with open(infile, "r") as f:
for line in f:
y = line.split()
if len(y) != 0:
ytids.append(y[0])
else:
ytids.append(None)
tree = load_ka_tree('video')
if subject_title == 'root':
subtree = tree
else:
subtree = find_ka_topic(tree, subject_title)
videos = []
for ytid in ytids:
if ytid is not None:
v = find_video_by_youtube_id(subtree, ytid)
if v:
videos.append(find_video_by_youtube_id(subtree, ytid) )
else:
videos.append(ytid)
for v in videos:
try:
print(v['ka_url'].replace('www', 'translate'))
except:
print(v)
|
Add simple script mapping YTID to KA URLs.#!/usr/bin/env python3
from kapi import *
from utils import *
import argparse, sys
import time
def read_cmd():
"""Function for reading command line options."""
desc = "Program for mapping YouTube IDs to KA URLs to Crowdin WYSIWYG editor."
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('input_file',metavar='INPUT_FILE', help='Text file containing YouTube IDs and possibly filenames.')
parser.add_argument('-s','--subject', dest='subject', default='root', help='Print full tree for a given domain/subject.')
return parser.parse_args()
def print_children_titles(content_tree):
for child in content_tree["children"]:
pprint(child['title'])
def print_dict_without_children(dictionary):
for k in dictionary.keys():
if k != 'children':
print(k, dictionary[k])
if __name__ == "__main__":
opts = read_cmd()
infile = opts.input_file
subject_title = opts.subject
# List ytids may also contain filenames
ytids = []
# Reading file with YT id's
with open(infile, "r") as f:
for line in f:
y = line.split()
if len(y) != 0:
ytids.append(y[0])
else:
ytids.append(None)
tree = load_ka_tree('video')
if subject_title == 'root':
subtree = tree
else:
subtree = find_ka_topic(tree, subject_title)
videos = []
for ytid in ytids:
if ytid is not None:
v = find_video_by_youtube_id(subtree, ytid)
if v:
videos.append(find_video_by_youtube_id(subtree, ytid) )
else:
videos.append(ytid)
for v in videos:
try:
print(v['ka_url'].replace('www', 'translate'))
except:
print(v)
|
<commit_before><commit_msg>Add simple script mapping YTID to KA URLs.<commit_after>#!/usr/bin/env python3
from kapi import *
from utils import *
import argparse, sys
import time
def read_cmd():
"""Function for reading command line options."""
desc = "Program for mapping YouTube IDs to KA URLs to Crowdin WYSIWYG editor."
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('input_file',metavar='INPUT_FILE', help='Text file containing YouTube IDs and possibly filenames.')
parser.add_argument('-s','--subject', dest='subject', default='root', help='Print full tree for a given domain/subject.')
return parser.parse_args()
def print_children_titles(content_tree):
for child in content_tree["children"]:
pprint(child['title'])
def print_dict_without_children(dictionary):
for k in dictionary.keys():
if k != 'children':
print(k, dictionary[k])
if __name__ == "__main__":
opts = read_cmd()
infile = opts.input_file
subject_title = opts.subject
# List ytids may also contain filenames
ytids = []
# Reading file with YT id's
with open(infile, "r") as f:
for line in f:
y = line.split()
if len(y) != 0:
ytids.append(y[0])
else:
ytids.append(None)
tree = load_ka_tree('video')
if subject_title == 'root':
subtree = tree
else:
subtree = find_ka_topic(tree, subject_title)
videos = []
for ytid in ytids:
if ytid is not None:
v = find_video_by_youtube_id(subtree, ytid)
if v:
videos.append(find_video_by_youtube_id(subtree, ytid) )
else:
videos.append(ytid)
for v in videos:
try:
print(v['ka_url'].replace('www', 'translate'))
except:
print(v)
|
|
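The input file read by the script above is plain text with one YouTube ID per line, optionally followed by a filename; empty lines become None placeholders. A tiny standalone illustration of that parsing step, with made-up IDs:
# Illustrative only; the IDs are made up.
lines = ["dQw4w9WgXcQ intro.mp4", "", "abc123XYZ_0"]
ytids = []
for line in lines:
    y = line.split()
    if len(y) != 0:
        ytids.append(y[0])
    else:
        ytids.append(None)
print(ytids)  # ['dQw4w9WgXcQ', None, 'abc123XYZ_0']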
e50dc18525e0e4cbbef56cd16ba4e2d9690464f1
|
euler034.py
|
euler034.py
|
#!/usr/bin/python
from math import factorial, log
values = [0]*10
for i in range(10):
values[i] = factorial(i)
total = 0
for i in range(10, factorial(9) * 7):
target = 0
test = i
while test != 0:
x = test % 10
target += values[x]
test = test // 10
if i == target:
total += i
print(total)
|
Add solution for problem 34
|
Add solution for problem 34
Slow and brute force solution...
|
Python
|
mit
|
cifvts/PyEuler
|
Add solution for problem 34
Slow and brute force solution...
|
#!/usr/bin/python
from math import factorial, log
values = [0]*10
for i in range(10):
values[i] = factorial(i)
total = 0
for i in range(10, factorial(9) * 7):
target = 0
test = i
while test != 0:
x = test % 10
target += values[x]
test = test // 10
if i == target:
total += i
print(total)
|
<commit_before><commit_msg>Add solution for problem 34
Slow and brute force solution...<commit_after>
|
#!/usr/bin/python
from math import factorial, log
values = [0]*10
for i in range(10):
values[i] = factorial(i)
total = 0
for i in range(10, factorial(9) * 7):
target = 0
test = i
while test != 0:
x = test % 10
target += values[x]
test = test // 10
if i == target:
total += i
print(total)
|
Add solution for problem 34
Slow and brute force solution...#!/usr/bin/python
from math import factorial, log
values = [0]*10
for i in range(10):
values[i] = factorial(i)
total = 0
for i in range(10, factorial(9) * 7):
target = 0
test = i
while test != 0:
x = test % 10
target += values[x]
test = test // 10
if i == target:
total += i
print(total)
|
<commit_before><commit_msg>Add solution for problem 34
Slow and brute force solution...<commit_after>#!/usr/bin/python
from math import factorial, log
values = [0]*10
for i in range(10):
values[i] = factorial(i)
total = 0
for i in range(10, factorial(9) * 7):
target = 0
test = i
while test != 0:
x = test % 10
target += values[x]
test = test // 10
if i == target:
total += i
print(total)
|
|
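The loop bound of factorial(9) * 7 in the solution above works because an n-digit number is at least 10**(n-1), while the sum of its digit factorials is at most n * 9!; from n = 8 digits onwards the sum can no longer reach the number. A quick check of that bound:
from math import factorial
# 10**(n-1) <= n * 9! holds only up to n = 7 digits.
for n in range(1, 10):
    print(n, 10 ** (n - 1) <= n * factorial(9))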
943e162eee203f05b5a2d5b19bcb4a9c371cc93b
|
plugins/Scripts/Plugins/Kymograph_Comet_Velocity.py
|
plugins/Scripts/Plugins/Kymograph_Comet_Velocity.py
|
# @Float(label="Time Interval (s)", value=1) dt
# @Float(label="Pixel Length (um)", value=1) pixel_length
# @Boolean(label="Do you want to save results files ?", required=False) save_results
# @Boolean(label="Do you want to save ROI files ?", required=False) save_roi
# @ImageJ ij
# @ImagePlus img
# @Dataset data
# @StatusService status
import os
import math
from ij.plugin.frame import RoiManager
from ij.measure import ResultsTable
def main():
# Get image processor and imgplus
imp = img.getProcessor()
imgp = data.getImgPlus()
fname = data.getSource()
name = os.path.basename(fname)
# Get ROIManager
rm = RoiManager.getInstance()
if not rm:
status.warn("Use ROI Manager tool (Analyze>Tools>ROI Manager...).")
return False
if len(rm.getRoisAsArray()) == 0:
status.warn("ROI Manager does not have any ROI.")
return False
if save_roi:
roi_path = os.path.splitext(fname)[0] + ".ROI.zip"
rm.runCommand("Save", roi_path);
rt = ResultsTable()
for i, roi in enumerate(rm.getRoisAsArray()):
x1 = roi.x1
y1 = roi.y1
x2 = roi.x2
y2 = roi.y2
if x1 > x2:
x1, x2 = x2, x1
y1, y2 = y2, y1
run_length = roi.y1 - roi.y2
run_duration = roi.x2 - roi.x1
run_speed = run_length / run_duration
rt.incrementCounter()
rt.addValue("Track ID", i+1)
rt.addValue("Track Length (um)", run_length)
rt.addValue("Track duration (s)", run_duration)
rt.addValue("Track speed (um/s)", run_speed)
results_path = roi_path = os.path.splitext(fname)[0] + ".Results.csv"
rt.save(results_path)
rt.show('Comet Analysis Results for "%s"' % name)
main()
|
Add new script to get comet velocity from kymograph
|
Add new script to get comet velocity from kymograph
|
Python
|
bsd-3-clause
|
hadim/fiji_tools,hadim/fiji_scripts,hadim/fiji_scripts,hadim/fiji_scripts,hadim/fiji_tools
|
Add new script to get comet velocity from kymograph
|
# @Float(label="Time Interval (s)", value=1) dt
# @Float(label="Pixel Length (um)", value=1) pixel_length
# @Boolean(label="Do you want to save results files ?", required=False) save_results
# @Boolean(label="Do you want to save ROI files ?", required=False) save_roi
# @ImageJ ij
# @ImagePlus img
# @Dataset data
# @StatusService status
import os
import math
from ij.plugin.frame import RoiManager
from ij.measure import ResultsTable
def main():
# Get image processor and imgplus
imp = img.getProcessor()
imgp = data.getImgPlus()
fname = data.getSource()
name = os.path.basename(fname)
# Get ROIManager
rm = RoiManager.getInstance()
if not rm:
status.warn("Use ROI Manager tool (Analyze>Tools>ROI Manager...).")
return False
if len(rm.getRoisAsArray()) == 0:
status.warn("ROI Manager does not have any ROI.")
return False
if save_roi:
roi_path = os.path.splitext(fname)[0] + ".ROI.zip"
rm.runCommand("Save", roi_path);
rt = ResultsTable()
for i, roi in enumerate(rm.getRoisAsArray()):
x1 = roi.x1
y1 = roi.y1
x2 = roi.x2
y2 = roi.y2
if x1 > x2:
x1, x2 = x2, x1
y1, y2 = y2, y1
run_length = roi.y1 - roi.y2
run_duration = roi.x2 - roi.x1
run_speed = run_length / run_duration
rt.incrementCounter()
rt.addValue("Track ID", i+1)
rt.addValue("Track Length (um)", run_length)
rt.addValue("Track duration (s)", run_duration)
rt.addValue("Track speed (um/s)", run_speed)
results_path = roi_path = os.path.splitext(fname)[0] + ".Results.csv"
rt.save(results_path)
rt.show('Comet Analysis Results for "%s"' % name)
main()
|
<commit_before><commit_msg>Add new script to get comet velocity from kymograph<commit_after>
|
# @Float(label="Time Interval (s)", value=1) dt
# @Float(label="Pixel Length (um)", value=1) pixel_length
# @Boolean(label="Do you want to save results files ?", required=False) save_results
# @Boolean(label="Do you want to save ROI files ?", required=False) save_roi
# @ImageJ ij
# @ImagePlus img
# @Dataset data
# @StatusService status
import os
import math
from ij.plugin.frame import RoiManager
from ij.measure import ResultsTable
def main():
# Get image processor and imgplus
imp = img.getProcessor()
imgp = data.getImgPlus()
fname = data.getSource()
name = os.path.basename(fname)
# Get ROIManager
rm = RoiManager.getInstance()
if not rm:
status.warn("Use ROI Manager tool (Analyze>Tools>ROI Manager...).")
return False
if len(rm.getRoisAsArray()) == 0:
status.warn("ROI Manager does not have any ROI.")
return False
if save_roi:
roi_path = os.path.splitext(fname)[0] + ".ROI.zip"
rm.runCommand("Save", roi_path);
rt = ResultsTable()
for i, roi in enumerate(rm.getRoisAsArray()):
x1 = roi.x1
y1 = roi.y1
x2 = roi.x2
y2 = roi.y2
if x1 > x2:
x1, x2 = x2, x1
y1, y2 = y2, y1
run_length = roi.y1 - roi.y2
run_duration = roi.x2 - roi.x1
run_speed = run_length / run_duration
rt.incrementCounter()
rt.addValue("Track ID", i+1)
rt.addValue("Track Length (um)", run_length)
rt.addValue("Track duration (s)", run_duration)
rt.addValue("Track speed (um/s)", run_speed)
results_path = roi_path = os.path.splitext(fname)[0] + ".Results.csv"
rt.save(results_path)
rt.show('Comet Analysis Results for "%s"' % name)
main()
|
Add new script to get comet velocity from kymograph# @Float(label="Time Interval (s)", value=1) dt
# @Float(label="Pixel Length (um)", value=1) pixel_length
# @Boolean(label="Do you want to save results files ?", required=False) save_results
# @Boolean(label="Do you want to save ROI files ?", required=False) save_roi
# @ImageJ ij
# @ImagePlus img
# @Dataset data
# @StatusService status
import os
import math
from ij.plugin.frame import RoiManager
from ij.measure import ResultsTable
def main():
# Get image processor and imgplus
imp = img.getProcessor()
imgp = data.getImgPlus()
fname = data.getSource()
name = os.path.basename(fname)
# Get ROIManager
rm = RoiManager.getInstance()
if not rm:
status.warn("Use ROI Manager tool (Analyze>Tools>ROI Manager...).")
return False
if len(rm.getRoisAsArray()) == 0:
status.warn("ROI Manager does not have any ROI.")
return False
if save_roi:
roi_path = os.path.splitext(fname)[0] + ".ROI.zip"
rm.runCommand("Save", roi_path);
rt = ResultsTable()
for i, roi in enumerate(rm.getRoisAsArray()):
x1 = roi.x1
y1 = roi.y1
x2 = roi.x2
y2 = roi.y2
if x1 > x2:
x1, x2 = x2, x1
y1, y2 = y2, y1
run_length = roi.y1 - roi.y2
run_duration = roi.x2 - roi.x1
run_speed = run_length / run_duration
rt.incrementCounter()
rt.addValue("Track ID", i+1)
rt.addValue("Track Length (um)", run_length)
rt.addValue("Track duration (s)", run_duration)
rt.addValue("Track speed (um/s)", run_speed)
results_path = roi_path = os.path.splitext(fname)[0] + ".Results.csv"
rt.save(results_path)
rt.show('Comet Analysis Results for "%s"' % name)
main()
|
<commit_before><commit_msg>Add new script to get comet velocity from kymograph<commit_after># @Float(label="Time Interval (s)", value=1) dt
# @Float(label="Pixel Length (um)", value=1) pixel_length
# @Boolean(label="Do you want to save results files ?", required=False) save_results
# @Boolean(label="Do you want to save ROI files ?", required=False) save_roi
# @ImageJ ij
# @ImagePlus img
# @Dataset data
# @StatusService status
import os
import math
from ij.plugin.frame import RoiManager
from ij.measure import ResultsTable
def main():
# Get image processor and imgplus
imp = img.getProcessor()
imgp = data.getImgPlus()
fname = data.getSource()
name = os.path.basename(fname)
# Get ROIManager
rm = RoiManager.getInstance()
if not rm:
status.warn("Use ROI Manager tool (Analyze>Tools>ROI Manager...).")
return False
if len(rm.getRoisAsArray()) == 0:
status.warn("ROI Manager does not have any ROI.")
return False
if save_roi:
roi_path = os.path.splitext(fname)[0] + ".ROI.zip"
rm.runCommand("Save", roi_path);
rt = ResultsTable()
for i, roi in enumerate(rm.getRoisAsArray()):
x1 = roi.x1
y1 = roi.y1
x2 = roi.x2
y2 = roi.y2
if x1 > x2:
x1, x2 = x2, x1
y1, y2 = y2, y1
run_length = roi.y1 - roi.y2
run_duration = roi.x2 - roi.x1
run_speed = run_length / run_duration
rt.incrementCounter()
rt.addValue("Track ID", i+1)
rt.addValue("Track Length (um)", run_length)
rt.addValue("Track duration (s)", run_duration)
rt.addValue("Track speed (um/s)", run_speed)
results_path = roi_path = os.path.splitext(fname)[0] + ".Results.csv"
rt.save(results_path)
rt.show('Comet Analysis Results for "%s"' % name)
main()
|
|
96bbf25be25482a7edfd92ec9b956b0bbeab39c4
|
src/traffic/__init__.py
|
src/traffic/__init__.py
|
from datetime import datetime
import zmq
from messages import common_pb2, replies_pb2, requests_pb2
class Connection(object):
def __init__(self, uri, context=None):
self._uri = uri
if context is None:
context = zmq.Context()
self._context = context
self._socket = self._context.socket(zmq.REQ)
self._connected = False
def connect(self):
assert not self._connected
self._socket.connect(self._uri)
self._connected = True
def disconnect(self):
assert self._connected
self._socket.disconnect(self._uri)
self._connected = False
def __enter__(self):
self.connect()
return self
def __exit__(self, type, value, traceback):
self.disconnect()
def send_msg(self, msg):
assert self._connected
assert isinstance(msg, requests_pb2.Request)
self._socket.send(msg.SerializeToString())
reply = replies_pb2.Reply()
reply.ParseFromString(self._socket.recv())
return reply
def get_summary_unix(conn, interval_start, interval_end, clients):
assert interval_start < interval_end;
assert len(clients) != 0
request = requests_pb2.Request()
request.version=1
message = request.summary
message.range.start = interval_start
message.range.end = interval_end
message.addresses.extend(clients)
reply = conn.send_msg(request)
return reply.summary
def get_summary(conn, interval_start, interval_end, clients):
assert isinstance(interval_start, datetime)
assert isinstance(interval_end, datetime)
epoch = datetime(1970,1,1)
return get_summary_unix(conn,
int((interval_start - epoch).total_seconds()),
int((interval_end - epoch).total_seconds()),
clients)
|
Add a basic summary query implementation
|
Add a basic summary query implementation
Signed-off-by: Jan Losinski <577c4104c61edf9f052c616c0c23e67bef4a9955@wh2.tu-dresden.de>
|
Python
|
bsd-3-clause
|
agdsn/traffic-service-client,agdsn/traffic-service-client
|
Add a basic summary query implementation
Signed-off-by: Jan Losinski <577c4104c61edf9f052c616c0c23e67bef4a9955@wh2.tu-dresden.de>
|
from datetime import datetime
import zmq
from messages import common_pb2, replies_pb2, requests_pb2
class Connection(object):
def __init__(self, uri, context=None):
self._uri = uri
if context is None:
context = zmq.Context()
self._context = context
self._socket = self._context.socket(zmq.REQ)
self._connected = False
def connect(self):
assert not self._connected
self._socket.connect(self._uri)
self._connected = True
def disconnect(self):
assert self._connected
self._socket.disconnect(self._uri)
self._connected = False
def __enter__(self):
self.connect()
return self
def __exit__(self, type, value, traceback):
self.disconnect()
def send_msg(self, msg):
assert self._connected
assert isinstance(msg, requests_pb2.Request)
self._socket.send(msg.SerializeToString())
reply = replies_pb2.Reply()
reply.ParseFromString(self._socket.recv())
return reply
def get_summary_unix(conn, interval_start, interval_end, clients):
assert interval_start < interval_end;
assert len(clients) != 0
request = requests_pb2.Request()
request.version=1
message = request.summary
message.range.start = interval_start
message.range.end = interval_end
message.addresses.extend(clients)
reply = conn.send_msg(request)
return reply.summary
def get_summary(conn, interval_start, interval_end, clients):
assert isinstance(interval_start, datetime)
assert isinstance(interval_end, datetime)
epoch = datetime(1970,1,1)
return get_summary_unix(conn,
int((interval_start - epoch).total_seconds()),
int((interval_end - epoch).total_seconds()),
clients)
|
<commit_before><commit_msg>Add a basic summary query implementation
Signed-off-by: Jan Losinski <577c4104c61edf9f052c616c0c23e67bef4a9955@wh2.tu-dresden.de><commit_after>
|
from datetime import datetime
import zmq
from messages import common_pb2, replies_pb2, requests_pb2
class Connection(object):
def __init__(self, uri, context=None):
self._uri = uri
if context is None:
context = zmq.Context()
self._context = context
self._socket = self._context.socket(zmq.REQ)
self._connected = False
def connect(self):
assert not self._connected
self._socket.connect(self._uri)
self._connected = True
def disconnect(self):
assert self._connected
self._socket.disconnect(self._uri)
self._connected = False
def __enter__(self):
self.connect()
return self
def __exit__(self, type, value, traceback):
self.disconnect()
def send_msg(self, msg):
assert self._connected
assert isinstance(msg, requests_pb2.Request)
self._socket.send(msg.SerializeToString())
reply = replies_pb2.Reply()
reply.ParseFromString(self._socket.recv())
return reply
def get_summary_unix(conn, interval_start, interval_end, clients):
assert interval_start < interval_end;
assert len(clients) != 0
request = requests_pb2.Request()
request.version=1
message = request.summary
message.range.start = interval_start
message.range.end = interval_end
message.addresses.extend(clients)
reply = conn.send_msg(request)
return reply.summary
def get_summary(conn, interval_start, interval_end, clients):
assert isinstance(interval_start, datetime)
assert isinstance(interval_end, datetime)
epoch = datetime(1970,1,1)
return get_summary_unix(conn,
int((interval_start - epoch).total_seconds()),
int((interval_end - epoch).total_seconds()),
clients)
|
Add a basic summary query implementation
Signed-off-by: Jan Losinski <577c4104c61edf9f052c616c0c23e67bef4a9955@wh2.tu-dresden.de>from datetime import datetime
import zmq
from messages import common_pb2, replies_pb2, requests_pb2
class Connection(object):
def __init__(self, uri, context=None):
self._uri = uri
if context is None:
context = zmq.Context()
self._context = context
self._socket = self._context.socket(zmq.REQ)
self._connected = False
def connect(self):
assert not self._connected
self._socket.connect(self._uri)
self._connected = True
def disconnect(self):
assert self._connected
self._socket.disconnect(self._uri)
self._connected = False
def __enter__(self):
self.connect()
return self
def __exit__(self, type, value, traceback):
self.disconnect()
def send_msg(self, msg):
assert self._connected
assert isinstance(msg, requests_pb2.Request)
self._socket.send(msg.SerializeToString())
reply = replies_pb2.Reply()
reply.ParseFromString(self._socket.recv())
return reply
def get_summary_unix(conn, interval_start, interval_end, clients):
assert interval_start < interval_end;
assert len(clients) != 0
request = requests_pb2.Request()
request.version=1
message = request.summary
message.range.start = interval_start
message.range.end = interval_end
message.addresses.extend(clients)
reply = conn.send_msg(request)
return reply.summary
def get_summary(conn, interval_start, interval_end, clients):
assert isinstance(interval_start, datetime)
assert isinstance(interval_end, datetime)
epoch = datetime(1970,1,1)
return get_summary_unix(conn,
int((interval_start - epoch).total_seconds()),
int((interval_end - epoch).total_seconds()),
clients)
|
<commit_before><commit_msg>Add a basic summary query implementation
Signed-off-by: Jan Losinski <577c4104c61edf9f052c616c0c23e67bef4a9955@wh2.tu-dresden.de><commit_after>from datetime import datetime
import zmq
from messages import common_pb2, replies_pb2, requests_pb2
class Connection(object):
def __init__(self, uri, context=None):
self._uri = uri
if context is None:
context = zmq.Context()
self._context = context
self._socket = self._context.socket(zmq.REQ)
self._connected = False
def connect(self):
assert not self._connected
self._socket.connect(self._uri)
self._connected = True
def disconnect(self):
assert self._connected
self._socket.disconnect(self._uri)
self._connected = False
def __enter__(self):
self.connect()
return self
def __exit__(self, type, value, traceback):
self.disconnect()
def send_msg(self, msg):
assert self._connected
assert isinstance(msg, requests_pb2.Request)
self._socket.send(msg.SerializeToString())
reply = replies_pb2.Reply()
reply.ParseFromString(self._socket.recv())
return reply
def get_summary_unix(conn, interval_start, interval_end, clients):
assert interval_start < interval_end;
assert len(clients) != 0
request = requests_pb2.Request()
request.version=1
message = request.summary
message.range.start = interval_start
message.range.end = interval_end
message.addresses.extend(clients)
reply = conn.send_msg(request)
return reply.summary
def get_summary(conn, interval_start, interval_end, clients):
assert isinstance(interval_start, datetime)
assert isinstance(interval_end, datetime)
epoch = datetime(1970,1,1)
return get_summary_unix(conn,
int((interval_start - epoch).total_seconds()),
int((interval_end - epoch).total_seconds()),
clients)
|
|
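For context on the ZeroMQ summary-query record above, a minimal sketch of how the Connection context manager and get_summary helper might be combined from calling code; the module import path, endpoint URI, time range, and client addresses are hypothetical placeholders, not part of the original commit:

from datetime import datetime, timedelta
# Hypothetical import path; the commit above does not name its module.
from summary_client import Connection, get_summary

# Query the last hour of traffic for two example clients.
with Connection("tcp://127.0.0.1:5555") as conn:
    end = datetime.utcnow()
    start = end - timedelta(hours=1)
    summary = get_summary(conn, start, end, ["10.0.0.1", "10.0.0.2"])
    print(summary)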
e9f88f1c43189fe429730c488f4514bf78edea4e
|
mistune/__main__.py
|
mistune/__main__.py
|
import sys
import argparse
from . import (
create_markdown,
__version__ as version
)
def _md(args):
if args.plugin:
plugins = args.plugin
else:
# default plugins
plugins = ['strikethrough', 'footnotes', 'table', 'speedup']
return create_markdown(
escape=args.escape,
hard_wrap=args.hardwrap,
renderer=args.renderer,
plugins=plugins,
)
def _output(text, args):
if args.output:
with open(args.output, 'w') as f:
f.write(text)
else:
print(text)
CMD_HELP = '''Mistune, a sane and fast python markdown parser.
Here are some use cases of the command line tool:
$ python -m mistune -m "Hi **Markdown**"
<p>Hi <strong>Markdown</strong></p>
$ python -m mistune -f README.md
<p>...
'''
def cli():
parser = argparse.ArgumentParser(
prog='python -m mistune',
description=CMD_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument(
'-m', '--message',
        help='the markdown message to convert',
)
parser.add_argument(
'-f', '--file',
help='the markdown file to convert',
)
parser.add_argument(
'-p', '--plugin',
metavar='NAME',
action='extend',
nargs='+',
        help='specify a plugin to use',
)
parser.add_argument(
'--escape',
action='store_true',
help='turn on escape option',
)
parser.add_argument(
'--hardwrap',
action='store_true',
help='turn on hardwrap option',
)
parser.add_argument(
'-o', '--output',
help='write the rendered result into file',
)
parser.add_argument(
'-r', '--renderer',
default='html',
help='specify the output renderer',
)
parser.add_argument('--version', action='version', version='mistune ' + version)
args = parser.parse_args()
if not args.message and not args.file:
print('You MUST specify a message or file')
return sys.exit(1)
if args.message:
md = _md(args)
text = md(args.message)
_output(text, args)
elif args.file:
md = _md(args)
text = md.read(args.file)[0]
_output(text, args)
if __name__ == '__main__':
cli()
|
Add python -m mistune cli
|
Add python -m mistune cli
|
Python
|
bsd-3-clause
|
lepture/mistune
|
Add python -m mistune cli
|
import sys
import argparse
from . import (
create_markdown,
__version__ as version
)
def _md(args):
if args.plugin:
plugins = args.plugin
else:
# default plugins
plugins = ['strikethrough', 'footnotes', 'table', 'speedup']
return create_markdown(
escape=args.escape,
hard_wrap=args.hardwrap,
renderer=args.renderer,
plugins=plugins,
)
def _output(text, args):
if args.output:
with open(args.output, 'w') as f:
f.write(text)
else:
print(text)
CMD_HELP = '''Mistune, a sane and fast python markdown parser.
Here are some use cases of the command line tool:
$ python -m mistune -m "Hi **Markdown**"
<p>Hi <strong>Markdown</strong></p>
$ python -m mistune -f README.md
<p>...
'''
def cli():
parser = argparse.ArgumentParser(
prog='python -m mistune',
description=CMD_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument(
'-m', '--message',
        help='the markdown message to convert',
)
parser.add_argument(
'-f', '--file',
help='the markdown file to convert',
)
parser.add_argument(
'-p', '--plugin',
metavar='NAME',
action='extend',
nargs='+',
        help='specify a plugin to use',
)
parser.add_argument(
'--escape',
action='store_true',
help='turn on escape option',
)
parser.add_argument(
'--hardwrap',
action='store_true',
help='turn on hardwrap option',
)
parser.add_argument(
'-o', '--output',
help='write the rendered result into file',
)
parser.add_argument(
'-r', '--renderer',
default='html',
help='specify the output renderer',
)
parser.add_argument('--version', action='version', version='mistune ' + version)
args = parser.parse_args()
if not args.message and not args.file:
print('You MUST specify a message or file')
return sys.exit(1)
if args.message:
md = _md(args)
text = md(args.message)
_output(text, args)
elif args.file:
md = _md(args)
text = md.read(args.file)[0]
_output(text, args)
if __name__ == '__main__':
cli()
|
<commit_before><commit_msg>Add python -m mistune cli<commit_after>
|
import sys
import argparse
from . import (
create_markdown,
__version__ as version
)
def _md(args):
if args.plugin:
plugins = args.plugin
else:
# default plugins
plugins = ['strikethrough', 'footnotes', 'table', 'speedup']
return create_markdown(
escape=args.escape,
hard_wrap=args.hardwrap,
renderer=args.renderer,
plugins=plugins,
)
def _output(text, args):
if args.output:
with open(args.output, 'w') as f:
f.write(text)
else:
print(text)
CMD_HELP = '''Mistune, a sane and fast python markdown parser.
Here are some use cases of the command line tool:
$ python -m mistune -m "Hi **Markdown**"
<p>Hi <strong>Markdown</strong></p>
$ python -m mistune -f README.md
<p>...
'''
def cli():
parser = argparse.ArgumentParser(
prog='python -m mistune',
description=CMD_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument(
'-m', '--message',
        help='the markdown message to convert',
)
parser.add_argument(
'-f', '--file',
help='the markdown file to convert',
)
parser.add_argument(
'-p', '--plugin',
metavar='NAME',
action='extend',
nargs='+',
        help='specify a plugin to use',
)
parser.add_argument(
'--escape',
action='store_true',
help='turn on escape option',
)
parser.add_argument(
'--hardwrap',
action='store_true',
help='turn on hardwrap option',
)
parser.add_argument(
'-o', '--output',
help='write the rendered result into file',
)
parser.add_argument(
'-r', '--renderer',
default='html',
help='specify the output renderer',
)
parser.add_argument('--version', action='version', version='mistune ' + version)
args = parser.parse_args()
if not args.message and not args.file:
print('You MUST specify a message or file')
return sys.exit(1)
if args.message:
md = _md(args)
text = md(args.message)
_output(text, args)
elif args.file:
md = _md(args)
text = md.read(args.file)[0]
_output(text, args)
if __name__ == '__main__':
cli()
|
Add python -m mistune cliimport sys
import argparse
from . import (
create_markdown,
__version__ as version
)
def _md(args):
if args.plugin:
plugins = args.plugin
else:
# default plugins
plugins = ['strikethrough', 'footnotes', 'table', 'speedup']
return create_markdown(
escape=args.escape,
hard_wrap=args.hardwrap,
renderer=args.renderer,
plugins=plugins,
)
def _output(text, args):
if args.output:
with open(args.output, 'w') as f:
f.write(text)
else:
print(text)
CMD_HELP = '''Mistune, a sane and fast python markdown parser.
Here are some use cases of the command line tool:
$ python -m mistune -m "Hi **Markdown**"
<p>Hi <strong>Markdown</strong></p>
$ python -m mistune -f README.md
<p>...
'''
def cli():
parser = argparse.ArgumentParser(
prog='python -m mistune',
description=CMD_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument(
'-m', '--message',
        help='the markdown message to convert',
)
parser.add_argument(
'-f', '--file',
help='the markdown file to convert',
)
parser.add_argument(
'-p', '--plugin',
metavar='NAME',
action='extend',
nargs='+',
        help='specify a plugin to use',
)
parser.add_argument(
'--escape',
action='store_true',
help='turn on escape option',
)
parser.add_argument(
'--hardwrap',
action='store_true',
help='turn on hardwrap option',
)
parser.add_argument(
'-o', '--output',
help='write the rendered result into file',
)
parser.add_argument(
'-r', '--renderer',
default='html',
help='specify the output renderer',
)
parser.add_argument('--version', action='version', version='mistune ' + version)
args = parser.parse_args()
if not args.message and not args.file:
print('You MUST specify a message or file')
return sys.exit(1)
if args.message:
md = _md(args)
text = md(args.message)
_output(text, args)
elif args.file:
md = _md(args)
text = md.read(args.file)[0]
_output(text, args)
if __name__ == '__main__':
cli()
|
<commit_before><commit_msg>Add python -m mistune cli<commit_after>import sys
import argparse
from . import (
create_markdown,
__version__ as version
)
def _md(args):
if args.plugin:
plugins = args.plugin
else:
# default plugins
plugins = ['strikethrough', 'footnotes', 'table', 'speedup']
return create_markdown(
escape=args.escape,
hard_wrap=args.hardwrap,
renderer=args.renderer,
plugins=plugins,
)
def _output(text, args):
if args.output:
with open(args.output, 'w') as f:
f.write(text)
else:
print(text)
CMD_HELP = '''Mistune, a sane and fast python markdown parser.
Here are some use cases of the command line tool:
$ python -m mistune -m "Hi **Markdown**"
<p>Hi <strong>Markdown</strong></p>
$ python -m mistune -f README.md
<p>...
'''
def cli():
parser = argparse.ArgumentParser(
prog='python -m mistune',
description=CMD_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument(
'-m', '--message',
        help='the markdown message to convert',
)
parser.add_argument(
'-f', '--file',
help='the markdown file to convert',
)
parser.add_argument(
'-p', '--plugin',
metavar='NAME',
action='extend',
nargs='+',
        help='specify a plugin to use',
)
parser.add_argument(
'--escape',
action='store_true',
help='turn on escape option',
)
parser.add_argument(
'--hardwrap',
action='store_true',
help='turn on hardwrap option',
)
parser.add_argument(
'-o', '--output',
help='write the rendered result into file',
)
parser.add_argument(
'-r', '--renderer',
default='html',
help='specify the output renderer',
)
parser.add_argument('--version', action='version', version='mistune ' + version)
args = parser.parse_args()
if not args.message and not args.file:
print('You MUST specify a message or file')
return sys.exit(1)
if args.message:
md = _md(args)
text = md(args.message)
_output(text, args)
elif args.file:
md = _md(args)
text = md.read(args.file)[0]
_output(text, args)
if __name__ == '__main__':
cli()
|
|
3e345bc4a17cf53c40ef51cd2ae1732744be7e60
|
cardbox/deck_forms.py
|
cardbox/deck_forms.py
|
from django.forms import ModelForm
from django.forms.widgets import Textarea, TextInput
from deck_model import Deck
class DeckForm(ModelForm):
"""The basic form for updating or editing decks"""
class Meta:
model = Deck
fields = ('title', 'description')
widgets = {
'title': TextInput(attrs={'class': "form-control"}),
'description': Textarea(attrs={'class': "form-control"})
}
|
Add custom form for editing and updating of decks
|
Add custom form for editing and updating of decks
|
Python
|
mit
|
DummyDivision/Tsune,DummyDivision/Tsune,DummyDivision/Tsune
|
Add custom form for editing and updating of decks
|
from django.forms import ModelForm
from django.forms.widgets import Textarea, TextInput
from deck_model import Deck
class DeckForm(ModelForm):
"""The basic form for updating or editing decks"""
class Meta:
model = Deck
fields = ('title', 'description')
widgets = {
'title': TextInput(attrs={'class': "form-control"}),
'description': Textarea(attrs={'class': "form-control"})
}
|
<commit_before><commit_msg>Add custom form for editing and updating of decks<commit_after>
|
from django.forms import ModelForm
from django.forms.widgets import Textarea, TextInput
from deck_model import Deck
class DeckForm(ModelForm):
"""The basic form for updating or editing decks"""
class Meta:
model = Deck
fields = ('title', 'description')
widgets = {
'title': TextInput(attrs={'class': "form-control"}),
'description': Textarea(attrs={'class': "form-control"})
}
|
Add custom form for editing and updating of decksfrom django.forms import ModelForm
from django.forms.widgets import Textarea, TextInput
from deck_model import Deck
class DeckForm(ModelForm):
"""The basic form for updating or editing decks"""
class Meta:
model = Deck
fields = ('title', 'description')
widgets = {
'title': TextInput(attrs={'class': "form-control"}),
'description': Textarea(attrs={'class': "form-control"})
}
|
<commit_before><commit_msg>Add custom form for editing and updating of decks<commit_after>from django.forms import ModelForm
from django.forms.widgets import Textarea, TextInput
from deck_model import Deck
class DeckForm(ModelForm):
"""The basic form for updating or editing decks"""
class Meta:
model = Deck
fields = ('title', 'description')
widgets = {
'title': TextInput(attrs={'class': "form-control"}),
'description': Textarea(attrs={'class': "form-control"})
}
|
|
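A minimal sketch of how the DeckForm in the record above might be wired into a Django view; the view function, URL name, and template are illustrative assumptions (import paths follow the flat style used in the commit), not part of the original commit:

from django.shortcuts import render, redirect

from deck_forms import DeckForm
from deck_model import Deck


def edit_deck(request, deck_id):
    # Bind the form to an existing deck so title/description are pre-filled.
    deck = Deck.objects.get(pk=deck_id)
    if request.method == 'POST':
        form = DeckForm(request.POST, instance=deck)
        if form.is_valid():
            form.save()
            return redirect('deck_detail', deck_id=deck.pk)
    else:
        form = DeckForm(instance=deck)
    return render(request, 'deck_edit.html', {'form': form})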
fa8b40b8ebc088f087ff76c36068fea67dae0824
|
rnacentral/portal/management/commands/update_coordinate_names.py
|
rnacentral/portal/management/commands/update_coordinate_names.py
|
"""
Copyright [2009-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.core.management.base import BaseCommand
from django.db import connection
class Command(BaseCommand):
"""
Usage:
python manage.py update_coordinate_names
"""
def handle(self, *args, **options):
"""
Main function, called by django.
"""
sql = """
UPDATE rnc_coordinates a
SET
name = b.ensembl_name,
primary_start = local_start,
primary_end = local_end
FROM ensembl_insdc_mapping b
WHERE
a.primary_accession = b.insdc
AND a.name IS NULL
"""
with connection.cursor() as cursor:
cursor.execute(sql)
|
Add management command for updating genome coordinate names using Ensembl-INSDC mapping
|
Add management command for updating genome coordinate names using Ensembl-INSDC mapping
|
Python
|
apache-2.0
|
RNAcentral/rnacentral-webcode,RNAcentral/rnacentral-webcode,RNAcentral/rnacentral-webcode,RNAcentral/rnacentral-webcode
|
Add management command for updating genome coordinate names using Ensembl-INSDC mapping
|
"""
Copyright [2009-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.core.management.base import BaseCommand
from django.db import connection
class Command(BaseCommand):
"""
Usage:
python manage.py update_coordinate_names
"""
def handle(self, *args, **options):
"""
Main function, called by django.
"""
sql = """
UPDATE rnc_coordinates a
SET
name = b.ensembl_name,
primary_start = local_start,
primary_end = local_end
FROM ensembl_insdc_mapping b
WHERE
a.primary_accession = b.insdc
AND a.name IS NULL
"""
with connection.cursor() as cursor:
cursor.execute(sql)
|
<commit_before><commit_msg>Add management command for updating genome coordinate names using Ensembl-INSDC mapping<commit_after>
|
"""
Copyright [2009-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.core.management.base import BaseCommand
from django.db import connection
class Command(BaseCommand):
"""
Usage:
python manage.py update_coordinate_names
"""
def handle(self, *args, **options):
"""
Main function, called by django.
"""
sql = """
UPDATE rnc_coordinates a
SET
name = b.ensembl_name,
primary_start = local_start,
primary_end = local_end
FROM ensembl_insdc_mapping b
WHERE
a.primary_accession = b.insdc
AND a.name IS NULL
"""
with connection.cursor() as cursor:
cursor.execute(sql)
|
Add management command for updating genome coordinate names using Ensembl-INSDC mapping"""
Copyright [2009-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.core.management.base import BaseCommand
from django.db import connection
class Command(BaseCommand):
"""
Usage:
python manage.py update_coordinate_names
"""
def handle(self, *args, **options):
"""
Main function, called by django.
"""
sql = """
UPDATE rnc_coordinates a
SET
name = b.ensembl_name,
primary_start = local_start,
primary_end = local_end
FROM ensembl_insdc_mapping b
WHERE
a.primary_accession = b.insdc
AND a.name IS NULL
"""
with connection.cursor() as cursor:
cursor.execute(sql)
|
<commit_before><commit_msg>Add management command for updating genome coordinate names using Ensembl-INSDC mapping<commit_after>"""
Copyright [2009-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.core.management.base import BaseCommand
from django.db import connection
class Command(BaseCommand):
"""
Usage:
python manage.py update_coordinate_names
"""
def handle(self, *args, **options):
"""
Main function, called by django.
"""
sql = """
UPDATE rnc_coordinates a
SET
name = b.ensembl_name,
primary_start = local_start,
primary_end = local_end
FROM ensembl_insdc_mapping b
WHERE
a.primary_accession = b.insdc
AND a.name IS NULL
"""
with connection.cursor() as cursor:
cursor.execute(sql)
|
|
6418807dbba9fb946ffeb05aee525c51c2e71f75
|
tests/fixtures.py
|
tests/fixtures.py
|
"""Defines fixtures that can be used to streamline tests and / or define dependencies"""
from random import randint
import pytest
import hug
@pytest.fixture
def hug_api():
"""Defines a dependency for and then includes a uniquely identified hug API for a single test case"""
return hug.API('fake_api_{}'.format(randint(0, 1000000)))
|
Fix fixture, add doc string
|
Fix fixture, add doc string
|
Python
|
mit
|
MuhammadAlkarouri/hug,timothycrosley/hug,timothycrosley/hug,timothycrosley/hug,MuhammadAlkarouri/hug,MuhammadAlkarouri/hug
|
Fix fixture, add doc string
|
"""Defines fixtures that can be used to streamline tests and / or define dependencies"""
from random import randint
import pytest
import hug
@pytest.fixture
def hug_api():
"""Defines a dependency for and then includes a uniquely identified hug API for a single test case"""
return hug.API('fake_api_{}'.format(randint(0, 1000000)))
|
<commit_before><commit_msg>Fix fixture, add doc string<commit_after>
|
"""Defines fixtures that can be used to streamline tests and / or define dependencies"""
from random import randint
import pytest
import hug
@pytest.fixture
def hug_api():
"""Defines a dependency for and then includes a uniquely identified hug API for a single test case"""
return hug.API('fake_api_{}'.format(randint(0, 1000000)))
|
Fix fixture, add doc string"""Defines fixtures that can be used to streamline tests and / or define dependencies"""
from random import randint
import pytest
import hug
@pytest.fixture
def hug_api():
"""Defines a dependency for and then includes a uniquely identified hug API for a single test case"""
return hug.API('fake_api_{}'.format(randint(0, 1000000)))
|
<commit_before><commit_msg>Fix fixture, add doc string<commit_after>"""Defines fixtures that can be used to streamline tests and / or define dependencies"""
from random import randint
import pytest
import hug
@pytest.fixture
def hug_api():
"""Defines a dependency for and then includes a uniquely identified hug API for a single test case"""
return hug.API('fake_api_{}'.format(randint(0, 1000000)))
|
|
47c1dfd602281c56973de0d8afe64b923eb29592
|
test/test_env.py
|
test/test_env.py
|
from _ebcf_alexa import env
from unittest.mock import patch, call
import pytest
@pytest.yield_fixture
def mock_now():
with patch.object(env, 'now') as now:
yield now
@patch('datetime.datetime')
def test_now_is_utc(fake_datetime):
assert env.now()
assert fake_datetime.now.call_args == call(tz=env.UTC)
def test_local_now(mock_now):
assert env.localnow() == mock_now.return_value.astimezone.return_value
assert mock_now.return_value.astimezone.call_args == call(env.TZ)
def test_date(mock_now):
assert env.date() == mock_now.return_value.date.return_value
def test_local_date():
with patch.object(env, 'localnow') as ln:
assert env.localdate() == ln.return_value.date.return_value
|
Add unit tests for env module.
|
Add unit tests for env module.
These are pretty simple - just tests wiring to datetime and pytz
|
Python
|
mit
|
dmotles/ebcf-alexa
|
Add unit tests for env module.
These are pretty simple - just tests wiring to datetime and pytz
|
from _ebcf_alexa import env
from unittest.mock import patch, call
import pytest
@pytest.yield_fixture
def mock_now():
with patch.object(env, 'now') as now:
yield now
@patch('datetime.datetime')
def test_now_is_utc(fake_datetime):
assert env.now()
assert fake_datetime.now.call_args == call(tz=env.UTC)
def test_local_now(mock_now):
assert env.localnow() == mock_now.return_value.astimezone.return_value
assert mock_now.return_value.astimezone.call_args == call(env.TZ)
def test_date(mock_now):
assert env.date() == mock_now.return_value.date.return_value
def test_local_date():
with patch.object(env, 'localnow') as ln:
assert env.localdate() == ln.return_value.date.return_value
|
<commit_before><commit_msg>Add unit tests for env module.
These are pretty simple - just tests wiring to datetime and pytz<commit_after>
|
from _ebcf_alexa import env
from unittest.mock import patch, call
import pytest
@pytest.yield_fixture
def mock_now():
with patch.object(env, 'now') as now:
yield now
@patch('datetime.datetime')
def test_now_is_utc(fake_datetime):
assert env.now()
assert fake_datetime.now.call_args == call(tz=env.UTC)
def test_local_now(mock_now):
assert env.localnow() == mock_now.return_value.astimezone.return_value
assert mock_now.return_value.astimezone.call_args == call(env.TZ)
def test_date(mock_now):
assert env.date() == mock_now.return_value.date.return_value
def test_local_date():
with patch.object(env, 'localnow') as ln:
assert env.localdate() == ln.return_value.date.return_value
|
Add unit tests for env module.
These are pretty simple - just tests wiring to datetime and pytzfrom _ebcf_alexa import env
from unittest.mock import patch, call
import pytest
@pytest.yield_fixture
def mock_now():
with patch.object(env, 'now') as now:
yield now
@patch('datetime.datetime')
def test_now_is_utc(fake_datetime):
assert env.now()
assert fake_datetime.now.call_args == call(tz=env.UTC)
def test_local_now(mock_now):
assert env.localnow() == mock_now.return_value.astimezone.return_value
assert mock_now.return_value.astimezone.call_args == call(env.TZ)
def test_date(mock_now):
assert env.date() == mock_now.return_value.date.return_value
def test_local_date():
with patch.object(env, 'localnow') as ln:
assert env.localdate() == ln.return_value.date.return_value
|
<commit_before><commit_msg>Add unit tests for env module.
These are pretty simple - just tests wiring to datetime and pytz<commit_after>from _ebcf_alexa import env
from unittest.mock import patch, call
import pytest
@pytest.yield_fixture
def mock_now():
with patch.object(env, 'now') as now:
yield now
@patch('datetime.datetime')
def test_now_is_utc(fake_datetime):
assert env.now()
assert fake_datetime.now.call_args == call(tz=env.UTC)
def test_local_now(mock_now):
assert env.localnow() == mock_now.return_value.astimezone.return_value
assert mock_now.return_value.astimezone.call_args == call(env.TZ)
def test_date(mock_now):
assert env.date() == mock_now.return_value.date.return_value
def test_local_date():
with patch.object(env, 'localnow') as ln:
assert env.localdate() == ln.return_value.date.return_value
|
|
fc9e9b4b9bdee1bd1f6b112c90772702cf60ad2d
|
test_converge.py
|
test_converge.py
|
#!/usr/bin/env python
import functools
import logging
import unittest
import converge
import converge.processes
from converge.framework import datastore
from converge.framework import scenario
def with_scenarios(TestCase):
loader = unittest.defaultTestLoader
def create_test_func(generic_test, params):
@functools.wraps(generic_test)
def test_func(testcase, *args, **kwargs):
for key, value in params.items():
setattr(testcase, key, value)
return generic_test(testcase, *args, **kwargs)
return test_func
for test_name in loader.getTestCaseNames(TestCase):
base_test = getattr(TestCase, test_name)
for scenario in getattr(TestCase, 'scenarios', []):
name, parameters = scenario
test_func = create_test_func(base_test, parameters)
setattr(TestCase, '%s(%s)' % (test_name, name), test_func)
delattr(TestCase, test_name)
TestCase.scenarios = None
return TestCase
@with_scenarios
class ScenarioTest(unittest.TestCase):
scenarios = [(name, {'name': name, 'path': path})
for name, path in scenario.list_all('scenarios')]
def setUp(self):
super(ScenarioTest, self).setUp()
self.procs = converge.processes.Processes()
def tearDown(self):
datastore.Datastore.clear_all()
super(ScenarioTest, self).tearDown()
def test_scenario(self):
runner = scenario.Scenario(self.name, self.path)
runner(self.procs.event_loop,
**converge.scenario_globals(self.procs, self))
if __name__ == '__main__':
#converge.setup_log(logging.root)
unittest.main()
|
Add a unittest-based test suite for scenarios
|
Add a unittest-based test suite for scenarios
This can be run directly using:
./test_converge.py
or (equivalently):
python -m unittest test_converge
or with a test runner such as nose:
nosetests test_converge
|
Python
|
apache-2.0
|
zaneb/heat-convergence-prototype
|
Add a unittest-based test suite for scenarios
This can be run directly using:
./test_converge.py
or (equivalently):
python -m unittest test_converge
or with a test runner such as nose:
nosetests test_converge
|
#!/usr/bin/env python
import functools
import logging
import unittest
import converge
import converge.processes
from converge.framework import datastore
from converge.framework import scenario
def with_scenarios(TestCase):
loader = unittest.defaultTestLoader
def create_test_func(generic_test, params):
@functools.wraps(generic_test)
def test_func(testcase, *args, **kwargs):
for key, value in params.items():
setattr(testcase, key, value)
return generic_test(testcase, *args, **kwargs)
return test_func
for test_name in loader.getTestCaseNames(TestCase):
base_test = getattr(TestCase, test_name)
for scenario in getattr(TestCase, 'scenarios', []):
name, parameters = scenario
test_func = create_test_func(base_test, parameters)
setattr(TestCase, '%s(%s)' % (test_name, name), test_func)
delattr(TestCase, test_name)
TestCase.scenarios = None
return TestCase
@with_scenarios
class ScenarioTest(unittest.TestCase):
scenarios = [(name, {'name': name, 'path': path})
for name, path in scenario.list_all('scenarios')]
def setUp(self):
super(ScenarioTest, self).setUp()
self.procs = converge.processes.Processes()
def tearDown(self):
datastore.Datastore.clear_all()
super(ScenarioTest, self).tearDown()
def test_scenario(self):
runner = scenario.Scenario(self.name, self.path)
runner(self.procs.event_loop,
**converge.scenario_globals(self.procs, self))
if __name__ == '__main__':
#converge.setup_log(logging.root)
unittest.main()
|
<commit_before><commit_msg>Add a unittest-based test suite for scenarios
This can be run directly using:
./test_converge.py
or (equivalently):
python -m unittest test_converge
or with a test runner such as nose:
nosetests test_converge<commit_after>
|
#!/usr/bin/env python
import functools
import logging
import unittest
import converge
import converge.processes
from converge.framework import datastore
from converge.framework import scenario
def with_scenarios(TestCase):
loader = unittest.defaultTestLoader
def create_test_func(generic_test, params):
@functools.wraps(generic_test)
def test_func(testcase, *args, **kwargs):
for key, value in params.items():
setattr(testcase, key, value)
return generic_test(testcase, *args, **kwargs)
return test_func
for test_name in loader.getTestCaseNames(TestCase):
base_test = getattr(TestCase, test_name)
for scenario in getattr(TestCase, 'scenarios', []):
name, parameters = scenario
test_func = create_test_func(base_test, parameters)
setattr(TestCase, '%s(%s)' % (test_name, name), test_func)
delattr(TestCase, test_name)
TestCase.scenarios = None
return TestCase
@with_scenarios
class ScenarioTest(unittest.TestCase):
scenarios = [(name, {'name': name, 'path': path})
for name, path in scenario.list_all('scenarios')]
def setUp(self):
super(ScenarioTest, self).setUp()
self.procs = converge.processes.Processes()
def tearDown(self):
datastore.Datastore.clear_all()
super(ScenarioTest, self).tearDown()
def test_scenario(self):
runner = scenario.Scenario(self.name, self.path)
runner(self.procs.event_loop,
**converge.scenario_globals(self.procs, self))
if __name__ == '__main__':
#converge.setup_log(logging.root)
unittest.main()
|
Add a unittest-based test suite for scenarios
This can be run directly using:
./test_converge.py
or (equivalently):
python -m unittest test_converge
or with a test runner such as nose:
nosetests test_converge#!/usr/bin/env python
import functools
import logging
import unittest
import converge
import converge.processes
from converge.framework import datastore
from converge.framework import scenario
def with_scenarios(TestCase):
loader = unittest.defaultTestLoader
def create_test_func(generic_test, params):
@functools.wraps(generic_test)
def test_func(testcase, *args, **kwargs):
for key, value in params.items():
setattr(testcase, key, value)
return generic_test(testcase, *args, **kwargs)
return test_func
for test_name in loader.getTestCaseNames(TestCase):
base_test = getattr(TestCase, test_name)
for scenario in getattr(TestCase, 'scenarios', []):
name, parameters = scenario
test_func = create_test_func(base_test, parameters)
setattr(TestCase, '%s(%s)' % (test_name, name), test_func)
delattr(TestCase, test_name)
TestCase.scenarios = None
return TestCase
@with_scenarios
class ScenarioTest(unittest.TestCase):
scenarios = [(name, {'name': name, 'path': path})
for name, path in scenario.list_all('scenarios')]
def setUp(self):
super(ScenarioTest, self).setUp()
self.procs = converge.processes.Processes()
def tearDown(self):
datastore.Datastore.clear_all()
super(ScenarioTest, self).tearDown()
def test_scenario(self):
runner = scenario.Scenario(self.name, self.path)
runner(self.procs.event_loop,
**converge.scenario_globals(self.procs, self))
if __name__ == '__main__':
#converge.setup_log(logging.root)
unittest.main()
|
<commit_before><commit_msg>Add a unittest-based test suite for scenarios
This can be run directly using:
./test_converge.py
or (equivalently):
python -m unittest test_converge
or with a test runner such as nose:
nosetests test_converge<commit_after>#!/usr/bin/env python
import functools
import logging
import unittest
import converge
import converge.processes
from converge.framework import datastore
from converge.framework import scenario
def with_scenarios(TestCase):
loader = unittest.defaultTestLoader
def create_test_func(generic_test, params):
@functools.wraps(generic_test)
def test_func(testcase, *args, **kwargs):
for key, value in params.items():
setattr(testcase, key, value)
return generic_test(testcase, *args, **kwargs)
return test_func
for test_name in loader.getTestCaseNames(TestCase):
base_test = getattr(TestCase, test_name)
for scenario in getattr(TestCase, 'scenarios', []):
name, parameters = scenario
test_func = create_test_func(base_test, parameters)
setattr(TestCase, '%s(%s)' % (test_name, name), test_func)
delattr(TestCase, test_name)
TestCase.scenarios = None
return TestCase
@with_scenarios
class ScenarioTest(unittest.TestCase):
scenarios = [(name, {'name': name, 'path': path})
for name, path in scenario.list_all('scenarios')]
def setUp(self):
super(ScenarioTest, self).setUp()
self.procs = converge.processes.Processes()
def tearDown(self):
datastore.Datastore.clear_all()
super(ScenarioTest, self).tearDown()
def test_scenario(self):
runner = scenario.Scenario(self.name, self.path)
runner(self.procs.event_loop,
**converge.scenario_globals(self.procs, self))
if __name__ == '__main__':
#converge.setup_log(logging.root)
unittest.main()
|
|
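A minimal sketch of what the with_scenarios decorator in the record above does to a TestCase; the class, scenario names, and parameters are invented for illustration and assume with_scenarios from the commit is importable in this scope:

import unittest

@with_scenarios
class AdditionTest(unittest.TestCase):
    # One generated test per scenario: "test_add(small)" and "test_add(large)".
    scenarios = [
        ('small', {'a': 1, 'b': 2, 'expected': 3}),
        ('large', {'a': 10 ** 6, 'b': 1, 'expected': 10 ** 6 + 1}),
    ]

    def test_add(self):
        # The decorator copies each scenario's params onto the instance
        # before running this generic test body.
        self.assertEqual(self.a + self.b, self.expected)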
f65c6f3939c50326eea14bd0dadc77b7c9364dd2
|
gssapi/creds.py
|
gssapi/creds.py
|
from __future__ import absolute_import
from ctypes import cast, byref, c_char_p, c_void_p, string_at
from .gssapi_h import (
GSS_C_NO_CREDENTIAL, GSS_C_NO_NAME, GSS_C_INDEFINITE, GSS_C_NO_OID_SET, GSS_C_BOTH,
GSS_S_COMPLETE,
OM_uint32, gss_cred_id_t,
gss_init_sec_context, gss_accept_sec_context, gss_delete_sec_context, gss_release_buffer,
gss_release_cred, gss_release_name
)
from .error import GSSException, GSSMechException
from .names import MechName
class Credential(object):
"""Wraps a GSS credential handle (gss_cred_id_t)"""
def __init__(self, desired_name=GSS_C_NO_NAME, time_req=GSS_C_INDEFINITE,
desired_mechs=GSS_C_NO_OID_SET, cred_usage=GSS_C_BOTH):
super(Credential, self).__init__()
|
Add a module to deal with credentials
|
Add a module to deal with credentials
|
Python
|
mit
|
sigmaris/python-gssapi,sigmaris/python-gssapi,sigmaris/python-gssapi,sigmaris/python-gssapi
|
Add a module to deal with credentials
|
from __future__ import absolute_import
from ctypes import cast, byref, c_char_p, c_void_p, string_at
from .gssapi_h import (
GSS_C_NO_CREDENTIAL, GSS_C_NO_NAME, GSS_C_INDEFINITE, GSS_C_NO_OID_SET, GSS_C_BOTH,
GSS_S_COMPLETE,
OM_uint32, gss_cred_id_t,
gss_init_sec_context, gss_accept_sec_context, gss_delete_sec_context, gss_release_buffer,
gss_release_cred, gss_release_name
)
from .error import GSSException, GSSMechException
from .names import MechName
class Credential(object):
"""Wraps a GSS credential handle (gss_cred_id_t)"""
def __init__(self, desired_name=GSS_C_NO_NAME, time_req=GSS_C_INDEFINITE,
desired_mechs=GSS_C_NO_OID_SET, cred_usage=GSS_C_BOTH):
super(Credential, self).__init__()
|
<commit_before><commit_msg>Add a module to deal with credentials<commit_after>
|
from __future__ import absolute_import
from ctypes import cast, byref, c_char_p, c_void_p, string_at
from .gssapi_h import (
GSS_C_NO_CREDENTIAL, GSS_C_NO_NAME, GSS_C_INDEFINITE, GSS_C_NO_OID_SET, GSS_C_BOTH,
GSS_S_COMPLETE,
OM_uint32, gss_cred_id_t,
gss_init_sec_context, gss_accept_sec_context, gss_delete_sec_context, gss_release_buffer,
gss_release_cred, gss_release_name
)
from .error import GSSException, GSSMechException
from .names import MechName
class Credential(object):
"""Wraps a GSS credential handle (gss_cred_id_t)"""
def __init__(self, desired_name=GSS_C_NO_NAME, time_req=GSS_C_INDEFINITE,
desired_mechs=GSS_C_NO_OID_SET, cred_usage=GSS_C_BOTH):
super(Credential, self).__init__()
|
Add a module to deal with credentialsfrom __future__ import absolute_import
from ctypes import cast, byref, c_char_p, c_void_p, string_at
from .gssapi_h import (
GSS_C_NO_CREDENTIAL, GSS_C_NO_NAME, GSS_C_INDEFINITE, GSS_C_NO_OID_SET, GSS_C_BOTH,
GSS_S_COMPLETE,
OM_uint32, gss_cred_id_t,
gss_init_sec_context, gss_accept_sec_context, gss_delete_sec_context, gss_release_buffer,
gss_release_cred, gss_release_name
)
from .error import GSSException, GSSMechException
from .names import MechName
class Credential(object):
"""Wraps a GSS credential handle (gss_cred_id_t)"""
def __init__(self, desired_name=GSS_C_NO_NAME, time_req=GSS_C_INDEFINITE,
desired_mechs=GSS_C_NO_OID_SET, cred_usage=GSS_C_BOTH):
super(Credential, self).__init__()
|
<commit_before><commit_msg>Add a module to deal with credentials<commit_after>from __future__ import absolute_import
from ctypes import cast, byref, c_char_p, c_void_p, string_at
from .gssapi_h import (
GSS_C_NO_CREDENTIAL, GSS_C_NO_NAME, GSS_C_INDEFINITE, GSS_C_NO_OID_SET, GSS_C_BOTH,
GSS_S_COMPLETE,
OM_uint32, gss_cred_id_t,
gss_init_sec_context, gss_accept_sec_context, gss_delete_sec_context, gss_release_buffer,
gss_release_cred, gss_release_name
)
from .error import GSSException, GSSMechException
from .names import MechName
class Credential(object):
"""Wraps a GSS credential handle (gss_cred_id_t)"""
def __init__(self, desired_name=GSS_C_NO_NAME, time_req=GSS_C_INDEFINITE,
desired_mechs=GSS_C_NO_OID_SET, cred_usage=GSS_C_BOTH):
super(Credential, self).__init__()
|
|
8e8c14446a0089ee7fa57cfd5520c7d6d6e2711e
|
usercustomize.py
|
usercustomize.py
|
""" Customize Python Interpreter.
Link your user customizing file to this file.
For more info see: https://docs.python.org/3/library/site.html
"Default value is ~/.local/lib/pythonX.Y/site-packages for UNIX and
non-framework Mac OS X builds, ~/Library/Python/X.Y/lib/python/site-packages
for Mac framework builds, and %APPDATA%\Python\PythonXY\site-packages on
Windows."
Sun May 4 18:06:08 CST 2014
"""
import cgitb
cgitb.enable(format='text')
|
Add Python user customization file.
|
Add Python user customization file.
|
Python
|
mit
|
fossilet/dotfiles,fossilet/dotfiles,fossilet/dotfiles
|
Add Python user customization file.
|
""" Customize Python Interpreter.
Link your user customizing file to this file.
For more info see: https://docs.python.org/3/library/site.html
"Default value is ~/.local/lib/pythonX.Y/site-packages for UNIX and
non-framework Mac OS X builds, ~/Library/Python/X.Y/lib/python/site-packages
for Mac framework builds, and %APPDATA%\Python\PythonXY\site-packages on
Windows."
Sun May 4 18:06:08 CST 2014
"""
import cgitb
cgitb.enable(format='text')
|
<commit_before><commit_msg>Add Python user customization file.<commit_after>
|
""" Customize Python Interpreter.
Link your user customizing file to this file.
For more info see: https://docs.python.org/3/library/site.html
"Default value is ~/.local/lib/pythonX.Y/site-packages for UNIX and
non-framework Mac OS X builds, ~/Library/Python/X.Y/lib/python/site-packages
for Mac framework builds, and %APPDATA%\Python\PythonXY\site-packages on
Windows."
Sun May 4 18:06:08 CST 2014
"""
import cgitb
cgitb.enable(format='text')
|
Add Python user customization file.""" Customize Python Interpreter.
Link your user customizing file to this file.
For more info see: https://docs.python.org/3/library/site.html
"Default value is ~/.local/lib/pythonX.Y/site-packages for UNIX and
non-framework Mac OS X builds, ~/Library/Python/X.Y/lib/python/site-packages
for Mac framework builds, and %APPDATA%\Python\PythonXY\site-packages on
Windows."
Sun May 4 18:06:08 CST 2014
"""
import cgitb
cgitb.enable(format='text')
|
<commit_before><commit_msg>Add Python user customization file.<commit_after>""" Customize Python Interpreter.
Link your user customizing file to this file.
For more info see: https://docs.python.org/3/library/site.html
"Default value is ~/.local/lib/pythonX.Y/site-packages for UNIX and
non-framework Mac OS X builds, ~/Library/Python/X.Y/lib/python/site-packages
for Mac framework builds, and %APPDATA%\Python\PythonXY\site-packages on
Windows."
Sun May 4 18:06:08 CST 2014
"""
import cgitb
cgitb.enable(format='text')
|
|
13be4749aef2415ab84ffbd090c5b24d8ed98af5
|
tests/TestBloArticle.py
|
tests/TestBloArticle.py
|
import unittest
from blo.BloArticle import BloArticle
class TestBloArticle(unittest.TestCase):
def setUp(self):
self.blo_article = BloArticle()
def test_failed_load_from_file(self):
file_path = ""
with self.assertRaises(FileNotFoundError):
self.blo_article.load_from_file(file_path)
def test_success_load_from_file(self):
file_path = "./test_article_1.md"
self.assertIsNone(self.blo_article.load_from_file(file_path))
self.assertFalse(self.blo_article._raw_text == "")
def test_convert_to_html(self):
pass
def test_get_digest(self):
pass
|
Add test case of BloArticle class
|
Add test case of BloArticle class
Implement basic test setup, test case of load_from_file method.
|
Python
|
mit
|
10nin/blo,10nin/blo
|
Add test case of BloArticle class
Implement basic test setup, test case of load_from_file method.
|
import unittest
from blo.BloArticle import BloArticle
class TestBloArticle(unittest.TestCase):
def setUp(self):
self.blo_article = BloArticle()
def test_failed_load_from_file(self):
file_path = ""
with self.assertRaises(FileNotFoundError):
self.blo_article.load_from_file(file_path)
def test_success_load_from_file(self):
file_path = "./test_article_1.md"
self.assertIsNone(self.blo_article.load_from_file(file_path))
self.assertFalse(self.blo_article._raw_text == "")
def test_convert_to_html(self):
pass
def test_get_digest(self):
pass
|
<commit_before><commit_msg>Add test case of BloArticle class
Implement basic test setup, test case of load_from_file method.<commit_after>
|
import unittest
from blo.BloArticle import BloArticle
class TestBloArticle(unittest.TestCase):
def setUp(self):
self.blo_article = BloArticle()
def test_failed_load_from_file(self):
file_path = ""
with self.assertRaises(FileNotFoundError):
self.blo_article.load_from_file(file_path)
def test_success_load_from_file(self):
file_path = "./test_article_1.md"
self.assertIsNone(self.blo_article.load_from_file(file_path))
self.assertFalse(self.blo_article._raw_text == "")
def test_convert_to_html(self):
pass
def test_get_digest(self):
pass
|
Add test case of BloArticle class
Implement basic test setup, test case of load_from_file method.import unittest
from blo.BloArticle import BloArticle
class TestBloArticle(unittest.TestCase):
def setUp(self):
self.blo_article = BloArticle()
def test_failed_load_from_file(self):
file_path = ""
with self.assertRaises(FileNotFoundError):
self.blo_article.load_from_file(file_path)
def test_success_load_from_file(self):
file_path = "./test_article_1.md"
self.assertIsNone(self.blo_article.load_from_file(file_path))
self.assertFalse(self.blo_article._raw_text == "")
def test_convert_to_html(self):
pass
def test_get_digest(self):
pass
|
<commit_before><commit_msg>Add test case of BloArticle class
Implement basic test setup, test case of load_from_file method.<commit_after>import unittest
from blo.BloArticle import BloArticle
class TestBloArticle(unittest.TestCase):
def setUp(self):
self.blo_article = BloArticle()
def test_failed_load_from_file(self):
file_path = ""
with self.assertRaises(FileNotFoundError):
self.blo_article.load_from_file(file_path)
def test_success_load_from_file(self):
file_path = "./test_article_1.md"
self.assertIsNone(self.blo_article.load_from_file(file_path))
self.assertFalse(self.blo_article._raw_text == "")
def test_convert_to_html(self):
pass
def test_get_digest(self):
pass
|
|
4442fabf9292efa44a82f420e2d3e807d7d15b04
|
tests/test_cli.py
|
tests/test_cli.py
|
from click.testing import CliRunner
from tinydb import TinyDB, where
from tinydb.storages import MemoryStorage
import pytest
try:
import mock
except ImportError:
from unittest import mock
from passpie import cli
@pytest.fixture
def mock_db(mocker):
credentials = [
{'login': 'foo', 'name': 'bar', 'fullname': 'foo@bar',
'password': '', 'comment': ''},
{'login': 'foa', 'name': 'bazzy', 'fullname': 'foa@bazzy',
'password': '', 'comment': ''},
{'login': 'spam', 'name': 'egg', 'fullname': 'spam@egg',
'password': '', 'comment': ''},
]
database = TinyDB(storage=MemoryStorage)
database.insert_multiple(credentials)
MockDB = mock.MagicMock(return_value=database)
mocker.patch('passpie.cli.Database', MockDB)
return database
def test_cli_search_find_results_by_login_regex(mock_db):
runner = CliRunner()
result = runner.invoke(cli.search, ['fo[oa]'])
assert result.exit_code == 0
assert 'foo' in result.output
assert 'foa' in result.output
assert 'spam' not in result.output
def test_cli_remove_delete_credential_found_by_database(mock_db):
runner = CliRunner()
result = runner.invoke(cli.remove, ['foo@bar'], input='y')
result_print = runner.invoke(cli.cli)
assert result.exit_code == 0
assert 'foo' not in result_print.output
def test_cli_add_credential_to_database(mock_db):
fullname = 'test_user@example'
runner = CliRunner()
result = runner.invoke(cli.add, [fullname, '--random'])
assert result.exit_code == 0
assert mock_db.get(where('fullname') == fullname)
def test_cli_copy_credential_password_to_database(mocker, mock_db):
fullname = 'foo@bar'
password = 's3cr3t'
mocker.patch('passpie.cli.ensure_passphrase')
mock_pyperclip = mocker.patch('passpie.cli.pyperclip')
mocker.patch('passpie.cli.Cryptor.decrypt',
mock.Mock(return_value=password))
runner = CliRunner()
result = runner.invoke(cli.copy, [fullname], input='passphrase')
assert result.exit_code == 0
assert mock_pyperclip.copy.called
mock_pyperclip.copy.assert_called_once_with(password)
|
Add more tests to cli
|
Add more tests to cli
|
Python
|
mit
|
eiginn/passpie,eiginn/passpie,scorphus/passpie,scorphus/passpie,marcwebbie/passpie,marcwebbie/passpie
|
Add more tests to cli
|
from click.testing import CliRunner
from tinydb import TinyDB, where
from tinydb.storages import MemoryStorage
import pytest
try:
import mock
except ImportError:
from unittest import mock
from passpie import cli
@pytest.fixture
def mock_db(mocker):
credentials = [
{'login': 'foo', 'name': 'bar', 'fullname': 'foo@bar',
'password': '', 'comment': ''},
{'login': 'foa', 'name': 'bazzy', 'fullname': 'foa@bazzy',
'password': '', 'comment': ''},
{'login': 'spam', 'name': 'egg', 'fullname': 'spam@egg',
'password': '', 'comment': ''},
]
database = TinyDB(storage=MemoryStorage)
database.insert_multiple(credentials)
MockDB = mock.MagicMock(return_value=database)
mocker.patch('passpie.cli.Database', MockDB)
return database
def test_cli_search_find_results_by_login_regex(mock_db):
runner = CliRunner()
result = runner.invoke(cli.search, ['fo[oa]'])
assert result.exit_code == 0
assert 'foo' in result.output
assert 'foa' in result.output
assert 'spam' not in result.output
def test_cli_remove_delete_credential_found_by_database(mock_db):
runner = CliRunner()
result = runner.invoke(cli.remove, ['foo@bar'], input='y')
result_print = runner.invoke(cli.cli)
assert result.exit_code == 0
assert 'foo' not in result_print.output
def test_cli_add_credential_to_database(mock_db):
fullname = 'test_user@example'
runner = CliRunner()
result = runner.invoke(cli.add, [fullname, '--random'])
assert result.exit_code == 0
assert mock_db.get(where('fullname') == fullname)
def test_cli_copy_credential_password_to_database(mocker, mock_db):
fullname = 'foo@bar'
password = 's3cr3t'
mocker.patch('passpie.cli.ensure_passphrase')
mock_pyperclip = mocker.patch('passpie.cli.pyperclip')
mocker.patch('passpie.cli.Cryptor.decrypt',
mock.Mock(return_value=password))
runner = CliRunner()
result = runner.invoke(cli.copy, [fullname], input='passphrase')
assert result.exit_code == 0
assert mock_pyperclip.copy.called
mock_pyperclip.copy.assert_called_once_with(password)
|
<commit_before><commit_msg>Add more tests to cli<commit_after>
|
from click.testing import CliRunner
from tinydb import TinyDB, where
from tinydb.storages import MemoryStorage
import pytest
try:
import mock
except ImportError:
from unittest import mock
from passpie import cli
@pytest.fixture
def mock_db(mocker):
credentials = [
{'login': 'foo', 'name': 'bar', 'fullname': 'foo@bar',
'password': '', 'comment': ''},
{'login': 'foa', 'name': 'bazzy', 'fullname': 'foa@bazzy',
'password': '', 'comment': ''},
{'login': 'spam', 'name': 'egg', 'fullname': 'spam@egg',
'password': '', 'comment': ''},
]
database = TinyDB(storage=MemoryStorage)
database.insert_multiple(credentials)
MockDB = mock.MagicMock(return_value=database)
mocker.patch('passpie.cli.Database', MockDB)
return database
def test_cli_search_find_results_by_login_regex(mock_db):
runner = CliRunner()
result = runner.invoke(cli.search, ['fo[oa]'])
assert result.exit_code == 0
assert 'foo' in result.output
assert 'foa' in result.output
assert 'spam' not in result.output
def test_cli_remove_delete_credential_found_by_database(mock_db):
runner = CliRunner()
result = runner.invoke(cli.remove, ['foo@bar'], input='y')
result_print = runner.invoke(cli.cli)
assert result.exit_code == 0
assert 'foo' not in result_print.output
def test_cli_add_credential_to_database(mock_db):
fullname = 'test_user@example'
runner = CliRunner()
result = runner.invoke(cli.add, [fullname, '--random'])
assert result.exit_code == 0
assert mock_db.get(where('fullname') == fullname)
def test_cli_copy_credential_password_to_database(mocker, mock_db):
fullname = 'foo@bar'
password = 's3cr3t'
mocker.patch('passpie.cli.ensure_passphrase')
mock_pyperclip = mocker.patch('passpie.cli.pyperclip')
mocker.patch('passpie.cli.Cryptor.decrypt',
mock.Mock(return_value=password))
runner = CliRunner()
result = runner.invoke(cli.copy, [fullname], input='passphrase')
assert result.exit_code == 0
assert mock_pyperclip.copy.called
mock_pyperclip.copy.assert_called_once_with(password)
|
Add more tests to clifrom click.testing import CliRunner
from tinydb import TinyDB, where
from tinydb.storages import MemoryStorage
import pytest
try:
import mock
except ImportError:
from unittest import mock
from passpie import cli
@pytest.fixture
def mock_db(mocker):
credentials = [
{'login': 'foo', 'name': 'bar', 'fullname': 'foo@bar',
'password': '', 'comment': ''},
{'login': 'foa', 'name': 'bazzy', 'fullname': 'foa@bazzy',
'password': '', 'comment': ''},
{'login': 'spam', 'name': 'egg', 'fullname': 'spam@egg',
'password': '', 'comment': ''},
]
database = TinyDB(storage=MemoryStorage)
database.insert_multiple(credentials)
MockDB = mock.MagicMock(return_value=database)
mocker.patch('passpie.cli.Database', MockDB)
return database
def test_cli_search_find_results_by_login_regex(mock_db):
runner = CliRunner()
result = runner.invoke(cli.search, ['fo[oa]'])
assert result.exit_code == 0
assert 'foo' in result.output
assert 'foa' in result.output
assert 'spam' not in result.output
def test_cli_remove_delete_credential_found_by_database(mock_db):
runner = CliRunner()
result = runner.invoke(cli.remove, ['foo@bar'], input='y')
result_print = runner.invoke(cli.cli)
assert result.exit_code == 0
assert 'foo' not in result_print.output
def test_cli_add_credential_to_database(mock_db):
fullname = 'test_user@example'
runner = CliRunner()
result = runner.invoke(cli.add, [fullname, '--random'])
assert result.exit_code == 0
assert mock_db.get(where('fullname') == fullname)
def test_cli_copy_credential_password_to_database(mocker, mock_db):
fullname = 'foo@bar'
password = 's3cr3t'
mocker.patch('passpie.cli.ensure_passphrase')
mock_pyperclip = mocker.patch('passpie.cli.pyperclip')
mocker.patch('passpie.cli.Cryptor.decrypt',
mock.Mock(return_value=password))
runner = CliRunner()
result = runner.invoke(cli.copy, [fullname], input='passphrase')
assert result.exit_code == 0
assert mock_pyperclip.copy.called
mock_pyperclip.copy.assert_called_once_with(password)
|
<commit_before><commit_msg>Add more tests to cli<commit_after>from click.testing import CliRunner
from tinydb import TinyDB, where
from tinydb.storages import MemoryStorage
import pytest
try:
import mock
except ImportError:
from unittest import mock
from passpie import cli
@pytest.fixture
def mock_db(mocker):
credentials = [
{'login': 'foo', 'name': 'bar', 'fullname': 'foo@bar',
'password': '', 'comment': ''},
{'login': 'foa', 'name': 'bazzy', 'fullname': 'foa@bazzy',
'password': '', 'comment': ''},
{'login': 'spam', 'name': 'egg', 'fullname': 'spam@egg',
'password': '', 'comment': ''},
]
database = TinyDB(storage=MemoryStorage)
database.insert_multiple(credentials)
MockDB = mock.MagicMock(return_value=database)
mocker.patch('passpie.cli.Database', MockDB)
return database
def test_cli_search_find_results_by_login_regex(mock_db):
runner = CliRunner()
result = runner.invoke(cli.search, ['fo[oa]'])
assert result.exit_code == 0
assert 'foo' in result.output
assert 'foa' in result.output
assert 'spam' not in result.output
def test_cli_remove_delete_credential_found_by_database(mock_db):
runner = CliRunner()
result = runner.invoke(cli.remove, ['foo@bar'], input='y')
result_print = runner.invoke(cli.cli)
assert result.exit_code == 0
assert 'foo' not in result_print.output
def test_cli_add_credential_to_database(mock_db):
fullname = 'test_user@example'
runner = CliRunner()
result = runner.invoke(cli.add, [fullname, '--random'])
assert result.exit_code == 0
assert mock_db.get(where('fullname') == fullname)
def test_cli_copy_credential_password_to_database(mocker, mock_db):
fullname = 'foo@bar'
password = 's3cr3t'
mocker.patch('passpie.cli.ensure_passphrase')
mock_pyperclip = mocker.patch('passpie.cli.pyperclip')
mocker.patch('passpie.cli.Cryptor.decrypt',
mock.Mock(return_value=password))
runner = CliRunner()
result = runner.invoke(cli.copy, [fullname], input='passphrase')
assert result.exit_code == 0
assert mock_pyperclip.copy.called
mock_pyperclip.copy.assert_called_once_with(password)
|
|
436719050ada475d840004a49c693d08c3f92034
|
greatbigcrane/project/widgets.py
|
greatbigcrane/project/widgets.py
|
from django.forms.widgets import Textarea
from django.utils.safestring import mark_safe
class LineEditorWidget(Textarea):
class Media:
js = ('js/jquery-1.4.2.min.js' ,'js/jquery.lineeditor.js')
def render(self, name, value, attrs=None):
if isinstance(value,list):
value = "\n".join(value)
rendered = super(LineEditorWidget, self).render(name, value, attrs)
return rendered + mark_safe(u'''<script type="text/javascript" defer="defer">
$(function() {
$("#id_%s").lineeditor();
}
);
</script>''' % (name))
|
Add a widget for line editors.
|
Add a widget for line editors.
|
Python
|
apache-2.0
|
pnomolos/greatbigcrane,pnomolos/greatbigcrane
|
Add a widget for line editors.
|
from django.forms.widgets import Textarea
from django.utils.safestring import mark_safe
class LineEditorWidget(Textarea):
class Media:
js = ('js/jquery-1.4.2.min.js' ,'js/jquery.lineeditor.js')
def render(self, name, value, attrs=None):
if isinstance(value,list):
value = "\n".join(value)
rendered = super(LineEditorWidget, self).render(name, value, attrs)
return rendered + mark_safe(u'''<script type="text/javascript" defer="defer">
$(function() {
$("#id_%s").lineeditor();
}
);
</script>''' % (name))
|
<commit_before><commit_msg>Add a widget for line editors.<commit_after>
|
from django.forms.widgets import Textarea
from django.utils.safestring import mark_safe
class LineEditorWidget(Textarea):
class Media:
js = ('js/jquery-1.4.2.min.js' ,'js/jquery.lineeditor.js')
def render(self, name, value, attrs=None):
if isinstance(value,list):
value = "\n".join(value)
rendered = super(LineEditorWidget, self).render(name, value, attrs)
return rendered + mark_safe(u'''<script type="text/javascript" defer="defer">
$(function() {
$("#id_%s").lineeditor();
}
);
</script>''' % (name))
|
Add a widget for line editors.from django.forms.widgets import Textarea
from django.utils.safestring import mark_safe
class LineEditorWidget(Textarea):
class Media:
js = ('js/jquery-1.4.2.min.js' ,'js/jquery.lineeditor.js')
def render(self, name, value, attrs=None):
if isinstance(value,list):
value = "\n".join(value)
rendered = super(LineEditorWidget, self).render(name, value, attrs)
return rendered + mark_safe(u'''<script type="text/javascript" defer="defer">
$(function() {
$("#id_%s").lineeditor();
}
);
</script>''' % (name))
|
<commit_before><commit_msg>Add a widget for line editors.<commit_after>from django.forms.widgets import Textarea
from django.utils.safestring import mark_safe
class LineEditorWidget(Textarea):
class Media:
js = ('js/jquery-1.4.2.min.js' ,'js/jquery.lineeditor.js')
def render(self, name, value, attrs=None):
if isinstance(value,list):
value = "\n".join(value)
rendered = super(LineEditorWidget, self).render(name, value, attrs)
return rendered + mark_safe(u'''<script type="text/javascript" defer="defer">
$(function() {
$("#id_%s").lineeditor();
}
);
</script>''' % (name))
|
|
6f9d02510ad861bf8ae5ad8f1ae335a4e565756d
|
tests/test_io.py
|
tests/test_io.py
|
from unittest.mock import MagicMock, patch
import pytest
from isort import io
class TestFile:
def test_read(self, tmpdir):
test_file_content = """# -*- encoding: ascii -*-
import ☺
"""
test_file = tmpdir.join("file.py")
test_file.write(test_file_content)
# able to read file even with incorrect encoding, if it is UTF-8 compatible
assert io.File.read(test_file).contents == test_file_content
# unless the locale is also ASCII
with pytest.raises(io.UnableToDetermineEncoding):
with patch("locale.getpreferredencoding", lambda value: "ascii"):
io.File.read(test_file).contents
|
Add initial unit tests for io module
|
Add initial unit tests for io module
|
Python
|
mit
|
PyCQA/isort,PyCQA/isort
|
Add initial unit tests for io module
|
from unittest.mock import MagicMock, patch
import pytest
from isort import io
class TestFile:
def test_read(self, tmpdir):
test_file_content = """# -*- encoding: ascii -*-
import ☺
"""
test_file = tmpdir.join("file.py")
test_file.write(test_file_content)
# able to read file even with incorrect encoding, if it is UTF-8 compatible
assert io.File.read(test_file).contents == test_file_content
# unless the locale is also ASCII
with pytest.raises(io.UnableToDetermineEncoding):
with patch("locale.getpreferredencoding", lambda value: "ascii"):
io.File.read(test_file).contents
|
<commit_before><commit_msg>Add initial unit tests for io module<commit_after>
|
from unittest.mock import MagicMock, patch
import pytest
from isort import io
class TestFile:
def test_read(self, tmpdir):
test_file_content = """# -*- encoding: ascii -*-
import ☺
"""
test_file = tmpdir.join("file.py")
test_file.write(test_file_content)
# able to read file even with incorrect encoding, if it is UTF-8 compatible
assert io.File.read(test_file).contents == test_file_content
# unless the locale is also ASCII
with pytest.raises(io.UnableToDetermineEncoding):
with patch("locale.getpreferredencoding", lambda value: "ascii"):
io.File.read(test_file).contents
|
Add initial unit tests for io modulefrom unittest.mock import MagicMock, patch
import pytest
from isort import io
class TestFile:
def test_read(self, tmpdir):
test_file_content = """# -*- encoding: ascii -*-
import ☺
"""
test_file = tmpdir.join("file.py")
test_file.write(test_file_content)
# able to read file even with incorrect encoding, if it is UTF-8 compatible
assert io.File.read(test_file).contents == test_file_content
# unless the locale is also ASCII
with pytest.raises(io.UnableToDetermineEncoding):
with patch("locale.getpreferredencoding", lambda value: "ascii"):
io.File.read(test_file).contents
|
<commit_before><commit_msg>Add initial unit tests for io module<commit_after>from unittest.mock import MagicMock, patch
import pytest
from isort import io
class TestFile:
def test_read(self, tmpdir):
test_file_content = """# -*- encoding: ascii -*-
import ☺
"""
test_file = tmpdir.join("file.py")
test_file.write(test_file_content)
# able to read file even with incorrect encoding, if it is UTF-8 compatible
assert io.File.read(test_file).contents == test_file_content
# unless the locale is also ASCII
with pytest.raises(io.UnableToDetermineEncoding):
with patch("locale.getpreferredencoding", lambda value: "ascii"):
io.File.read(test_file).contents
|
|
0f32a1e193a0064e5d5313cdc205d15cea71f1e7
|
tests/graphics/hipposcalability.py
|
tests/graphics/hipposcalability.py
|
import hippo
import gtk
import gobject
from sugar.graphics.icon import CanvasIcon
from sugar.graphics.roundbox import CanvasRoundBox
import common
test = common.Test()
canvas = hippo.Canvas()
test.pack_start(canvas)
canvas.show()
scrollbars = hippo.CanvasScrollbars()
canvas.set_root(scrollbars)
box = hippo.CanvasBox(padding=10, spacing=10)
scrollbars.set_root(box)
def idle_cb():
global countdown
for i in range(0, 100):
entry = CanvasRoundBox(padding=10, spacing=10)
for j in range(0, 3):
icon = CanvasIcon(icon_name='go-left')
entry.append(icon)
for j in range(0, 2):
text = hippo.CanvasText(text='Text %s %s' % (countdown, j))
entry.append(text)
box.append(entry)
countdown -= 1
return countdown > 0
countdown = 1000
gobject.idle_add(idle_cb)
test.show()
if __name__ == "__main__":
common.main(test)
|
Test for a long hippo scrolling view.
|
Test for a long hippo scrolling view.
|
Python
|
lgpl-2.1
|
sugarlabs/sugar-toolkit-gtk3,puneetgkaur/backup_sugar_sugartoolkit,tchx84/debian-pkg-sugar-toolkit-gtk3,puneetgkaur/backup_sugar_sugartoolkit,sugarlabs/sugar-toolkit,samdroid-apps/sugar-toolkit-gtk3,tchx84/sugar-toolkit-gtk3,i5o/sugar-toolkit-gtk3,sugarlabs/sugar-toolkit-gtk3,manuq/sugar-toolkit-gtk3,tchx84/debian-pkg-sugar-toolkit,sugarlabs/sugar-toolkit-gtk3,godiard/sugar-toolkit-gtk3,godiard/sugar-toolkit-gtk3,quozl/sugar-toolkit-gtk3,tchx84/sugar-toolkit-gtk3,quozl/sugar-toolkit-gtk3,puneetgkaur/backup_sugar_sugartoolkit,tchx84/sugar-toolkit-gtk3,tchx84/debian-pkg-sugar-toolkit-gtk3,ceibal-tatu/sugar-toolkit-gtk3,tchx84/debian-pkg-sugar-toolkit,gusDuarte/sugar-toolkit-gtk3,samdroid-apps/sugar-toolkit-gtk3,godiard/sugar-toolkit-gtk3,Daksh/sugar-toolkit-gtk3,samdroid-apps/sugar-toolkit-gtk3,sugarlabs/sugar-toolkit,sugarlabs/sugar-toolkit,gusDuarte/sugar-toolkit-gtk3,Daksh/sugar-toolkit-gtk3,ceibal-tatu/sugar-toolkit-gtk3,quozl/sugar-toolkit-gtk3,ceibal-tatu/sugar-toolkit,manuq/sugar-toolkit-gtk3,puneetgkaur/sugar-toolkit-gtk3,ceibal-tatu/sugar-toolkit-gtk3,sugarlabs/sugar-toolkit,ceibal-tatu/sugar-toolkit,i5o/sugar-toolkit-gtk3,i5o/sugar-toolkit-gtk3,tchx84/debian-pkg-sugar-toolkit-gtk3,Daksh/sugar-toolkit-gtk3,samdroid-apps/sugar-toolkit-gtk3,quozl/sugar-toolkit-gtk3,gusDuarte/sugar-toolkit-gtk3,ceibal-tatu/sugar-toolkit,puneetgkaur/sugar-toolkit-gtk3,puneetgkaur/sugar-toolkit-gtk3,tchx84/debian-pkg-sugar-toolkit,i5o/sugar-toolkit-gtk3,manuq/sugar-toolkit-gtk3,gusDuarte/sugar-toolkit-gtk3
|
Test for a long hippo scrolling view.
|
import hippo
import gtk
import gobject
from sugar.graphics.icon import CanvasIcon
from sugar.graphics.roundbox import CanvasRoundBox
import common
test = common.Test()
canvas = hippo.Canvas()
test.pack_start(canvas)
canvas.show()
scrollbars = hippo.CanvasScrollbars()
canvas.set_root(scrollbars)
box = hippo.CanvasBox(padding=10, spacing=10)
scrollbars.set_root(box)
def idle_cb():
global countdown
for i in range(0, 100):
entry = CanvasRoundBox(padding=10, spacing=10)
for j in range(0, 3):
icon = CanvasIcon(icon_name='go-left')
entry.append(icon)
for j in range(0, 2):
text = hippo.CanvasText(text='Text %s %s' % (countdown, j))
entry.append(text)
box.append(entry)
countdown -= 1
return countdown > 0
countdown = 1000
gobject.idle_add(idle_cb)
test.show()
if __name__ == "__main__":
common.main(test)
|
<commit_before><commit_msg>Test for a long hippo scrolling view.<commit_after>
|
import hippo
import gtk
import gobject
from sugar.graphics.icon import CanvasIcon
from sugar.graphics.roundbox import CanvasRoundBox
import common
test = common.Test()
canvas = hippo.Canvas()
test.pack_start(canvas)
canvas.show()
scrollbars = hippo.CanvasScrollbars()
canvas.set_root(scrollbars)
box = hippo.CanvasBox(padding=10, spacing=10)
scrollbars.set_root(box)
def idle_cb():
global countdown
for i in range(0, 100):
entry = CanvasRoundBox(padding=10, spacing=10)
for j in range(0, 3):
icon = CanvasIcon(icon_name='go-left')
entry.append(icon)
for j in range(0, 2):
text = hippo.CanvasText(text='Text %s %s' % (countdown, j))
entry.append(text)
box.append(entry)
countdown -= 1
return countdown > 0
countdown = 1000
gobject.idle_add(idle_cb)
test.show()
if __name__ == "__main__":
common.main(test)
|
Test for a long hippo scrolling view.import hippo
import gtk
import gobject
from sugar.graphics.icon import CanvasIcon
from sugar.graphics.roundbox import CanvasRoundBox
import common
test = common.Test()
canvas = hippo.Canvas()
test.pack_start(canvas)
canvas.show()
scrollbars = hippo.CanvasScrollbars()
canvas.set_root(scrollbars)
box = hippo.CanvasBox(padding=10, spacing=10)
scrollbars.set_root(box)
def idle_cb():
global countdown
for i in range(0, 100):
entry = CanvasRoundBox(padding=10, spacing=10)
for j in range(0, 3):
icon = CanvasIcon(icon_name='go-left')
entry.append(icon)
for j in range(0, 2):
text = hippo.CanvasText(text='Text %s %s' % (countdown, j))
entry.append(text)
box.append(entry)
countdown -= 1
return countdown > 0
countdown = 1000
gobject.idle_add(idle_cb)
test.show()
if __name__ == "__main__":
common.main(test)
|
<commit_before><commit_msg>Test for a long hippo scrolling view.<commit_after>import hippo
import gtk
import gobject
from sugar.graphics.icon import CanvasIcon
from sugar.graphics.roundbox import CanvasRoundBox
import common
test = common.Test()
canvas = hippo.Canvas()
test.pack_start(canvas)
canvas.show()
scrollbars = hippo.CanvasScrollbars()
canvas.set_root(scrollbars)
box = hippo.CanvasBox(padding=10, spacing=10)
scrollbars.set_root(box)
def idle_cb():
global countdown
for i in range(0, 100):
entry = CanvasRoundBox(padding=10, spacing=10)
for j in range(0, 3):
icon = CanvasIcon(icon_name='go-left')
entry.append(icon)
for j in range(0, 2):
text = hippo.CanvasText(text='Text %s %s' % (countdown, j))
entry.append(text)
box.append(entry)
countdown -= 1
return countdown > 0
countdown = 1000
gobject.idle_add(idle_cb)
test.show()
if __name__ == "__main__":
common.main(test)
|
|
288a59cfeade739260a1f76cf632d735677022be
|
src/test_scores_db.py
|
src/test_scores_db.py
|
import scores_db
import mock
import redis_client
import control
from twisted.internet import defer
def test_set_scores():
fake_connection = mock.Mock()
fake_connection.set = mock.Mock()
with mock.patch('redis_client.connection', fake_connection):
scores_db.scores.set_match_score(1, 'ABC', 12)
fake_connection.set.assert_called_once_with('comp:scores:1:ABC:game', 12)
|
Add the start of some tests for the scoring stuff.
|
Add the start of some tests for the scoring stuff.
Change-Id: I7eb7b9176eb9a259d8fac0a99610f4bb3b938a51
|
Python
|
mit
|
prophile/compd,prophile/compd
|
Add the start of some tests for the scoring stuff.
Change-Id: I7eb7b9176eb9a259d8fac0a99610f4bb3b938a51
|
import scores_db
import mock
import redis_client
import control
from twisted.internet import defer
def test_set_scores():
fake_connection = mock.Mock()
fake_connection.set = mock.Mock()
with mock.patch('redis_client.connection', fake_connection):
scores_db.scores.set_match_score(1, 'ABC', 12)
fake_connection.set.assert_called_once_with('comp:scores:1:ABC:game', 12)
|
<commit_before><commit_msg>Add the start of some tests for the scoring stuff.
Change-Id: I7eb7b9176eb9a259d8fac0a99610f4bb3b938a51<commit_after>
|
import scores_db
import mock
import redis_client
import control
from twisted.internet import defer
def test_set_scores():
fake_connection = mock.Mock()
fake_connection.set = mock.Mock()
with mock.patch('redis_client.connection', fake_connection):
scores_db.scores.set_match_score(1, 'ABC', 12)
fake_connection.set.assert_called_once_with('comp:scores:1:ABC:game', 12)
|
Add the start of some tests for the scoring stuff.
Change-Id: I7eb7b9176eb9a259d8fac0a99610f4bb3b938a51import scores_db
import mock
import redis_client
import control
from twisted.internet import defer
def test_set_scores():
fake_connection = mock.Mock()
fake_connection.set = mock.Mock()
with mock.patch('redis_client.connection', fake_connection):
scores_db.scores.set_match_score(1, 'ABC', 12)
fake_connection.set.assert_called_once_with('comp:scores:1:ABC:game', 12)
|
<commit_before><commit_msg>Add the start of some tests for the scoring stuff.
Change-Id: I7eb7b9176eb9a259d8fac0a99610f4bb3b938a51<commit_after>import scores_db
import mock
import redis_client
import control
from twisted.internet import defer
def test_set_scores():
fake_connection = mock.Mock()
fake_connection.set = mock.Mock()
with mock.patch('redis_client.connection', fake_connection):
scores_db.scores.set_match_score(1, 'ABC', 12)
fake_connection.set.assert_called_once_with('comp:scores:1:ABC:game', 12)
|
|
4fd051fd6d048e64f574097a3ca314111087ee45
|
theanets/convolution.py
|
theanets/convolution.py
|
# -*- coding: utf-8 -*-
'''This module contains convolution network structures.'''
from . import feedforward
class Regressor(feedforward.Regressor):
'''A regressor attempts to produce a target output.
A convolutional regression model takes the following inputs during training:
- ``x``: A three-dimensional array of input data. Each element of axis 0 of
``x`` is expected to be one moment in time. Each element of axis 1 of
``x`` holds a single sample from a batch of data. Each element of axis 2
of ``x`` represents the measurements of a particular input variable across
all times and all data items.
- ``targets``: A two-dimensional array of target output data. Each element
of axis 0 of ``targets`` is expected to be one moment in time. Each
element of axis 1 of ``targets`` holds a single sample from a batch of
data. Each element of axis 2 of ``targets`` represents the measurements of
a particular output variable across all times and all data items.
'''
def __init__(self, layers=(), loss='mse', weighted=False):
super(feedforward.Regressor, self).__init__(
layers=layers, loss=loss, in_dim=4, out_dim=2, weighted=weighted)
class Classifier(feedforward.Classifier):
'''A classifier attempts to match a 1-hot target output.
Unlike a feedforward classifier, where the target labels are provided as a
single vector, a recurrent classifier requires a vector of target labels for
each time step in the input data. So a recurrent classifier model requires
the following inputs for training:
- ``x``: A three-dimensional array of input data. Each element of axis 0 of
``x`` is expected to be one moment in time. Each element of axis 1 of
``x`` holds a single sample in a batch of data. Each element of axis 2 of
``x`` represents the measurements of a particular input variable across
all times and all data items in a batch.
- ``labels``: A one-dimensional vector of integer target labels. Each
element of ``labels`` is expected to be the class index for a single batch
item.
'''
def __init__(self, layers=(), loss='xe', weighted=False):
super(feedforward.Classifier, self).__init__(
layers=layers, loss=loss, in_dim=4, out_dim=1, weighted=weighted)
|
# -*- coding: utf-8 -*-
'''This module contains convolution network structures.'''
from . import feedforward
class Regressor(feedforward.Regressor):
'''A regressor attempts to produce a target output.'''
INPUT_NDIM = 4
'''Number of dimensions for holding input data arrays.'''
class Classifier(feedforward.Classifier):
'''A classifier attempts to match a 1-hot target output.'''
INPUT_NDIM = 4
'''Number of dimensions for holding input data arrays.'''
|
Fix up conv models to match current master.
|
Fix up conv models to match current master.
|
Python
|
mit
|
chrinide/theanets,lmjohns3/theanets
|
# -*- coding: utf-8 -*-
'''This module contains convolution network structures.'''
from . import feedforward
class Regressor(feedforward.Regressor):
'''A regressor attempts to produce a target output.
A convolutional regression model takes the following inputs during training:
- ``x``: A three-dimensional array of input data. Each element of axis 0 of
``x`` is expected to be one moment in time. Each element of axis 1 of
``x`` holds a single sample from a batch of data. Each element of axis 2
of ``x`` represents the measurements of a particular input variable across
all times and all data items.
- ``targets``: A two-dimensional array of target output data. Each element
of axis 0 of ``targets`` is expected to be one moment in time. Each
element of axis 1 of ``targets`` holds a single sample from a batch of
data. Each element of axis 2 of ``targets`` represents the measurements of
a particular output variable across all times and all data items.
'''
def __init__(self, layers=(), loss='mse', weighted=False):
super(feedforward.Regressor, self).__init__(
layers=layers, loss=loss, in_dim=4, out_dim=2, weighted=weighted)
class Classifier(feedforward.Classifier):
'''A classifier attempts to match a 1-hot target output.
Unlike a feedforward classifier, where the target labels are provided as a
single vector, a recurrent classifier requires a vector of target labels for
each time step in the input data. So a recurrent classifier model requires
the following inputs for training:
- ``x``: A three-dimensional array of input data. Each element of axis 0 of
``x`` is expected to be one moment in time. Each element of axis 1 of
``x`` holds a single sample in a batch of data. Each element of axis 2 of
``x`` represents the measurements of a particular input variable across
all times and all data items in a batch.
- ``labels``: A one-dimensional vector of integer target labels. Each
element of ``labels`` is expected to be the class index for a single batch
item.
'''
def __init__(self, layers=(), loss='xe', weighted=False):
super(feedforward.Classifier, self).__init__(
layers=layers, loss=loss, in_dim=4, out_dim=1, weighted=weighted)
Fix up conv models to match current master.
|
# -*- coding: utf-8 -*-
'''This module contains convolution network structures.'''
from . import feedforward
class Regressor(feedforward.Regressor):
'''A regressor attempts to produce a target output.'''
INPUT_NDIM = 4
'''Number of dimensions for holding input data arrays.'''
class Classifier(feedforward.Classifier):
'''A classifier attempts to match a 1-hot target output.'''
INPUT_NDIM = 4
'''Number of dimensions for holding input data arrays.'''
|
<commit_before># -*- coding: utf-8 -*-
'''This module contains convolution network structures.'''
from . import feedforward
class Regressor(feedforward.Regressor):
'''A regressor attempts to produce a target output.
A convolutional regression model takes the following inputs during training:
- ``x``: A three-dimensional array of input data. Each element of axis 0 of
``x`` is expected to be one moment in time. Each element of axis 1 of
``x`` holds a single sample from a batch of data. Each element of axis 2
of ``x`` represents the measurements of a particular input variable across
all times and all data items.
- ``targets``: A two-dimensional array of target output data. Each element
of axis 0 of ``targets`` is expected to be one moment in time. Each
element of axis 1 of ``targets`` holds a single sample from a batch of
data. Each element of axis 2 of ``targets`` represents the measurements of
a particular output variable across all times and all data items.
'''
def __init__(self, layers=(), loss='mse', weighted=False):
super(feedforward.Regressor, self).__init__(
layers=layers, loss=loss, in_dim=4, out_dim=2, weighted=weighted)
class Classifier(feedforward.Classifier):
'''A classifier attempts to match a 1-hot target output.
Unlike a feedforward classifier, where the target labels are provided as a
single vector, a recurrent classifier requires a vector of target labels for
each time step in the input data. So a recurrent classifier model requires
the following inputs for training:
- ``x``: A three-dimensional array of input data. Each element of axis 0 of
``x`` is expected to be one moment in time. Each element of axis 1 of
``x`` holds a single sample in a batch of data. Each element of axis 2 of
``x`` represents the measurements of a particular input variable across
all times and all data items in a batch.
- ``labels``: A one-dimensional vector of integer target labels. Each
element of ``labels`` is expected to be the class index for a single batch
item.
'''
def __init__(self, layers=(), loss='xe', weighted=False):
super(feedforward.Classifier, self).__init__(
layers=layers, loss=loss, in_dim=4, out_dim=1, weighted=weighted)
<commit_msg>Fix up conv models to match current master.<commit_after>
|
# -*- coding: utf-8 -*-
'''This module contains convolution network structures.'''
from . import feedforward
class Regressor(feedforward.Regressor):
'''A regressor attempts to produce a target output.'''
INPUT_NDIM = 4
'''Number of dimensions for holding input data arrays.'''
class Classifier(feedforward.Classifier):
'''A classifier attempts to match a 1-hot target output.'''
INPUT_NDIM = 4
'''Number of dimensions for holding input data arrays.'''
|
# -*- coding: utf-8 -*-
'''This module contains convolution network structures.'''
from . import feedforward
class Regressor(feedforward.Regressor):
'''A regressor attempts to produce a target output.
A convolutional regression model takes the following inputs during training:
- ``x``: A three-dimensional array of input data. Each element of axis 0 of
``x`` is expected to be one moment in time. Each element of axis 1 of
``x`` holds a single sample from a batch of data. Each element of axis 2
of ``x`` represents the measurements of a particular input variable across
all times and all data items.
- ``targets``: A two-dimensional array of target output data. Each element
of axis 0 of ``targets`` is expected to be one moment in time. Each
element of axis 1 of ``targets`` holds a single sample from a batch of
data. Each element of axis 2 of ``targets`` represents the measurements of
a particular output variable across all times and all data items.
'''
def __init__(self, layers=(), loss='mse', weighted=False):
super(feedforward.Regressor, self).__init__(
layers=layers, loss=loss, in_dim=4, out_dim=2, weighted=weighted)
class Classifier(feedforward.Classifier):
'''A classifier attempts to match a 1-hot target output.
Unlike a feedforward classifier, where the target labels are provided as a
single vector, a recurrent classifier requires a vector of target labels for
each time step in the input data. So a recurrent classifier model requires
the following inputs for training:
- ``x``: A three-dimensional array of input data. Each element of axis 0 of
``x`` is expected to be one moment in time. Each element of axis 1 of
``x`` holds a single sample in a batch of data. Each element of axis 2 of
``x`` represents the measurements of a particular input variable across
all times and all data items in a batch.
- ``labels``: A one-dimensional vector of integer target labels. Each
element of ``labels`` is expected to be the class index for a single batch
item.
'''
def __init__(self, layers=(), loss='xe', weighted=False):
super(feedforward.Classifier, self).__init__(
layers=layers, loss=loss, in_dim=4, out_dim=1, weighted=weighted)
Fix up conv models to match current master.# -*- coding: utf-8 -*-
'''This module contains convolution network structures.'''
from . import feedforward
class Regressor(feedforward.Regressor):
'''A regressor attempts to produce a target output.'''
INPUT_NDIM = 4
'''Number of dimensions for holding input data arrays.'''
class Classifier(feedforward.Classifier):
'''A classifier attempts to match a 1-hot target output.'''
INPUT_NDIM = 4
'''Number of dimensions for holding input data arrays.'''
|
<commit_before># -*- coding: utf-8 -*-
'''This module contains convolution network structures.'''
from . import feedforward
class Regressor(feedforward.Regressor):
'''A regressor attempts to produce a target output.
A convolutional regression model takes the following inputs during training:
- ``x``: A three-dimensional array of input data. Each element of axis 0 of
``x`` is expected to be one moment in time. Each element of axis 1 of
``x`` holds a single sample from a batch of data. Each element of axis 2
of ``x`` represents the measurements of a particular input variable across
all times and all data items.
- ``targets``: A two-dimensional array of target output data. Each element
of axis 0 of ``targets`` is expected to be one moment in time. Each
element of axis 1 of ``targets`` holds a single sample from a batch of
data. Each element of axis 2 of ``targets`` represents the measurements of
a particular output variable across all times and all data items.
'''
def __init__(self, layers=(), loss='mse', weighted=False):
super(feedforward.Regressor, self).__init__(
layers=layers, loss=loss, in_dim=4, out_dim=2, weighted=weighted)
class Classifier(feedforward.Classifier):
'''A classifier attempts to match a 1-hot target output.
Unlike a feedforward classifier, where the target labels are provided as a
single vector, a recurrent classifier requires a vector of target labels for
each time step in the input data. So a recurrent classifier model requires
the following inputs for training:
- ``x``: A three-dimensional array of input data. Each element of axis 0 of
``x`` is expected to be one moment in time. Each element of axis 1 of
``x`` holds a single sample in a batch of data. Each element of axis 2 of
``x`` represents the measurements of a particular input variable across
all times and all data items in a batch.
- ``labels``: A one-dimensional vector of integer target labels. Each
element of ``labels`` is expected to be the class index for a single batch
item.
'''
def __init__(self, layers=(), loss='xe', weighted=False):
super(feedforward.Classifier, self).__init__(
layers=layers, loss=loss, in_dim=4, out_dim=1, weighted=weighted)
<commit_msg>Fix up conv models to match current master.<commit_after># -*- coding: utf-8 -*-
'''This module contains convolution network structures.'''
from . import feedforward
class Regressor(feedforward.Regressor):
'''A regressor attempts to produce a target output.'''
INPUT_NDIM = 4
'''Number of dimensions for holding input data arrays.'''
class Classifier(feedforward.Classifier):
'''A classifier attempts to match a 1-hot target output.'''
INPUT_NDIM = 4
'''Number of dimensions for holding input data arrays.'''
|
457ba730a6541ab27ce8cbe06cbb6bfe246bba74
|
towel/api/decorators.py
|
towel/api/decorators.py
|
from functools import wraps
import httplib
from django.contrib.auth import authenticate
from django.utils.cache import patch_vary_headers
def http_basic_auth(func):
@wraps(func)
@vary_on_headers('Authorization')
def _decorator(request, *args, **kwargs):
if 'HTTP_AUTHORIZATION' in request.META:
meth, _, auth = request.META['HTTP_AUTHORIZATION'].partition(' ')
if meth.lower() == 'basic':
try:
auth = auth.strip().decode('base64')
except Exception:
return HttpResponse('Invalid authorization header',
status=httplib.BAD_REQUEST)
username, sep, password = auth.partition(':')
user = authenticate(username=username, password=password)
if user:
request.user = user
return func(request, *args, **kwargs)
return _decorator
|
Add a simple HTTP Basic Authentication decorator for the API
|
Add a simple HTTP Basic Authentication decorator for the API
|
Python
|
bsd-3-clause
|
matthiask/towel,matthiask/towel,matthiask/towel,matthiask/towel
|
Add a simple HTTP Basic Authentication decorator for the API
|
from functools import wraps
import httplib
from django.contrib.auth import authenticate
from django.utils.cache import patch_vary_headers
def http_basic_auth(func):
@wraps(func)
@vary_on_headers('Authorization')
def _decorator(request, *args, **kwargs):
if 'HTTP_AUTHORIZATION' in request.META:
meth, _, auth = request.META['HTTP_AUTHORIZATION'].partition(' ')
if meth.lower() == 'basic':
try:
auth = auth.strip().decode('base64')
except Exception:
return HttpResponse('Invalid authorization header',
status=httplib.BAD_REQUEST)
username, sep, password = auth.partition(':')
user = authenticate(username=username, password=password)
if user:
request.user = user
return func(request, *args, **kwargs)
return _decorator
|
<commit_before><commit_msg>Add a simple HTTP Basic Authentication decorator for the API<commit_after>
|
from functools import wraps
import httplib
from django.contrib.auth import authenticate
from django.utils.cache import patch_vary_headers
def http_basic_auth(func):
@wraps(func)
@vary_on_headers('Authorization')
def _decorator(request, *args, **kwargs):
if 'HTTP_AUTHORIZATION' in request.META:
meth, _, auth = request.META['HTTP_AUTHORIZATION'].partition(' ')
if meth.lower() == 'basic':
try:
auth = auth.strip().decode('base64')
except Exception:
return HttpResponse('Invalid authorization header',
status=httplib.BAD_REQUEST)
username, sep, password = auth.partition(':')
user = authenticate(username=username, password=password)
if user:
request.user = user
return func(request, *args, **kwargs)
return _decorator
|
Add a simple HTTP Basic Authentication decorator for the APIfrom functools import wraps
import httplib
from django.contrib.auth import authenticate
from django.utils.cache import patch_vary_headers
def http_basic_auth(func):
@wraps(func)
@vary_on_headers('Authorization')
def _decorator(request, *args, **kwargs):
if 'HTTP_AUTHORIZATION' in request.META:
meth, _, auth = request.META['HTTP_AUTHORIZATION'].partition(' ')
if meth.lower() == 'basic':
try:
auth = auth.strip().decode('base64')
except Exception:
return HttpResponse('Invalid authorization header',
status=httplib.BAD_REQUEST)
username, sep, password = auth.partition(':')
user = authenticate(username=username, password=password)
if user:
request.user = user
return func(request, *args, **kwargs)
return _decorator
|
<commit_before><commit_msg>Add a simple HTTP Basic Authentication decorator for the API<commit_after>from functools import wraps
import httplib
from django.contrib.auth import authenticate
from django.utils.cache import patch_vary_headers
def http_basic_auth(func):
@wraps(func)
@vary_on_headers('Authorization')
def _decorator(request, *args, **kwargs):
if 'HTTP_AUTHORIZATION' in request.META:
meth, _, auth = request.META['HTTP_AUTHORIZATION'].partition(' ')
if meth.lower() == 'basic':
try:
auth = auth.strip().decode('base64')
except Exception:
return HttpResponse('Invalid authorization header',
status=httplib.BAD_REQUEST)
username, sep, password = auth.partition(':')
user = authenticate(username=username, password=password)
if user:
request.user = user
return func(request, *args, **kwargs)
return _decorator
|
|
b1316b3db89fbee6e6c1ad807e2e36b8b4dd1874
|
Attic/act-fixup.py
|
Attic/act-fixup.py
|
from parliament.models import *
from django.db import transaction, reset_queries
if True:
with transaction.atomic():
print("Documents %d" % Document.objects.count())
for idx, doc in enumerate(Document.objects.all()):
if idx % 1000 == 0:
reset_queries()
print(idx)
doc.keywords_changed = True
doc.save(update_fields=['origin_id'])
if True:
with transaction.atomic():
print("Signatures %d" % DocumentSignature.objects.count())
for idx, sign in enumerate(DocumentSignature.objects.all()):
if idx % 1000 == 0:
reset_queries()
print(idx)
sign.keywords_changed = True
sign.save(update_fields=['doc'])
if True:
with transaction.atomic():
print("Statements %d" % Statement.objects.count())
for idx, st in enumerate(Statement.objects.all()):
if idx % 1000 == 0:
reset_queries()
print(idx)
st.keywords_changed = True
st.save(update_fields=['item'])
|
Add a script to fix garbled activities
|
Add a script to fix garbled activities
|
Python
|
agpl-3.0
|
kansanmuisti/kamu,kansanmuisti/kamu,kansanmuisti/kamu,kansanmuisti/kamu,kansanmuisti/kamu
|
Add a script to fix garbled activities
|
from parliament.models import *
from django.db import transaction, reset_queries
if True:
with transaction.atomic():
print("Documents %d" % Document.objects.count())
for idx, doc in enumerate(Document.objects.all()):
if idx % 1000 == 0:
reset_queries()
print(idx)
doc.keywords_changed = True
doc.save(update_fields=['origin_id'])
if True:
with transaction.atomic():
print("Signatures %d" % DocumentSignature.objects.count())
for idx, sign in enumerate(DocumentSignature.objects.all()):
if idx % 1000 == 0:
reset_queries()
print(idx)
sign.keywords_changed = True
sign.save(update_fields=['doc'])
if True:
with transaction.atomic():
print("Statements %d" % Statement.objects.count())
for idx, st in enumerate(Statement.objects.all()):
if idx % 1000 == 0:
reset_queries()
print(idx)
st.keywords_changed = True
st.save(update_fields=['item'])
|
<commit_before><commit_msg>Add a script to fix garbled activities<commit_after>
|
from parliament.models import *
from django.db import transaction, reset_queries
if True:
with transaction.atomic():
print("Documents %d" % Document.objects.count())
for idx, doc in enumerate(Document.objects.all()):
if idx % 1000 == 0:
reset_queries()
print(idx)
doc.keywords_changed = True
doc.save(update_fields=['origin_id'])
if True:
with transaction.atomic():
print("Signatures %d" % DocumentSignature.objects.count())
for idx, sign in enumerate(DocumentSignature.objects.all()):
if idx % 1000 == 0:
reset_queries()
print(idx)
sign.keywords_changed = True
sign.save(update_fields=['doc'])
if True:
with transaction.atomic():
print("Statements %d" % Statement.objects.count())
for idx, st in enumerate(Statement.objects.all()):
if idx % 1000 == 0:
reset_queries()
print(idx)
st.keywords_changed = True
st.save(update_fields=['item'])
|
Add a script to fix garbled activitiesfrom parliament.models import *
from django.db import transaction, reset_queries
if True:
with transaction.atomic():
print("Documents %d" % Document.objects.count())
for idx, doc in enumerate(Document.objects.all()):
if idx % 1000 == 0:
reset_queries()
print(idx)
doc.keywords_changed = True
doc.save(update_fields=['origin_id'])
if True:
with transaction.atomic():
print("Signatures %d" % DocumentSignature.objects.count())
for idx, sign in enumerate(DocumentSignature.objects.all()):
if idx % 1000 == 0:
reset_queries()
print(idx)
sign.keywords_changed = True
sign.save(update_fields=['doc'])
if True:
with transaction.atomic():
print("Statements %d" % Statement.objects.count())
for idx, st in enumerate(Statement.objects.all()):
if idx % 1000 == 0:
reset_queries()
print(idx)
st.keywords_changed = True
st.save(update_fields=['item'])
|
<commit_before><commit_msg>Add a script to fix garbled activities<commit_after>from parliament.models import *
from django.db import transaction, reset_queries
if True:
with transaction.atomic():
print("Documents %d" % Document.objects.count())
for idx, doc in enumerate(Document.objects.all()):
if idx % 1000 == 0:
reset_queries()
print(idx)
doc.keywords_changed = True
doc.save(update_fields=['origin_id'])
if True:
with transaction.atomic():
print("Signatures %d" % DocumentSignature.objects.count())
for idx, sign in enumerate(DocumentSignature.objects.all()):
if idx % 1000 == 0:
reset_queries()
print(idx)
sign.keywords_changed = True
sign.save(update_fields=['doc'])
if True:
with transaction.atomic():
print("Statements %d" % Statement.objects.count())
for idx, st in enumerate(Statement.objects.all()):
if idx % 1000 == 0:
reset_queries()
print(idx)
st.keywords_changed = True
st.save(update_fields=['item'])
|
|
8bf521bf26af93f13043ee6e0d70070d49f76f68
|
Home/cipherMap.py
|
Home/cipherMap.py
|
import operator
def checkio(arr):
index = convertMapToTuples(arr[0])
cube = convertCubeToList(arr[1])
output = ''
dimension = len(arr[0])
for i in range(0, 4):
index.sort(key=operator.itemgetter(0, 1))
for idx in index:
output = '{0}{1}'.format(output, cube[idx[0]][idx[1]])
index = rotateCube(index, dimension)
return output
def convertCubeToList(arr):
result = []
for i in range(len(arr)):
row = []
for j in range(len(arr[i])):
row.append(arr[i][j])
result.append(row)
return result
def convertMapToTuples(arr):
result = []
for i in range(len(arr)):
for j in range(len(arr[i])):
if arr[i][j] != '.':
result.append((i, j))
return result
def rotateCube(tuples, dimension):
result = []
for item in tuples:
result.append((item[1], dimension - item[0] - 1))
return result
if __name__ == "__main__":
assert checkio([[
'X...',
'..X.',
'X..X',
'....'],[
'itdf',
'gdce',
'aton',
'qrdi']
]) == 'icantforgetiddqd', 'Test1'
assert checkio([[
'....',
'X..X',
'.X..',
'...X'],[
'xhwc',
'rsqx',
'xqzz',
'fyzr']
]) == 'rxqrwsfzxqxzhczy', "Test2"
|
Implement the cipher map problem.
|
Implement the cipher map problem.
|
Python
|
mit
|
edwardzhu/checkio-solution
|
Implement the cipher map problem.
|
import operator
def checkio(arr):
index = convertMapToTuples(arr[0])
cube = convertCubeToList(arr[1])
output = ''
dimension = len(arr[0])
for i in range(0, 4):
index.sort(key=operator.itemgetter(0, 1))
for idx in index:
output = '{0}{1}'.format(output, cube[idx[0]][idx[1]])
index = rotateCube(index, dimension)
return output
def convertCubeToList(arr):
result = []
for i in range(len(arr)):
row = []
for j in range(len(arr[i])):
row.append(arr[i][j])
result.append(row)
return result
def convertMapToTuples(arr):
result = []
for i in range(len(arr)):
for j in range(len(arr[i])):
if arr[i][j] != '.':
result.append((i, j))
return result
def rotateCube(tuples, dimension):
result = []
for item in tuples:
result.append((item[1], dimension - item[0] - 1))
return result
if __name__ == "__main__":
assert checkio([[
'X...',
'..X.',
'X..X',
'....'],[
'itdf',
'gdce',
'aton',
'qrdi']
]) == 'icantforgetiddqd', 'Test1'
assert checkio([[
'....',
'X..X',
'.X..',
'...X'],[
'xhwc',
'rsqx',
'xqzz',
'fyzr']
]) == 'rxqrwsfzxqxzhczy', "Test2"
|
<commit_before><commit_msg>Implement the cipher map problem.<commit_after>
|
import operator
def checkio(arr):
index = convertMapToTuples(arr[0])
cube = convertCubeToList(arr[1])
output = ''
dimension = len(arr[0])
for i in range(0, 4):
index.sort(key=operator.itemgetter(0, 1))
for idx in index:
output = '{0}{1}'.format(output, cube[idx[0]][idx[1]])
index = rotateCube(index, dimension)
return output
def convertCubeToList(arr):
result = []
for i in range(len(arr)):
row = []
for j in range(len(arr[i])):
row.append(arr[i][j])
result.append(row)
return result
def convertMapToTuples(arr):
result = []
for i in range(len(arr)):
for j in range(len(arr[i])):
if arr[i][j] != '.':
result.append((i, j))
return result
def rotateCube(tuples, dimension):
result = []
for item in tuples:
result.append((item[1], dimension - item[0] - 1))
return result
if __name__ == "__main__":
assert checkio([[
'X...',
'..X.',
'X..X',
'....'],[
'itdf',
'gdce',
'aton',
'qrdi']
]) == 'icantforgetiddqd', 'Test1'
assert checkio([[
'....',
'X..X',
'.X..',
'...X'],[
'xhwc',
'rsqx',
'xqzz',
'fyzr']
]) == 'rxqrwsfzxqxzhczy', "Test2"
|
Implement the cipher map problem.import operator
def checkio(arr):
index = convertMapToTuples(arr[0])
cube = convertCubeToList(arr[1])
output = ''
dimension = len(arr[0])
for i in range(0, 4):
index.sort(key=operator.itemgetter(0, 1))
for idx in index:
output = '{0}{1}'.format(output, cube[idx[0]][idx[1]])
index = rotateCube(index, dimension)
return output
def convertCubeToList(arr):
result = []
for i in range(len(arr)):
row = []
for j in range(len(arr[i])):
row.append(arr[i][j])
result.append(row)
return result
def convertMapToTuples(arr):
result = []
for i in range(len(arr)):
for j in range(len(arr[i])):
if arr[i][j] != '.':
result.append((i, j))
return result
def rotateCube(tuples, dimension):
result = []
for item in tuples:
result.append((item[1], dimension - item[0] - 1))
return result
if __name__ == "__main__":
assert checkio([[
'X...',
'..X.',
'X..X',
'....'],[
'itdf',
'gdce',
'aton',
'qrdi']
]) == 'icantforgetiddqd', 'Test1'
assert checkio([[
'....',
'X..X',
'.X..',
'...X'],[
'xhwc',
'rsqx',
'xqzz',
'fyzr']
]) == 'rxqrwsfzxqxzhczy', "Test2"
|
<commit_before><commit_msg>Implement the cipher map problem.<commit_after>import operator
def checkio(arr):
index = convertMapToTuples(arr[0])
cube = convertCubeToList(arr[1])
output = ''
dimension = len(arr[0])
for i in range(0, 4):
index.sort(key=operator.itemgetter(0, 1))
for idx in index:
output = '{0}{1}'.format(output, cube[idx[0]][idx[1]])
index = rotateCube(index, dimension)
return output
def convertCubeToList(arr):
result = []
for i in range(len(arr)):
row = []
for j in range(len(arr[i])):
row.append(arr[i][j])
result.append(row)
return result
def convertMapToTuples(arr):
result = []
for i in range(len(arr)):
for j in range(len(arr[i])):
if arr[i][j] != '.':
result.append((i, j))
return result
def rotateCube(tuples, dimension):
result = []
for item in tuples:
result.append((item[1], dimension - item[0] - 1))
return result
if __name__ == "__main__":
assert checkio([[
'X...',
'..X.',
'X..X',
'....'],[
'itdf',
'gdce',
'aton',
'qrdi']
]) == 'icantforgetiddqd', 'Test1'
assert checkio([[
'....',
'X..X',
'.X..',
'...X'],[
'xhwc',
'rsqx',
'xqzz',
'fyzr']
]) == 'rxqrwsfzxqxzhczy', "Test2"
|
|
e583d977c7089f21841890b7eb50c824db153202
|
tests/functional/test_unicode.py
|
tests/functional/test_unicode.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import pytest
from textx.metamodel import metamodel_from_str
def test_unicode_grammar_from_string():
"""
Test grammar with unicode char given in grammar string.
"""
grammar = """
First:
'first' a = Second
;
Second:
"Ω"|"±"|"♪"
;
"""
metamodel = metamodel_from_str(grammar)
assert metamodel
def test_unicode_grammar_from_file():
"""
"""
|
Test for unicode characters in grammars.
|
Test for unicode characters in grammars.
|
Python
|
mit
|
igordejanovic/textX,igordejanovic/textX,igordejanovic/textX
|
Test for unicode characters in grammars.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import pytest
from textx.metamodel import metamodel_from_str
def test_unicode_grammar_from_string():
"""
Test grammar with unicode char given in grammar string.
"""
grammar = """
First:
'first' a = Second
;
Second:
"Ω"|"±"|"♪"
;
"""
metamodel = metamodel_from_str(grammar)
assert metamodel
def test_unicode_grammar_from_file():
"""
"""
|
<commit_before><commit_msg>Test for unicode characters in grammars.<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import pytest
from textx.metamodel import metamodel_from_str
def test_unicode_grammar_from_string():
"""
Test grammar with unicode char given in grammar string.
"""
grammar = """
First:
'first' a = Second
;
Second:
"Ω"|"±"|"♪"
;
"""
metamodel = metamodel_from_str(grammar)
assert metamodel
def test_unicode_grammar_from_file():
"""
"""
|
Test for unicode characters in grammars.# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import pytest
from textx.metamodel import metamodel_from_str
def test_unicode_grammar_from_string():
"""
Test grammar with unicode char given in grammar string.
"""
grammar = """
First:
'first' a = Second
;
Second:
"Ω"|"±"|"♪"
;
"""
metamodel = metamodel_from_str(grammar)
assert metamodel
def test_unicode_grammar_from_file():
"""
"""
|
<commit_before><commit_msg>Test for unicode characters in grammars.<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
import pytest
from textx.metamodel import metamodel_from_str
def test_unicode_grammar_from_string():
"""
Test grammar with unicode char given in grammar string.
"""
grammar = """
First:
'first' a = Second
;
Second:
"Ω"|"±"|"♪"
;
"""
metamodel = metamodel_from_str(grammar)
assert metamodel
def test_unicode_grammar_from_file():
"""
"""
|
|
dcc64e9fd8bb3cb407959a30a2054fc180596bae
|
tests/test_pandas_integration.py
|
tests/test_pandas_integration.py
|
from unittest import TestCase
import numpy as np
import pandas as pd
import numpy.testing as npt
from nimble import Events
class TestAsPandasCondition(TestCase):
def setUp(self):
conditional_series = pd.Series([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
condition = (conditional_series > 0)
self.events = Events(condition)
def test_as_series(self):
validation_series = pd.Series([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
test_series = self.events.as_series()
test_series.equals(validation_series)
def test_as_array(self):
validation_array = np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
npt.assert_array_equal(validation_array, self.events.as_array())
class TestAsNpArrCondition(TestCase):
def setUp(self):
conditional_array = np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
condition = (conditional_array > 0)
self.events = Events(condition)
def test_as_series(self):
validation_series = pd.Series([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
test_series = self.events.as_series()
test_series.equals(validation_series)
def test_as_array(self):
validation_array = np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
npt.assert_array_equal(validation_array, self.events.as_array())
|
Add Pandas integration unit tests
|
Add Pandas integration unit tests
|
Python
|
mit
|
rwhitt2049/nimble,rwhitt2049/trouve
|
Add Pandas integration unit tests
|
from unittest import TestCase
import numpy as np
import pandas as pd
import numpy.testing as npt
from nimble import Events
class TestAsPandasCondition(TestCase):
def setUp(self):
conditional_series = pd.Series([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
condition = (conditional_series > 0)
self.events = Events(condition)
def test_as_series(self):
validation_series = pd.Series([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
test_series = self.events.as_series()
test_series.equals(validation_series)
def test_as_array(self):
validation_array = np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
npt.assert_array_equal(validation_array, self.events.as_array())
class TestAsNpArrCondition(TestCase):
def setUp(self):
conditional_array = np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
condition = (conditional_array > 0)
self.events = Events(condition)
def test_as_series(self):
validation_series = pd.Series([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
test_series = self.events.as_series()
test_series.equals(validation_series)
def test_as_array(self):
validation_array = np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
npt.assert_array_equal(validation_array, self.events.as_array())
|
<commit_before><commit_msg>Add Pandas integration unit tests<commit_after>
|
from unittest import TestCase
import numpy as np
import pandas as pd
import numpy.testing as npt
from nimble import Events
class TestAsPandasCondition(TestCase):
def setUp(self):
conditional_series = pd.Series([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
condition = (conditional_series > 0)
self.events = Events(condition)
def test_as_series(self):
validation_series = pd.Series([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
test_series = self.events.as_series()
test_series.equals(validation_series)
def test_as_array(self):
validation_array = np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
npt.assert_array_equal(validation_array, self.events.as_array())
class TestAsNpArrCondition(TestCase):
def setUp(self):
conditional_array = np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
condition = (conditional_array > 0)
self.events = Events(condition)
def test_as_series(self):
validation_series = pd.Series([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
test_series = self.events.as_series()
test_series.equals(validation_series)
def test_as_array(self):
validation_array = np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
npt.assert_array_equal(validation_array, self.events.as_array())
|
Add Pandas integration unit testsfrom unittest import TestCase
import numpy as np
import pandas as pd
import numpy.testing as npt
from nimble import Events
class TestAsPandasCondition(TestCase):
def setUp(self):
conditional_series = pd.Series([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
condition = (conditional_series > 0)
self.events = Events(condition)
def test_as_series(self):
validation_series = pd.Series([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
test_series = self.events.as_series()
test_series.equals(validation_series)
def test_as_array(self):
validation_array = np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
npt.assert_array_equal(validation_array, self.events.as_array())
class TestAsNpArrCondition(TestCase):
def setUp(self):
conditional_array = np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
condition = (conditional_array > 0)
self.events = Events(condition)
def test_as_series(self):
validation_series = pd.Series([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
test_series = self.events.as_series()
test_series.equals(validation_series)
def test_as_array(self):
validation_array = np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
npt.assert_array_equal(validation_array, self.events.as_array())
|
<commit_before><commit_msg>Add Pandas integration unit tests<commit_after>from unittest import TestCase
import numpy as np
import pandas as pd
import numpy.testing as npt
from nimble import Events
class TestAsPandasCondition(TestCase):
def setUp(self):
conditional_series = pd.Series([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
condition = (conditional_series > 0)
self.events = Events(condition)
def test_as_series(self):
validation_series = pd.Series([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
test_series = self.events.as_series()
test_series.equals(validation_series)
def test_as_array(self):
validation_array = np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
npt.assert_array_equal(validation_array, self.events.as_array())
class TestAsNpArrCondition(TestCase):
def setUp(self):
conditional_array = np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
condition = (conditional_array > 0)
self.events = Events(condition)
def test_as_series(self):
validation_series = pd.Series([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
test_series = self.events.as_series()
test_series.equals(validation_series)
def test_as_array(self):
validation_array = np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
npt.assert_array_equal(validation_array, self.events.as_array())
|
|
1696ca33e644d3cb1138d7ee4c48239b7a757cfd
|
python_scripts/gpio_test.py
|
python_scripts/gpio_test.py
|
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BOARD)
GPIO.setup(7,GPIO.OUT)
for x in range(0,10):
GPIO.output(7,True)
time.sleep(1)
GPIO.output(7,False)
time.sleep(1)
GPIO.cleanup()
|
Add the first script to try an LED light
|
Add the first script to try an LED light
|
Python
|
mit
|
mirontoli/tolle-rasp,mirontoli/tolle-rasp,mirontoli/tolle-rasp,mirontoli/tolle-rasp,mirontoli/tolle-rasp
|
Add the first script to try an LED light
|
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BOARD)
GPIO.setup(7,GPIO.OUT)
for x in range(0,10):
GPIO.output(7,True)
time.sleep(1)
GPIO.output(7,False)
time.sleep(1)
GPIO.cleanup()
|
<commit_before><commit_msg>Add the first script to try an LED light<commit_after>
|
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BOARD)
GPIO.setup(7,GPIO.OUT)
for x in range(0,10):
GPIO.output(7,True)
time.sleep(1)
GPIO.output(7,False)
time.sleep(1)
GPIO.cleanup()
|
Add the first script to try an LED lightimport RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BOARD)
GPIO.setup(7,GPIO.OUT)
for x in range(0,10):
GPIO.output(7,True)
time.sleep(1)
GPIO.output(7,False)
time.sleep(1)
GPIO.cleanup()
|
<commit_before><commit_msg>Add the first script to try an LED light<commit_after>import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BOARD)
GPIO.setup(7,GPIO.OUT)
for x in range(0,10):
GPIO.output(7,True)
time.sleep(1)
GPIO.output(7,False)
time.sleep(1)
GPIO.cleanup()
|
|
655bf4b4159e70b4a99185a1735ac63c3ee951dc
|
analysis/filter-by-result-type.py
|
analysis/filter-by-result-type.py
|
#!/usr/bin/env python
import argparse
import os
import logging
import pprint
import sys
import yaml
# HACK
_file = os.path.abspath(__file__)
_dir = os.path.dirname(os.path.dirname(_file))
sys.path.insert(0, _dir)
from BoogieRunner.ResultType import ResultType
def main(args):
resultTypes = [ r.name for r in list(ResultType)] # Get list of ResultTypes as strings
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser()
parser.add_argument('result_yml', type=argparse.FileType('r'), default='-')
parser.add_argument('result_type', choices=resultTypes)
pargs = parser.parse_args(args)
results = yaml.load(pargs.result_yml)
assert isinstance(results, list)
# Get out of requested type
resultCode = ResultType[pargs.result_type].value
count = 0
collected = [ ]
for r in results:
if r['result'] == resultCode:
count += 1
collected.append(r)
logging.info('Count of type {} : {}'.format(pargs.result_type, count))
print(yaml.dump(collected, default_flow_style=False))
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
Add script to filter result by result type.
|
Add script to filter result by result type.
|
Python
|
bsd-3-clause
|
symbooglix/boogie-runner,symbooglix/boogie-runner
|
Add script to filter result by result type.
|
#!/usr/bin/env python
import argparse
import os
import logging
import pprint
import sys
import yaml
# HACK
_file = os.path.abspath(__file__)
_dir = os.path.dirname(os.path.dirname(_file))
sys.path.insert(0, _dir)
from BoogieRunner.ResultType import ResultType
def main(args):
resultTypes = [ r.name for r in list(ResultType)] # Get list of ResultTypes as strings
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser()
parser.add_argument('result_yml', type=argparse.FileType('r'), default='-')
parser.add_argument('result_type', choices=resultTypes)
pargs = parser.parse_args(args)
results = yaml.load(pargs.result_yml)
assert isinstance(results, list)
# Get out of requested type
resultCode = ResultType[pargs.result_type].value
count = 0
collected = [ ]
for r in results:
if r['result'] == resultCode:
count += 1
collected.append(r)
logging.info('Count of type {} : {}'.format(pargs.result_type, count))
print(yaml.dump(collected, default_flow_style=False))
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
<commit_before><commit_msg>Add script to filter result by result type.<commit_after>
|
#!/usr/bin/env python
import argparse
import os
import logging
import pprint
import sys
import yaml
# HACK
_file = os.path.abspath(__file__)
_dir = os.path.dirname(os.path.dirname(_file))
sys.path.insert(0, _dir)
from BoogieRunner.ResultType import ResultType
def main(args):
resultTypes = [ r.name for r in list(ResultType)] # Get list of ResultTypes as strings
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser()
parser.add_argument('result_yml', type=argparse.FileType('r'), default='-')
parser.add_argument('result_type', choices=resultTypes)
pargs = parser.parse_args(args)
results = yaml.load(pargs.result_yml)
assert isinstance(results, list)
# Get out of requested type
resultCode = ResultType[pargs.result_type].value
count = 0
collected = [ ]
for r in results:
if r['result'] == resultCode:
count += 1
collected.append(r)
logging.info('Count of type {} : {}'.format(pargs.result_type, count))
print(yaml.dump(collected, default_flow_style=False))
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
Add script to filter result by result type.#!/usr/bin/env python
import argparse
import os
import logging
import pprint
import sys
import yaml
# HACK
_file = os.path.abspath(__file__)
_dir = os.path.dirname(os.path.dirname(_file))
sys.path.insert(0, _dir)
from BoogieRunner.ResultType import ResultType
def main(args):
resultTypes = [ r.name for r in list(ResultType)] # Get list of ResultTypes as strings
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser()
parser.add_argument('result_yml', type=argparse.FileType('r'), default='-')
parser.add_argument('result_type', choices=resultTypes)
pargs = parser.parse_args(args)
results = yaml.load(pargs.result_yml)
assert isinstance(results, list)
# Get out of requested type
resultCode = ResultType[pargs.result_type].value
count = 0
collected = [ ]
for r in results:
if r['result'] == resultCode:
count += 1
collected.append(r)
logging.info('Count of type {} : {}'.format(pargs.result_type, count))
print(yaml.dump(collected, default_flow_style=False))
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
<commit_before><commit_msg>Add script to filter result by result type.<commit_after>#!/usr/bin/env python
import argparse
import os
import logging
import pprint
import sys
import yaml
# HACK
_file = os.path.abspath(__file__)
_dir = os.path.dirname(os.path.dirname(_file))
sys.path.insert(0, _dir)
from BoogieRunner.ResultType import ResultType
def main(args):
resultTypes = [ r.name for r in list(ResultType)] # Get list of ResultTypes as strings
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser()
parser.add_argument('result_yml', type=argparse.FileType('r'), default='-')
parser.add_argument('result_type', choices=resultTypes)
pargs = parser.parse_args(args)
results = yaml.load(pargs.result_yml)
assert isinstance(results, list)
# Get out of requested type
resultCode = ResultType[pargs.result_type].value
count = 0
collected = [ ]
for r in results:
if r['result'] == resultCode:
count += 1
collected.append(r)
logging.info('Count of type {} : {}'.format(pargs.result_type, count))
print(yaml.dump(collected, default_flow_style=False))
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
|
bf93b3b4c8965e31e5b9b8ebdbf3f1b1d258e15e
|
tools/cvs2svn/profile-cvs2svn.py
|
tools/cvs2svn/profile-cvs2svn.py
|
#!/usr/bin/env python
#
# Use this script to profile cvs2svn.py using Python's hotshot profiler.
#
# The profile data is stored in cvs2svn.hotshot. To view the data using
# hotshot, run the following in python:
#
# import hotshot.stats
# stats = hotshot.stats.load('cvs2svn.hotshot')
# stats.strip_dirs()
# stats.sort_stats('time', 'calls')
# stats.print_stats(20)
#
# It is also possible (and a lot better) to use kcachegrind to view the data.
# To do so, you must first convert the data to the cachegrind format using
# hotshot2cachegrind, which you can download from the following URL:
#
# http://kcachegrind.sourceforge.net/cgi-bin/show.cgi/KcacheGrindContribPython
#
# Convert the data using the following command:
#
# hotshot2cachegrind -o cachegrind.out cvs2svn.hotshot
#
# Depending on the size of the repository, this can take a long time. When
# the conversion is done, simply open cachegrind.out in kcachegrind.
import cvs2svn, hotshot
prof = hotshot.Profile('cvs2svn.hotshot')
prof.runcall(cvs2svn.main)
prof.close()
|
Add a new script to simplify profiling of cvs2svn.py. Document in the script how to use kcachegrind to view the results.
|
Add a new script to simplify profiling of cvs2svn.py. Document in the
script how to use kcachegrind to view the results.
* tools/cvs2svn/profile-cvs2svn.py: New script.
git-svn-id: f8a4e5e023278da1e04e203c7fe051e3c4285d88@848715 13f79535-47bb-0310-9956-ffa450edef68
|
Python
|
apache-2.0
|
YueLinHo/Subversion,YueLinHo/Subversion,wbond/subversion,YueLinHo/Subversion,YueLinHo/Subversion,wbond/subversion,wbond/subversion,YueLinHo/Subversion,YueLinHo/Subversion,YueLinHo/Subversion,YueLinHo/Subversion,wbond/subversion,wbond/subversion,wbond/subversion,wbond/subversion
|
Add a new script to simplify profiling of cvs2svn.py. Document in the
script how to use kcachegrind to view the results.
* tools/cvs2svn/profile-cvs2svn.py: New script.
git-svn-id: f8a4e5e023278da1e04e203c7fe051e3c4285d88@848715 13f79535-47bb-0310-9956-ffa450edef68
|
#!/usr/bin/env python
#
# Use this script to profile cvs2svn.py using Python's hotshot profiler.
#
# The profile data is stored in cvs2svn.hotshot. To view the data using
# hotshot, run the following in python:
#
# import hotshot.stats
# stats = hotshot.stats.load('cvs2svn.hotshot')
# stats.strip_dirs()
# stats.sort_stats('time', 'calls')
# stats.print_stats(20)
#
# It is also possible (and a lot better) to use kcachegrind to view the data.
# To do so, you must first convert the data to the cachegrind format using
# hotshot2cachegrind, which you can download from the following URL:
#
# http://kcachegrind.sourceforge.net/cgi-bin/show.cgi/KcacheGrindContribPython
#
# Convert the data using the following command:
#
# hotshot2cachegrind -o cachegrind.out cvs2svn.hotshot
#
# Depending on the size of the repository, this can take a long time. When
# the conversion is done, simply open cachegrind.out in kcachegrind.
import cvs2svn, hotshot
prof = hotshot.Profile('cvs2svn.hotshot')
prof.runcall(cvs2svn.main)
prof.close()
|
<commit_before><commit_msg>Add a new script to simplify profiling of cvs2svn.py. Document in the
script how to use kcachegrind to view the results.
* tools/cvs2svn/profile-cvs2svn.py: New script.
git-svn-id: f8a4e5e023278da1e04e203c7fe051e3c4285d88@848715 13f79535-47bb-0310-9956-ffa450edef68<commit_after>
|
#!/usr/bin/env python
#
# Use this script to profile cvs2svn.py using Python's hotshot profiler.
#
# The profile data is stored in cvs2svn.hotshot. To view the data using
# hotshot, run the following in python:
#
# import hotshot.stats
# stats = hotshot.stats.load('cvs2svn.hotshot')
# stats.strip_dirs()
# stats.sort_stats('time', 'calls')
# stats.print_stats(20)
#
# It is also possible (and a lot better) to use kcachegrind to view the data.
# To do so, you must first convert the data to the cachegrind format using
# hotshot2cachegrind, which you can download from the following URL:
#
# http://kcachegrind.sourceforge.net/cgi-bin/show.cgi/KcacheGrindContribPython
#
# Convert the data using the following command:
#
# hotshot2cachegrind -o cachegrind.out cvs2svn.hotshot
#
# Depending on the size of the repository, this can take a long time. When
# the conversion is done, simply open cachegrind.out in kcachegrind.
import cvs2svn, hotshot
prof = hotshot.Profile('cvs2svn.hotshot')
prof.runcall(cvs2svn.main)
prof.close()
|
Add a new script to simplify profiling of cvs2svn.py. Document in the
script how to use kcachegrind to view the results.
* tools/cvs2svn/profile-cvs2svn.py: New script.
git-svn-id: f8a4e5e023278da1e04e203c7fe051e3c4285d88@848715 13f79535-47bb-0310-9956-ffa450edef68#!/usr/bin/env python
#
# Use this script to profile cvs2svn.py using Python's hotshot profiler.
#
# The profile data is stored in cvs2svn.hotshot. To view the data using
# hotshot, run the following in python:
#
# import hotshot.stats
# stats = hotshot.stats.load('cvs2svn.hotshot')
# stats.strip_dirs()
# stats.sort_stats('time', 'calls')
# stats.print_stats(20)
#
# It is also possible (and a lot better) to use kcachegrind to view the data.
# To do so, you must first convert the data to the cachegrind format using
# hotshot2cachegrind, which you can download from the following URL:
#
# http://kcachegrind.sourceforge.net/cgi-bin/show.cgi/KcacheGrindContribPython
#
# Convert the data using the following command:
#
# hotshot2cachegrind -o cachegrind.out cvs2svn.hotshot
#
# Depending on the size of the repository, this can take a long time. When
# the conversion is done, simply open cachegrind.out in kcachegrind.
import cvs2svn, hotshot
prof = hotshot.Profile('cvs2svn.hotshot')
prof.runcall(cvs2svn.main)
prof.close()
|
<commit_before><commit_msg>Add a new script to simplify profiling of cvs2svn.py. Document in the
script how to use kcachegrind to view the results.
* tools/cvs2svn/profile-cvs2svn.py: New script.
git-svn-id: f8a4e5e023278da1e04e203c7fe051e3c4285d88@848715 13f79535-47bb-0310-9956-ffa450edef68<commit_after>#!/usr/bin/env python
#
# Use this script to profile cvs2svn.py using Python's hotshot profiler.
#
# The profile data is stored in cvs2svn.hotshot. To view the data using
# hotshot, run the following in python:
#
# import hotshot.stats
# stats = hotshot.stats.load('cvs2svn.hotshot')
# stats.strip_dirs()
# stats.sort_stats('time', 'calls')
# stats.print_stats(20)
#
# It is also possible (and a lot better) to use kcachegrind to view the data.
# To do so, you must first convert the data to the cachegrind format using
# hotshot2cachegrind, which you can download from the following URL:
#
# http://kcachegrind.sourceforge.net/cgi-bin/show.cgi/KcacheGrindContribPython
#
# Convert the data using the following command:
#
# hotshot2cachegrind -o cachegrind.out cvs2svn.hotshot
#
# Depending on the size of the repository, this can take a long time. When
# the conversion is done, simply open cachegrind.out in kcachegrind.
import cvs2svn, hotshot
prof = hotshot.Profile('cvs2svn.hotshot')
prof.runcall(cvs2svn.main)
prof.close()
|
|
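The hotshot module used above shipped only with Python 2 and was removed in Python 3. If the same measurement were needed on a modern interpreter, a rough equivalent with cProfile/pstats (assuming cvs2svn.main is importable, exactly as in the script) might look like this; it is a sketch, not part of the original tool:
import cProfile
import pstats

import cvs2svn  # assumed importable, as in profile-cvs2svn.py

profiler = cProfile.Profile()
profiler.runcall(cvs2svn.main)
profiler.dump_stats('cvs2svn.cprofile')

# Viewing mirrors the hotshot.stats snippet quoted in the header comment.
stats = pstats.Stats('cvs2svn.cprofile')
stats.strip_dirs()
stats.sort_stats('time', 'calls')
stats.print_stats(20)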
71a6d0a032896f4ef2e9a4cda541d142f2c48171
|
typhon/tests/test_environment.py
|
typhon/tests/test_environment.py
|
# -*- coding: utf-8 -*-
"""Testing the environment/configuration handler.
"""
import os
from copy import copy
import pytest
from typhon import environment
class TestEnvironment:
"""Testing the environment handler."""
def setup_method(self):
"""Run all test methods with an empty environment."""
self.env = copy(os.environ)
os.environ = {}
def teardown_method(self):
"""Restore old environment."""
os.environ = self.env
def test_get_environment_variables(self):
"""Test if environment variables are considered."""
os.environ['TYPHON_ENV_TEST'] = 'TEST_VALUE'
assert environment.environ['TYPHON_ENV_TEST'] == 'TEST_VALUE'
def test_set_environment_variables(self):
"""Test if environment variables are updated."""
environment.environ['TYPHON_ENV_TEST'] = 'TEST_VALUE'
assert os.environ['TYPHON_ENV_TEST'] == 'TEST_VALUE'
def test_undefined_variable(self):
"""Test behavior for undefined variables."""
with pytest.raises(KeyError):
environment.environ['TYPHON_ENV_TEST']
|
Add unittests for environment handler.
|
Add unittests for environment handler.
|
Python
|
mit
|
atmtools/typhon,atmtools/typhon
|
Add unittests for environment handler.
|
# -*- coding: utf-8 -*-
"""Testing the environment/configuration handler.
"""
import os
from copy import copy
import pytest
from typhon import environment
class TestEnvironment:
"""Testing the environment handler."""
def setup_method(self):
"""Run all test methods with an empty environment."""
self.env = copy(os.environ)
os.environ = {}
def teardown_method(self):
"""Restore old environment."""
os.environ = self.env
def test_get_environment_variables(self):
"""Test if environment variables are considered."""
os.environ['TYPHON_ENV_TEST'] = 'TEST_VALUE'
assert environment.environ['TYPHON_ENV_TEST'] == 'TEST_VALUE'
def test_set_environment_variables(self):
"""Test if environment variables are updated."""
environment.environ['TYPHON_ENV_TEST'] = 'TEST_VALUE'
assert os.environ['TYPHON_ENV_TEST'] == 'TEST_VALUE'
def test_undefined_variable(self):
"""Test behavior for undefined variables."""
with pytest.raises(KeyError):
environment.environ['TYPHON_ENV_TEST']
|
<commit_before><commit_msg>Add unittests for environment handler.<commit_after>
|
# -*- coding: utf-8 -*-
"""Testing the environment/configuration handler.
"""
import os
from copy import copy
import pytest
from typhon import environment
class TestEnvironment:
"""Testing the environment handler."""
def setup_method(self):
"""Run all test methods with an empty environment."""
self.env = copy(os.environ)
os.environ = {}
def teardown_method(self):
"""Restore old environment."""
os.environ = self.env
def test_get_environment_variables(self):
"""Test if environment variables are considered."""
os.environ['TYPHON_ENV_TEST'] = 'TEST_VALUE'
assert environment.environ['TYPHON_ENV_TEST'] == 'TEST_VALUE'
def test_set_environment_variables(self):
"""Test if environment variables are updated."""
environment.environ['TYPHON_ENV_TEST'] = 'TEST_VALUE'
assert os.environ['TYPHON_ENV_TEST'] == 'TEST_VALUE'
def test_undefined_variable(self):
"""Test behavior for undefined variables."""
with pytest.raises(KeyError):
environment.environ['TYPHON_ENV_TEST']
|
Add unittests for environment handler.# -*- coding: utf-8 -*-
"""Testing the environment/configuration handler.
"""
import os
from copy import copy
import pytest
from typhon import environment
class TestEnvironment:
"""Testing the environment handler."""
def setup_method(self):
"""Run all test methods with an empty environment."""
self.env = copy(os.environ)
os.environ = {}
def teardown_method(self):
"""Restore old environment."""
os.environ = self.env
def test_get_environment_variables(self):
"""Test if environment variables are considered."""
os.environ['TYPHON_ENV_TEST'] = 'TEST_VALUE'
assert environment.environ['TYPHON_ENV_TEST'] == 'TEST_VALUE'
def test_set_environment_variables(self):
"""Test if environment variables are updated."""
environment.environ['TYPHON_ENV_TEST'] = 'TEST_VALUE'
assert os.environ['TYPHON_ENV_TEST'] == 'TEST_VALUE'
def test_undefined_variable(self):
"""Test behavior for undefined variables."""
with pytest.raises(KeyError):
environment.environ['TYPHON_ENV_TEST']
|
<commit_before><commit_msg>Add unittests for environment handler.<commit_after># -*- coding: utf-8 -*-
"""Testing the environment/configuration handler.
"""
import os
from copy import copy
import pytest
from typhon import environment
class TestEnvironment:
"""Testing the environment handler."""
def setup_method(self):
"""Run all test methods with an empty environment."""
self.env = copy(os.environ)
os.environ = {}
def teardown_method(self):
"""Restore old environment."""
os.environ = self.env
def test_get_environment_variables(self):
"""Test if environment variables are considered."""
os.environ['TYPHON_ENV_TEST'] = 'TEST_VALUE'
assert environment.environ['TYPHON_ENV_TEST'] == 'TEST_VALUE'
def test_set_environment_variables(self):
"""Test if environment variables are updated."""
environment.environ['TYPHON_ENV_TEST'] = 'TEST_VALUE'
assert os.environ['TYPHON_ENV_TEST'] == 'TEST_VALUE'
def test_undefined_variable(self):
"""Test behavior for undefined variables."""
with pytest.raises(KeyError):
environment.environ['TYPHON_ENV_TEST']
|
|
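The tests above only pass if typhon's environment.environ behaves as a mapping that reads and writes os.environ in both directions and raises KeyError for unknown names. A minimal illustrative stand-in that would satisfy those three tests (not typhon's actual implementation):
import os

class _Environ(object):
    """Dict-like proxy that forwards lookups and assignments to os.environ."""

    def __getitem__(self, key):
        # Missing variables raise KeyError, as test_undefined_variable expects.
        return os.environ[key]

    def __setitem__(self, key, value):
        os.environ[key] = value

environ = _Environ()

# Usage mirroring the tests:
environ['TYPHON_ENV_TEST'] = 'TEST_VALUE'
assert os.environ['TYPHON_ENV_TEST'] == 'TEST_VALUE'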
d84a0b0d50fb4d01b2a2354d5317afd181f1053c
|
Regression/RandomForestRegression/regularRandomForestRegression.py
|
Regression/RandomForestRegression/regularRandomForestRegression.py
|
# -*- coding: utf-8 -*-
"""Random Forest Regression for machine learning.
Random forest algorithm is a supervised classification algorithm. As the name
suggest, this algorithm creates the forest with a number of decision trees.
In general, the more trees in the forest the more robust the forest looks like.
In the same way in the random forest classifier, the higher the number of trees
in the forest gives the high accuracy results.
Example:
$ python regularRandomForestRegression.py
Todo:
*
"""
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
# from sklearn.preprocessing import StandardScaler
# from sklearn.model_selection import train_test_split
# Importing the dataset
dataset = pd.read_csv('Position_Salaries.csv')
features = dataset.iloc[:, 1:2].values
labels = dataset.iloc[:, 2].values
# Splitting the Dataset into a Training set and a Test set
"""feature_train, feature_test, label_train, label_test = train_test_split(
features, labels, test_size=0.2)
"""
# Feature scaling, normalize scale is important. Especially on algorithms
# involving euclidian distance. Two main feature scaling formulas are:
# Standardisation: x_stand = (x-mean(x))/(standard_deviation(x))
# Normalisation: x_norm = (x-min(x))/(max(x)-min(x))
"""sc_feature = StandardScaler()
feature_train = sc_feature.fit_transform(feature_train)
feature_test = sc_feature.transform(feature_test)
sc_labels = StandardScaler()
labels_train = sc_labels.fit_transform(labels_train)
labels_test = sc_labels.transform(labels_test)
"""
# Fit the Random Forest Regression to the dataset
regressor = RandomForestRegressor(n_estimators=310, random_state=0)
regressor.fit(features, labels)
# Predict new result with the Random Forest Regression
y_pred = regressor.predict(6.5)
# Visualising the regression results with smoother curve
x_grid = np.arange(min(features), max(features), 0.01)
x_grid = x_grid.reshape((len(x_grid), 1))
plt.scatter(features, labels, color='r')
plt.plot(x_grid, regressor.predict(x_grid), color='b')
plt.title('Truth or Bluff (Random Forest Regression)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
|
Add Random Forest Regression in Python
|
Add Random Forest Regression in Python
|
Python
|
mit
|
a-holm/MachinelearningAlgorithms,a-holm/MachinelearningAlgorithms
|
Add Random Forest Regression in Python
|
# -*- coding: utf-8 -*-
"""Random Forest Regression for machine learning.
Random forest algorithm is a supervised classification algorithm. As the name
suggest, this algorithm creates the forest with a number of decision trees.
In general, the more trees in the forest the more robust the forest looks like.
In the same way in the random forest classifier, the higher the number of trees
in the forest gives the high accuracy results.
Example:
$ python regularRandomForestRegression.py
Todo:
*
"""
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
# from sklearn.preprocessing import StandardScaler
# from sklearn.model_selection import train_test_split
# Importing the dataset
dataset = pd.read_csv('Position_Salaries.csv')
features = dataset.iloc[:, 1:2].values
labels = dataset.iloc[:, 2].values
# Splitting the Dataset into a Training set and a Test set
"""feature_train, feature_test, label_train, label_test = train_test_split(
features, labels, test_size=0.2)
"""
# Feature scaling, normalize scale is important. Especially on algorithms
# involving euclidian distance. Two main feature scaling formulas are:
# Standardisation: x_stand = (x-mean(x))/(standard_deviation(x))
# Normalisation: x_norm = (x-min(x))/(max(x)-min(x))
"""sc_feature = StandardScaler()
feature_train = sc_feature.fit_transform(feature_train)
feature_test = sc_feature.transform(feature_test)
sc_labels = StandardScaler()
labels_train = sc_labels.fit_transform(labels_train)
labels_test = sc_labels.transform(labels_test)
"""
# Fit the Random Forest Regression to the dataset
regressor = RandomForestRegressor(n_estimators=310, random_state=0)
regressor.fit(features, labels)
# Predict new result with the Random Forest Regression
y_pred = regressor.predict(6.5)
# Visualising the regression results with smoother curve
x_grid = np.arange(min(features), max(features), 0.01)
x_grid = x_grid.reshape((len(x_grid), 1))
plt.scatter(features, labels, color='r')
plt.plot(x_grid, regressor.predict(x_grid), color='b')
plt.title('Truth or Bluff (Random Forest Regression)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
|
<commit_before><commit_msg>Add Random Forest Regression in Python<commit_after>
|
# -*- coding: utf-8 -*-
"""Random Forest Regression for machine learning.
Random forest algorithm is a supervised classification algorithm. As the name
suggest, this algorithm creates the forest with a number of decision trees.
In general, the more trees in the forest the more robust the forest looks like.
In the same way in the random forest classifier, the higher the number of trees
in the forest gives the high accuracy results.
Example:
$ python regularRandomForestRegression.py
Todo:
*
"""
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
# from sklearn.preprocessing import StandardScaler
# from sklearn.model_selection import train_test_split
# Importing the dataset
dataset = pd.read_csv('Position_Salaries.csv')
features = dataset.iloc[:, 1:2].values
labels = dataset.iloc[:, 2].values
# Splitting the Dataset into a Training set and a Test set
"""feature_train, feature_test, label_train, label_test = train_test_split(
features, labels, test_size=0.2)
"""
# Feature scaling, normalize scale is important. Especially on algorithms
# involving euclidian distance. Two main feature scaling formulas are:
# Standardisation: x_stand = (x-mean(x))/(standard_deviation(x))
# Normalisation: x_norm = (x-min(x))/(max(x)-min(x))
"""sc_feature = StandardScaler()
feature_train = sc_feature.fit_transform(feature_train)
feature_test = sc_feature.transform(feature_test)
sc_labels = StandardScaler()
labels_train = sc_labels.fit_transform(labels_train)
labels_test = sc_labels.transform(labels_test)
"""
# Fit the Random Forest Regression to the dataset
regressor = RandomForestRegressor(n_estimators=310, random_state=0)
regressor.fit(features, labels)
# Predict new result with the Random Forest Regression
y_pred = regressor.predict(6.5)
# Visualising the regression results with smoother curve
x_grid = np.arange(min(features), max(features), 0.01)
x_grid = x_grid.reshape((len(x_grid), 1))
plt.scatter(features, labels, color='r')
plt.plot(x_grid, regressor.predict(x_grid), color='b')
plt.title('Truth or Bluff (Random Forest Regression)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
|
Add Random Forest Regression in Python# -*- coding: utf-8 -*-
"""Random Forest Regression for machine learning.
Random forest algorithm is a supervised classification algorithm. As the name
suggest, this algorithm creates the forest with a number of decision trees.
In general, the more trees in the forest the more robust the forest looks like.
In the same way in the random forest classifier, the higher the number of trees
in the forest gives the high accuracy results.
Example:
$ python regularRandomForestRegression.py
Todo:
*
"""
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
# from sklearn.preprocessing import StandardScaler
# from sklearn.model_selection import train_test_split
# Importing the dataset
dataset = pd.read_csv('Position_Salaries.csv')
features = dataset.iloc[:, 1:2].values
labels = dataset.iloc[:, 2].values
# Splitting the Dataset into a Training set and a Test set
"""feature_train, feature_test, label_train, label_test = train_test_split(
features, labels, test_size=0.2)
"""
# Feature scaling, normalize scale is important. Especially on algorithms
# involving euclidian distance. Two main feature scaling formulas are:
# Standardisation: x_stand = (x-mean(x))/(standard_deviation(x))
# Normalisation: x_norm = (x-min(x))/(max(x)-min(x))
"""sc_feature = StandardScaler()
feature_train = sc_feature.fit_transform(feature_train)
feature_test = sc_feature.transform(feature_test)
sc_labels = StandardScaler()
labels_train = sc_labels.fit_transform(labels_train)
labels_test = sc_labels.transform(labels_test)
"""
# Fit the Random Forest Regression to the dataset
regressor = RandomForestRegressor(n_estimators=310, random_state=0)
regressor.fit(features, labels)
# Predict new result with the Random Forest Regression
y_pred = regressor.predict(6.5)
# Visualising the regression results with smoother curve
x_grid = np.arange(min(features), max(features), 0.01)
x_grid = x_grid.reshape((len(x_grid), 1))
plt.scatter(features, labels, color='r')
plt.plot(x_grid, regressor.predict(x_grid), color='b')
plt.title('Truth or Bluff (Random Forest Regression)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
|
<commit_before><commit_msg>Add Random Forest Regression in Python<commit_after># -*- coding: utf-8 -*-
"""Random Forest Regression for machine learning.
Random forest algorithm is a supervised classification algorithm. As the name
suggest, this algorithm creates the forest with a number of decision trees.
In general, the more trees in the forest the more robust the forest looks like.
In the same way in the random forest classifier, the higher the number of trees
in the forest gives the high accuracy results.
Example:
$ python regularRandomForestRegression.py
Todo:
*
"""
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
# from sklearn.preprocessing import StandardScaler
# from sklearn.model_selection import train_test_split
# Importing the dataset
dataset = pd.read_csv('Position_Salaries.csv')
features = dataset.iloc[:, 1:2].values
labels = dataset.iloc[:, 2].values
# Splitting the Dataset into a Training set and a Test set
"""feature_train, feature_test, label_train, label_test = train_test_split(
features, labels, test_size=0.2)
"""
# Feature scaling, normalize scale is important. Especially on algorithms
# involving euclidian distance. Two main feature scaling formulas are:
# Standardisation: x_stand = (x-mean(x))/(standard_deviation(x))
# Normalisation: x_norm = (x-min(x))/(max(x)-min(x))
"""sc_feature = StandardScaler()
feature_train = sc_feature.fit_transform(feature_train)
feature_test = sc_feature.transform(feature_test)
sc_labels = StandardScaler()
labels_train = sc_labels.fit_transform(labels_train)
labels_test = sc_labels.transform(labels_test)
"""
# Fit the Random Forest Regression to the dataset
regressor = RandomForestRegressor(n_estimators=310, random_state=0)
regressor.fit(features, labels)
# Predict new result with the Random Forest Regression
y_pred = regressor.predict(6.5)
# Visualising the regression results with smoother curve
x_grid = np.arange(min(features), max(features), 0.01)
x_grid = x_grid.reshape((len(x_grid), 1))
plt.scatter(features, labels, color='r')
plt.plot(x_grid, regressor.predict(x_grid), color='b')
plt.title('Truth or Bluff (Random Forest Regression)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
|
|
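One caveat when re-running the regression above on a recent scikit-learn: predict() no longer accepts a bare scalar such as 6.5, so the single-value prediction needs a 2-D array. A small sketch of the adjusted call, on stand-in data shaped like the Position_Salaries columns (the CSV itself is not reproduced here):
import numpy as np
from sklearn.ensemble import RandomForestRegressor

# Stand-in for the position level / salary columns; values are illustrative only.
features = np.arange(1, 11).reshape(-1, 1)
labels = np.array([45, 50, 60, 80, 110, 150, 200, 300, 500, 1000], dtype=float)

regressor = RandomForestRegressor(n_estimators=310, random_state=0)
regressor.fit(features, labels)

# Newer scikit-learn expects shape (n_samples, n_features), hence the nested list.
y_pred = regressor.predict([[6.5]])
print(y_pred)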
a152c7c48baa0f1c82e7d84bebbee674eb4f2761
|
tilequeue/commands.py
|
tilequeue/commands.py
|
from tilequeue.queues import make_sqs_queue
from tilequeue.tile import explode_with_parents
from tilequeue.tile import parse_expired_coord_string
import argparse
import os
def add_aws_cred_options(arg_parser):
arg_parser.add_argument('--aws_access_key_id')
arg_parser.add_argument('--aws_secret_access_key')
return arg_parser
def enqueue_arg_parser():
parser = argparse.ArgumentParser()
parser = add_aws_cred_options(parser)
parser.add_argument('--queue',
required=True,
help='Name of aws sqs queue, should already exist.',
)
parser.add_argument('--expired-tiles-file',
required=True,
help='Path to file containing list of expired tiles. Should be one per line, <zoom>/<column>/<row>',
)
return parser
def assert_aws_config(args):
if (args.aws_access_key_id is not None or
args.aws_secret_access_key is not None):
# assert that if either is specified, both are specified
assert (args.aws_access_key_id is not None and
args.aws_secret_access_key is not None), 'Must specify both aws key and secret'
else:
assert 'AWS_ACCESS_KEY_ID' in os.environ, 'Missing AWS_ACCESS_KEY_ID config'
assert 'AWS_SECRET_ACCESS_KEY' in os.environ, 'Missing AWS_SECRET_ACCESS_KEY config'
def enqueue_process_main():
parser = enqueue_arg_parser()
args = parser.parse_args()
assert_aws_config(args)
queue = make_sqs_queue(
args.queue, args.aws_access_key_id, args.aws_secret_access_key)
expired_tiles = []
with open(args.expired_tiles_file) as f:
for line in f:
line = line.strip()
if not line:
continue
coord = parse_expired_coord_string(line)
if coord is None:
                print 'Could not parse coordinate from line: %s' % line
continue
expired_tiles.append(coord)
print 'Number of expired tiles: %d' % len(expired_tiles)
exploded_coords = explode_with_parents(expired_tiles)
print 'Number of total expired tiles with all parents: %d' % len(exploded_coords)
print 'Queuing ... '
# sort in any way?
# zoom level strategy?
# only enqueue work for zooms > 10 if in metro extract area?
# exploded_coords is a set, but enqueue_batch expects a list for slicing
exploded_coords = list(exploded_coords)
queue.enqueue_batch(list(exploded_coords))
print 'Queuing ... Done'
if __name__ == '__main__':
enqueue_process_main()
|
Add command to queue expired tiles
|
Add command to queue expired tiles
|
Python
|
mit
|
tilezen/tilequeue,mapzen/tilequeue
|
Add command to queue expired tiles
|
from tilequeue.queues import make_sqs_queue
from tilequeue.tile import explode_with_parents
from tilequeue.tile import parse_expired_coord_string
import argparse
import os
def add_aws_cred_options(arg_parser):
arg_parser.add_argument('--aws_access_key_id')
arg_parser.add_argument('--aws_secret_access_key')
return arg_parser
def enqueue_arg_parser():
parser = argparse.ArgumentParser()
parser = add_aws_cred_options(parser)
parser.add_argument('--queue',
required=True,
help='Name of aws sqs queue, should already exist.',
)
parser.add_argument('--expired-tiles-file',
required=True,
help='Path to file containing list of expired tiles. Should be one per line, <zoom>/<column>/<row>',
)
return parser
def assert_aws_config(args):
if (args.aws_access_key_id is not None or
args.aws_secret_access_key is not None):
# assert that if either is specified, both are specified
assert (args.aws_access_key_id is not None and
args.aws_secret_access_key is not None), 'Must specify both aws key and secret'
else:
assert 'AWS_ACCESS_KEY_ID' in os.environ, 'Missing AWS_ACCESS_KEY_ID config'
assert 'AWS_SECRET_ACCESS_KEY' in os.environ, 'Missing AWS_SECRET_ACCESS_KEY config'
def enqueue_process_main():
parser = enqueue_arg_parser()
args = parser.parse_args()
assert_aws_config(args)
queue = make_sqs_queue(
args.queue, args.aws_access_key_id, args.aws_secret_access_key)
expired_tiles = []
with open(args.expired_tiles_file) as f:
for line in f:
line = line.strip()
if not line:
continue
coord = parse_expired_coord_string(line)
if coord is None:
                print 'Could not parse coordinate from line: %s' % line
continue
expired_tiles.append(coord)
print 'Number of expired tiles: %d' % len(expired_tiles)
exploded_coords = explode_with_parents(expired_tiles)
print 'Number of total expired tiles with all parents: %d' % len(exploded_coords)
print 'Queuing ... '
# sort in any way?
# zoom level strategy?
# only enqueue work for zooms > 10 if in metro extract area?
# exploded_coords is a set, but enqueue_batch expects a list for slicing
exploded_coords = list(exploded_coords)
queue.enqueue_batch(list(exploded_coords))
print 'Queuing ... Done'
if __name__ == '__main__':
enqueue_process_main()
|
<commit_before><commit_msg>Add command to queue expired tiles<commit_after>
|
from tilequeue.queues import make_sqs_queue
from tilequeue.tile import explode_with_parents
from tilequeue.tile import parse_expired_coord_string
import argparse
import os
def add_aws_cred_options(arg_parser):
arg_parser.add_argument('--aws_access_key_id')
arg_parser.add_argument('--aws_secret_access_key')
return arg_parser
def enqueue_arg_parser():
parser = argparse.ArgumentParser()
parser = add_aws_cred_options(parser)
parser.add_argument('--queue',
required=True,
help='Name of aws sqs queue, should already exist.',
)
parser.add_argument('--expired-tiles-file',
required=True,
help='Path to file containing list of expired tiles. Should be one per line, <zoom>/<column>/<row>',
)
return parser
def assert_aws_config(args):
if (args.aws_access_key_id is not None or
args.aws_secret_access_key is not None):
# assert that if either is specified, both are specified
assert (args.aws_access_key_id is not None and
args.aws_secret_access_key is not None), 'Must specify both aws key and secret'
else:
assert 'AWS_ACCESS_KEY_ID' in os.environ, 'Missing AWS_ACCESS_KEY_ID config'
assert 'AWS_SECRET_ACCESS_KEY' in os.environ, 'Missing AWS_SECRET_ACCESS_KEY config'
def enqueue_process_main():
parser = enqueue_arg_parser()
args = parser.parse_args()
assert_aws_config(args)
queue = make_sqs_queue(
args.queue, args.aws_access_key_id, args.aws_secret_access_key)
expired_tiles = []
with open(args.expired_tiles_file) as f:
for line in f:
line = line.strip()
if not line:
continue
coord = parse_expired_coord_string(line)
if coord is None:
                print 'Could not parse coordinate from line: %s' % line
continue
expired_tiles.append(coord)
print 'Number of expired tiles: %d' % len(expired_tiles)
exploded_coords = explode_with_parents(expired_tiles)
print 'Number of total expired tiles with all parents: %d' % len(exploded_coords)
print 'Queuing ... '
# sort in any way?
# zoom level strategy?
# only enqueue work for zooms > 10 if in metro extract area?
# exploded_coords is a set, but enqueue_batch expects a list for slicing
exploded_coords = list(exploded_coords)
queue.enqueue_batch(list(exploded_coords))
print 'Queuing ... Done'
if __name__ == '__main__':
enqueue_process_main()
|
Add command to queue expired tilesfrom tilequeue.queues import make_sqs_queue
from tilequeue.tile import explode_with_parents
from tilequeue.tile import parse_expired_coord_string
import argparse
import os
def add_aws_cred_options(arg_parser):
arg_parser.add_argument('--aws_access_key_id')
arg_parser.add_argument('--aws_secret_access_key')
return arg_parser
def enqueue_arg_parser():
parser = argparse.ArgumentParser()
parser = add_aws_cred_options(parser)
parser.add_argument('--queue',
required=True,
help='Name of aws sqs queue, should already exist.',
)
parser.add_argument('--expired-tiles-file',
required=True,
help='Path to file containing list of expired tiles. Should be one per line, <zoom>/<column>/<row>',
)
return parser
def assert_aws_config(args):
if (args.aws_access_key_id is not None or
args.aws_secret_access_key is not None):
# assert that if either is specified, both are specified
assert (args.aws_access_key_id is not None and
args.aws_secret_access_key is not None), 'Must specify both aws key and secret'
else:
assert 'AWS_ACCESS_KEY_ID' in os.environ, 'Missing AWS_ACCESS_KEY_ID config'
assert 'AWS_SECRET_ACCESS_KEY' in os.environ, 'Missing AWS_SECRET_ACCESS_KEY config'
def enqueue_process_main():
parser = enqueue_arg_parser()
args = parser.parse_args()
assert_aws_config(args)
queue = make_sqs_queue(
args.queue, args.aws_access_key_id, args.aws_secret_access_key)
expired_tiles = []
with open(args.expired_tiles_file) as f:
for line in f:
line = line.strip()
if not line:
continue
coord = parse_expired_coord_string(line)
if coord is None:
                print 'Could not parse coordinate from line: %s' % line
continue
expired_tiles.append(coord)
print 'Number of expired tiles: %d' % len(expired_tiles)
exploded_coords = explode_with_parents(expired_tiles)
print 'Number of total expired tiles with all parents: %d' % len(exploded_coords)
print 'Queuing ... '
# sort in any way?
# zoom level strategy?
# only enqueue work for zooms > 10 if in metro extract area?
# exploded_coords is a set, but enqueue_batch expects a list for slicing
exploded_coords = list(exploded_coords)
queue.enqueue_batch(list(exploded_coords))
print 'Queuing ... Done'
if __name__ == '__main__':
enqueue_process_main()
|
<commit_before><commit_msg>Add command to queue expired tiles<commit_after>from tilequeue.queues import make_sqs_queue
from tilequeue.tile import explode_with_parents
from tilequeue.tile import parse_expired_coord_string
import argparse
import os
def add_aws_cred_options(arg_parser):
arg_parser.add_argument('--aws_access_key_id')
arg_parser.add_argument('--aws_secret_access_key')
return arg_parser
def enqueue_arg_parser():
parser = argparse.ArgumentParser()
parser = add_aws_cred_options(parser)
parser.add_argument('--queue',
required=True,
help='Name of aws sqs queue, should already exist.',
)
parser.add_argument('--expired-tiles-file',
required=True,
help='Path to file containing list of expired tiles. Should be one per line, <zoom>/<column>/<row>',
)
return parser
def assert_aws_config(args):
if (args.aws_access_key_id is not None or
args.aws_secret_access_key is not None):
# assert that if either is specified, both are specified
assert (args.aws_access_key_id is not None and
args.aws_secret_access_key is not None), 'Must specify both aws key and secret'
else:
assert 'AWS_ACCESS_KEY_ID' in os.environ, 'Missing AWS_ACCESS_KEY_ID config'
assert 'AWS_SECRET_ACCESS_KEY' in os.environ, 'Missing AWS_SECRET_ACCESS_KEY config'
def enqueue_process_main():
parser = enqueue_arg_parser()
args = parser.parse_args()
assert_aws_config(args)
queue = make_sqs_queue(
args.queue, args.aws_access_key_id, args.aws_secret_access_key)
expired_tiles = []
with open(args.expired_tiles_file) as f:
for line in f:
line = line.strip()
if not line:
continue
coord = parse_expired_coord_string(line)
if coord is None:
                print 'Could not parse coordinate from line: %s' % line
continue
expired_tiles.append(coord)
print 'Number of expired tiles: %d' % len(expired_tiles)
exploded_coords = explode_with_parents(expired_tiles)
print 'Number of total expired tiles with all parents: %d' % len(exploded_coords)
print 'Queuing ... '
# sort in any way?
# zoom level strategy?
# only enqueue work for zooms > 10 if in metro extract area?
# exploded_coords is a set, but enqueue_batch expects a list for slicing
exploded_coords = list(exploded_coords)
queue.enqueue_batch(list(exploded_coords))
print 'Queuing ... Done'
if __name__ == '__main__':
enqueue_process_main()
|
|
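explode_with_parents is imported from tilequeue.tile and not shown in this record; conceptually it returns each expired tile together with every ancestor tile up to zoom 0. A rough sketch of that idea on plain (zoom, column, row) tuples, as an illustration only and not the library's actual code:
def explode_with_parents_sketch(coords):
    """Return the input tiles plus all of their parent tiles up to zoom 0."""
    all_coords = set()
    for zoom, column, row in coords:
        while True:
            all_coords.add((zoom, column, row))
            if zoom == 0:
                break
            # Each parent covers a 2x2 block of child tiles one zoom level down.
            zoom, column, row = zoom - 1, column // 2, row // 2
    return all_coords

# A single zoom-3 tile expands to itself plus its three ancestors:
print(sorted(explode_with_parents_sketch([(3, 5, 2)])))
# [(0, 0, 0), (1, 1, 0), (2, 2, 1), (3, 5, 2)]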
3e0903ba2f74d5f73241d1ffc5056f2a77c709e0
|
tests/test_exports.py
|
tests/test_exports.py
|
#!/usr/bin/env python
from django_prometheus.exports import SetupPrometheusEndpointOnPortRange
import unittest
class ExportTest(unittest.TestCase):
def testPortRange(self):
port_range = [8000, 8001]
SetupPrometheusEndpointOnPortRange(port_range)
SetupPrometheusEndpointOnPortRange(port_range)
if __name__ == '__main__':
unittest.main()
|
Add a simple test for SetupPrometheusEndpointOnPortRange
|
Add a simple test for SetupPrometheusEndpointOnPortRange
|
Python
|
apache-2.0
|
obytes/django-prometheus,korfuri/django-prometheus,obytes/django-prometheus,korfuri/django-prometheus
|
Add a simple test for SetupPrometheusEndpointOnPortRange
|
#!/usr/bin/env python
from django_prometheus.exports import SetupPrometheusEndpointOnPortRange
import unittest
class ExportTest(unittest.TestCase):
def testPortRange(self):
port_range = [8000, 8001]
SetupPrometheusEndpointOnPortRange(port_range)
SetupPrometheusEndpointOnPortRange(port_range)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add a simple test for SetupPrometheusEndpointOnPortRange<commit_after>
|
#!/usr/bin/env python
from django_prometheus.exports import SetupPrometheusEndpointOnPortRange
import unittest
class ExportTest(unittest.TestCase):
def testPortRange(self):
port_range = [8000, 8001]
SetupPrometheusEndpointOnPortRange(port_range)
SetupPrometheusEndpointOnPortRange(port_range)
if __name__ == '__main__':
unittest.main()
|
Add a simple test for SetupPrometheusEndpointOnPortRange#!/usr/bin/env python
from django_prometheus.exports import SetupPrometheusEndpointOnPortRange
import unittest
class ExportTest(unittest.TestCase):
def testPortRange(self):
port_range = [8000, 8001]
SetupPrometheusEndpointOnPortRange(port_range)
SetupPrometheusEndpointOnPortRange(port_range)
if __name__ == 'main':
unittest.main()
|
<commit_before><commit_msg>Add a simple test for SetupPrometheusEndpointOnPortRange<commit_after>#!/usr/bin/env python
from django_prometheus.exports import SetupPrometheusEndpointOnPortRange
import unittest
class ExportTest(unittest.TestCase):
def testPortRange(self):
port_range = [8000, 8001]
SetupPrometheusEndpointOnPortRange(port_range)
SetupPrometheusEndpointOnPortRange(port_range)
if __name__ == 'main':
unittest.main()
|
|
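The double call in testPortRange only passes if the second SetupPrometheusEndpointOnPortRange skips the port already claimed by the first and binds the next one in the range. A toy illustration of that fallback idea with raw sockets (not django-prometheus's implementation, and it assumes ports 8000/8001 are free on the test machine):
import socket

def bind_first_free_port(port_range):
    """Bind a socket to the first free port in port_range and return (port, socket)."""
    for port in port_range:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            sock.bind(('127.0.0.1', port))
            return port, sock
        except socket.error:
            sock.close()
    raise RuntimeError('no free port in range')

port_a, sock_a = bind_first_free_port([8000, 8001])
port_b, sock_b = bind_first_free_port([8000, 8001])
assert port_a != port_b
sock_a.close()
sock_b.close()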
7039e4f25d8eecdf2d5d2b4a4a769e05c5075222
|
bluebottle/members/migrations/0020_auto_20171031_1048.py
|
bluebottle/members/migrations/0020_auto_20171031_1048.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-10-31 09:48
from __future__ import unicode_literals
from django.db import migrations
def rename_full_member_permission(apps, schema_editor):
Permission = apps.get_model('auth', 'Permission')
perm = Permission.objects.get(codename='api_read_full_member')
perm.name = 'Can view full members through the API'
perm.save()
class Migration(migrations.Migration):
dependencies = [
('members', '0019_auto_20170824_1812'),
]
operations = [
migrations.RunPython(rename_full_member_permission)
]
|
Fix description of 'api_read_full_member' permission
|
Fix description of 'api_read_full_member' permission
BB-11023 #resolve
|
Python
|
bsd-3-clause
|
onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle
|
Fix description of 'api_read_full_member' permission
BB-11023 #resolve
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-10-31 09:48
from __future__ import unicode_literals
from django.db import migrations
def rename_full_member_permission(apps, schema_editor):
Permission = apps.get_model('auth', 'Permission')
perm = Permission.objects.get(codename='api_read_full_member')
perm.name = 'Can view full members through the API'
perm.save()
class Migration(migrations.Migration):
dependencies = [
('members', '0019_auto_20170824_1812'),
]
operations = [
migrations.RunPython(rename_full_member_permission)
]
|
<commit_before><commit_msg>Fix description of 'api_read_full_member' permission
BB-11023 #resolve<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-10-31 09:48
from __future__ import unicode_literals
from django.db import migrations
def rename_full_member_permission(apps, schema_editor):
Permission = apps.get_model('auth', 'Permission')
perm = Permission.objects.get(codename='api_read_full_member')
perm.name = 'Can view full members through the API'
perm.save()
class Migration(migrations.Migration):
dependencies = [
('members', '0019_auto_20170824_1812'),
]
operations = [
migrations.RunPython(rename_full_member_permission)
]
|
Fix description of 'api_read_full_member' permission
BB-11023 #resolve# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-10-31 09:48
from __future__ import unicode_literals
from django.db import migrations
def rename_full_member_permission(apps, schema_editor):
Permission = apps.get_model('auth', 'Permission')
perm = Permission.objects.get(codename='api_read_full_member')
perm.name = 'Can view full members through the API'
perm.save()
class Migration(migrations.Migration):
dependencies = [
('members', '0019_auto_20170824_1812'),
]
operations = [
migrations.RunPython(rename_full_member_permission)
]
|
<commit_before><commit_msg>Fix description of 'api_read_full_member' permission
BB-11023 #resolve<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-10-31 09:48
from __future__ import unicode_literals
from django.db import migrations
def rename_full_member_permission(apps, schema_editor):
Permission = apps.get_model('auth', 'Permission')
perm = Permission.objects.get(codename='api_read_full_member')
perm.name = 'Can view full members through the API'
perm.save()
class Migration(migrations.Migration):
dependencies = [
('members', '0019_auto_20170824_1812'),
]
operations = [
migrations.RunPython(rename_full_member_permission)
]
|
|
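As written, the RunPython operation above has no reverse function, so the migration cannot be unapplied. A hedged sketch of the usual Django pattern for making such a data migration reversible without guessing the previous permission wording (RunPython.noop is available from Django 1.8 onwards):
from django.db import migrations

def rename_full_member_permission(apps, schema_editor):
    Permission = apps.get_model('auth', 'Permission')
    perm = Permission.objects.get(codename='api_read_full_member')
    perm.name = 'Can view full members through the API'
    perm.save()

class Migration(migrations.Migration):
    dependencies = [('members', '0019_auto_20170824_1812')]
    operations = [
        # A no-op reverse lets `migrate members 0019` run without restoring
        # the old name, which this migration does not record.
        migrations.RunPython(rename_full_member_permission,
                             migrations.RunPython.noop),
    ]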
30be74075e761f932a10ea0806a08991b8fd9cb4
|
code/python/find-nodes-without-external-id.py
|
code/python/find-nodes-without-external-id.py
|
#!/usr/bin/env python
import httplib
import urllib
import json
import ssl
import argparse
import re
parser = argparse.ArgumentParser(description='Find any node that does not have an external ID set.')
parser.add_argument('--target-url', required=True, help='URL for the UpGuard instance. This should be the hostname only (appliance.upguard.org instead of https://appliance.upguard.org)')
parser.add_argument('--api-key', required=True, help='API key for the UpGuard instance')
parser.add_argument('--secret-key', required=True, help='Secret key for the UpGuard instance')
parser.add_argument('--insecure', action='store_true', help='Ignore SSL certificate check?')
parser.add_argument('--per-page', type=int, default=10, help='Number of nodes to retrieve in each call. (Default: 100)')
args = parser.parse_args()
# Initializations
browser = None
def getNodes(browser, method, endpoint, page=1, per_page=100):
"""
Return a JSON-parsed dictionary of nodes
"""
get_headers = {
"Authorization": "Token token=\"{}{}\"".format(args.api_key, args.secret_key),
"Accept": "application/json"}
browser.request("GET", "{}?page={}&per_page={}".format(endpoint, page, per_page), '', get_headers)
response = browser.getresponse()
if response.status >= 400:
raise httplib.HTTPException("{}: {}".format(str(response.status), str(response.reason)))
return json.loads(response.read())
try:
# Setup browser object
url = args.target_url
if 'http' in url:
# URL needs to be a hostname, so remove 'https://'
url = re.sub('https?:\/\/', '', url)
browser = httplib.HTTPConnection(url)
if args.insecure:
context = ssl._create_unverified_context()
browser = httplib.HTTPSConnection(url, context=context)
page = 1
nodes = getNodes(browser, "GET", "/api/v2/nodes.json", page=page, per_page=args.per_page)
print "Searching for nodes without an external ID..."
while nodes:
for node in nodes:
if not node['external_id']:
                print "{} (hostname: {})".format(node['name'], node.get('hostname', 'unknown'))  # 'hostname' key is an assumption; it may be absent
page += 1
nodes = getNodes(browser, "GET", "/api/v2/nodes.json", page=page, per_page=args.per_page)
except httplib.HTTPException as h:
print h.message;
finally:
if browser:
browser.close()
|
Add script to list nodes without an external ID
|
Add script to list nodes without an external ID
|
Python
|
mit
|
ScriptRock/content,ScriptRock/content,ScriptRock/content,ScriptRock/content,ScriptRock/content,ScriptRock/content
|
Add script to list nodes without an external ID
|
#!/usr/bin/env python
import httplib
import urllib
import json
import ssl
import argparse
import re
parser = argparse.ArgumentParser(description='Find any node that does not have an external ID set.')
parser.add_argument('--target-url', required=True, help='URL for the UpGuard instance. This should be the hostname only (appliance.upguard.org instead of https://appliance.upguard.org)')
parser.add_argument('--api-key', required=True, help='API key for the UpGuard instance')
parser.add_argument('--secret-key', required=True, help='Secret key for the UpGuard instance')
parser.add_argument('--insecure', action='store_true', help='Ignore SSL certificate check?')
parser.add_argument('--per-page', type=int, default=10, help='Number of nodes to retrieve in each call. (Default: 100)')
args = parser.parse_args()
# Initializations
browser = None
def getNodes(browser, method, endpoint, page=1, per_page=100):
"""
Return a JSON-parsed dictionary of nodes
"""
get_headers = {
"Authorization": "Token token=\"{}{}\"".format(args.api_key, args.secret_key),
"Accept": "application/json"}
browser.request("GET", "{}?page={}&per_page={}".format(endpoint, page, per_page), '', get_headers)
response = browser.getresponse()
if response.status >= 400:
raise httplib.HTTPException("{}: {}".format(str(response.status), str(response.reason)))
return json.loads(response.read())
try:
# Setup browser object
url = args.target_url
if 'http' in url:
# URL needs to be a hostname, so remove 'https://'
url = re.sub('https?:\/\/', '', url)
browser = httplib.HTTPConnection(url)
if args.insecure:
context = ssl._create_unverified_context()
browser = httplib.HTTPSConnection(url, context=context)
page = 1
nodes = getNodes(browser, "GET", "/api/v2/nodes.json", page=page, per_page=args.per_page)
print "Searching for nodes without an external ID..."
while nodes:
for node in nodes:
if not node['external_id']:
                print "{} (hostname: {})".format(node['name'], node.get('hostname', 'unknown'))  # 'hostname' key is an assumption; it may be absent
page += 1
nodes = getNodes(browser, "GET", "/api/v2/nodes.json", page=page, per_page=args.per_page)
except httplib.HTTPException as h:
print h.message;
finally:
if browser:
browser.close()
|
<commit_before><commit_msg>Add script to list nodes without an external ID<commit_after>
|
#!/usr/bin/env python
import httplib
import urllib
import json
import ssl
import argparse
import re
parser = argparse.ArgumentParser(description='Find any node that does not have an external ID set.')
parser.add_argument('--target-url', required=True, help='URL for the UpGuard instance. This should be the hostname only (appliance.upguard.org instead of https://appliance.upguard.org)')
parser.add_argument('--api-key', required=True, help='API key for the UpGuard instance')
parser.add_argument('--secret-key', required=True, help='Secret key for the UpGuard instance')
parser.add_argument('--insecure', action='store_true', help='Ignore SSL certificate check?')
parser.add_argument('--per-page', type=int, default=10, help='Number of nodes to retrieve in each call. (Default: 100)')
args = parser.parse_args()
# Initializations
browser = None
def getNodes(browser, method, endpoint, page=1, per_page=100):
"""
Return a JSON-parsed dictionary of nodes
"""
get_headers = {
"Authorization": "Token token=\"{}{}\"".format(args.api_key, args.secret_key),
"Accept": "application/json"}
browser.request("GET", "{}?page={}&per_page={}".format(endpoint, page, per_page), '', get_headers)
response = browser.getresponse()
if response.status >= 400:
raise httplib.HTTPException("{}: {}".format(str(response.status), str(response.reason)))
return json.loads(response.read())
try:
# Setup browser object
url = args.target_url
if 'http' in url:
# URL needs to be a hostname, so remove 'https://'
url = re.sub('https?:\/\/', '', url)
browser = httplib.HTTPConnection(url)
if args.insecure:
context = ssl._create_unverified_context()
browser = httplib.HTTPSConnection(url, context=context)
page = 1
nodes = getNodes(browser, "GET", "/api/v2/nodes.json", page=page, per_page=args.per_page)
print "Searching for nodes without an external ID..."
while nodes:
for node in nodes:
if not node['external_id']:
                print "{} (hostname: {})".format(node['name'], node.get('hostname', 'unknown'))  # 'hostname' key is an assumption; it may be absent
page += 1
nodes = getNodes(browser, "GET", "/api/v2/nodes.json", page=page, per_page=args.per_page)
except httplib.HTTPException as h:
print h.message;
finally:
if browser:
browser.close()
|
Add script to list nodes without an external ID#!/usr/bin/env python
import httplib
import urllib
import json
import ssl
import argparse
import re
parser = argparse.ArgumentParser(description='Find any node that does not have an external ID set.')
parser.add_argument('--target-url', required=True, help='URL for the UpGuard instance. This should be the hostname only (appliance.upguard.org instead of https://appliance.upguard.org)')
parser.add_argument('--api-key', required=True, help='API key for the UpGuard instance')
parser.add_argument('--secret-key', required=True, help='Secret key for the UpGuard instance')
parser.add_argument('--insecure', action='store_true', help='Ignore SSL certificate check?')
parser.add_argument('--per-page', type=int, default=10, help='Number of nodes to retrieve in each call. (Default: 100)')
args = parser.parse_args()
# Initializations
browser = None
def getNodes(browser, method, endpoint, page=1, per_page=100):
"""
Return a JSON-parsed dictionary of nodes
"""
get_headers = {
"Authorization": "Token token=\"{}{}\"".format(args.api_key, args.secret_key),
"Accept": "application/json"}
browser.request("GET", "{}?page={}&per_page={}".format(endpoint, page, per_page), '', get_headers)
response = browser.getresponse()
if response.status >= 400:
raise httplib.HTTPException("{}: {}".format(str(response.status), str(response.reason)))
return json.loads(response.read())
try:
# Setup browser object
url = args.target_url
if 'http' in url:
# URL needs to be a hostname, so remove 'https://'
url = re.sub('https?:\/\/', '', url)
browser = httplib.HTTPConnection(url)
if args.insecure:
context = ssl._create_unverified_context()
browser = httplib.HTTPSConnection(url, context=context)
page = 1
nodes = getNodes(browser, "GET", "/api/v2/nodes.json", page=page, per_page=args.per_page)
print "Searching for nodes without an external ID..."
while nodes:
for node in nodes:
if not node['external_id']:
                print "{} (hostname: {})".format(node['name'], node.get('hostname', 'unknown'))  # 'hostname' key is an assumption; it may be absent
page += 1
nodes = getNodes(browser, "GET", "/api/v2/nodes.json", page=page, per_page=args.per_page)
except httplib.HTTPException as h:
print h.message;
finally:
if browser:
browser.close()
|
<commit_before><commit_msg>Add script to list nodes without an external ID<commit_after>#!/usr/bin/env python
import httplib
import urllib
import json
import ssl
import argparse
import re
parser = argparse.ArgumentParser(description='Find any node that does not have an external ID set.')
parser.add_argument('--target-url', required=True, help='URL for the UpGuard instance. This should be the hostname only (appliance.upguard.org instead of https://appliance.upguard.org)')
parser.add_argument('--api-key', required=True, help='API key for the UpGuard instance')
parser.add_argument('--secret-key', required=True, help='Secret key for the UpGuard instance')
parser.add_argument('--insecure', action='store_true', help='Ignore SSL certificate check?')
parser.add_argument('--per-page', type=int, default=10, help='Number of nodes to retrieve in each call. (Default: 100)')
args = parser.parse_args()
# Initializations
browser = None
def getNodes(browser, method, endpoint, page=1, per_page=100):
"""
Return a JSON-parsed dictionary of nodes
"""
get_headers = {
"Authorization": "Token token=\"{}{}\"".format(args.api_key, args.secret_key),
"Accept": "application/json"}
browser.request("GET", "{}?page={}&per_page={}".format(endpoint, page, per_page), '', get_headers)
response = browser.getresponse()
if response.status >= 400:
raise httplib.HTTPException("{}: {}".format(str(response.status), str(response.reason)))
return json.loads(response.read())
try:
# Setup browser object
url = args.target_url
if 'http' in url:
# URL needs to be a hostname, so remove 'https://'
url = re.sub('https?:\/\/', '', url)
browser = httplib.HTTPConnection(url)
if args.insecure:
context = ssl._create_unverified_context()
browser = httplib.HTTPSConnection(url, context=context)
page = 1
nodes = getNodes(browser, "GET", "/api/v2/nodes.json", page=page, per_page=args.per_page)
print "Searching for nodes without an external ID..."
while nodes:
for node in nodes:
if not node['external_id']:
                print "{} (hostname: {})".format(node['name'], node.get('hostname', 'unknown'))  # 'hostname' key is an assumption; it may be absent
page += 1
nodes = getNodes(browser, "GET", "/api/v2/nodes.json", page=page, per_page=args.per_page)
except httplib.HTTPException as h:
print h.message;
finally:
if browser:
browser.close()
|
|
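The paging loop above can also be written with the requests library; a sketch assuming the same /api/v2/nodes.json endpoint, query parameters and Authorization header scheme as the script (not a drop-in replacement, and the field names are carried over as assumptions):
import requests

def iter_nodes(base_url, api_key, secret_key, per_page=100, verify=True):
    """Yield node dicts page by page from the nodes endpoint."""
    headers = {
        'Authorization': 'Token token="{}{}"'.format(api_key, secret_key),
        'Accept': 'application/json',
    }
    page = 1
    while True:
        resp = requests.get(
            '{}/api/v2/nodes.json'.format(base_url.rstrip('/')),
            headers=headers,
            params={'page': page, 'per_page': per_page},
            verify=verify,
        )
        resp.raise_for_status()
        nodes = resp.json()
        if not nodes:
            return
        for node in nodes:
            yield node
        page += 1

# Example use (keys omitted here on purpose):
# for node in iter_nodes('https://appliance.upguard.org', API_KEY, SECRET_KEY):
#     if not node.get('external_id'):
#         print(node['name'])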
14aba0695514866439164f48fe1f66390719431f
|
scripts/select_gamma.py
|
scripts/select_gamma.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 18 10:13:48 2013
@author: amnon
### 80 char max please
Look at all the gammaproteobacteria and select candidate contamination sequence
OTUs
output: a list of sorted gammaproteobacteria (or other) otuids, according to
mean frequency
"""
import sys
import argparse
import numpy as np
# to load a BIOM table
from biom.parse import parse_biom_table
from biom.util import biom_open
def TestAll(biomfile, outputfile, taxonomyclass, taxonomyname,level):
"""doc string here, a one liner
...and then more detail
"""
odat=[]
t = parse_biom_table(biom_open(biomfile,'U'))
t2 = t.normObservationBySample()
# to iterate over the table by observation, doing something based on the
# taxonomy:
class_idx = taxonomyclass
for values, ids, metadata in t2.iterObservations():
tname=metadata['taxonomy'][class_idx].lstrip()
if tname == taxonomyname:
mv = np.mean(values)
odat.append((ids,mv))
# odat.sort(key=lambda tup: tup[1], reverse=True)
odat.sort(key=lambda tup: tup[1])
csum=[(odat[0][0],odat[0][1],odat[0][1])]
for cval in odat[1:]:
csum.append((cval[0],cval[1],csum[-1][2]+cval[1]))
# no get it from big to small
csum.reverse()
# and write everything above the threshold (to filter)
snames=open(outputfile,'w')
for cval in csum:
if cval[2]>=level:
snames.write(cval[0]+"\t"+str(cval[1])+"\t"+str(cval[2])+'\n')
snames.close()
def main(argv):
parser=argparse.ArgumentParser(description='Select Gammaproteobacteria (or other group) contamination candidates')
parser.add_argument('-i','--biom',help='biom file of the experiment')
parser.add_argument('-o','--output',help='output file name')
parser.add_argument('-c','--classpos',help='class of taxonomy name (0-kingdom,1-phylum etc.',default=2)
parser.add_argument('-t','--taxonomy',help='taxonomy name (including c__ or equivalent)',default='c__Gammaproteobacteria')
parser.add_argument('-l','--level',help='minimal cumulative level for OTUs to filter (use 0 to get all of them)',default='0.03')
args=parser.parse_args(argv)
TestAll(args.biom,args.output,int(args.classpos),args.taxonomy,float(args.level))
if __name__ == "__main__":
main(sys.argv[1:])
|
Add selcet_gamma.py (authored by Amnon)
|
Add selcet_gamma.py (authored by Amnon)
Used in the filtering notebook.
|
Python
|
bsd-3-clause
|
EmbrietteH/American-Gut,wasade/American-Gut,JWDebelius/American-Gut,mortonjt/American-Gut,wasade/American-Gut,biocore/American-Gut,EmbrietteH/American-Gut,biocore/American-Gut,JWDebelius/American-Gut
|
Add selcet_gamma.py (authored by Amnon)
Used in the filtering notebook.
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 18 10:13:48 2013
@author: amnon
### 80 char max please
Look at all the gammaproteobacteria and select candidate contamination sequence
OTUs
output: a list of sorted gammaproteobacteria (or other) otuids, according to
mean frequency
"""
import sys
import argparse
import numpy as np
# to load a BIOM table
from biom.parse import parse_biom_table
from biom.util import biom_open
def TestAll(biomfile, outputfile, taxonomyclass, taxonomyname,level):
"""doc string here, a one liner
...and then more detail
"""
odat=[]
t = parse_biom_table(biom_open(biomfile,'U'))
t2 = t.normObservationBySample()
# to iterate over the table by observation, doing something based on the
# taxonomy:
class_idx = taxonomyclass
for values, ids, metadata in t2.iterObservations():
tname=metadata['taxonomy'][class_idx].lstrip()
if tname == taxonomyname:
mv = np.mean(values)
odat.append((ids,mv))
# odat.sort(key=lambda tup: tup[1], reverse=True)
odat.sort(key=lambda tup: tup[1])
csum=[(odat[0][0],odat[0][1],odat[0][1])]
for cval in odat[1:]:
csum.append((cval[0],cval[1],csum[-1][2]+cval[1]))
# no get it from big to small
csum.reverse()
# and write everything above the threshold (to filter)
snames=open(outputfile,'w')
for cval in csum:
if cval[2]>=level:
snames.write(cval[0]+"\t"+str(cval[1])+"\t"+str(cval[2])+'\n')
snames.close()
def main(argv):
parser=argparse.ArgumentParser(description='Select Gammaproteobacteria (or other group) contamination candidates')
parser.add_argument('-i','--biom',help='biom file of the experiment')
parser.add_argument('-o','--output',help='output file name')
parser.add_argument('-c','--classpos',help='class of taxonomy name (0-kingdom,1-phylum etc.',default=2)
parser.add_argument('-t','--taxonomy',help='taxonomy name (including c__ or equivalent)',default='c__Gammaproteobacteria')
parser.add_argument('-l','--level',help='minimal cumulative level for OTUs to filter (use 0 to get all of them)',default='0.03')
args=parser.parse_args(argv)
TestAll(args.biom,args.output,int(args.classpos),args.taxonomy,float(args.level))
if __name__ == "__main__":
main(sys.argv[1:])
|
<commit_before><commit_msg>Add selcet_gamma.py (authored by Amnon)
Used in the filtering notebook.<commit_after>
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 18 10:13:48 2013
@author: amnon
### 80 char max please
Look at all the gammaproteobacteria and select candidate contamination sequence
OTUs
output: a list of sorted gammaproteobacteria (or other) otuids, according to
mean frequency
"""
import sys
import argparse
import numpy as np
# to load a BIOM table
from biom.parse import parse_biom_table
from biom.util import biom_open
def TestAll(biomfile, outputfile, taxonomyclass, taxonomyname,level):
"""doc string here, a one liner
...and then more detail
"""
odat=[]
t = parse_biom_table(biom_open(biomfile,'U'))
t2 = t.normObservationBySample()
# to iterate over the table by observation, doing something based on the
# taxonomy:
class_idx = taxonomyclass
for values, ids, metadata in t2.iterObservations():
tname=metadata['taxonomy'][class_idx].lstrip()
if tname == taxonomyname:
mv = np.mean(values)
odat.append((ids,mv))
# odat.sort(key=lambda tup: tup[1], reverse=True)
odat.sort(key=lambda tup: tup[1])
csum=[(odat[0][0],odat[0][1],odat[0][1])]
for cval in odat[1:]:
csum.append((cval[0],cval[1],csum[-1][2]+cval[1]))
# no get it from big to small
csum.reverse()
# and write everything above the threshold (to filter)
snames=open(outputfile,'w')
for cval in csum:
if cval[2]>=level:
snames.write(cval[0]+"\t"+str(cval[1])+"\t"+str(cval[2])+'\n')
snames.close()
def main(argv):
parser=argparse.ArgumentParser(description='Select Gammaproteobacteria (or other group) contamination candidates')
parser.add_argument('-i','--biom',help='biom file of the experiment')
parser.add_argument('-o','--output',help='output file name')
parser.add_argument('-c','--classpos',help='class of taxonomy name (0-kingdom,1-phylum etc.',default=2)
parser.add_argument('-t','--taxonomy',help='taxonomy name (including c__ or equivalent)',default='c__Gammaproteobacteria')
parser.add_argument('-l','--level',help='minimal cumulative level for OTUs to filter (use 0 to get all of them)',default='0.03')
args=parser.parse_args(argv)
TestAll(args.biom,args.output,int(args.classpos),args.taxonomy,float(args.level))
if __name__ == "__main__":
main(sys.argv[1:])
|
Add selcet_gamma.py (authored by Amnon)
Used in the filtering notebook.#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 18 10:13:48 2013
@author: amnon
### 80 char max please
Look at all the gammaproteobacteria and select candidate contamination sequence
OTUs
output: a list of sorted gammaproteobacteria (or other) otuids, according to
mean frequency
"""
import sys
import argparse
import numpy as np
# to load a BIOM table
from biom.parse import parse_biom_table
from biom.util import biom_open
def TestAll(biomfile, outputfile, taxonomyclass, taxonomyname,level):
"""doc string here, a one liner
...and then more detail
"""
odat=[]
t = parse_biom_table(biom_open(biomfile,'U'))
t2 = t.normObservationBySample()
# to iterate over the table by observation, doing something based on the
# taxonomy:
class_idx = taxonomyclass
for values, ids, metadata in t2.iterObservations():
tname=metadata['taxonomy'][class_idx].lstrip()
if tname == taxonomyname:
mv = np.mean(values)
odat.append((ids,mv))
# odat.sort(key=lambda tup: tup[1], reverse=True)
odat.sort(key=lambda tup: tup[1])
csum=[(odat[0][0],odat[0][1],odat[0][1])]
for cval in odat[1:]:
csum.append((cval[0],cval[1],csum[-1][2]+cval[1]))
# no get it from big to small
csum.reverse()
# and write everything above the threshold (to filter)
snames=open(outputfile,'w')
for cval in csum:
if cval[2]>=level:
snames.write(cval[0]+"\t"+str(cval[1])+"\t"+str(cval[2])+'\n')
snames.close()
def main(argv):
parser=argparse.ArgumentParser(description='Select Gammaproteobacteria (or other group) contamination candidates')
parser.add_argument('-i','--biom',help='biom file of the experiment')
parser.add_argument('-o','--output',help='output file name')
parser.add_argument('-c','--classpos',help='class of taxonomy name (0-kingdom,1-phylum etc.',default=2)
parser.add_argument('-t','--taxonomy',help='taxonomy name (including c__ or equivalent)',default='c__Gammaproteobacteria')
parser.add_argument('-l','--level',help='minimal cumulative level for OTUs to filter (use 0 to get all of them)',default='0.03')
args=parser.parse_args(argv)
TestAll(args.biom,args.output,int(args.classpos),args.taxonomy,float(args.level))
if __name__ == "__main__":
main(sys.argv[1:])
|
<commit_before><commit_msg>Add selcet_gamma.py (authored by Amnon)
Used in the filtering notebook.<commit_after>#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 18 10:13:48 2013
@author: amnon
### 80 char max please
Look at all the gammaproteobacteria and select candidate contamination sequence
OTUs
output: a list of sorted gammaproteobacteria (or other) otuids, according to
mean frequency
"""
import sys
import argparse
import numpy as np
# to load a BIOM table
from biom.parse import parse_biom_table
from biom.util import biom_open
def TestAll(biomfile, outputfile, taxonomyclass, taxonomyname,level):
"""doc string here, a one liner
...and then more detail
"""
odat=[]
t = parse_biom_table(biom_open(biomfile,'U'))
t2 = t.normObservationBySample()
# to iterate over the table by observation, doing something based on the
# taxonomy:
class_idx = taxonomyclass
for values, ids, metadata in t2.iterObservations():
tname=metadata['taxonomy'][class_idx].lstrip()
if tname == taxonomyname:
mv = np.mean(values)
odat.append((ids,mv))
# odat.sort(key=lambda tup: tup[1], reverse=True)
odat.sort(key=lambda tup: tup[1])
csum=[(odat[0][0],odat[0][1],odat[0][1])]
for cval in odat[1:]:
csum.append((cval[0],cval[1],csum[-1][2]+cval[1]))
# no get it from big to small
csum.reverse()
# and write everything above the threshold (to filter)
snames=open(outputfile,'w')
for cval in csum:
if cval[2]>=level:
snames.write(cval[0]+"\t"+str(cval[1])+"\t"+str(cval[2])+'\n')
snames.close()
def main(argv):
parser=argparse.ArgumentParser(description='Select Gammaproteobacteria (or other group) contamination candidates')
parser.add_argument('-i','--biom',help='biom file of the experiment')
parser.add_argument('-o','--output',help='output file name')
parser.add_argument('-c','--classpos',help='class of taxonomy name (0-kingdom,1-phylum etc.',default=2)
parser.add_argument('-t','--taxonomy',help='taxonomy name (including c__ or equivalent)',default='c__Gammaproteobacteria')
parser.add_argument('-l','--level',help='minimal cumulative level for OTUs to filter (use 0 to get all of them)',default='0.03')
args=parser.parse_args(argv)
TestAll(args.biom,args.output,int(args.classpos),args.taxonomy,float(args.level))
if __name__ == "__main__":
main(sys.argv[1:])
|
|
6119f7998d918d3b38f129b7afd720f9a35e35c1
|
audio-metadata.py
|
audio-metadata.py
|
#! /usr/bin/env python
import os
import sys
import re
import tempfile
def getVideoDetails(filepath):
tmpf = tempfile.NamedTemporaryFile()
os.system("ffmpeg -i \"%s\" 2> %s" % (filepath, tmpf.name))
lines = tmpf.readlines()
tmpf.close()
metadata = {}
for l in lines:
l = l.strip()
if l.startswith('Duration'):
metadata['duration'] = re.search('Duration: (.*?),', l).group(0).split(':',1)[1].strip(' ,')
metadata['bitrate'] = re.search("bitrate: (\d+ kb/s)", l).group(0).split(':')[1].strip()
if l.startswith('Stream #0:0'):
metadata['video'] = {}
metadata['video']['codec'], metadata['video']['profile'] = \
[e.strip(' ,()') for e in re.search('Video: (.*? \(.*?\)),? ', l).group(0).split(':')[1].split('(')]
metadata['video']['resolution'] = re.search('([1-9]\d+x\d+)', l).group(1)
metadata['video']['bitrate'] = re.search('(\d+ kb/s)', l).group(1)
metadata['video']['fps'] = re.search('(\d+ fps)', l).group(1)
if l.startswith('Stream #0:1'):
metadata['audio'] = {}
metadata['audio']['codec'] = re.search('Audio: (.*?) ', l).group(1)
metadata['audio']['frequency'] = re.search(', (.*? Hz),', l).group(1)
metadata['audio']['bitrate'] = re.search(', (\d+ kb/s)', l).group(1)
return metadata
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: ./getVideoDetails.py <filepath(absolute or relative)>")
sys.exit("Syntax Error")
print( getVideoDetails(sys.argv[1]) )
|
Add script for fetching metadata from audio file
|
Add script for fetching metadata from audio file
|
Python
|
mit
|
voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts
|
Add script for fetching metadata from audio file
|
#! /usr/bin/env python
import os
import sys
import re
import tempfile
def getVideoDetails(filepath):
tmpf = tempfile.NamedTemporaryFile()
os.system("ffmpeg -i \"%s\" 2> %s" % (filepath, tmpf.name))
lines = tmpf.readlines()
tmpf.close()
metadata = {}
for l in lines:
l = l.strip()
if l.startswith('Duration'):
metadata['duration'] = re.search('Duration: (.*?),', l).group(0).split(':',1)[1].strip(' ,')
metadata['bitrate'] = re.search("bitrate: (\d+ kb/s)", l).group(0).split(':')[1].strip()
if l.startswith('Stream #0:0'):
metadata['video'] = {}
metadata['video']['codec'], metadata['video']['profile'] = \
[e.strip(' ,()') for e in re.search('Video: (.*? \(.*?\)),? ', l).group(0).split(':')[1].split('(')]
metadata['video']['resolution'] = re.search('([1-9]\d+x\d+)', l).group(1)
metadata['video']['bitrate'] = re.search('(\d+ kb/s)', l).group(1)
metadata['video']['fps'] = re.search('(\d+ fps)', l).group(1)
if l.startswith('Stream #0:1'):
metadata['audio'] = {}
metadata['audio']['codec'] = re.search('Audio: (.*?) ', l).group(1)
metadata['audio']['frequency'] = re.search(', (.*? Hz),', l).group(1)
metadata['audio']['bitrate'] = re.search(', (\d+ kb/s)', l).group(1)
return metadata
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: ./getVideoDetails.py <filepath(absolute or relative)>")
sys.exit("Syntax Error")
print( getVideoDetails(sys.argv[1]) )
|
<commit_before><commit_msg>Add script for fetching metadata from audio file<commit_after>
|
#! /usr/bin/env python
import os
import sys
import re
import tempfile
def getVideoDetails(filepath):
tmpf = tempfile.NamedTemporaryFile()
os.system("ffmpeg -i \"%s\" 2> %s" % (filepath, tmpf.name))
lines = tmpf.readlines()
tmpf.close()
metadata = {}
for l in lines:
l = l.strip()
if l.startswith('Duration'):
metadata['duration'] = re.search('Duration: (.*?),', l).group(0).split(':',1)[1].strip(' ,')
metadata['bitrate'] = re.search("bitrate: (\d+ kb/s)", l).group(0).split(':')[1].strip()
if l.startswith('Stream #0:0'):
metadata['video'] = {}
metadata['video']['codec'], metadata['video']['profile'] = \
[e.strip(' ,()') for e in re.search('Video: (.*? \(.*?\)),? ', l).group(0).split(':')[1].split('(')]
metadata['video']['resolution'] = re.search('([1-9]\d+x\d+)', l).group(1)
metadata['video']['bitrate'] = re.search('(\d+ kb/s)', l).group(1)
metadata['video']['fps'] = re.search('(\d+ fps)', l).group(1)
if l.startswith('Stream #0:1'):
metadata['audio'] = {}
metadata['audio']['codec'] = re.search('Audio: (.*?) ', l).group(1)
metadata['audio']['frequency'] = re.search(', (.*? Hz),', l).group(1)
metadata['audio']['bitrate'] = re.search(', (\d+ kb/s)', l).group(1)
return metadata
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: ./getVideoDetails.py <filepath(absolute or relative)>")
sys.exit("Syntax Error")
print( getVideoDetails(sys.argv[1]) )
|
Add script for fetching metadata from audio file#! /usr/bin/env python
import os
import sys
import re
import tempfile
def getVideoDetails(filepath):
tmpf = tempfile.NamedTemporaryFile()
os.system("ffmpeg -i \"%s\" 2> %s" % (filepath, tmpf.name))
lines = tmpf.readlines()
tmpf.close()
metadata = {}
for l in lines:
l = l.strip()
if l.startswith('Duration'):
metadata['duration'] = re.search('Duration: (.*?),', l).group(0).split(':',1)[1].strip(' ,')
metadata['bitrate'] = re.search("bitrate: (\d+ kb/s)", l).group(0).split(':')[1].strip()
if l.startswith('Stream #0:0'):
metadata['video'] = {}
metadata['video']['codec'], metadata['video']['profile'] = \
[e.strip(' ,()') for e in re.search('Video: (.*? \(.*?\)),? ', l).group(0).split(':')[1].split('(')]
metadata['video']['resolution'] = re.search('([1-9]\d+x\d+)', l).group(1)
metadata['video']['bitrate'] = re.search('(\d+ kb/s)', l).group(1)
metadata['video']['fps'] = re.search('(\d+ fps)', l).group(1)
if l.startswith('Stream #0:1'):
metadata['audio'] = {}
metadata['audio']['codec'] = re.search('Audio: (.*?) ', l).group(1)
metadata['audio']['frequency'] = re.search(', (.*? Hz),', l).group(1)
metadata['audio']['bitrate'] = re.search(', (\d+ kb/s)', l).group(1)
return metadata
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: ./getVideoDetails.py <filepath(absolute or relative)>")
sys.exit("Syntax Error")
print( getVideoDetails(sys.argv[1]) )
|
<commit_before><commit_msg>Add script for fetching metadata from audio file<commit_after>#! /usr/bin/env python
import os
import sys
import re
import tempfile
def getVideoDetails(filepath):
tmpf = tempfile.NamedTemporaryFile()
os.system("ffmpeg -i \"%s\" 2> %s" % (filepath, tmpf.name))
lines = tmpf.readlines()
tmpf.close()
metadata = {}
for l in lines:
l = l.strip()
if l.startswith('Duration'):
metadata['duration'] = re.search('Duration: (.*?),', l).group(0).split(':',1)[1].strip(' ,')
metadata['bitrate'] = re.search("bitrate: (\d+ kb/s)", l).group(0).split(':')[1].strip()
if l.startswith('Stream #0:0'):
metadata['video'] = {}
metadata['video']['codec'], metadata['video']['profile'] = \
[e.strip(' ,()') for e in re.search('Video: (.*? \(.*?\)),? ', l).group(0).split(':')[1].split('(')]
metadata['video']['resolution'] = re.search('([1-9]\d+x\d+)', l).group(1)
metadata['video']['bitrate'] = re.search('(\d+ kb/s)', l).group(1)
metadata['video']['fps'] = re.search('(\d+ fps)', l).group(1)
if l.startswith('Stream #0:1'):
metadata['audio'] = {}
metadata['audio']['codec'] = re.search('Audio: (.*?) ', l).group(1)
metadata['audio']['frequency'] = re.search(', (.*? Hz),', l).group(1)
metadata['audio']['bitrate'] = re.search(', (\d+ kb/s)', l).group(1)
return metadata
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: ./getVideoDetails.py <filepath(absolute or relative)>")
sys.exit("Syntax Error")
print( getVideoDetails(sys.argv[1]) )
|
|
98398398f590c3a98733193fc0ea45a1948edd0e
|
examples/recurrent-text.py
|
examples/recurrent-text.py
|
#!/usr/bin/env python
import climate
import matplotlib.pyplot as plt
import numpy as np
import theanets
import utils
climate.enable_default_logging()
COLORS = ['#d62728', '#1f77b4', '#2ca02c', '#9467bd', '#ff7f0e',
'#e377c2', '#8c564b', '#bcbd22', '#7f7f7f', '#17becf']
URL = 'http://www.gutenberg.org/cache/epub/2701/pg2701.txt'
with open(utils.find('moby.txt', URL)) as handle:
text = theanets.recurrent.Text(handle.read().lower().replace('\n', ' '))
seed = text.encode(text.text[200000:200010])
for i, layer in enumerate((
dict(form='rnn', activation='sigmoid'),
dict(form='gru', activation='sigmoid'),
dict(form='scrn', activation='linear'),
dict(form='lstm'),
dict(form='mrnn', activation='sigmoid', factors=len(text.alpha)),
dict(form='clockwork', activation='linear', periods=(1, 2, 4, 8, 16)))):
losses = []
layer.update(size=100)
net = theanets.recurrent.Classifier([
1 + len(text.alpha), layer, 1000, 1 + len(text.alpha)])
for tm, _ in net.itertrain(text.classifier_batches(30, 16),
min_improvement=0.99,
validate_every=50,
patience=0,
algo='rmsprop',
learning_rate=0.0001):
if np.isnan(tm['loss']):
break
print('{}|{} ({:.1f}%)'.format(
text.decode(seed),
text.decode(net.predict_sequence(seed, 30)),
100 * tm['acc']))
losses.append(tm['loss'])
plt.plot(losses, label=layer['form'], alpha=0.7, color=COLORS[i])
plt.gca().xaxis.tick_bottom()
plt.gca().yaxis.tick_left()
plt.gca().spines['top'].set_color('none')
plt.gca().spines['right'].set_color('none')
plt.gca().spines['bottom'].set_position(('outward', 6))
plt.gca().spines['left'].set_position(('outward', 6))
plt.gca().set_ylabel('Loss')
plt.gca().set_xlabel('Training Epoch')
plt.gca().grid(True)
plt.legend()
plt.show()
|
Add example to compare layers in a char-rnn task.
|
Add example to compare layers in a char-rnn task.
|
Python
|
mit
|
lmjohns3/theanets,chrinide/theanets
|
Add example to compare layers in a char-rnn task.
|
#!/usr/bin/env python
import climate
import matplotlib.pyplot as plt
import numpy as np
import theanets
import utils
climate.enable_default_logging()
COLORS = ['#d62728', '#1f77b4', '#2ca02c', '#9467bd', '#ff7f0e',
'#e377c2', '#8c564b', '#bcbd22', '#7f7f7f', '#17becf']
URL = 'http://www.gutenberg.org/cache/epub/2701/pg2701.txt'
with open(utils.find('moby.txt', URL)) as handle:
text = theanets.recurrent.Text(handle.read().lower().replace('\n', ' '))
seed = text.encode(text.text[200000:200010])
for i, layer in enumerate((
dict(form='rnn', activation='sigmoid'),
dict(form='gru', activation='sigmoid'),
dict(form='scrn', activation='linear'),
dict(form='lstm'),
dict(form='mrnn', activation='sigmoid', factors=len(text.alpha)),
dict(form='clockwork', activation='linear', periods=(1, 2, 4, 8, 16)))):
losses = []
layer.update(size=100)
net = theanets.recurrent.Classifier([
1 + len(text.alpha), layer, 1000, 1 + len(text.alpha)])
for tm, _ in net.itertrain(text.classifier_batches(30, 16),
min_improvement=0.99,
validate_every=50,
patience=0,
algo='rmsprop',
learning_rate=0.0001):
if np.isnan(tm['loss']):
break
print('{}|{} ({:.1f}%)'.format(
text.decode(seed),
text.decode(net.predict_sequence(seed, 30)),
100 * tm['acc']))
losses.append(tm['loss'])
plt.plot(losses, label=layer['form'], alpha=0.7, color=COLORS[i])
plt.gca().xaxis.tick_bottom()
plt.gca().yaxis.tick_left()
plt.gca().spines['top'].set_color('none')
plt.gca().spines['right'].set_color('none')
plt.gca().spines['bottom'].set_position(('outward', 6))
plt.gca().spines['left'].set_position(('outward', 6))
plt.gca().set_ylabel('Loss')
plt.gca().set_xlabel('Training Epoch')
plt.gca().grid(True)
plt.legend()
plt.show()
|
<commit_before><commit_msg>Add example to compare layers in a char-rnn task.<commit_after>
|
#!/usr/bin/env python
import climate
import matplotlib.pyplot as plt
import numpy as np
import theanets
import utils
climate.enable_default_logging()
COLORS = ['#d62728', '#1f77b4', '#2ca02c', '#9467bd', '#ff7f0e',
'#e377c2', '#8c564b', '#bcbd22', '#7f7f7f', '#17becf']
URL = 'http://www.gutenberg.org/cache/epub/2701/pg2701.txt'
with open(utils.find('moby.txt', URL)) as handle:
text = theanets.recurrent.Text(handle.read().lower().replace('\n', ' '))
seed = text.encode(text.text[200000:200010])
for i, layer in enumerate((
dict(form='rnn', activation='sigmoid'),
dict(form='gru', activation='sigmoid'),
dict(form='scrn', activation='linear'),
dict(form='lstm'),
dict(form='mrnn', activation='sigmoid', factors=len(text.alpha)),
dict(form='clockwork', activation='linear', periods=(1, 2, 4, 8, 16)))):
losses = []
layer.update(size=100)
net = theanets.recurrent.Classifier([
1 + len(text.alpha), layer, 1000, 1 + len(text.alpha)])
for tm, _ in net.itertrain(text.classifier_batches(30, 16),
min_improvement=0.99,
validate_every=50,
patience=0,
algo='rmsprop',
learning_rate=0.0001):
if np.isnan(tm['loss']):
break
print('{}|{} ({:.1f}%)'.format(
text.decode(seed),
text.decode(net.predict_sequence(seed, 30)),
100 * tm['acc']))
losses.append(tm['loss'])
plt.plot(losses, label=layer['form'], alpha=0.7, color=COLORS[i])
plt.gca().xaxis.tick_bottom()
plt.gca().yaxis.tick_left()
plt.gca().spines['top'].set_color('none')
plt.gca().spines['right'].set_color('none')
plt.gca().spines['bottom'].set_position(('outward', 6))
plt.gca().spines['left'].set_position(('outward', 6))
plt.gca().set_ylabel('Loss')
plt.gca().set_xlabel('Training Epoch')
plt.gca().grid(True)
plt.legend()
plt.show()
|
Add example to compare layers in a char-rnn task.#!/usr/bin/env python
import climate
import matplotlib.pyplot as plt
import numpy as np
import theanets
import utils
climate.enable_default_logging()
COLORS = ['#d62728', '#1f77b4', '#2ca02c', '#9467bd', '#ff7f0e',
'#e377c2', '#8c564b', '#bcbd22', '#7f7f7f', '#17becf']
URL = 'http://www.gutenberg.org/cache/epub/2701/pg2701.txt'
with open(utils.find('moby.txt', URL)) as handle:
text = theanets.recurrent.Text(handle.read().lower().replace('\n', ' '))
seed = text.encode(text.text[200000:200010])
for i, layer in enumerate((
dict(form='rnn', activation='sigmoid'),
dict(form='gru', activation='sigmoid'),
dict(form='scrn', activation='linear'),
dict(form='lstm'),
dict(form='mrnn', activation='sigmoid', factors=len(text.alpha)),
dict(form='clockwork', activation='linear', periods=(1, 2, 4, 8, 16)))):
losses = []
layer.update(size=100)
net = theanets.recurrent.Classifier([
1 + len(text.alpha), layer, 1000, 1 + len(text.alpha)])
for tm, _ in net.itertrain(text.classifier_batches(30, 16),
min_improvement=0.99,
validate_every=50,
patience=0,
algo='rmsprop',
learning_rate=0.0001):
if np.isnan(tm['loss']):
break
print('{}|{} ({:.1f}%)'.format(
text.decode(seed),
text.decode(net.predict_sequence(seed, 30)),
100 * tm['acc']))
losses.append(tm['loss'])
plt.plot(losses, label=layer['form'], alpha=0.7, color=COLORS[i])
plt.gca().xaxis.tick_bottom()
plt.gca().yaxis.tick_left()
plt.gca().spines['top'].set_color('none')
plt.gca().spines['right'].set_color('none')
plt.gca().spines['bottom'].set_position(('outward', 6))
plt.gca().spines['left'].set_position(('outward', 6))
plt.gca().set_ylabel('Loss')
plt.gca().set_xlabel('Training Epoch')
plt.gca().grid(True)
plt.legend()
plt.show()
|
<commit_before><commit_msg>Add example to compare layers in a char-rnn task.<commit_after>#!/usr/bin/env python
import climate
import matplotlib.pyplot as plt
import numpy as np
import theanets
import utils
climate.enable_default_logging()
COLORS = ['#d62728', '#1f77b4', '#2ca02c', '#9467bd', '#ff7f0e',
'#e377c2', '#8c564b', '#bcbd22', '#7f7f7f', '#17becf']
URL = 'http://www.gutenberg.org/cache/epub/2701/pg2701.txt'
with open(utils.find('moby.txt', URL)) as handle:
text = theanets.recurrent.Text(handle.read().lower().replace('\n', ' '))
seed = text.encode(text.text[200000:200010])
for i, layer in enumerate((
dict(form='rnn', activation='sigmoid'),
dict(form='gru', activation='sigmoid'),
dict(form='scrn', activation='linear'),
dict(form='lstm'),
dict(form='mrnn', activation='sigmoid', factors=len(text.alpha)),
dict(form='clockwork', activation='linear', periods=(1, 2, 4, 8, 16)))):
losses = []
layer.update(size=100)
net = theanets.recurrent.Classifier([
1 + len(text.alpha), layer, 1000, 1 + len(text.alpha)])
for tm, _ in net.itertrain(text.classifier_batches(30, 16),
min_improvement=0.99,
validate_every=50,
patience=0,
algo='rmsprop',
learning_rate=0.0001):
if np.isnan(tm['loss']):
break
print('{}|{} ({:.1f}%)'.format(
text.decode(seed),
text.decode(net.predict_sequence(seed, 30)),
100 * tm['acc']))
losses.append(tm['loss'])
plt.plot(losses, label=layer['form'], alpha=0.7, color=COLORS[i])
plt.gca().xaxis.tick_bottom()
plt.gca().yaxis.tick_left()
plt.gca().spines['top'].set_color('none')
plt.gca().spines['right'].set_color('none')
plt.gca().spines['bottom'].set_position(('outward', 6))
plt.gca().spines['left'].set_position(('outward', 6))
plt.gca().set_ylabel('Loss')
plt.gca().set_xlabel('Training Epoch')
plt.gca().grid(True)
plt.legend()
plt.show()
|
|
248023106d4e881110a646e9d078ecad4f58e24d
|
pipelogger.py
|
pipelogger.py
|
#!/usr/bin/env python
#
import argparse
import os
import syslog
parser = argparse.ArgumentParser(
description='Syslog messages as read from a pipe')
parser.add_argument('-i', '--ident',
help='Use the given identifier for syslogging',
required=True)
parser.add_argument('pipe', help='Pipe file to read log records from')
args = parser.parse_args()
syslog.openlog(args.ident, 0)
if not os.path.exists(args.pipe):
os.mkfifo(args.pipe)
while os.path.exists(args.pipe):
f = open(args.pipe, 'r')
for l in f:
syslog.syslog(l)
f.close()
syslog.closelog()
|
Add a Python program which reads from a pipe and writes the data it gets to syslog.
|
Add a Python program which reads from a pipe and writes the data it gets
to syslog.
|
Python
|
bsd-3-clause
|
tonnerre/pipelogger
|
Add a Python program which reads from a pipe and writes the data it gets
to syslog.
|
#!/usr/bin/env python
#
import argparse
import os
import syslog
parser = argparse.ArgumentParser(
description='Syslog messages as read from a pipe')
parser.add_argument('-i', '--ident',
help='Use the given identifier for syslogging',
required=True)
parser.add_argument('pipe', help='Pipe file to read log records from')
args = parser.parse_args()
syslog.openlog(args.ident, 0)
if not os.path.exists(args.pipe):
os.mkfifo(args.pipe)
while os.path.exists(args.pipe):
f = open(args.pipe, 'r')
for l in f:
syslog.syslog(l)
f.close()
syslog.closelog()
|
<commit_before><commit_msg>Add a Python program which reads from a pipe and writes the data it gets
to syslog.<commit_after>
|
#!/usr/bin/env python
#
import argparse
import os
import syslog
parser = argparse.ArgumentParser(
description='Syslog messages as read from a pipe')
parser.add_argument('-i', '--ident',
help='Use the given identifier for syslogging',
required=True)
parser.add_argument('pipe', help='Pipe file to read log records from')
args = parser.parse_args()
syslog.openlog(args.ident, 0)
if not os.path.exists(args.pipe):
os.mkfifo(args.pipe)
while os.path.exists(args.pipe):
f = open(args.pipe, 'r')
for l in f:
syslog.syslog(l)
f.close()
syslog.closelog()
|
Add a Python program which reads from a pipe and writes the data it gets
to syslog.#!/usr/bin/env python
#
import argparse
import os
import syslog
parser = argparse.ArgumentParser(
description='Syslog messages as read from a pipe')
parser.add_argument('-i', '--ident',
help='Use the given identifier for syslogging',
required=True)
parser.add_argument('pipe', help='Pipe file to read log records from')
args = parser.parse_args()
syslog.openlog(args.ident, 0)
if not os.path.exists(args.pipe):
os.mkfifo(args.pipe)
while os.path.exists(args.pipe):
f = open(args.pipe, 'r')
for l in f:
syslog.syslog(l)
f.close()
syslog.closelog()
|
<commit_before><commit_msg>Add a Python program which reads from a pipe and writes the data it gets
to syslog.<commit_after>#!/usr/bin/env python
#
import argparse
import os
import syslog
parser = argparse.ArgumentParser(
description='Syslog messages as read from a pipe')
parser.add_argument('-i', '--ident',
help='Use the given identifier for syslogging',
required=True)
parser.add_argument('pipe', help='Pipe file to read log records from')
args = parser.parse_args()
syslog.openlog(args.ident, 0)
if not os.path.exists(args.pipe):
os.mkfifo(args.pipe)
while os.path.exists(args.pipe):
f = open(args.pipe, 'r')
for l in f:
syslog.syslog(l)
f.close()
syslog.closelog()
|
|
a61d37449f8000a83942513f2ad71151ef26822d
|
synapse/tests/test_cells.py
|
synapse/tests/test_cells.py
|
import synapse.axon as s_axon
import synapse.cells as s_cells
import synapse.cryotank as s_cryotank
from synapse.tests.common import *
class CellTest(SynTest):
def test_cell_cryo(self):
with self.getTestDir() as dirn:
with s_cells.cryo(dirn) as cryo:
self.isinstance(cryo, s_cryotank.CryoCell)
def test_cell_axon(self):
with self.getTestDir() as dirn:
with s_cells.axon(dirn) as axon:
self.isinstance(axon, s_axon.AxonCell)
|
Add unit tests for synapse.cells
|
Add unit tests for synapse.cells
|
Python
|
apache-2.0
|
vertexproject/synapse,vivisect/synapse,vertexproject/synapse,vertexproject/synapse
|
Add unit tests for synapse.cells
|
import synapse.axon as s_axon
import synapse.cells as s_cells
import synapse.cryotank as s_cryotank
from synapse.tests.common import *
class CellTest(SynTest):
def test_cell_cryo(self):
with self.getTestDir() as dirn:
with s_cells.cryo(dirn) as cryo:
self.isinstance(cryo, s_cryotank.CryoCell)
def test_cell_axon(self):
with self.getTestDir() as dirn:
with s_cells.axon(dirn) as axon:
self.isinstance(axon, s_axon.AxonCell)
|
<commit_before><commit_msg>Add unit tests for synapse.cells<commit_after>
|
import synapse.axon as s_axon
import synapse.cells as s_cells
import synapse.cryotank as s_cryotank
from synapse.tests.common import *
class CellTest(SynTest):
def test_cell_cryo(self):
with self.getTestDir() as dirn:
with s_cells.cryo(dirn) as cryo:
self.isinstance(cryo, s_cryotank.CryoCell)
def test_cell_axon(self):
with self.getTestDir() as dirn:
with s_cells.axon(dirn) as axon:
self.isinstance(axon, s_axon.AxonCell)
|
Add unit tests for synapse.cellsimport synapse.axon as s_axon
import synapse.cells as s_cells
import synapse.cryotank as s_cryotank
from synapse.tests.common import *
class CellTest(SynTest):
def test_cell_cryo(self):
with self.getTestDir() as dirn:
with s_cells.cryo(dirn) as cryo:
self.isinstance(cryo, s_cryotank.CryoCell)
def test_cell_axon(self):
with self.getTestDir() as dirn:
with s_cells.axon(dirn) as axon:
self.isinstance(axon, s_axon.AxonCell)
|
<commit_before><commit_msg>Add unit tests for synapse.cells<commit_after>import synapse.axon as s_axon
import synapse.cells as s_cells
import synapse.cryotank as s_cryotank
from synapse.tests.common import *
class CellTest(SynTest):
def test_cell_cryo(self):
with self.getTestDir() as dirn:
with s_cells.cryo(dirn) as cryo:
self.isinstance(cryo, s_cryotank.CryoCell)
def test_cell_axon(self):
with self.getTestDir() as dirn:
with s_cells.axon(dirn) as axon:
self.isinstance(axon, s_axon.AxonCell)
|
|
a17efdceeeec0932ff403ebeb6f787ea8b08a3a4
|
Problems/printLists.py
|
Problems/printLists.py
|
#!/Applications/anaconda/envs/Python3/bin
def main():
# Test suite
test_list_1 = ["puppy", "kitten", "lion cub"]
test_list_2 = ["lettuce",
"bacon",
"turkey",
"mayonnaise",
"tomato",
"white bread"]
pretty_print_lists(test_list_1)
pretty_print_lists(test_list_2)
def pretty_print_lists(l):
output = ""
last_index = len(l) - 1
for i, item in enumerate(l):
if i == last_index:
output += "and {}".format(item)
else:
output += "{}, ".format(item)
print(output)
return None
if __name__ == '__main__':
main()
|
Add print lists function practice problem
|
Add print lists function practice problem
|
Python
|
mit
|
HKuz/Test_Code
|
Add print lists function practice problem
|
#!/Applications/anaconda/envs/Python3/bin
def main():
# Test suite
test_list_1 = ["puppy", "kitten", "lion cub"]
test_list_2 = ["lettuce",
"bacon",
"turkey",
"mayonnaise",
"tomato",
"white bread"]
pretty_print_lists(test_list_1)
pretty_print_lists(test_list_2)
def pretty_print_lists(l):
output = ""
last_index = len(l) - 1
for i, item in enumerate(l):
if i == last_index:
output += "and {}".format(item)
else:
output += "{}, ".format(item)
print(output)
return None
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add print lists function practice problem<commit_after>
|
#!/Applications/anaconda/envs/Python3/bin
def main():
# Test suite
test_list_1 = ["puppy", "kitten", "lion cub"]
test_list_2 = ["lettuce",
"bacon",
"turkey",
"mayonnaise",
"tomato",
"white bread"]
pretty_print_lists(test_list_1)
pretty_print_lists(test_list_2)
def pretty_print_lists(l):
output = ""
last_index = len(l) - 1
for i, item in enumerate(l):
if i == last_index:
output += "and {}".format(item)
else:
output += "{}, ".format(item)
print(output)
return None
if __name__ == '__main__':
main()
|
Add print lists function practice problem#!/Applications/anaconda/envs/Python3/bin
def main():
# Test suite
test_list_1 = ["puppy", "kitten", "lion cub"]
test_list_2 = ["lettuce",
"bacon",
"turkey",
"mayonnaise",
"tomato",
"white bread"]
pretty_print_lists(test_list_1)
pretty_print_lists(test_list_2)
def pretty_print_lists(l):
output = ""
last_index = len(l) - 1
for i, item in enumerate(l):
if i == last_index:
output += "and {}".format(item)
else:
output += "{}, ".format(item)
print(output)
return None
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add print lists function practice problem<commit_after>#!/Applications/anaconda/envs/Python3/bin
def main():
# Test suite
test_list_1 = ["puppy", "kitten", "lion cub"]
test_list_2 = ["lettuce",
"bacon",
"turkey",
"mayonnaise",
"tomato",
"white bread"]
pretty_print_lists(test_list_1)
pretty_print_lists(test_list_2)
def pretty_print_lists(l):
output = ""
last_index = len(l) - 1
for i, item in enumerate(l):
if i == last_index:
output += "and {}".format(item)
else:
output += "{}, ".format(item)
print(output)
return None
if __name__ == '__main__':
main()
|