Dataset columns (name, with the length/class statistics reported for each):
commit: stringlengths 40-40
old_file: stringlengths 4-118
new_file: stringlengths 4-118
old_contents: stringlengths 0-2.94k
new_contents: stringlengths 1-4.43k
subject: stringlengths 15-444
message: stringlengths 16-3.45k
lang: stringclasses 1 value
license: stringclasses 13 values
repos: stringlengths 5-43.2k
prompt: stringlengths 17-4.58k
response: stringlengths 1-4.43k
prompt_tagged: stringlengths 58-4.62k
response_tagged: stringlengths 1-4.43k
text: stringlengths 132-7.29k
text_tagged: stringlengths 173-7.33k
f3e68a8c256a13316e96a45a956a632a14221d3f
|
Files/MoveData.py
|
Files/MoveData.py
|
def move_data(token, member_id, from_path, to_path):
url = 'https://api.dropboxapi.com/1/fileops/move'
headers = {'Authorization': 'Bearer %s' % token, 'X-Dropbox-Perform-As-Team-Member': member_id}
data = {'root': 'auto', 'from_path': from_path, 'to_path': to_path}
print 'Moving "%s" to "%s" (member_id: %s)' % (from_path, to_path, member_id)
r = requests.post(url, headers=headers, data=data)
if r.status_code == 200:
print 'Success!'
return True
else:
print 'HTTP error %s - %s (%s)' % (r.status_code, r.reason, r.text)
return False
|
Move Data Function for Python
|
Move Data Function for Python
|
Python
|
apache-2.0
|
dropbox/DropboxBusinessScripts,dropbox/DropboxBusinessScripts
|
Move Data Function for Python
|
def move_data(token, member_id, from_path, to_path):
url = 'https://api.dropboxapi.com/1/fileops/move'
headers = {'Authorization': 'Bearer %s' % token, 'X-Dropbox-Perform-As-Team-Member': member_id}
data = {'root': 'auto', 'from_path': from_path, 'to_path': to_path}
print 'Moving "%s" to "%s" (member_id: %s)' % (from_path, to_path, member_id)
r = requests.post(url, headers=headers, data=data)
if r.status_code == 200:
print 'Success!'
return True
else:
print 'HTTP error %s - %s (%s)' % (r.status_code, r.reason, r.text)
return False
|
<commit_before><commit_msg>Move Data Function for Python<commit_after>
|
def move_data(token, member_id, from_path, to_path):
url = 'https://api.dropboxapi.com/1/fileops/move'
headers = {'Authorization': 'Bearer %s' % token, 'X-Dropbox-Perform-As-Team-Member': member_id}
data = {'root': 'auto', 'from_path': from_path, 'to_path': to_path}
print 'Moving "%s" to "%s" (member_id: %s)' % (from_path, to_path, member_id)
r = requests.post(url, headers=headers, data=data)
if r.status_code == 200:
print 'Success!'
return True
else:
print 'HTTP error %s - %s (%s)' % (r.status_code, r.reason, r.text)
return False
|
Move Data Function for Pythondef move_data(token, member_id, from_path, to_path):
url = 'https://api.dropboxapi.com/1/fileops/move'
headers = {'Authorization': 'Bearer %s' % token, 'X-Dropbox-Perform-As-Team-Member': member_id}
data = {'root': 'auto', 'from_path': from_path, 'to_path': to_path}
print 'Moving "%s" to "%s" (member_id: %s)' % (from_path, to_path, member_id)
r = requests.post(url, headers=headers, data=data)
if r.status_code == 200:
print 'Success!'
return True
else:
print 'HTTP error %s - %s (%s)' % (r.status_code, r.reason, r.text)
return False
|
<commit_before><commit_msg>Move Data Function for Python<commit_after>def move_data(token, member_id, from_path, to_path):
url = 'https://api.dropboxapi.com/1/fileops/move'
headers = {'Authorization': 'Bearer %s' % token, 'X-Dropbox-Perform-As-Team-Member': member_id}
data = {'root': 'auto', 'from_path': from_path, 'to_path': to_path}
print 'Moving "%s" to "%s" (member_id: %s)' % (from_path, to_path, member_id)
r = requests.post(url, headers=headers, data=data)
if r.status_code == 200:
print 'Success!'
return True
else:
print 'HTTP error %s - %s (%s)' % (r.status_code, r.reason, r.text)
return False
|
|
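A minimal usage sketch for the move_data() function in the record above, assuming the same Python 2 / requests environment as the original script. The token, member id and paths are placeholders, and the v1 fileops endpoint the function targets has since been retired by Dropbox in favour of the v2 files/move_v2 route.

```python
import requests  # needed by move_data() but not imported in the recorded snippet

TOKEN = 'YOUR_TEAM_ACCESS_TOKEN'       # placeholder team access token
MEMBER_ID = 'dbmid:EXAMPLE_MEMBER_ID'  # placeholder team member id

# move_data() is assumed to be defined exactly as in the record above.
ok = move_data(TOKEN, MEMBER_ID, '/reports/2016.xlsx', '/archive/2016.xlsx')
if not ok:
    raise SystemExit('Move failed; see the HTTP error printed by move_data()')
```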
db5f287a7581294bffd49b0f10d7f1d5f73ba1cd
|
fileuploads/migrations/0038_auto_20170110_1622.py
|
fileuploads/migrations/0038_auto_20170110_1622.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.dev20160107235441 on 2017-01-10 21:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('signals', '0016_process_file_id'),
('fileuploads', '0037_auto_20170106_1358'),
]
operations = [
migrations.RemoveField(
model_name='video',
name='outputs',
),
migrations.AddField(
model_name='video',
name='file_processes',
field=models.ManyToManyField(to='signals.Process'),
),
]
|
Add migration files for previous previous commit (file graph link change)
|
Add migration files for previous previous commit (file graph link change)
|
Python
|
mit
|
yayoiukai/signalserver,yayoiukai/signalserver,yayoiukai/signalserver,yayoiukai/signalserver
|
Add migration files for previous previous commit (file graph link change)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.dev20160107235441 on 2017-01-10 21:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('signals', '0016_process_file_id'),
('fileuploads', '0037_auto_20170106_1358'),
]
operations = [
migrations.RemoveField(
model_name='video',
name='outputs',
),
migrations.AddField(
model_name='video',
name='file_processes',
field=models.ManyToManyField(to='signals.Process'),
),
]
|
<commit_before><commit_msg>Add migration files for previous previous commit (file graph link change)<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.dev20160107235441 on 2017-01-10 21:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('signals', '0016_process_file_id'),
('fileuploads', '0037_auto_20170106_1358'),
]
operations = [
migrations.RemoveField(
model_name='video',
name='outputs',
),
migrations.AddField(
model_name='video',
name='file_processes',
field=models.ManyToManyField(to='signals.Process'),
),
]
|
Add migration files for previous previous commit (file graph link change)# -*- coding: utf-8 -*-
# Generated by Django 1.10.dev20160107235441 on 2017-01-10 21:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('signals', '0016_process_file_id'),
('fileuploads', '0037_auto_20170106_1358'),
]
operations = [
migrations.RemoveField(
model_name='video',
name='outputs',
),
migrations.AddField(
model_name='video',
name='file_processes',
field=models.ManyToManyField(to='signals.Process'),
),
]
|
<commit_before><commit_msg>Add migration files for previous previous commit (file graph link change)<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10.dev20160107235441 on 2017-01-10 21:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('signals', '0016_process_file_id'),
('fileuploads', '0037_auto_20170106_1358'),
]
operations = [
migrations.RemoveField(
model_name='video',
name='outputs',
),
migrations.AddField(
model_name='video',
name='file_processes',
field=models.ManyToManyField(to='signals.Process'),
),
]
|
|
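The migration above drops the outputs link and wires Video straight to signals.Process. As a rough illustration (app layout and field options are assumptions; only the model, field and app names come from the migration itself), the models.py edit that makemigrations would turn into a file like 0038_auto_20170110_1622.py looks roughly like this:

```python
# fileuploads/models.py (sketch; the real file is not part of this record)
from django.db import models

class Video(models.Model):
    # outputs = models.ManyToManyField(...)   # field removed in this commit
    file_processes = models.ManyToManyField('signals.Process')  # new file-graph link

    class Meta:
        app_label = 'fileuploads'

# Regenerate and apply with:
#   python manage.py makemigrations fileuploads
#   python manage.py migrate fileuploads
```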
3bfc40c0e0c81723ae43b3dfd4932ba48c3b67e7
|
yak/rest_social_auth/backends/yak_instagram.py
|
yak/rest_social_auth/backends/yak_instagram.py
|
from instagram import InstagramAPI
from social.backends.instagram import InstagramOAuth2
from yak.rest_social_auth.backends.base import ExtraDataAbstractMixin, ExtraActionsAbstractMixin
class Instagram(ExtraActionsAbstractMixin, ExtraDataAbstractMixin, InstagramOAuth2):
@staticmethod
def save_extra_data(response, user):
if response['data']['bio']:
user.about = response['bio']
user.save()
@staticmethod
def get_profile_image(strategy, details, response, uid, user, social, is_new=False, *args, **kwargs):
image_url = response['data']['profile_picture']
return image_url
@staticmethod
def post(user_social_auth, social_obj):
return
@staticmethod
def get_friends(user_social_auth):
return
@staticmethod
def get_posts(user_social_auth, last_updated_time):
api = InstagramAPI(access_token=user_social_auth.extra_data['access_token'])
recent_media, next_ = api.user_recent_media(user_id=user_social_auth.uid, min_time=last_updated_time)
return recent_media
|
from instagram import InstagramAPI, helper
from social.backends.instagram import InstagramOAuth2
from yak.rest_social_auth.backends.base import ExtraDataAbstractMixin, ExtraActionsAbstractMixin
class Instagram(ExtraActionsAbstractMixin, ExtraDataAbstractMixin, InstagramOAuth2):
@staticmethod
def save_extra_data(response, user):
if response['data']['bio']:
user.about = response['bio']
user.save()
@staticmethod
def get_profile_image(strategy, details, response, uid, user, social, is_new=False, *args, **kwargs):
image_url = response['data']['profile_picture']
return image_url
@staticmethod
def post(user_social_auth, social_obj):
return
@staticmethod
def get_friends(user_social_auth):
return
@staticmethod
def get_posts(user_social_auth, last_updated_time):
api = InstagramAPI(access_token=user_social_auth.extra_data['access_token'])
formatted_time = helper.datetime_to_timestamp(last_updated_time)
recent_media, next_ = api.user_recent_media(user_id=user_social_auth.uid, min_timestamp=formatted_time)
return recent_media
|
Fix for undocumented timestamp filtering in python-instagram
|
Fix for undocumented timestamp filtering in python-instagram
|
Python
|
mit
|
yeti/YAK-server,sventech/YAK-server,ParableSciences/YAK-server,sventech/YAK-server,ParableSciences/YAK-server,yeti/YAK-server
|
from instagram import InstagramAPI
from social.backends.instagram import InstagramOAuth2
from yak.rest_social_auth.backends.base import ExtraDataAbstractMixin, ExtraActionsAbstractMixin
class Instagram(ExtraActionsAbstractMixin, ExtraDataAbstractMixin, InstagramOAuth2):
@staticmethod
def save_extra_data(response, user):
if response['data']['bio']:
user.about = response['bio']
user.save()
@staticmethod
def get_profile_image(strategy, details, response, uid, user, social, is_new=False, *args, **kwargs):
image_url = response['data']['profile_picture']
return image_url
@staticmethod
def post(user_social_auth, social_obj):
return
@staticmethod
def get_friends(user_social_auth):
return
@staticmethod
def get_posts(user_social_auth, last_updated_time):
api = InstagramAPI(access_token=user_social_auth.extra_data['access_token'])
recent_media, next_ = api.user_recent_media(user_id=user_social_auth.uid, min_time=last_updated_time)
return recent_media
Fix for undocumented timestamp filtering in python-instagram
|
from instagram import InstagramAPI, helper
from social.backends.instagram import InstagramOAuth2
from yak.rest_social_auth.backends.base import ExtraDataAbstractMixin, ExtraActionsAbstractMixin
class Instagram(ExtraActionsAbstractMixin, ExtraDataAbstractMixin, InstagramOAuth2):
@staticmethod
def save_extra_data(response, user):
if response['data']['bio']:
user.about = response['bio']
user.save()
@staticmethod
def get_profile_image(strategy, details, response, uid, user, social, is_new=False, *args, **kwargs):
image_url = response['data']['profile_picture']
return image_url
@staticmethod
def post(user_social_auth, social_obj):
return
@staticmethod
def get_friends(user_social_auth):
return
@staticmethod
def get_posts(user_social_auth, last_updated_time):
api = InstagramAPI(access_token=user_social_auth.extra_data['access_token'])
formatted_time = helper.datetime_to_timestamp(last_updated_time)
recent_media, next_ = api.user_recent_media(user_id=user_social_auth.uid, min_timestamp=formatted_time)
return recent_media
|
<commit_before>from instagram import InstagramAPI
from social.backends.instagram import InstagramOAuth2
from yak.rest_social_auth.backends.base import ExtraDataAbstractMixin, ExtraActionsAbstractMixin
class Instagram(ExtraActionsAbstractMixin, ExtraDataAbstractMixin, InstagramOAuth2):
@staticmethod
def save_extra_data(response, user):
if response['data']['bio']:
user.about = response['bio']
user.save()
@staticmethod
def get_profile_image(strategy, details, response, uid, user, social, is_new=False, *args, **kwargs):
image_url = response['data']['profile_picture']
return image_url
@staticmethod
def post(user_social_auth, social_obj):
return
@staticmethod
def get_friends(user_social_auth):
return
@staticmethod
def get_posts(user_social_auth, last_updated_time):
api = InstagramAPI(access_token=user_social_auth.extra_data['access_token'])
recent_media, next_ = api.user_recent_media(user_id=user_social_auth.uid, min_time=last_updated_time)
return recent_media
<commit_msg>Fix for undocumented timestamp filtering in python-instagram<commit_after>
|
from instagram import InstagramAPI, helper
from social.backends.instagram import InstagramOAuth2
from yak.rest_social_auth.backends.base import ExtraDataAbstractMixin, ExtraActionsAbstractMixin
class Instagram(ExtraActionsAbstractMixin, ExtraDataAbstractMixin, InstagramOAuth2):
@staticmethod
def save_extra_data(response, user):
if response['data']['bio']:
user.about = response['bio']
user.save()
@staticmethod
def get_profile_image(strategy, details, response, uid, user, social, is_new=False, *args, **kwargs):
image_url = response['data']['profile_picture']
return image_url
@staticmethod
def post(user_social_auth, social_obj):
return
@staticmethod
def get_friends(user_social_auth):
return
@staticmethod
def get_posts(user_social_auth, last_updated_time):
api = InstagramAPI(access_token=user_social_auth.extra_data['access_token'])
formatted_time = helper.datetime_to_timestamp(last_updated_time)
recent_media, next_ = api.user_recent_media(user_id=user_social_auth.uid, min_timestamp=formatted_time)
return recent_media
|
from instagram import InstagramAPI
from social.backends.instagram import InstagramOAuth2
from yak.rest_social_auth.backends.base import ExtraDataAbstractMixin, ExtraActionsAbstractMixin
class Instagram(ExtraActionsAbstractMixin, ExtraDataAbstractMixin, InstagramOAuth2):
@staticmethod
def save_extra_data(response, user):
if response['data']['bio']:
user.about = response['bio']
user.save()
@staticmethod
def get_profile_image(strategy, details, response, uid, user, social, is_new=False, *args, **kwargs):
image_url = response['data']['profile_picture']
return image_url
@staticmethod
def post(user_social_auth, social_obj):
return
@staticmethod
def get_friends(user_social_auth):
return
@staticmethod
def get_posts(user_social_auth, last_updated_time):
api = InstagramAPI(access_token=user_social_auth.extra_data['access_token'])
recent_media, next_ = api.user_recent_media(user_id=user_social_auth.uid, min_time=last_updated_time)
return recent_media
Fix for undocumented timestamp filtering in python-instagramfrom instagram import InstagramAPI, helper
from social.backends.instagram import InstagramOAuth2
from yak.rest_social_auth.backends.base import ExtraDataAbstractMixin, ExtraActionsAbstractMixin
class Instagram(ExtraActionsAbstractMixin, ExtraDataAbstractMixin, InstagramOAuth2):
@staticmethod
def save_extra_data(response, user):
if response['data']['bio']:
user.about = response['bio']
user.save()
@staticmethod
def get_profile_image(strategy, details, response, uid, user, social, is_new=False, *args, **kwargs):
image_url = response['data']['profile_picture']
return image_url
@staticmethod
def post(user_social_auth, social_obj):
return
@staticmethod
def get_friends(user_social_auth):
return
@staticmethod
def get_posts(user_social_auth, last_updated_time):
api = InstagramAPI(access_token=user_social_auth.extra_data['access_token'])
formatted_time = helper.datetime_to_timestamp(last_updated_time)
recent_media, next_ = api.user_recent_media(user_id=user_social_auth.uid, min_timestamp=formatted_time)
return recent_media
|
<commit_before>from instagram import InstagramAPI
from social.backends.instagram import InstagramOAuth2
from yak.rest_social_auth.backends.base import ExtraDataAbstractMixin, ExtraActionsAbstractMixin
class Instagram(ExtraActionsAbstractMixin, ExtraDataAbstractMixin, InstagramOAuth2):
@staticmethod
def save_extra_data(response, user):
if response['data']['bio']:
user.about = response['bio']
user.save()
@staticmethod
def get_profile_image(strategy, details, response, uid, user, social, is_new=False, *args, **kwargs):
image_url = response['data']['profile_picture']
return image_url
@staticmethod
def post(user_social_auth, social_obj):
return
@staticmethod
def get_friends(user_social_auth):
return
@staticmethod
def get_posts(user_social_auth, last_updated_time):
api = InstagramAPI(access_token=user_social_auth.extra_data['access_token'])
recent_media, next_ = api.user_recent_media(user_id=user_social_auth.uid, min_time=last_updated_time)
return recent_media
<commit_msg>Fix for undocumented timestamp filtering in python-instagram<commit_after>from instagram import InstagramAPI, helper
from social.backends.instagram import InstagramOAuth2
from yak.rest_social_auth.backends.base import ExtraDataAbstractMixin, ExtraActionsAbstractMixin
class Instagram(ExtraActionsAbstractMixin, ExtraDataAbstractMixin, InstagramOAuth2):
@staticmethod
def save_extra_data(response, user):
if response['data']['bio']:
user.about = response['bio']
user.save()
@staticmethod
def get_profile_image(strategy, details, response, uid, user, social, is_new=False, *args, **kwargs):
image_url = response['data']['profile_picture']
return image_url
@staticmethod
def post(user_social_auth, social_obj):
return
@staticmethod
def get_friends(user_social_auth):
return
@staticmethod
def get_posts(user_social_auth, last_updated_time):
api = InstagramAPI(access_token=user_social_auth.extra_data['access_token'])
formatted_time = helper.datetime_to_timestamp(last_updated_time)
recent_media, next_ = api.user_recent_media(user_id=user_social_auth.uid, min_timestamp=formatted_time)
return recent_media
|
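The fix above converts the datetime with helper.datetime_to_timestamp() before passing it as min_timestamp. A small, dependency-free sketch of what that conversion amounts to, assuming the usual convention of integer seconds since the Unix epoch for a naive UTC datetime; the recorded code relies on python-instagram's own helper rather than this function.

```python
import calendar
from datetime import datetime

def datetime_to_unix_seconds(dt):
    """Convert a naive UTC datetime to integer seconds since the Unix epoch."""
    return calendar.timegm(dt.utctimetuple())

assert datetime_to_unix_seconds(datetime(1970, 1, 1)) == 0
assert datetime_to_unix_seconds(datetime(2015, 6, 1, 12, 0, 0)) == 1433160000
```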
a9663c9164bf1ef4ba276a499e4802afb363b1e3
|
tests/unit/test_hashable.py
|
tests/unit/test_hashable.py
|
from butter.eventfd import Eventfd
from butter.fanotify import Fanotify
from butter.inotify import Inotify
from butter.signalfd import Signalfd
from butter.timerfd import Timerfd
import pytest
@pytest.fixture(params=[Eventfd, Fanotify, Inotify, Signalfd, Timerfd])
def obj(Obj):
o = Obj.__new__(Obj)
return o
@pytest.mark.parametrize('obj1,obj2', [
(obj(Eventfd), obj(Eventfd) ),
(obj(Fanotify), obj(Fanotify) ),
(obj(Inotify), obj(Inotify) ),
(obj(Signalfd), obj(Signalfd) ),
(obj(Timerfd), obj(Timerfd) ),
])
@pytest.mark.unit
def test_equals_same(obj1, obj2):
obj1._fd = 1
obj2._fd = 1
assert obj1 == obj2, '2 Identical objects are comparing as diffrent'
@pytest.mark.parametrize('obj1,obj2', [
(obj(Eventfd), obj(Eventfd) ),
(obj(Fanotify), obj(Fanotify) ),
(obj(Inotify), obj(Inotify) ),
(obj(Signalfd), obj(Signalfd) ),
(obj(Timerfd), obj(Timerfd) ),
])
def test_equals_diffrent(obj1, obj2):
obj1._fd = 1
obj2._fd = 2
assert obj1 != obj2, '2 Diffrent objects are comparing as equivlent'
@pytest.mark.parametrize('obj1,obj2', [
(obj(Eventfd), obj(Eventfd) ),
(obj(Fanotify), obj(Fanotify) ),
(obj(Inotify), obj(Inotify) ),
(obj(Signalfd), obj(Signalfd) ),
(obj(Timerfd), obj(Timerfd) ),
])
def test_hashable(obj1, obj2):
obj2 = None # we are not using this
assert isinstance(hash(obj), int), 'hash of object is not an int'
|
Add test for hashable objects
|
Add test for hashable objects
|
Python
|
bsd-3-clause
|
dasSOZO/python-butter,wdv4758h/butter
|
Add test for hashable objects
|
from butter.eventfd import Eventfd
from butter.fanotify import Fanotify
from butter.inotify import Inotify
from butter.signalfd import Signalfd
from butter.timerfd import Timerfd
import pytest
@pytest.fixture(params=[Eventfd, Fanotify, Inotify, Signalfd, Timerfd])
def obj(Obj):
o = Obj.__new__(Obj)
return o
@pytest.mark.parametrize('obj1,obj2', [
(obj(Eventfd), obj(Eventfd) ),
(obj(Fanotify), obj(Fanotify) ),
(obj(Inotify), obj(Inotify) ),
(obj(Signalfd), obj(Signalfd) ),
(obj(Timerfd), obj(Timerfd) ),
])
@pytest.mark.unit
def test_equals_same(obj1, obj2):
obj1._fd = 1
obj2._fd = 1
assert obj1 == obj2, '2 Identical objects are comparing as diffrent'
@pytest.mark.parametrize('obj1,obj2', [
(obj(Eventfd), obj(Eventfd) ),
(obj(Fanotify), obj(Fanotify) ),
(obj(Inotify), obj(Inotify) ),
(obj(Signalfd), obj(Signalfd) ),
(obj(Timerfd), obj(Timerfd) ),
])
def test_equals_diffrent(obj1, obj2):
obj1._fd = 1
obj2._fd = 2
assert obj1 != obj2, '2 Diffrent objects are comparing as equivlent'
@pytest.mark.parametrize('obj1,obj2', [
(obj(Eventfd), obj(Eventfd) ),
(obj(Fanotify), obj(Fanotify) ),
(obj(Inotify), obj(Inotify) ),
(obj(Signalfd), obj(Signalfd) ),
(obj(Timerfd), obj(Timerfd) ),
])
def test_hashable(obj1, obj2):
obj2 = None # we are not using this
assert isinstance(hash(obj), int), 'hash of object is not an int'
|
<commit_before><commit_msg>Add test for hashable objects<commit_after>
|
from butter.eventfd import Eventfd
from butter.fanotify import Fanotify
from butter.inotify import Inotify
from butter.signalfd import Signalfd
from butter.timerfd import Timerfd
import pytest
@pytest.fixture(params=[Eventfd, Fanotify, Inotify, Signalfd, Timerfd])
def obj(Obj):
o = Obj.__new__(Obj)
return o
@pytest.mark.parametrize('obj1,obj2', [
(obj(Eventfd), obj(Eventfd) ),
(obj(Fanotify), obj(Fanotify) ),
(obj(Inotify), obj(Inotify) ),
(obj(Signalfd), obj(Signalfd) ),
(obj(Timerfd), obj(Timerfd) ),
])
@pytest.mark.unit
def test_equals_same(obj1, obj2):
obj1._fd = 1
obj2._fd = 1
assert obj1 == obj2, '2 Identical objects are comparing as diffrent'
@pytest.mark.parametrize('obj1,obj2', [
(obj(Eventfd), obj(Eventfd) ),
(obj(Fanotify), obj(Fanotify) ),
(obj(Inotify), obj(Inotify) ),
(obj(Signalfd), obj(Signalfd) ),
(obj(Timerfd), obj(Timerfd) ),
])
def test_equals_diffrent(obj1, obj2):
obj1._fd = 1
obj2._fd = 2
assert obj1 != obj2, '2 Diffrent objects are comparing as equivlent'
@pytest.mark.parametrize('obj1,obj2', [
(obj(Eventfd), obj(Eventfd) ),
(obj(Fanotify), obj(Fanotify) ),
(obj(Inotify), obj(Inotify) ),
(obj(Signalfd), obj(Signalfd) ),
(obj(Timerfd), obj(Timerfd) ),
])
def test_hashable(obj1, obj2):
obj2 = None # we are not using this
assert isinstance(hash(obj), int), 'hash of object is not an int'
|
Add test for hashable objectsfrom butter.eventfd import Eventfd
from butter.fanotify import Fanotify
from butter.inotify import Inotify
from butter.signalfd import Signalfd
from butter.timerfd import Timerfd
import pytest
@pytest.fixture(params=[Eventfd, Fanotify, Inotify, Signalfd, Timerfd])
def obj(Obj):
o = Obj.__new__(Obj)
return o
@pytest.mark.parametrize('obj1,obj2', [
(obj(Eventfd), obj(Eventfd) ),
(obj(Fanotify), obj(Fanotify) ),
(obj(Inotify), obj(Inotify) ),
(obj(Signalfd), obj(Signalfd) ),
(obj(Timerfd), obj(Timerfd) ),
])
@pytest.mark.unit
def test_equals_same(obj1, obj2):
obj1._fd = 1
obj2._fd = 1
assert obj1 == obj2, '2 Identical objects are comparing as diffrent'
@pytest.mark.parametrize('obj1,obj2', [
(obj(Eventfd), obj(Eventfd) ),
(obj(Fanotify), obj(Fanotify) ),
(obj(Inotify), obj(Inotify) ),
(obj(Signalfd), obj(Signalfd) ),
(obj(Timerfd), obj(Timerfd) ),
])
def test_equals_diffrent(obj1, obj2):
obj1._fd = 1
obj2._fd = 2
assert obj1 != obj2, '2 Diffrent objects are comparing as equivlent'
@pytest.mark.parametrize('obj1,obj2', [
(obj(Eventfd), obj(Eventfd) ),
(obj(Fanotify), obj(Fanotify) ),
(obj(Inotify), obj(Inotify) ),
(obj(Signalfd), obj(Signalfd) ),
(obj(Timerfd), obj(Timerfd) ),
])
def test_hashable(obj1, obj2):
obj2 = None # we are not using this
assert isinstance(hash(obj), int), 'hash of object is not an int'
|
<commit_before><commit_msg>Add test for hashable objects<commit_after>from butter.eventfd import Eventfd
from butter.fanotify import Fanotify
from butter.inotify import Inotify
from butter.signalfd import Signalfd
from butter.timerfd import Timerfd
import pytest
@pytest.fixture(params=[Eventfd, Fanotify, Inotify, Signalfd, Timerfd])
def obj(Obj):
o = Obj.__new__(Obj)
return o
@pytest.mark.parametrize('obj1,obj2', [
(obj(Eventfd), obj(Eventfd) ),
(obj(Fanotify), obj(Fanotify) ),
(obj(Inotify), obj(Inotify) ),
(obj(Signalfd), obj(Signalfd) ),
(obj(Timerfd), obj(Timerfd) ),
])
@pytest.mark.unit
def test_equals_same(obj1, obj2):
obj1._fd = 1
obj2._fd = 1
assert obj1 == obj2, '2 Identical objects are comparing as diffrent'
@pytest.mark.parametrize('obj1,obj2', [
(obj(Eventfd), obj(Eventfd) ),
(obj(Fanotify), obj(Fanotify) ),
(obj(Inotify), obj(Inotify) ),
(obj(Signalfd), obj(Signalfd) ),
(obj(Timerfd), obj(Timerfd) ),
])
def test_equals_diffrent(obj1, obj2):
obj1._fd = 1
obj2._fd = 2
assert obj1 != obj2, '2 Diffrent objects are comparing as equivlent'
@pytest.mark.parametrize('obj1,obj2', [
(obj(Eventfd), obj(Eventfd) ),
(obj(Fanotify), obj(Fanotify) ),
(obj(Inotify), obj(Inotify) ),
(obj(Signalfd), obj(Signalfd) ),
(obj(Timerfd), obj(Timerfd) ),
])
def test_hashable(obj1, obj2):
obj2 = None # we are not using this
assert isinstance(hash(obj), int), 'hash of object is not an int'
|
|
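butter's own classes are not part of this record, but the three tests above pin down a small contract: equality and hashing keyed on the wrapped file descriptor. A minimal sketch that would satisfy them is below (the class name is invented; note that test_hashable appears to hash the module-level obj fixture function rather than obj1, so it passes trivially as written).

```python
class FdBackedObject(object):
    """Hypothetical stand-in for the butter wrappers exercised above."""

    def __init__(self, fd):
        self._fd = fd

    def __eq__(self, other):
        # Two wrappers are equal when they wrap the same file descriptor.
        return isinstance(other, type(self)) and self._fd == other._fd

    def __ne__(self, other):  # needed explicitly on Python 2
        return not self.__eq__(other)

    def __hash__(self):
        # Hash must agree with __eq__, so key it on the fd as well.
        return hash(self._fd)

assert FdBackedObject(1) == FdBackedObject(1)     # test_equals_same
assert FdBackedObject(1) != FdBackedObject(2)     # test_equals_diffrent
assert isinstance(hash(FdBackedObject(1)), int)   # test_hashable
```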
8d3f235af5acd47857939c19563080a1a74b0924
|
pxe_manager/tests/test_pxemanager.py
|
pxe_manager/tests/test_pxemanager.py
|
from pxe_manager.pxemanager import PxeManager
from resource_manager.client import ResourceManagerClient
import httpretty
@httpretty.activate
def test_defaults():
client = ResourceManagerClient()
cobbler_url = "http://cobbler.example.com/cobbler_api"
cobbler_user = "user"
cobbler_password = "password"
response_body = '''<?xml version="1.0"?>
<methodResponse>
<params>
<param>
<value><string>Some Value</string></value>
</param>
</params>
</methodResponse>
'''
distro_map = {'esxi51': 'qa-vmwareesxi51u0-x86_64',
'esxi50': 'qa-vmwareesxi50u1-x86_64',
'centos': 'qa-centos6-x86_64-striped-drives',
'rhel': 'qa-rhel6u5-x86_64-striped-drives'}
httpretty.register_uri(httpretty.POST, cobbler_url,
body=response_body)
pxe_manager = PxeManager(cobbler_url, cobbler_user, cobbler_password, client)
for key, value in distro_map.iteritems():
assert pxe_manager.distro[key] == value
|
Add pxemanager basic unit test
|
Add pxemanager basic unit test
|
Python
|
apache-2.0
|
tbeckham/DeploymentManager,tbeckham/DeploymentManager,tbeckham/DeploymentManager,ccassler/DeploymentManager,ccassler/DeploymentManager,ccassler/DeploymentManager
|
Add pxemanager basic unit test
|
from pxe_manager.pxemanager import PxeManager
from resource_manager.client import ResourceManagerClient
import httpretty
@httpretty.activate
def test_defaults():
client = ResourceManagerClient()
cobbler_url = "http://cobbler.example.com/cobbler_api"
cobbler_user = "user"
cobbler_password = "password"
response_body = '''<?xml version="1.0"?>
<methodResponse>
<params>
<param>
<value><string>Some Value</string></value>
</param>
</params>
</methodResponse>
'''
distro_map = {'esxi51': 'qa-vmwareesxi51u0-x86_64',
'esxi50': 'qa-vmwareesxi50u1-x86_64',
'centos': 'qa-centos6-x86_64-striped-drives',
'rhel': 'qa-rhel6u5-x86_64-striped-drives'}
httpretty.register_uri(httpretty.POST, cobbler_url,
body=response_body)
pxe_manager = PxeManager(cobbler_url, cobbler_user, cobbler_password, client)
for key, value in distro_map.iteritems():
assert pxe_manager.distro[key] == value
|
<commit_before><commit_msg>Add pxemanager basic unit test<commit_after>
|
from pxe_manager.pxemanager import PxeManager
from resource_manager.client import ResourceManagerClient
import httpretty
@httpretty.activate
def test_defaults():
client = ResourceManagerClient()
cobbler_url = "http://cobbler.example.com/cobbler_api"
cobbler_user = "user"
cobbler_password = "password"
response_body = '''<?xml version="1.0"?>
<methodResponse>
<params>
<param>
<value><string>Some Value</string></value>
</param>
</params>
</methodResponse>
'''
distro_map = {'esxi51': 'qa-vmwareesxi51u0-x86_64',
'esxi50': 'qa-vmwareesxi50u1-x86_64',
'centos': 'qa-centos6-x86_64-striped-drives',
'rhel': 'qa-rhel6u5-x86_64-striped-drives'}
httpretty.register_uri(httpretty.POST, cobbler_url,
body=response_body)
pxe_manager = PxeManager(cobbler_url, cobbler_user, cobbler_password, client)
for key, value in distro_map.iteritems():
assert pxe_manager.distro[key] == value
|
Add pxemanager basic unit testfrom pxe_manager.pxemanager import PxeManager
from resource_manager.client import ResourceManagerClient
import httpretty
@httpretty.activate
def test_defaults():
client = ResourceManagerClient()
cobbler_url = "http://cobbler.example.com/cobbler_api"
cobbler_user = "user"
cobbler_password = "password"
response_body = '''<?xml version="1.0"?>
<methodResponse>
<params>
<param>
<value><string>Some Value</string></value>
</param>
</params>
</methodResponse>
'''
distro_map = {'esxi51': 'qa-vmwareesxi51u0-x86_64',
'esxi50': 'qa-vmwareesxi50u1-x86_64',
'centos': 'qa-centos6-x86_64-striped-drives',
'rhel': 'qa-rhel6u5-x86_64-striped-drives'}
httpretty.register_uri(httpretty.POST, cobbler_url,
body=response_body)
pxe_manager = PxeManager(cobbler_url, cobbler_user, cobbler_password, client)
for key, value in distro_map.iteritems():
assert pxe_manager.distro[key] == value
|
<commit_before><commit_msg>Add pxemanager basic unit test<commit_after>from pxe_manager.pxemanager import PxeManager
from resource_manager.client import ResourceManagerClient
import httpretty
@httpretty.activate
def test_defaults():
client = ResourceManagerClient()
cobbler_url = "http://cobbler.example.com/cobbler_api"
cobbler_user = "user"
cobbler_password = "password"
response_body = '''<?xml version="1.0"?>
<methodResponse>
<params>
<param>
<value><string>Some Value</string></value>
</param>
</params>
</methodResponse>
'''
distro_map = {'esxi51': 'qa-vmwareesxi51u0-x86_64',
'esxi50': 'qa-vmwareesxi50u1-x86_64',
'centos': 'qa-centos6-x86_64-striped-drives',
'rhel': 'qa-rhel6u5-x86_64-striped-drives'}
httpretty.register_uri(httpretty.POST, cobbler_url,
body=response_body)
pxe_manager = PxeManager(cobbler_url, cobbler_user, cobbler_password, client)
for key, value in distro_map.iteritems():
assert pxe_manager.distro[key] == value
|
|
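The canned XML in the test above works because cobbler_api is an XML-RPC endpoint and httpretty patches the socket layer, so whatever XML-RPC client PxeManager presumably uses underneath receives the registered methodResponse. An illustrative Python 2 sketch of that interception on its own (the login() method below is only an example call, not a documented PxeManager API):

```python
import httpretty
import xmlrpclib  # Python 2 stdlib, matching the iteritems() usage above

COBBLER_URL = "http://cobbler.example.com/cobbler_api"
RESPONSE_BODY = ('<?xml version="1.0"?><methodResponse><params><param>'
                 '<value><string>Some Value</string></value></param>'
                 '</params></methodResponse>')

httpretty.enable()
httpretty.register_uri(httpretty.POST, COBBLER_URL, body=RESPONSE_BODY)

server = xmlrpclib.ServerProxy(COBBLER_URL)
assert server.login("user", "password") == "Some Value"  # any method gets the canned reply

httpretty.disable()
httpretty.reset()
```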
ed9a02a769a64edae6aa0e4a06acf7a8fe31cd89
|
scripts/create_windows_virtenv.py
|
scripts/create_windows_virtenv.py
|
#!/usr/bin/env python
import platform
import shutil
import subprocess
import sys
from os.path import dirname, isdir, join as join_path
file_path = dirname(__file__)
requirements_path = '/'.join(file_path.split('/')[:-1]) + "/requirements"
root_path = file_path.split('/')[:-2]
root_path = '/'.join(root_path)
class VirtualEnvironmentBuilder(object):
def __init__(self, virt_env_name):
self.virt_env_name = virt_env_name
@property
def virt_env_path(self):
print(join_path(root_path, self.virt_env_name))
return join_path(root_path, self.virt_env_name)
@property
def virt_env_path(self):
return root_path + "/" + self.virt_env_name
def clean_build(self):
self.delete_env()
self.build()
def build(self):
# Create a fresh virtual environment if it doesn't exist
self.create_venv()
try:
print(requirements_path)
self.run_in_venv('pip', ['install', '-r', requirements_path])
except Exception:
print("Erorrrr")
self.delete_env()
def create_venv(self):
if isdir(self.virt_env_path):
return
print(self.virt_env_path)
try:
subprocess.check_call([sys.executable, '-m', 'virtualenv', self.virt_env_path, '--no-site-packages'])
except Exception:
print("Something is wrong!")
self.delete_env()
if isdir(self.virt_env_name):
print("Environment {} created".format(self.virt_env_path))
def delete_env(self):
print("Deleting env!")
try:
if isdir(self.virt_env_path):
shutil.rmtree(self.virt_env_path)
except Exception:
print("Could not delete environment!")
def run_in_venv(self, cmd, args):
virtual_env_bin_path = self.virt_env_path
if platform.system() == 'Windows':
cmd += '.exe'
virtual_env_bin_path += r'\Scripts'
else:
virtual_env_bin_path += r'/bin'
print("here")
print(virtual_env_bin_path)
print(cmd)
a = join_path[virtual_env_bin_path, cmd]
print(a)
subprocess.check_call(join_path[file_path, virtual_env_bin_path, cmd] + args)
if __name__ == '__main__':
builder = VirtualEnvironmentBuilder('hack33-virtenv')
builder.build()
|
Create virtual builder for windows OS
|
Create virtual builder for windows OS
|
Python
|
mit
|
Rositsazz/hack33,Rositsazz/hack33,Rositsazz/hack33,Rositsazz/hack33
|
Create virtual builder for windows OS
|
#!/usr/bin/env python
import platform
import shutil
import subprocess
import sys
from os.path import dirname, isdir, join as join_path
file_path = dirname(__file__)
requirements_path = '/'.join(file_path.split('/')[:-1]) + "/requirements"
root_path = file_path.split('/')[:-2]
root_path = '/'.join(root_path)
class VirtualEnvironmentBuilder(object):
def __init__(self, virt_env_name):
self.virt_env_name = virt_env_name
@property
def virt_env_path(self):
print(join_path(root_path, self.virt_env_name))
return join_path(root_path, self.virt_env_name)
@property
def virt_env_path(self):
return root_path + "/" + self.virt_env_name
def clean_build(self):
self.delete_env()
self.build()
def build(self):
# Create a fresh virtual environment if it doesn't exist
self.create_venv()
try:
print(requirements_path)
self.run_in_venv('pip', ['install', '-r', requirements_path])
except Exception:
print("Erorrrr")
self.delete_env()
def create_venv(self):
if isdir(self.virt_env_path):
return
print(self.virt_env_path)
try:
subprocess.check_call([sys.executable, '-m', 'virtualenv', self.virt_env_path, '--no-site-packages'])
except Exception:
print("Something is wrong!")
self.delete_env()
if isdir(self.virt_env_name):
print("Environment {} created".format(self.virt_env_path))
def delete_env(self):
print("Deleting env!")
try:
if isdir(self.virt_env_path):
shutil.rmtree(self.virt_env_path)
except Exception:
print("Could not delete environment!")
def run_in_venv(self, cmd, args):
virtual_env_bin_path = self.virt_env_path
if platform.system() == 'Windows':
cmd += '.exe'
virtual_env_bin_path += r'\Scripts'
else:
virtual_env_bin_path += r'/bin'
print("here")
print(virtual_env_bin_path)
print(cmd)
a = join_path[virtual_env_bin_path, cmd]
print(a)
subprocess.check_call(join_path[file_path, virtual_env_bin_path, cmd] + args)
if __name__ == '__main__':
builder = VirtualEnvironmentBuilder('hack33-virtenv')
builder.build()
|
<commit_before><commit_msg>Create virtual builder for windows OS<commit_after>
|
#!/usr/bin/env python
import platform
import shutil
import subprocess
import sys
from os.path import dirname, isdir, join as join_path
file_path = dirname(__file__)
requirements_path = '/'.join(file_path.split('/')[:-1]) + "/requirements"
root_path = file_path.split('/')[:-2]
root_path = '/'.join(root_path)
class VirtualEnvironmentBuilder(object):
def __init__(self, virt_env_name):
self.virt_env_name = virt_env_name
@property
def virt_env_path(self):
print(join_path(root_path, self.virt_env_name))
return join_path(root_path, self.virt_env_name)
@property
def virt_env_path(self):
return root_path + "/" + self.virt_env_name
def clean_build(self):
self.delete_env()
self.build()
def build(self):
# Create a fresh virtual environment if it doesn't exist
self.create_venv()
try:
print(requirements_path)
self.run_in_venv('pip', ['install', '-r', requirements_path])
except Exception:
print("Erorrrr")
self.delete_env()
def create_venv(self):
if isdir(self.virt_env_path):
return
print(self.virt_env_path)
try:
subprocess.check_call([sys.executable, '-m', 'virtualenv', self.virt_env_path, '--no-site-packages'])
except Exception:
print("Something is wrong!")
self.delete_env()
if isdir(self.virt_env_name):
print("Environment {} created".format(self.virt_env_path))
def delete_env(self):
print("Deleting env!")
try:
if isdir(self.virt_env_path):
shutil.rmtree(self.virt_env_path)
except Exception:
print("Could not delete environment!")
def run_in_venv(self, cmd, args):
virtual_env_bin_path = self.virt_env_path
if platform.system() == 'Windows':
cmd += '.exe'
virtual_env_bin_path += r'\Scripts'
else:
virtual_env_bin_path += r'/bin'
print("here")
print(virtual_env_bin_path)
print(cmd)
a = join_path[virtual_env_bin_path, cmd]
print(a)
subprocess.check_call(join_path[file_path, virtual_env_bin_path, cmd] + args)
if __name__ == '__main__':
builder = VirtualEnvironmentBuilder('hack33-virtenv')
builder.build()
|
Create virtual builder for windows OS#!/usr/bin/env python
import platform
import shutil
import subprocess
import sys
from os.path import dirname, isdir, join as join_path
file_path = dirname(__file__)
requirements_path = '/'.join(file_path.split('/')[:-1]) + "/requirements"
root_path = file_path.split('/')[:-2]
root_path = '/'.join(root_path)
class VirtualEnvironmentBuilder(object):
def __init__(self, virt_env_name):
self.virt_env_name = virt_env_name
@property
def virt_env_path(self):
print(join_path(root_path, self.virt_env_name))
return join_path(root_path, self.virt_env_name)
@property
def virt_env_path(self):
return root_path + "/" + self.virt_env_name
def clean_build(self):
self.delete_env()
self.build()
def build(self):
# Create a fresh virtual environment if it doesn't exist
self.create_venv()
try:
print(requirements_path)
self.run_in_venv('pip', ['install', '-r', requirements_path])
except Exception:
print("Erorrrr")
self.delete_env()
def create_venv(self):
if isdir(self.virt_env_path):
return
print(self.virt_env_path)
try:
subprocess.check_call([sys.executable, '-m', 'virtualenv', self.virt_env_path, '--no-site-packages'])
except Exception:
print("Something is wrong!")
self.delete_env()
if isdir(self.virt_env_name):
print("Environment {} created".format(self.virt_env_path))
def delete_env(self):
print("Deleting env!")
try:
if isdir(self.virt_env_path):
shutil.rmtree(self.virt_env_path)
except Exception:
print("Could not delete environment!")
def run_in_venv(self, cmd, args):
virtual_env_bin_path = self.virt_env_path
if platform.system() == 'Windows':
cmd += '.exe'
virtual_env_bin_path += r'\Scripts'
else:
virtual_env_bin_path += r'/bin'
print("here")
print(virtual_env_bin_path)
print(cmd)
a = join_path[virtual_env_bin_path, cmd]
print(a)
subprocess.check_call(join_path[file_path, virtual_env_bin_path, cmd] + args)
if __name__ == '__main__':
builder = VirtualEnvironmentBuilder('hack33-virtenv')
builder.build()
|
<commit_before><commit_msg>Create virtual builder for windows OS<commit_after>#!/usr/bin/env python
import platform
import shutil
import subprocess
import sys
from os.path import dirname, isdir, join as join_path
file_path = dirname(__file__)
requirements_path = '/'.join(file_path.split('/')[:-1]) + "/requirements"
root_path = file_path.split('/')[:-2]
root_path = '/'.join(root_path)
class VirtualEnvironmentBuilder(object):
def __init__(self, virt_env_name):
self.virt_env_name = virt_env_name
@property
def virt_env_path(self):
print(join_path(root_path, self.virt_env_name))
return join_path(root_path, self.virt_env_name)
@property
def virt_env_path(self):
return root_path + "/" + self.virt_env_name
def clean_build(self):
self.delete_env()
self.build()
def build(self):
# Create a fresh virtual environment if it doesn't exist
self.create_venv()
try:
print(requirements_path)
self.run_in_venv('pip', ['install', '-r', requirements_path])
except Exception:
print("Erorrrr")
self.delete_env()
def create_venv(self):
if isdir(self.virt_env_path):
return
print(self.virt_env_path)
try:
subprocess.check_call([sys.executable, '-m', 'virtualenv', self.virt_env_path, '--no-site-packages'])
except Exception:
print("Something is wrong!")
self.delete_env()
if isdir(self.virt_env_name):
print("Environment {} created".format(self.virt_env_path))
def delete_env(self):
print("Deleting env!")
try:
if isdir(self.virt_env_path):
shutil.rmtree(self.virt_env_path)
except Exception:
print("Could not delete environment!")
def run_in_venv(self, cmd, args):
virtual_env_bin_path = self.virt_env_path
if platform.system() == 'Windows':
cmd += '.exe'
virtual_env_bin_path += r'\Scripts'
else:
virtual_env_bin_path += r'/bin'
print("here")
print(virtual_env_bin_path)
print(cmd)
a = join_path[virtual_env_bin_path, cmd]
print(a)
subprocess.check_call(join_path[file_path, virtual_env_bin_path, cmd] + args)
if __name__ == '__main__':
builder = VirtualEnvironmentBuilder('hack33-virtenv')
builder.build()
|
|
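In the run_in_venv() recorded above, join_path is indexed with square brackets instead of being called, and check_call() is handed a string concatenated with a list, so those lines raise TypeError as written. A hedged, standalone rewrite of just that method (names kept from the record, everything else assumed):

```python
import platform
import subprocess
from os.path import join as join_path

def run_in_venv(virt_env_path, cmd, args):
    """Run `cmd args...` using the executable inside the virtualenv's bin/Scripts dir."""
    if platform.system() == 'Windows':
        cmd += '.exe'
        bin_dir = join_path(virt_env_path, 'Scripts')
    else:
        bin_dir = join_path(virt_env_path, 'bin')
    subprocess.check_call([join_path(bin_dir, cmd)] + list(args))

# e.g. run_in_venv('hack33-virtenv', 'pip', ['install', '-r', 'requirements'])
```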
c08a7f6d095fd68213efefa2f969fb0b02189794
|
tests/test_deconv_sanity.py
|
tests/test_deconv_sanity.py
|
"""
Simple sanity check for deconvolution layer.
"""
import numpy as np
from numpy.testing import assert_array_almost_equal
from pylearn2.models.mlp import MLP
from pylearn2.space import Conv2DSpace
import theano
from adversarial.deconv import Deconv
input_space = Conv2DSpace(shape=(2, 1), num_channels=16, axes=('c', 0, 1, 'b'))
deconv = Deconv(layer_name='deconv',
num_channels=1,
kernel_shape=(4, 4),
output_stride=(2, 2),
irange=0.)
mlp = MLP(input_space=input_space, layers=[deconv])
X = input_space.get_theano_batch()
f = theano.function([X], mlp.fprop(X))
# Construct dummy filters.
# Just use two for simplicity.
filter1 = np.array([[0, 1, 0, 1],
[1, 0, 1, 0],
[0, 1, 0, 1],
[1, 0, 1, 0]])
filter2 = np.array([[-1, 0, -1, 0],
[0, -1, 0, -1],
[-1, 0, -1, 0],
[0, -1, 0, -1]])
filters_dest = deconv.transformer._filters
new_filters = np.zeros((16, 4, 4), dtype=filters_dest.dtype)
new_filters[0] = filter1
new_filters[1] = filter2
new_filters = new_filters.reshape(16, 4, 4, 1).swapaxes(0, 3)
deconv.transformer._filters.set_value(new_filters)
def test_deconv_simple():
# Now try a feedforward
input = np.zeros((16, 2, 1, 1), dtype=filters_dest.dtype)
input[0, 0, 0, 0] = 1
input[1, 0, 0, 0] = -0.5
input[0, 1, 0, 0] = 2
input[1, 1, 0, 0] = 1
deconvolution = f(input).reshape((6, 4))
# Above deconvolution should be equivalent to overlapping the two below
# layers (each layer produced from one kernel-wise slice of the input
# layer)
out0 = np.concatenate([1 * filter1 - 0.5 * filter2, np.zeros((2, 4))])
out1 = np.concatenate([np.zeros((2, 4)), 2 * filter1 + 1 * filter2])
check = out0 + out1
assert_array_almost_equal(deconvolution, check)
|
Add simple sanity check test for deconvolution
|
Add simple sanity check test for deconvolution
|
Python
|
bsd-3-clause
|
hans/adversarial
|
Add simple sanity check test for deconvolution
|
"""
Simple sanity check for deconvolution layer.
"""
import numpy as np
from numpy.testing import assert_array_almost_equal
from pylearn2.models.mlp import MLP
from pylearn2.space import Conv2DSpace
import theano
from adversarial.deconv import Deconv
input_space = Conv2DSpace(shape=(2, 1), num_channels=16, axes=('c', 0, 1, 'b'))
deconv = Deconv(layer_name='deconv',
num_channels=1,
kernel_shape=(4, 4),
output_stride=(2, 2),
irange=0.)
mlp = MLP(input_space=input_space, layers=[deconv])
X = input_space.get_theano_batch()
f = theano.function([X], mlp.fprop(X))
# Construct dummy filters.
# Just use two for simplicity.
filter1 = np.array([[0, 1, 0, 1],
[1, 0, 1, 0],
[0, 1, 0, 1],
[1, 0, 1, 0]])
filter2 = np.array([[-1, 0, -1, 0],
[0, -1, 0, -1],
[-1, 0, -1, 0],
[0, -1, 0, -1]])
filters_dest = deconv.transformer._filters
new_filters = np.zeros((16, 4, 4), dtype=filters_dest.dtype)
new_filters[0] = filter1
new_filters[1] = filter2
new_filters = new_filters.reshape(16, 4, 4, 1).swapaxes(0, 3)
deconv.transformer._filters.set_value(new_filters)
def test_deconv_simple():
# Now try a feedforward
input = np.zeros((16, 2, 1, 1), dtype=filters_dest.dtype)
input[0, 0, 0, 0] = 1
input[1, 0, 0, 0] = -0.5
input[0, 1, 0, 0] = 2
input[1, 1, 0, 0] = 1
deconvolution = f(input).reshape((6, 4))
# Above deconvolution should be equivalent to overlapping the two below
# layers (each layer produced from one kernel-wise slice of the input
# layer)
out0 = np.concatenate([1 * filter1 - 0.5 * filter2, np.zeros((2, 4))])
out1 = np.concatenate([np.zeros((2, 4)), 2 * filter1 + 1 * filter2])
check = out0 + out1
assert_array_almost_equal(deconvolution, check)
|
<commit_before><commit_msg>Add simple sanity check test for deconvolution<commit_after>
|
"""
Simple sanity check for deconvolution layer.
"""
import numpy as np
from numpy.testing import assert_array_almost_equal
from pylearn2.models.mlp import MLP
from pylearn2.space import Conv2DSpace
import theano
from adversarial.deconv import Deconv
input_space = Conv2DSpace(shape=(2, 1), num_channels=16, axes=('c', 0, 1, 'b'))
deconv = Deconv(layer_name='deconv',
num_channels=1,
kernel_shape=(4, 4),
output_stride=(2, 2),
irange=0.)
mlp = MLP(input_space=input_space, layers=[deconv])
X = input_space.get_theano_batch()
f = theano.function([X], mlp.fprop(X))
# Construct dummy filters.
# Just use two for simplicity.
filter1 = np.array([[0, 1, 0, 1],
[1, 0, 1, 0],
[0, 1, 0, 1],
[1, 0, 1, 0]])
filter2 = np.array([[-1, 0, -1, 0],
[0, -1, 0, -1],
[-1, 0, -1, 0],
[0, -1, 0, -1]])
filters_dest = deconv.transformer._filters
new_filters = np.zeros((16, 4, 4), dtype=filters_dest.dtype)
new_filters[0] = filter1
new_filters[1] = filter2
new_filters = new_filters.reshape(16, 4, 4, 1).swapaxes(0, 3)
deconv.transformer._filters.set_value(new_filters)
def test_deconv_simple():
# Now try a feedforward
input = np.zeros((16, 2, 1, 1), dtype=filters_dest.dtype)
input[0, 0, 0, 0] = 1
input[1, 0, 0, 0] = -0.5
input[0, 1, 0, 0] = 2
input[1, 1, 0, 0] = 1
deconvolution = f(input).reshape((6, 4))
# Above deconvolution should be equivalent to overlapping the two below
# layers (each layer produced from one kernel-wise slice of the input
# layer)
out0 = np.concatenate([1 * filter1 - 0.5 * filter2, np.zeros((2, 4))])
out1 = np.concatenate([np.zeros((2, 4)), 2 * filter1 + 1 * filter2])
check = out0 + out1
assert_array_almost_equal(deconvolution, check)
|
Add simple sanity check test for deconvolution"""
Simple sanity check for deconvolution layer.
"""
import numpy as np
from numpy.testing import assert_array_almost_equal
from pylearn2.models.mlp import MLP
from pylearn2.space import Conv2DSpace
import theano
from adversarial.deconv import Deconv
input_space = Conv2DSpace(shape=(2, 1), num_channels=16, axes=('c', 0, 1, 'b'))
deconv = Deconv(layer_name='deconv',
num_channels=1,
kernel_shape=(4, 4),
output_stride=(2, 2),
irange=0.)
mlp = MLP(input_space=input_space, layers=[deconv])
X = input_space.get_theano_batch()
f = theano.function([X], mlp.fprop(X))
# Construct dummy filters.
# Just use two for simplicity.
filter1 = np.array([[0, 1, 0, 1],
[1, 0, 1, 0],
[0, 1, 0, 1],
[1, 0, 1, 0]])
filter2 = np.array([[-1, 0, -1, 0],
[0, -1, 0, -1],
[-1, 0, -1, 0],
[0, -1, 0, -1]])
filters_dest = deconv.transformer._filters
new_filters = np.zeros((16, 4, 4), dtype=filters_dest.dtype)
new_filters[0] = filter1
new_filters[1] = filter2
new_filters = new_filters.reshape(16, 4, 4, 1).swapaxes(0, 3)
deconv.transformer._filters.set_value(new_filters)
def test_deconv_simple():
# Now try a feedforward
input = np.zeros((16, 2, 1, 1), dtype=filters_dest.dtype)
input[0, 0, 0, 0] = 1
input[1, 0, 0, 0] = -0.5
input[0, 1, 0, 0] = 2
input[1, 1, 0, 0] = 1
deconvolution = f(input).reshape((6, 4))
# Above deconvolution should be equivalent to overlapping the two below
# layers (each layer produced from one kernel-wise slice of the input
# layer)
out0 = np.concatenate([1 * filter1 - 0.5 * filter2, np.zeros((2, 4))])
out1 = np.concatenate([np.zeros((2, 4)), 2 * filter1 + 1 * filter2])
check = out0 + out1
assert_array_almost_equal(deconvolution, check)
|
<commit_before><commit_msg>Add simple sanity check test for deconvolution<commit_after>"""
Simple sanity check for deconvolution layer.
"""
import numpy as np
from numpy.testing import assert_array_almost_equal
from pylearn2.models.mlp import MLP
from pylearn2.space import Conv2DSpace
import theano
from adversarial.deconv import Deconv
input_space = Conv2DSpace(shape=(2, 1), num_channels=16, axes=('c', 0, 1, 'b'))
deconv = Deconv(layer_name='deconv',
num_channels=1,
kernel_shape=(4, 4),
output_stride=(2, 2),
irange=0.)
mlp = MLP(input_space=input_space, layers=[deconv])
X = input_space.get_theano_batch()
f = theano.function([X], mlp.fprop(X))
# Construct dummy filters.
# Just use two for simplicity.
filter1 = np.array([[0, 1, 0, 1],
[1, 0, 1, 0],
[0, 1, 0, 1],
[1, 0, 1, 0]])
filter2 = np.array([[-1, 0, -1, 0],
[0, -1, 0, -1],
[-1, 0, -1, 0],
[0, -1, 0, -1]])
filters_dest = deconv.transformer._filters
new_filters = np.zeros((16, 4, 4), dtype=filters_dest.dtype)
new_filters[0] = filter1
new_filters[1] = filter2
new_filters = new_filters.reshape(16, 4, 4, 1).swapaxes(0, 3)
deconv.transformer._filters.set_value(new_filters)
def test_deconv_simple():
# Now try a feedforward
input = np.zeros((16, 2, 1, 1), dtype=filters_dest.dtype)
input[0, 0, 0, 0] = 1
input[1, 0, 0, 0] = -0.5
input[0, 1, 0, 0] = 2
input[1, 1, 0, 0] = 1
deconvolution = f(input).reshape((6, 4))
# Above deconvolution should be equivalent to overlapping the two below
# layers (each layer produced from one kernel-wise slice of the input
# layer)
out0 = np.concatenate([1 * filter1 - 0.5 * filter2, np.zeros((2, 4))])
out1 = np.concatenate([np.zeros((2, 4)), 2 * filter1 + 1 * filter2])
check = out0 + out1
assert_array_almost_equal(deconvolution, check)
|
|
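A pylearn2-free sketch of the arithmetic the sanity test asserts: each 4x4 kernel, scaled by its activation, is pasted at stride-2 offsets and the overlapping contributions are summed. The helper name and array layout are illustrative only; the filters and activations mirror the test above.

```python
import numpy as np

def overlap_add_deconv(acts, kernels, stride=2):
    """acts: (n_kernels, in_h, in_w); kernels: (n_kernels, k_h, k_w)."""
    _, in_h, in_w = acts.shape
    _, k_h, k_w = kernels.shape
    out = np.zeros(((in_h - 1) * stride + k_h, (in_w - 1) * stride + k_w))
    for act, kern in zip(acts, kernels):
        for i in range(in_h):
            for j in range(in_w):
                out[i * stride:i * stride + k_h, j * stride:j * stride + k_w] += act[i, j] * kern
    return out

f1 = np.array([[0, 1, 0, 1], [1, 0, 1, 0], [0, 1, 0, 1], [1, 0, 1, 0]], dtype=float)
f2 = np.array([[-1, 0, -1, 0], [0, -1, 0, -1], [-1, 0, -1, 0], [0, -1, 0, -1]], dtype=float)
acts = np.array([[[1.0], [2.0]],      # kernel 0 activations at the two spatial rows
                 [[-0.5], [1.0]]])    # kernel 1 activations
expected = (np.concatenate([1 * f1 - 0.5 * f2, np.zeros((2, 4))]) +
            np.concatenate([np.zeros((2, 4)), 2 * f1 + 1 * f2]))
assert np.allclose(overlap_add_deconv(acts, np.stack([f1, f2])), expected)
```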
b848c73bf4f7bfbb5c4256457131e480ed6b82e9
|
VS_BeforeBuild.py
|
VS_BeforeBuild.py
|
"""
This routine is executed on Before Build every time user build a Visual Studio project.
TODO:
CHM web template with logo.
Custom Icon Strip for CHM Files.
Author: Tony Ho @ AR-MA 2018
"""
from subprocess import call
import os
import fnmatch
def Run():
GenerateDocsInRhino()
GenerateHTMLHelp()
ChangeCHMIconToBooks()
GenerateHTMLHelpCHM()
return True
docDirectory = os.path.dirname(__file__)
def GenerateDocsInRhino():
print "Generating documentation via Rhino"
print "---------------------------------------"
# How to compile a chm file in Python?
# ---------------------------------
command = os.path.join(docDirectory, "generateDocsInRhino.bat")
# print command
rc = call(command, cwd=docDirectory)
def GenerateHTMLHelp():
print "Building HTML Help package"
print "---------------------------------------"
# How to compile a chm file in Python?
# ---------------------------------
command = os.path.join(docDirectory, "make-htmlhelp.bat")
# print command
rc = call(command, cwd=docDirectory)
def GenerateHTMLHelpCHM():
print "Compiling CHM file from HTML Help package"
print "---------------------------------------"
# How to compile a chm file in Python?
# ---------------------------------
command = os.path.join(docDirectory, "make-htmlhelpCHM.bat")
# print command
rc = call(command, cwd=docDirectory)
def ChangeCHMIconToBooks():
"""Change CHM icon to book icon.
Read HHC file and delete the line <param name="ImageType" value="Folder">
"""
filename = os.path.join(docDirectory, "build\\htmlhelp\\armacode.hhc")
#Open Read the file, remove all lines starting with _static
with open(filename, "r+") as f:
lines = f.readlines()
for i, line in enumerate(lines):
rc = fnmatch.fnmatch(line, "*<param name=\"ImageType\" value=\"Folder\">")
if rc:
lines[i] = ""
newContent = "".join(lines) #all lines already have return characters
f.seek(0)
f.write(newContent)
return rc
if __name__ == "__main__":
pass
|
Integrate with armacode Visual Studio project. This generate htmlhelp CHM file to be distributed with armacode.
|
Integrate with armacode Visual Studio project.
This generate htmlhelp CHM file to be distributed with armacode.
|
Python
|
mit
|
theTonyHo/armacode,theTonyHo/armacode,theTonyHo/armacode,theTonyHo/armacode
|
Integrate with armacode Visual Studio project.
This generate htmlhelp CHM file to be distributed with armacode.
|
"""
This routine is executed on Before Build every time user build a Visual Studio project.
TODO:
CHM web template with logo.
Custom Icon Strip for CHM Files.
Author: Tony Ho @ AR-MA 2018
"""
from subprocess import call
import os
import fnmatch
def Run():
GenerateDocsInRhino()
GenerateHTMLHelp()
ChangeCHMIconToBooks()
GenerateHTMLHelpCHM()
return True
docDirectory = os.path.dirname(__file__)
def GenerateDocsInRhino():
print "Generating documentation via Rhino"
print "---------------------------------------"
# How to compile a chm file in Python?
# ---------------------------------
command = os.path.join(docDirectory, "generateDocsInRhino.bat")
# print command
rc = call(command, cwd=docDirectory)
def GenerateHTMLHelp():
print "Building HTML Help package"
print "---------------------------------------"
# How to compile a chm file in Python?
# ---------------------------------
command = os.path.join(docDirectory, "make-htmlhelp.bat")
# print command
rc = call(command, cwd=docDirectory)
def GenerateHTMLHelpCHM():
print "Compiling CHM file from HTML Help package"
print "---------------------------------------"
# How to compile a chm file in Python?
# ---------------------------------
command = os.path.join(docDirectory, "make-htmlhelpCHM.bat")
# print command
rc = call(command, cwd=docDirectory)
def ChangeCHMIconToBooks():
"""Change CHM icon to book icon.
Read HHC file and delete the line <param name="ImageType" value="Folder">
"""
filename = os.path.join(docDirectory, "build\\htmlhelp\\armacode.hhc")
#Open Read the file, remove all lines starting with _static
with open(filename, "r+") as f:
lines = f.readlines()
for i, line in enumerate(lines):
rc = fnmatch.fnmatch(line, "*<param name=\"ImageType\" value=\"Folder\">")
if rc:
lines[i] = ""
newContent = "".join(lines) #all lines already have return characters
f.seek(0)
f.write(newContent)
return rc
if __name__ == "__main__":
pass
|
<commit_before><commit_msg>Integrate with armacode Visual Studio project.
This generate htmlhelp CHM file to be distributed with armacode.<commit_after>
|
"""
This routine is executed on Before Build every time user build a Visual Studio project.
TODO:
CHM web template with logo.
Custom Icon Strip for CHM Files.
Author: Tony Ho @ AR-MA 2018
"""
from subprocess import call
import os
import fnmatch
def Run():
GenerateDocsInRhino()
GenerateHTMLHelp()
ChangeCHMIconToBooks()
GenerateHTMLHelpCHM()
return True
docDirectory = os.path.dirname(__file__)
def GenerateDocsInRhino():
print "Generating documentation via Rhino"
print "---------------------------------------"
# How to compile a chm file in Python?
# ---------------------------------
command = os.path.join(docDirectory, "generateDocsInRhino.bat")
# print command
rc = call(command, cwd=docDirectory)
def GenerateHTMLHelp():
print "Building HTML Help package"
print "---------------------------------------"
# How to compile a chm file in Python?
# ---------------------------------
command = os.path.join(docDirectory, "make-htmlhelp.bat")
# print command
rc = call(command, cwd=docDirectory)
def GenerateHTMLHelpCHM():
print "Compiling CHM file from HTML Help package"
print "---------------------------------------"
# How to compile a chm file in Python?
# ---------------------------------
command = os.path.join(docDirectory, "make-htmlhelpCHM.bat")
# print command
rc = call(command, cwd=docDirectory)
def ChangeCHMIconToBooks():
"""Change CHM icon to book icon.
Read HHC file and delete the line <param name="ImageType" value="Folder">
"""
filename = os.path.join(docDirectory, "build\\htmlhelp\\armacode.hhc")
#Open Read the file, remove all lines starting with _static
with open(filename, "r+") as f:
lines = f.readlines()
for i, line in enumerate(lines):
rc = fnmatch.fnmatch(line, "*<param name=\"ImageType\" value=\"Folder\">")
if rc:
lines[i] = ""
newContent = "".join(lines) #all lines already have return characters
f.seek(0)
f.write(newContent)
return rc
if __name__ == "__main__":
pass
|
Integrate with armacode Visual Studio project.
This generate htmlhelp CHM file to be distributed with armacode."""
This routine is executed on Before Build every time user build a Visual Studio project.
TODO:
CHM web template with logo.
Custom Icon Strip for CHM Files.
Author: Tony Ho @ AR-MA 2018
"""
from subprocess import call
import os
import fnmatch
def Run():
GenerateDocsInRhino()
GenerateHTMLHelp()
ChangeCHMIconToBooks()
GenerateHTMLHelpCHM()
return True
docDirectory = os.path.dirname(__file__)
def GenerateDocsInRhino():
print "Generating documentation via Rhino"
print "---------------------------------------"
# How to compile a chm file in Python?
# ---------------------------------
command = os.path.join(docDirectory, "generateDocsInRhino.bat")
# print command
rc = call(command, cwd=docDirectory)
def GenerateHTMLHelp():
print "Building HTML Help package"
print "---------------------------------------"
# How to compile a chm file in Python?
# ---------------------------------
command = os.path.join(docDirectory, "make-htmlhelp.bat")
# print command
rc = call(command, cwd=docDirectory)
def GenerateHTMLHelpCHM():
print "Compiling CHM file from HTML Help package"
print "---------------------------------------"
# How to compile a chm file in Python?
# ---------------------------------
command = os.path.join(docDirectory, "make-htmlhelpCHM.bat")
# print command
rc = call(command, cwd=docDirectory)
def ChangeCHMIconToBooks():
"""Change CHM icon to book icon.
Read HHC file and delete the line <param name="ImageType" value="Folder">
"""
filename = os.path.join(docDirectory, "build\\htmlhelp\\armacode.hhc")
#Open Read the file, remove all lines starting with _static
with open(filename, "r+") as f:
lines = f.readlines()
for i, line in enumerate(lines):
rc = fnmatch.fnmatch(line, "*<param name=\"ImageType\" value=\"Folder\">")
if rc:
lines[i] = ""
newContent = "".join(lines) #all lines already have return characters
f.seek(0)
f.write(newContent)
return rc
if __name__ == "__main__":
pass
|
<commit_before><commit_msg>Integrate with armacode Visual Studio project.
This generates the htmlhelp CHM file to be distributed with armacode.<commit_after>"""
This routine is executed on Before Build every time the user builds a Visual Studio project.
TODO:
CHM web template with logo.
Custom Icon Strip for CHM Files.
Author: Tony Ho @ AR-MA 2018
"""
from subprocess import call
import os
import fnmatch
def Run():
GenerateDocsInRhino()
GenerateHTMLHelp()
ChangeCHMIconToBooks()
GenerateHTMLHelpCHM()
return True
docDirectory = os.path.dirname(__file__)
def GenerateDocsInRhino():
print "Generating documentation via Rhino"
print "---------------------------------------"
# How to compile a chm file in Python?
# ---------------------------------
command = os.path.join(docDirectory, "generateDocsInRhino.bat")
# print command
rc = call(command, cwd=docDirectory)
def GenerateHTMLHelp():
print "Building HTML Help package"
print "---------------------------------------"
# How to compile a chm file in Python?
# ---------------------------------
command = os.path.join(docDirectory, "make-htmlhelp.bat")
# print command
rc = call(command, cwd=docDirectory)
def GenerateHTMLHelpCHM():
print "Compiling CHM file from HTML Help package"
print "---------------------------------------"
# How to compile a chm file in Python?
# ---------------------------------
command = os.path.join(docDirectory, "make-htmlhelpCHM.bat")
# print command
rc = call(command, cwd=docDirectory)
def ChangeCHMIconToBooks():
"""Change CHM icon to book icon.
Read HHC file and delete the line <param name="ImageType" value="Folder">
"""
filename = os.path.join(docDirectory, "build\\htmlhelp\\armacode.hhc")
# Open the file and drop the <param name="ImageType" value="Folder"> lines
with open(filename, "r+") as f:
lines = f.readlines()
for i, line in enumerate(lines):
rc = fnmatch.fnmatch(line, "*<param name=\"ImageType\" value=\"Folder\">")
if rc:
lines[i] = ""
newContent = "".join(lines) #all lines already have return characters
f.seek(0)
f.write(newContent)
return rc
if __name__ == "__main__":
pass
|
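A note on the ChangeCHMIconToBooks routine above: it reopens the .hhc with "r+", seeks back to the start and writes the filtered content, but never truncates, so a shorter result leaves the tail of the old file in place. A minimal sketch of a truncating variant, standard library only; the function name and hhc_path argument are illustrative rather than part of the commit.

import fnmatch

def change_chm_icon_to_books(hhc_path):
    """Drop the Folder ImageType params so the CHM tree shows book icons."""
    with open(hhc_path, "r+") as f:
        lines = f.readlines()
        kept = [line for line in lines
                if not fnmatch.fnmatch(line.strip(), '*<param name="ImageType" value="Folder">*')]
        f.seek(0)
        f.writelines(kept)
        f.truncate()  # discard leftover bytes when the new content is shorter
    return len(kept) != len(lines)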
|
5229d5bbf1f823b9a7e49a4004e87da043643af7
|
scripts/remove_after_use/remove_duped_subject.py
|
scripts/remove_after_use/remove_duped_subject.py
|
# -*- coding: utf-8 -*-
import sys
import logging
from dateutil.parser import parse
from website.app import setup_django
setup_django()
from osf.models import PreprintService, Subject
from scripts import utils as script_utils
logger = logging.getLogger(__name__)
CUSTOM_TAXONOMY_APPLIED_DATE = '2018-07-17T22:56:02.270217+00:00'
def main(dry=True):
date_of_interest = parse(CUSTOM_TAXONOMY_APPLIED_DATE)
bad_subj = Subject.objects.get(text=' Social and Personality Psychology', provider___id='psyarxiv')
good_subj = Subject.objects.get(text='Social and Personality Psychology', provider___id='psyarxiv')
existing_preprints_with_bad_subj = PreprintService.objects.filter(created__lte=date_of_interest, subjects__in=[bad_subj])
new_preprints_with_bad_subj = PreprintService.objects.filter(created__gt=date_of_interest, subjects__in=[bad_subj])
num_existing = existing_preprints_with_bad_subj.count()
for preprint in existing_preprints_with_bad_subj:
assert preprint.subjects.exclude(id=bad_subj.id).filter(bepress_subject=bad_subj.bepress_subject).exists()
preprint.subjects.remove(bad_subj)
logger.info('Removed subject "{}" from preprint {}'.format(bad_subj.text, preprint._id))
logger.info('Subject "{}" removed from {} preprints'.format(bad_subj.text, num_existing))
num_new = new_preprints_with_bad_subj.count()
for preprint in new_preprints_with_bad_subj:
preprint.subjects.remove(bad_subj)
preprint.subjects.add(good_subj)
logger.info('Replaced subject "{}" with subject "{}" on preprint {}'.format(bad_subj.text, good_subj.text, preprint._id))
logger.info('Subject "{}" replaced with "{}" on {} preprints'.format(bad_subj.text, good_subj.text, num_new))
logger.info('Deleting subject "{}" with id {}'.format(bad_subj.text, bad_subj.id))
bad_subj.delete()
logger.info('Done.')
if __name__ == '__main__':
dry = '--dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
main(dry=dry)
|
Remove duplicate psyarxiv custom subject.
|
Remove duplicate psyarxiv custom subject.
* Remove from existing preprints.
* Replace with correct subject on new preprints.
h/t @mfraezz
|
Python
|
apache-2.0
|
saradbowman/osf.io,felliott/osf.io,cslzchen/osf.io,baylee-d/osf.io,pattisdr/osf.io,HalcyonChimera/osf.io,CenterForOpenScience/osf.io,Johnetordoff/osf.io,adlius/osf.io,baylee-d/osf.io,caseyrollins/osf.io,felliott/osf.io,aaxelb/osf.io,cslzchen/osf.io,mfraezz/osf.io,pattisdr/osf.io,adlius/osf.io,pattisdr/osf.io,mfraezz/osf.io,brianjgeiger/osf.io,HalcyonChimera/osf.io,brianjgeiger/osf.io,caseyrollins/osf.io,brianjgeiger/osf.io,HalcyonChimera/osf.io,cslzchen/osf.io,CenterForOpenScience/osf.io,sloria/osf.io,mattclark/osf.io,CenterForOpenScience/osf.io,erinspace/osf.io,sloria/osf.io,caseyrollins/osf.io,mattclark/osf.io,baylee-d/osf.io,erinspace/osf.io,brianjgeiger/osf.io,cslzchen/osf.io,saradbowman/osf.io,adlius/osf.io,mfraezz/osf.io,CenterForOpenScience/osf.io,aaxelb/osf.io,Johnetordoff/osf.io,erinspace/osf.io,aaxelb/osf.io,Johnetordoff/osf.io,mfraezz/osf.io,sloria/osf.io,aaxelb/osf.io,HalcyonChimera/osf.io,Johnetordoff/osf.io,adlius/osf.io,mattclark/osf.io,felliott/osf.io,felliott/osf.io
|
Remove duplicate psyarxiv custom subject.
* Remove from existing preprints.
* Replace with correct subject on new preprints.
h/t @mfraezz
|
# -*- coding: utf-8 -*-
import sys
import logging
from dateutil.parser import parse
from website.app import setup_django
setup_django()
from osf.models import PreprintService, Subject
from scripts import utils as script_utils
logger = logging.getLogger(__name__)
CUSTOM_TAXONOMY_APPLIED_DATE = '2018-07-17T22:56:02.270217+00:00'
def main(dry=True):
date_of_interest = parse(CUSTOM_TAXONOMY_APPLIED_DATE)
bad_subj = Subject.objects.get(text=' Social and Personality Psychology', provider___id='psyarxiv')
good_subj = Subject.objects.get(text='Social and Personality Psychology', provider___id='psyarxiv')
existing_preprints_with_bad_subj = PreprintService.objects.filter(created__lte=date_of_interest, subjects__in=[bad_subj])
new_preprints_with_bad_subj = PreprintService.objects.filter(created__gt=date_of_interest, subjects__in=[bad_subj])
num_existing = existing_preprints_with_bad_subj.count()
for preprint in existing_preprints_with_bad_subj:
assert preprint.subjects.exclude(id=bad_subj.id).filter(bepress_subject=bad_subj.bepress_subject).exists()
preprint.subjects.remove(bad_subj)
logger.info('Removed subject "{}" from preprint {}'.format(bad_subj.text, preprint._id))
logger.info('Subject "{}" removed from {} preprints'.format(bad_subj.text, num_existing))
num_new = new_preprints_with_bad_subj.count()
for preprint in new_preprints_with_bad_subj:
preprint.subjects.remove(bad_subj)
preprint.subjects.add(good_subj)
logger.info('Replaced subject "{}" with subject "{}" on preprint {}'.format(bad_subj.text, good_subj.text, preprint._id))
logger.info('Subject "{}" replaced with "{}" on {} preprints'.format(bad_subj.text, good_subj.text, num_new))
logger.info('Deleting subject "{}" with id {}'.format(bad_subj.text, bad_subj.id))
bad_subj.delete()
logger.info('Done.')
if __name__ == '__main__':
dry = '--dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
main(dry=dry)
|
<commit_before><commit_msg>Remove duplicate psyarxiv custom subject.
* Remove from existing preprints.
* Replace with correct subject on new preprints.
h/t @mfraezz<commit_after>
|
# -*- coding: utf-8 -*-
import sys
import logging
from dateutil.parser import parse
from website.app import setup_django
setup_django()
from osf.models import PreprintService, Subject
from scripts import utils as script_utils
logger = logging.getLogger(__name__)
CUSTOM_TAXONOMY_APPLIED_DATE = '2018-07-17T22:56:02.270217+00:00'
def main(dry=True):
date_of_interest = parse(CUSTOM_TAXONOMY_APPLIED_DATE)
bad_subj = Subject.objects.get(text=' Social and Personality Psychology', provider___id='psyarxiv')
good_subj = Subject.objects.get(text='Social and Personality Psychology', provider___id='psyarxiv')
existing_preprints_with_bad_subj = PreprintService.objects.filter(created__lte=date_of_interest, subjects__in=[bad_subj])
new_preprints_with_bad_subj = PreprintService.objects.filter(created__gt=date_of_interest, subjects__in=[bad_subj])
num_existing = existing_preprints_with_bad_subj.count()
for preprint in existing_preprints_with_bad_subj:
assert preprint.subjects.exclude(id=bad_subj.id).filter(bepress_subject=bad_subj.bepress_subject).exists()
preprint.subjects.remove(bad_subj)
logger.info('Removed subject "{}" from preprint {}'.format(bad_subj.text, preprint._id))
logger.info('Subject "{}" removed from {} preprints'.format(bad_subj.text, num_existing))
num_new = new_preprints_with_bad_subj.count()
for preprint in new_preprints_with_bad_subj:
preprint.subjects.remove(bad_subj)
preprint.subjects.add(good_subj)
logger.info('Replaced subject "{}" with subject "{}" on preprint {}'.format(bad_subj.text, good_subj.text, preprint._id))
logger.info('Subject "{}" replaced with "{}" on {} preprints'.format(bad_subj.text, good_subj.text, num_new))
logger.info('Deleting subject "{}" with id {}'.format(bad_subj.text, bad_subj.id))
bad_subj.delete()
logger.info('Done.')
if __name__ == '__main__':
dry = '--dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
main(dry=dry)
|
Remove duplicate psyarxiv custom subject.
* Remove from existing preprints.
* Replace with correct subject on new preprints.
h/t @mfraezz# -*- coding: utf-8 -*-
import sys
import logging
from dateutil.parser import parse
from website.app import setup_django
setup_django()
from osf.models import PreprintService, Subject
from scripts import utils as script_utils
logger = logging.getLogger(__name__)
CUSTOM_TAXONOMY_APPLIED_DATE = '2018-07-17T22:56:02.270217+00:00'
def main(dry=True):
date_of_interest = parse(CUSTOM_TAXONOMY_APPLIED_DATE)
bad_subj = Subject.objects.get(text=' Social and Personality Psychology', provider___id='psyarxiv')
good_subj = Subject.objects.get(text='Social and Personality Psychology', provider___id='psyarxiv')
existing_preprints_with_bad_subj = PreprintService.objects.filter(created__lte=date_of_interest, subjects__in=[bad_subj])
new_preprints_with_bad_subj = PreprintService.objects.filter(created__gt=date_of_interest, subjects__in=[bad_subj])
num_existing = existing_preprints_with_bad_subj.count()
for preprint in existing_preprints_with_bad_subj:
assert preprint.subjects.exclude(id=bad_subj.id).filter(bepress_subject=bad_subj.bepress_subject).exists()
preprint.subjects.remove(bad_subj)
logger.info('Removed subject "{}" from preprint {}'.format(bad_subj.text, preprint._id))
logger.info('Subject "{}" removed from {} preprints'.format(bad_subj.text, num_existing))
num_new = new_preprints_with_bad_subj.count()
for preprint in new_preprints_with_bad_subj:
preprint.subjects.remove(bad_subj)
preprint.subjects.add(good_subj)
logger.info('Replaced subject "{}" with subject "{}" on preprint {}'.format(bad_subj.text, good_subj.text, preprint._id))
logger.info('Subject "{}" replaced with "{}" on {} preprints'.format(bad_subj.text, good_subj.text, num_new))
logger.info('Deleting subject "{}" with id {}'.format(bad_subj.text, bad_subj.id))
bad_subj.delete()
logger.info('Done.')
if __name__ == '__main__':
dry = '--dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
main(dry=dry)
|
<commit_before><commit_msg>Remove duplicate psyarxiv custom subject.
* Remove from existing preprints.
* Replace with correct subject on new preprints.
h/t @mfraezz<commit_after># -*- coding: utf-8 -*-
import sys
import logging
from dateutil.parser import parse
from website.app import setup_django
setup_django()
from osf.models import PreprintService, Subject
from scripts import utils as script_utils
logger = logging.getLogger(__name__)
CUSTOM_TAXONOMY_APPLIED_DATE = '2018-07-17T22:56:02.270217+00:00'
def main(dry=True):
date_of_interest = parse(CUSTOM_TAXONOMY_APPLIED_DATE)
bad_subj = Subject.objects.get(text=' Social and Personality Psychology', provider___id='psyarxiv')
good_subj = Subject.objects.get(text='Social and Personality Psychology', provider___id='psyarxiv')
existing_preprints_with_bad_subj = PreprintService.objects.filter(created__lte=date_of_interest, subjects__in=[bad_subj])
new_preprints_with_bad_subj = PreprintService.objects.filter(created__gt=date_of_interest, subjects__in=[bad_subj])
num_existing = existing_preprints_with_bad_subj.count()
for preprint in existing_preprints_with_bad_subj:
assert preprint.subjects.exclude(id=bad_subj.id).filter(bepress_subject=bad_subj.bepress_subject).exists()
preprint.subjects.remove(bad_subj)
logger.info('Removed subject "{}" from preprint {}'.format(bad_subj.text, preprint._id))
logger.info('Subject "{}" removed from {} preprints'.format(bad_subj.text, num_existing))
num_new = new_preprints_with_bad_subj.count()
for preprint in new_preprints_with_bad_subj:
preprint.subjects.remove(bad_subj)
preprint.subjects.add(good_subj)
logger.info('Replaced subject "{}" with subject "{}" on preprint {}'.format(bad_subj.text, good_subj.text, preprint._id))
logger.info('Subject "{}" replaced with "{}" on {} preprints'.format(bad_subj.text, good_subj.text, num_new))
logger.info('Deleting subject "{}" with id {}'.format(bad_subj.text, bad_subj.id))
bad_subj.delete()
logger.info('Done.')
if __name__ == '__main__':
dry = '--dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
main(dry=dry)
|
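One observation on the script above: main() receives dry but never branches on it, so a --dry invocation still modifies and deletes subjects; the flag only controls whether the file logger is attached. A hedged sketch of one way to make the dry run safe, assuming Django's transaction.atomic and the main() and logger defined above; the wrapper name and exception are illustrative.

from django.db import transaction

class DryRunComplete(Exception):
    """Raised purely to force a rollback at the end of a dry run."""

def run(dry=True):
    try:
        with transaction.atomic():
            main(dry=dry)
            if dry:
                # Raising inside the atomic block rolls every change back.
                raise DryRunComplete
    except DryRunComplete:
        logger.info('Dry run finished; all changes were rolled back.')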
|
3f17c3aa44b281b4b33d2c587f7bfb498df3ac0f
|
deepLearningWithNeuralNetworks/create_sentiment_featuresets.py
|
deepLearningWithNeuralNetworks/create_sentiment_featuresets.py
|
# -*- coding: utf-8 -*-
"""Creates sentiment featuresets by preprocessing the data with nltk."""
import pickle
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
import numpy as np
import random
from collections import Counter
lemmatizer = WordNetLemmatizer()
hm_lines = 10000000
def create_lexicon(pos, neg):
"""Create Lexicon."""
lexicon = []
for fi in [pos, neg]:
with open(fi, 'r') as f:
contents = f.readlines()
for l in contents[:hm_lines]:
all_words = word_tokenize(l.lower())
lexicon += list(all_words)
lexicon = [lemmatizer.lemmatize(i) for i in lexicon]
w_counts = Counter(lexicon)
resultlexicon = []
for w in w_counts:
if 1000 > w_counts[w] > 50: # No super common words or too rare
resultlexicon.append(w)
return resultlexicon
def sample_handling(sample, lexicon, classification):
"""Handle samples and return a suitable format."""
featureset = []
with open(sample, 'r') as f:
contents = f.readlines()
for l in contents[:hm_lines]:
current_words = word_tokenize(l.lower())
current_words = [lemmatizer.lemmatize(i) for i in current_words]
features = np.zeros(len(lexicon))
for word in current_words:
if word.lower() in lexicon:
index_value = lexicon.index(word.lower())
features[index_value] += 1
features = list(features)
featureset.append([features, classification])
return featureset
def create_feature_sets_and_labels(pos, neg, test_size=0.1):
"""Create featuresets and labels (training and testing data)."""
lexicon = create_lexicon(pos, neg)
features = []
features += sample_handling('positive.txt', lexicon, [1, 0])
features += sample_handling('negative.txt', lexicon, [0, 1])
random.shuffle(features)
features = np.array(features)
# create training data.
testing_size = int(test_size * len(features))
train_x = list(features[:, 0][:-testing_size])
train_y = list(features[:, 1][:-testing_size])
# create testing data.
test_x = list(features[:, 0][-testing_size:])
test_y = list(features[:, 1][-testing_size:])
return train_x, train_y, test_x, test_y
if __name__ == '__main__':
p = 'positive.txt'
n = 'negative.txt'
train_x, train_y, test_x, test_y = create_feature_sets_and_labels(p, n)
with open('sentiment_set.pickle', 'wb') as f:
pickle.dump([train_x, train_y, test_x, test_y], f)
|
Add script to pre-process data for deep learning
|
Add script to pre-process data for deep learning
|
Python
|
mit
|
a-holm/MachinelearningAlgorithms,a-holm/MachinelearningAlgorithms
|
Add script to pre-process data for deep learning
|
# -*- coding: utf-8 -*-
"""Creates sentiment featuresets by preprocessing the data with nltk."""
import pickle
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
import numpy as np
import random
from collections import Counter
lemmatizer = WordNetLemmatizer()
hm_lines = 10000000
def create_lexicon(pos, neg):
"""Create Lexicon."""
lexicon = []
for fi in [pos, neg]:
with open(fi, 'r') as f:
contents = f.readlines()
for l in contents[:hm_lines]:
all_words = word_tokenize(l.lower())
lexicon += list(all_words)
lexicon = [lemmatizer.lemmatize(i) for i in lexicon]
w_counts = Counter(lexicon)
resultlexicon = []
for w in w_counts:
if 1000 > w_counts[w] > 50: # No super common words or too rare
resultlexicon.append(w)
return resultlexicon
def sample_handling(sample, lexicon, classification):
"""Handle samples and return a suitable format."""
featureset = []
with open(sample, 'r') as f:
contents = f.readlines()
for l in contents[:hm_lines]:
current_words = word_tokenize(l.lower())
current_words = [lemmatizer.lemmatize(i) for i in current_words]
features = np.zeros(len(lexicon))
for word in current_words:
if word.lower() in lexicon:
index_value = lexicon.index(word.lower())
features[index_value] += 1
features = list(features)
featureset.append([features, classification])
return featureset
def create_feature_sets_and_labels(pos, neg, test_size=0.1):
"""Create featuresets and labels (training and testing data)."""
lexicon = create_lexicon(pos, neg)
features = []
features += sample_handling('positive.txt', lexicon, [1, 0])
features += sample_handling('negative.txt', lexicon, [0, 1])
random.shuffle(features)
features = np.array(features)
# create training data.
testing_size = int(test_size * len(features))
train_x = list(features[:, 0][:-testing_size])
train_y = list(features[:, 1][:-testing_size])
# create testing data.
test_x = list(features[:, 0][-testing_size:])
test_y = list(features[:, 1][-testing_size:])
return train_x, train_y, test_x, test_y
if __name__ == '__main__':
p = 'positive.txt'
n = 'negative.txt'
train_x, train_y, test_x, test_y = create_feature_sets_and_labels(p, n)
with open('sentiment_set.pickle', 'wb') as f:
pickle.dump([train_x, train_y, test_x, test_y], f)
|
<commit_before><commit_msg>Add script to pre-process data for deep learning<commit_after>
|
# -*- coding: utf-8 -*-
"""Creates sentiment featuresets by preprocessing the data with nltk."""
import pickle
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
import numpy as np
import random
from collections import Counter
lemmatizer = WordNetLemmatizer()
hm_lines = 10000000
def create_lexicon(pos, neg):
"""Create Lexicon."""
lexicon = []
for fi in [pos, neg]:
with open(fi, 'r') as f:
contents = f.readlines()
for l in contents[:hm_lines]:
all_words = word_tokenize(l.lower())
lexicon += list(all_words)
lexicon = [lemmatizer.lemmatize(i) for i in lexicon]
w_counts = Counter(lexicon)
resultlexicon = []
for w in w_counts:
if 1000 > w_counts[w] > 50: # No super common words or too rare
resultlexicon.append(w)
return resultlexicon
def sample_handling(sample, lexicon, classification):
"""Handle samples and return a suitable format."""
featureset = []
with open(sample, 'r') as f:
contents = f.readlines()
for l in contents[:hm_lines]:
current_words = word_tokenize(l.lower())
current_words = [lemmatizer.lemmatize(i) for i in current_words]
features = np.zeros(len(lexicon))
for word in current_words:
if word.lower() in lexicon:
index_value = lexicon.index(word.lower())
features[index_value] += 1
features = list(features)
featureset.append([features, classification])
return featureset
def create_feature_sets_and_labels(pos, neg, test_size=0.1):
"""Create featuresets and labels (training and testing data)."""
lexicon = create_lexicon(pos, neg)
features = []
features += sample_handling('positive.txt', lexicon, [1, 0])
features += sample_handling('negative.txt', lexicon, [0, 1])
random.shuffle(features)
features = np.array(features)
# create training data.
testing_size = int(test_size * len(features))
train_x = list(features[:, 0][:-testing_size])
train_y = list(features[:, 1][:-testing_size])
# create testing data.
test_x = list(features[:, 0][-testing_size:])
test_y = list(features[:, 1][-testing_size:])
return train_x, train_y, test_x, test_y
if __name__ == '__main__':
p = 'positive.txt'
n = 'negative.txt'
train_x, train_y, test_x, test_y = create_feature_sets_and_labels(p, n)
with open('sentiment_set.pickle', 'wb') as f:
pickle.dump([train_x, train_y, test_x, test_y], f)
|
Add script to pre-process data for deep learning# -*- coding: utf-8 -*-
"""Creates sentiment featuresets by preprocessing the data with nltk."""
import pickle
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
import numpy as np
import random
from collections import Counter
lemmatizer = WordNetLemmatizer()
hm_lines = 10000000
def create_lexicon(pos, neg):
"""Create Lexicon."""
lexicon = []
for fi in [pos, neg]:
with open(fi, 'r') as f:
contents = f.readlines()
for l in contents[:hm_lines]:
all_words = word_tokenize(l.lower())
lexicon += list(all_words)
lexicon = [lemmatizer.lemmatize(i) for i in lexicon]
w_counts = Counter(lexicon)
resultlexicon = []
for w in w_counts:
if 1000 > w_counts[w] > 50: # No super common words or too rare
resultlexicon.append(w)
return resultlexicon
def sample_handling(sample, lexicon, classification):
"""Handle samples and return a suitable format."""
featureset = []
with open(sample, 'r') as f:
contents = f.readlines()
for l in contents[:hm_lines]:
current_words = word_tokenize(l.lower())
current_words = [lemmatizer.lemmatize(i) for i in current_words]
features = np.zeros(len(lexicon))
for word in current_words:
if word.lower() in lexicon:
index_value = lexicon.index(word.lower())
features[index_value] += 1
features = list(features)
featureset.append([features, classification])
return featureset
def create_feature_sets_and_labels(pos, neg, test_size=0.1):
"""Create featuresets and labels (training and testing data)."""
lexicon = create_lexicon(pos, neg)
features = []
features += sample_handling('positive.txt', lexicon, [1, 0])
features += sample_handling('negative.txt', lexicon, [0, 1])
random.shuffle(features)
features = np.array(features)
# create training data.
testing_size = int(test_size * len(features))
train_x = list(features[:, 0][:-testing_size])
train_y = list(features[:, 1][:-testing_size])
# create testing data.
test_x = list(features[:, 0][-testing_size:])
test_y = list(features[:, 1][-testing_size:])
return train_x, train_y, test_x, test_y
if __name__ == '__main__':
p = 'positive.txt'
n = 'negative.txt'
train_x, train_y, test_x, test_y = create_feature_sets_and_labels(p, n)
with open('sentiment_set.pickle', 'wb') as f:
pickle.dump([train_x, train_y, test_x, test_y], f)
|
<commit_before><commit_msg>Add script to pre-process data for deep learning<commit_after># -*- coding: utf-8 -*-
"""Creates sentiment featuresets by preprocessing the data with nltk."""
import pickle
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
import numpy as np
import random
from collections import Counter
lemmatizer = WordNetLemmatizer()
hm_lines = 10000000
def create_lexicon(pos, neg):
"""Create Lexicon."""
lexicon = []
for fi in [pos, neg]:
with open(fi, 'r') as f:
contents = f.readlines()
for l in contents[:hm_lines]:
all_words = word_tokenize(l.lower())
lexicon += list(all_words)
lexicon = [lemmatizer.lemmatize(i) for i in lexicon]
w_counts = Counter(lexicon)
resultlexicon = []
for w in w_counts:
if 1000 > w_counts[w] > 50: # No super common words or too rare
resultlexicon.append(w)
return resultlexicon
def sample_handling(sample, lexicon, classification):
"""Handle samples and return a suitable format."""
featureset = []
with open(sample, 'r') as f:
contents = f.readlines()
for l in contents[:hm_lines]:
current_words = word_tokenize(l.lower())
current_words = [lemmatizer.lemmatize(i) for i in current_words]
features = np.zeros(len(lexicon))
for word in current_words:
if word.lower() in lexicon:
index_value = lexicon.index(word.lower())
features[index_value] += 1
features = list(features)
featureset.append([features, classification])
return featureset
def create_feature_sets_and_labels(pos, neg, test_size=0.1):
"""Create featuresets and labels (training and testing data)."""
lexicon = create_lexicon(pos, neg)
features = []
features += sample_handling('positive.txt', lexicon, [1, 0])
features += sample_handling('negative.txt', lexicon, [0, 1])
random.shuffle(features)
features = np.array(features)
# create training data.
testing_size = int(test_size * len(features))
train_x = list(features[:, 0][:-testing_size])
train_y = list(features[:, 1][:-testing_size])
# create testing data.
test_x = list(features[:, 0][-testing_size:])
test_y = list(features[:, 1][-testing_size:])
return train_x, train_y, test_x, test_y
if __name__ == '__main__':
p = 'positive.txt'
n = 'negative.txt'
train_x, train_y, test_x, test_y = create_feature_sets_and_labels(p, n)
with open('sentiment_set.pickle', 'wb') as f:
pickle.dump([train_x, train_y, test_x, test_y], f)
|
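For reference, a small sketch of reading the pickled featuresets back for training. The file name comes from the script above; the rest is illustrative. Each feature vector is a dense bag-of-words count over the lexicon, which the 50-to-1000 occurrence filter keeps reasonably small.

import pickle
import numpy as np

with open('sentiment_set.pickle', 'rb') as f:
    train_x, train_y, test_x, test_y = pickle.load(f)

train_x = np.array(train_x)  # shape: (n_samples, len(lexicon)) word counts
train_y = np.array(train_y)  # one-hot labels: [1, 0] positive, [0, 1] negative
print(train_x.shape, train_y.shape, len(test_x))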
|
9540d890991eb60838a927df6d83e0f89fd7f7ea
|
distarray/tests/ipcluster.py
|
distarray/tests/ipcluster.py
|
"""
Simple runner for `ipcluster start` or `ipcluster stop` on Python 2 or 3, as
appropriate.
"""
import sys
import six
from subprocess import Popen, PIPE
if six.PY2:
ipcluster_cmd = 'ipcluster'
elif six.PY3:
ipcluster_cmd = 'ipcluster3'
else:
raise NotImplementedError("Not run with Python 2 *or* 3?")
def start(n=12):
"""Convenient way to start an ipcluster for testing.
You have to wait for it to start, however.
"""
# FIXME: This should be reimplemented to signal when the cluster has
# successfully started
engines = "--engines=MPIEngineSetLauncher"
Popen([ipcluster_cmd, 'start', '-n', str(n), engines, str('&')],
stdout=PIPE, stderr=PIPE)
def stop():
"""Convenient way to stop an ipcluster."""
Popen([ipcluster_cmd, 'stop'], stdout=PIPE, stderr=PIPE)
if __name__ == '__main__':
cmd = sys.argv[1]
fn = eval(cmd)
fn()
|
"""
Simple runner for `ipcluster start` or `ipcluster stop` on Python 2 or 3, as
appropriate.
"""
import sys
import six
from subprocess import Popen, PIPE
if six.PY2:
ipcluster_cmd = 'ipcluster'
elif six.PY3:
ipcluster_cmd = 'ipcluster3'
else:
raise NotImplementedError("Not run with Python 2 *or* 3?")
def start(n=4):
"""Convenient way to start an ipcluster for testing.
You have to wait for it to start, however.
"""
# FIXME: This should be reimplemented to signal when the cluster has
# successfully started
engines = "--engines=MPIEngineSetLauncher"
Popen([ipcluster_cmd, 'start', '-n', str(n), engines, str('&')],
stdout=PIPE, stderr=PIPE)
def stop():
"""Convenient way to stop an ipcluster."""
Popen([ipcluster_cmd, 'stop'], stdout=PIPE, stderr=PIPE)
if __name__ == '__main__':
cmd = sys.argv[1]
fn = eval(cmd)
fn()
|
Change the default number of engines to 4.
|
Change the default number of engines to 4.
Only 4 are needed for our current tests.
|
Python
|
bsd-3-clause
|
RaoUmer/distarray,enthought/distarray,RaoUmer/distarray,enthought/distarray
|
"""
Simple runner for `ipcluster start` or `ipcluster stop` on Python 2 or 3, as
appropriate.
"""
import sys
import six
from subprocess import Popen, PIPE
if six.PY2:
ipcluster_cmd = 'ipcluster'
elif six.PY3:
ipcluster_cmd = 'ipcluster3'
else:
raise NotImplementedError("Not run with Python 2 *or* 3?")
def start(n=12):
"""Convenient way to start an ipcluster for testing.
You have to wait for it to start, however.
"""
# FIXME: This should be reimplemented to signal when the cluster has
# successfully started
engines = "--engines=MPIEngineSetLauncher"
Popen([ipcluster_cmd, 'start', '-n', str(n), engines, str('&')],
stdout=PIPE, stderr=PIPE)
def stop():
"""Convenient way to stop an ipcluster."""
Popen([ipcluster_cmd, 'stop'], stdout=PIPE, stderr=PIPE)
if __name__ == '__main__':
cmd = sys.argv[1]
fn = eval(cmd)
fn()
Change the default number of engines to 4.
Only 4 are needed for our current tests.
|
"""
Simple runner for `ipcluster start` or `ipcluster stop` on Python 2 or 3, as
appropriate.
"""
import sys
import six
from subprocess import Popen, PIPE
if six.PY2:
ipcluster_cmd = 'ipcluster'
elif six.PY3:
ipcluster_cmd = 'ipcluster3'
else:
raise NotImplementedError("Not run with Python 2 *or* 3?")
def start(n=4):
"""Convenient way to start an ipcluster for testing.
You have to wait for it to start, however.
"""
# FIXME: This should be reimplemented to signal when the cluster has
# successfully started
engines = "--engines=MPIEngineSetLauncher"
Popen([ipcluster_cmd, 'start', '-n', str(n), engines, str('&')],
stdout=PIPE, stderr=PIPE)
def stop():
"""Convenient way to stop an ipcluster."""
Popen([ipcluster_cmd, 'stop'], stdout=PIPE, stderr=PIPE)
if __name__ == '__main__':
cmd = sys.argv[1]
fn = eval(cmd)
fn()
|
<commit_before>"""
Simple runner for `ipcluster start` or `ipcluster stop` on Python 2 or 3, as
appropriate.
"""
import sys
import six
from subprocess import Popen, PIPE
if six.PY2:
ipcluster_cmd = 'ipcluster'
elif six.PY3:
ipcluster_cmd = 'ipcluster3'
else:
raise NotImplementedError("Not run with Python 2 *or* 3?")
def start(n=12):
"""Convenient way to start an ipcluster for testing.
You have to wait for it to start, however.
"""
# FIXME: This should be reimplemented to signal when the cluster has
# successfully started
engines = "--engines=MPIEngineSetLauncher"
Popen([ipcluster_cmd, 'start', '-n', str(n), engines, str('&')],
stdout=PIPE, stderr=PIPE)
def stop():
"""Convenient way to stop an ipcluster."""
Popen([ipcluster_cmd, 'stop'], stdout=PIPE, stderr=PIPE)
if __name__ == '__main__':
cmd = sys.argv[1]
fn = eval(cmd)
fn()
<commit_msg>Change the default number of engines to 4.
Only 4 are needed for our current tests.<commit_after>
|
"""
Simple runner for `ipcluster start` or `ipcluster stop` on Python 2 or 3, as
appropriate.
"""
import sys
import six
from subprocess import Popen, PIPE
if six.PY2:
ipcluster_cmd = 'ipcluster'
elif six.PY3:
ipcluster_cmd = 'ipcluster3'
else:
raise NotImplementedError("Not run with Python 2 *or* 3?")
def start(n=4):
"""Convenient way to start an ipcluster for testing.
You have to wait for it to start, however.
"""
# FIXME: This should be reimplemented to signal when the cluster has
# successfully started
engines = "--engines=MPIEngineSetLauncher"
Popen([ipcluster_cmd, 'start', '-n', str(n), engines, str('&')],
stdout=PIPE, stderr=PIPE)
def stop():
"""Convenient way to stop an ipcluster."""
Popen([ipcluster_cmd, 'stop'], stdout=PIPE, stderr=PIPE)
if __name__ == '__main__':
cmd = sys.argv[1]
fn = eval(cmd)
fn()
|
"""
Simple runner for `ipcluster start` or `ipcluster stop` on Python 2 or 3, as
appropriate.
"""
import sys
import six
from subprocess import Popen, PIPE
if six.PY2:
ipcluster_cmd = 'ipcluster'
elif six.PY3:
ipcluster_cmd = 'ipcluster3'
else:
raise NotImplementedError("Not run with Python 2 *or* 3?")
def start(n=12):
"""Convenient way to start an ipcluster for testing.
You have to wait for it to start, however.
"""
# FIXME: This should be reimplemented to signal when the cluster has
# successfully started
engines = "--engines=MPIEngineSetLauncher"
Popen([ipcluster_cmd, 'start', '-n', str(n), engines, str('&')],
stdout=PIPE, stderr=PIPE)
def stop():
"""Convenient way to stop an ipcluster."""
Popen([ipcluster_cmd, 'stop'], stdout=PIPE, stderr=PIPE)
if __name__ == '__main__':
cmd = sys.argv[1]
fn = eval(cmd)
fn()
Change the default number of engines to 4.
Only 4 are needed for our current tests."""
Simple runner for `ipcluster start` or `ipcluster stop` on Python 2 or 3, as
appropriate.
"""
import sys
import six
from subprocess import Popen, PIPE
if six.PY2:
ipcluster_cmd = 'ipcluster'
elif six.PY3:
ipcluster_cmd = 'ipcluster3'
else:
raise NotImplementedError("Not run with Python 2 *or* 3?")
def start(n=4):
"""Convenient way to start an ipcluster for testing.
You have to wait for it to start, however.
"""
# FIXME: This should be reimplemented to signal when the cluster has
# successfully started
engines = "--engines=MPIEngineSetLauncher"
Popen([ipcluster_cmd, 'start', '-n', str(n), engines, str('&')],
stdout=PIPE, stderr=PIPE)
def stop():
"""Convenient way to stop an ipcluster."""
Popen([ipcluster_cmd, 'stop'], stdout=PIPE, stderr=PIPE)
if __name__ == '__main__':
cmd = sys.argv[1]
fn = eval(cmd)
fn()
|
<commit_before>"""
Simple runner for `ipcluster start` or `ipcluster stop` on Python 2 or 3, as
appropriate.
"""
import sys
import six
from subprocess import Popen, PIPE
if six.PY2:
ipcluster_cmd = 'ipcluster'
elif six.PY3:
ipcluster_cmd = 'ipcluster3'
else:
raise NotImplementedError("Not run with Python 2 *or* 3?")
def start(n=12):
"""Convenient way to start an ipcluster for testing.
You have to wait for it to start, however.
"""
# FIXME: This should be reimplemented to signal when the cluster has
# successfully started
engines = "--engines=MPIEngineSetLauncher"
Popen([ipcluster_cmd, 'start', '-n', str(n), engines, str('&')],
stdout=PIPE, stderr=PIPE)
def stop():
"""Convenient way to stop an ipcluster."""
Popen([ipcluster_cmd, 'stop'], stdout=PIPE, stderr=PIPE)
if __name__ == '__main__':
cmd = sys.argv[1]
fn = eval(cmd)
fn()
<commit_msg>Change the default number of engines to 4.
Only 4 are needed for our current tests.<commit_after>"""
Simple runner for `ipcluster start` or `ipcluster stop` on Python 2 or 3, as
appropriate.
"""
import sys
import six
from subprocess import Popen, PIPE
if six.PY2:
ipcluster_cmd = 'ipcluster'
elif six.PY3:
ipcluster_cmd = 'ipcluster3'
else:
raise NotImplementedError("Not run with Python 2 *or* 3?")
def start(n=4):
"""Convenient way to start an ipcluster for testing.
You have to wait for it to start, however.
"""
# FIXME: This should be reimplemented to signal when the cluster has
# successfully started
engines = "--engines=MPIEngineSetLauncher"
Popen([ipcluster_cmd, 'start', '-n', str(n), engines, str('&')],
stdout=PIPE, stderr=PIPE)
def stop():
"""Convenient way to stop an ipcluster."""
Popen([ipcluster_cmd, 'stop'], stdout=PIPE, stderr=PIPE)
if __name__ == '__main__':
cmd = sys.argv[1]
fn = eval(cmd)
fn()
|
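The FIXME above notes that start() returns before the cluster is actually usable. A hedged sketch of a polling helper, assuming the ipyparallel Client API (older stacks expose the same class as IPython.parallel.Client); the function name, engine count and timeout are illustrative.

import time

def wait_for_engines(n=4, timeout=60):
    """Poll the controller until at least n engines register, or give up."""
    from ipyparallel import Client  # 'from IPython.parallel import Client' on older installs
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            if len(Client().ids) >= n:
                return True
        except Exception:
            pass  # the controller's connection file may not exist yet
        time.sleep(1)
    return False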
05f53e2a39d4947ae4bf6b37f5e7a8389d833ed2
|
migrations/versions/970_add_downloaded_at_column_to_direct_.py
|
migrations/versions/970_add_downloaded_at_column_to_direct_.py
|
"""add downloaded_at column to direct award projects
Revision ID: 970
Revises: 960
Create Date: 2017-09-04 09:25:41.968481
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '970'
down_revision = '960'
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('direct_award_projects', sa.Column('downloaded_at', sa.DateTime(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('direct_award_projects', 'downloaded_at')
# ### end Alembic commands ###
|
Add migration for database to add downloaded_at column
|
Add migration for database to add downloaded_at column
|
Python
|
mit
|
alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api
|
Add migration for database to add downloaded_at column
|
"""add downloaded_at column to direct award projects
Revision ID: 970
Revises: 960
Create Date: 2017-09-04 09:25:41.968481
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '970'
down_revision = '960'
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('direct_award_projects', sa.Column('downloaded_at', sa.DateTime(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('direct_award_projects', 'downloaded_at')
# ### end Alembic commands ###
|
<commit_before><commit_msg>Add migration for database to add downloaded_at column<commit_after>
|
"""add downloaded_at column to direct award projects
Revision ID: 970
Revises: 960
Create Date: 2017-09-04 09:25:41.968481
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '970'
down_revision = '960'
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('direct_award_projects', sa.Column('downloaded_at', sa.DateTime(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('direct_award_projects', 'downloaded_at')
# ### end Alembic commands ###
|
Add migration for database to add downloaded_at column"""add downloaded_at column to direct award projects
Revision ID: 970
Revises: 960
Create Date: 2017-09-04 09:25:41.968481
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '970'
down_revision = '960'
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('direct_award_projects', sa.Column('downloaded_at', sa.DateTime(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('direct_award_projects', 'downloaded_at')
# ### end Alembic commands ###
|
<commit_before><commit_msg>Add migration for database to add downloaded_at column<commit_after>"""add downloaded_at column to direct award projects
Revision ID: 970
Revises: 960
Create Date: 2017-09-04 09:25:41.968481
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '970'
down_revision = '960'
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('direct_award_projects', sa.Column('downloaded_at', sa.DateTime(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('direct_award_projects', 'downloaded_at')
# ### end Alembic commands ###
|
|
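For context, the model-side counterpart of this revision would be a nullable timestamp on the direct award project model; the sketch below is illustrative and not the project's actual model code. The revision itself can be applied and reverted with the stock Alembic CLI (alembic upgrade 970, alembic downgrade 960), though the project may wrap these commands in its own tooling.

import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class DirectAwardProject(Base):
    __tablename__ = 'direct_award_projects'
    id = sa.Column(sa.Integer, primary_key=True)
    downloaded_at = sa.Column(sa.DateTime, nullable=True)  # NULL until first downloaded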
2bfe6ac9e0408fa418dd3faea4c1e0bbd224949e
|
nixkeyboard.py
|
nixkeyboard.py
|
def read_device_file():
from pathlib import Path
event_files = Path('/dev/input/by-id').glob('*-event-kbd')
for event_file in event_files:
if '-if01-' not in event_file.name:
break
with event_file.open('rb') as events:
while True:
yield events.read(1)
def listen(handlers):
i = 0
for byte in read_device_file():
event = byte
for handler in handlers:
try:
if handler(event):
# Stop processing this hotkey.
return 1
except Exception as e:
print(e)
if __name__ == '__main__':
listen([])
|
Add initial structure for Unix support
|
Add initial structure for Unix support
|
Python
|
mit
|
glitchassassin/keyboard,boppreh/keyboard
|
Add initial structure for Unix support
|
def read_device_file():
from pathlib import Path
event_files = Path('/dev/input/by-id').glob('*-event-kbd')
for event_file in event_files:
if '-if01-' not in event_file.name:
break
with event_file.open('rb') as events:
while True:
yield events.read(1)
def listen(handlers):
i = 0
for byte in read_device_file():
event = byte
for handler in handlers:
try:
if handler(event):
# Stop processing this hotkey.
return 1
except Exception as e:
print(e)
if __name__ == '__main__':
listen([])
|
<commit_before><commit_msg>Add initial structure for Unix support<commit_after>
|
def read_device_file():
from pathlib import Path
event_files = Path('/dev/input/by-id').glob('*-event-kbd')
for event_file in event_files:
if '-if01-' not in event_file.name:
break
with event_file.open('rb') as events:
while True:
yield events.read(1)
def listen(handlers):
i = 0
for byte in read_device_file():
event = byte
for handler in handlers:
try:
if handler(event):
# Stop processing this hotkey.
return 1
except Exception as e:
print(e)
if __name__ == '__main__':
listen([])
|
Add initial structure for Unix supportdef read_device_file():
from pathlib import Path
event_files = Path('/dev/input/by-id').glob('*-event-kbd')
for event_file in event_files:
if '-if01-' not in event_file.name:
break
with event_file.open('rb') as events:
while True:
yield events.read(1)
def listen(handlers):
i = 0
for byte in read_device_file():
event = byte
for handler in handlers:
try:
if handler(event):
# Stop processing this hotkey.
return 1
except Exception as e:
print(e)
if __name__ == '__main__':
listen([])
|
<commit_before><commit_msg>Add initial structure for Unix support<commit_after>def read_device_file():
from pathlib import Path
event_files = Path('/dev/input/by-id').glob('*-event-kbd')
for event_file in event_files:
if '-if01-' not in event_file.name:
break
with event_file.open('rb') as events:
while True:
yield events.read(1)
def listen(handlers):
i = 0
for byte in read_device_file():
event = byte
for handler in handlers:
try:
if handler(event):
# Stop processing this hotkey.
return 1
except Exception as e:
print(e)
if __name__ == '__main__':
listen([])
|
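The reader above yields the device stream one byte at a time, whereas evdev delivers fixed-size input_event structs. A follow-up step would typically read and unpack whole events; the sketch below assumes the struct layout from linux/input.h on a 64-bit platform and is not part of the original commit.

import struct

EVENT_FORMAT = 'llHHI'  # tv_sec, tv_usec, type, code, value
EVENT_SIZE = struct.calcsize(EVENT_FORMAT)

def read_key_events(path):
    """Yield (key_code, value) pairs; value is 0=release, 1=press, 2=autorepeat."""
    with open(path, 'rb') as device:
        while True:
            data = device.read(EVENT_SIZE)
            if len(data) < EVENT_SIZE:
                break
            _sec, _usec, etype, code, value = struct.unpack(EVENT_FORMAT, data)
            if etype == 1:  # EV_KEY
                yield code, value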
|
e53f3eb1676f851362d6c81171c713877215a99b
|
google/colab/_serverextension/_resource_monitor.py
|
google/colab/_serverextension/_resource_monitor.py
|
"""Methods for tracking resource consumption of Colab kernels."""
import csv
import os
import re
import subprocess
from distutils import spawn
_cmd_regex = re.compile(r'.+kernel-(.+)\.json.*')
def get_gpu_usage():
"""Reports total and per-kernel GPU memory usage.
Returns:
A dict of the form {
usage: int,
limit: int,
kernels: A dict mapping kernel UUIDs to ints (memory usage in bytes),
}
"""
gpu_memory_path = '/var/colab/gpu-memory'
kernels = {}
usage = 0
limit = 0
if os.path.exists(gpu_memory_path):
with open(gpu_memory_path) as f:
reader = csv.DictReader(f.readlines(), delimiter=' ')
for row in reader:
kernels[row['kernel_id']] = int(row['gpu_mem(MiB)']) * 1024 * 1024
if spawn.find_executable('nvidia-smi') is not None:
ns = subprocess.check_output([
'nvidia-smi', '--query-gpu=memory.used,memory.total',
'--format=csv,nounits,noheader'
]).decode('utf-8')
r = csv.reader([ns])
row = next(r)
usage = int(row[0]) * 1024 * 1024
limit = int(row[1]) * 1024 * 1024
return {'usage': usage, 'limit': limit, 'kernels': kernels}
def get_ram_usage():
"""Reports total and per-kernel RAM usage.
Returns:
A dict of the form {
usage: int,
limit: int,
kernels: A dict mapping kernel UUIDs to ints (memory usage in bytes),
}
"""
free, limit = 0, 0
with open('/proc/meminfo', 'r') as f:
lines = f.readlines()
line = [x for x in lines if 'MemAvailable:' in x]
if line:
free = int(line[0].split()[1]) * 1024
line = [x for x in lines if 'MemTotal:' in x]
if line:
limit = int(line[0].split()[1]) * 1024
usage = limit - free
kernels = {}
ps = subprocess.check_output(
['ps', '--ppid',
str(os.getpid()), '-wwo', 'rss cmd', '--no-header']).decode('utf-8')
for proc in ps.split('\n')[:-1]:
proc = proc.strip().split(' ', 1)
if len(proc) != 2:
continue
if not re.match(_cmd_regex, proc[1]):
continue
kernel_id = re.sub(_cmd_regex, r'\1', proc[1])
kernels[kernel_id] = int(proc[0]) * 1024
return {'usage': usage, 'limit': limit, 'kernels': kernels}
|
Add library for monitoring RAM/GPU Memory
|
Add library for monitoring RAM/GPU Memory
PiperOrigin-RevId: 208935123
|
Python
|
apache-2.0
|
googlecolab/colabtools,googlecolab/colabtools
|
Add library for monitoring RAM/GPU Memory
PiperOrigin-RevId: 208935123
|
"""Methods for tracking resource consumption of Colab kernels."""
import csv
import os
import re
import subprocess
from distutils import spawn
_cmd_regex = re.compile(r'.+kernel-(.+)\.json.*')
def get_gpu_usage():
"""Reports total and per-kernel GPU memory usage.
Returns:
A dict of the form {
usage: int,
limit: int,
kernels: A dict mapping kernel UUIDs to ints (memory usage in bytes),
}
"""
gpu_memory_path = '/var/colab/gpu-memory'
kernels = {}
usage = 0
limit = 0
if os.path.exists(gpu_memory_path):
with open(gpu_memory_path) as f:
reader = csv.DictReader(f.readlines(), delimiter=' ')
for row in reader:
kernels[row['kernel_id']] = int(row['gpu_mem(MiB)']) * 1024 * 1024
if spawn.find_executable('nvidia-smi') is not None:
ns = subprocess.check_output([
'nvidia-smi', '--query-gpu=memory.used,memory.total',
'--format=csv,nounits,noheader'
]).decode('utf-8')
r = csv.reader([ns])
row = next(r)
usage = int(row[0]) * 1024 * 1024
limit = int(row[1]) * 1024 * 1024
return {'usage': usage, 'limit': limit, 'kernels': kernels}
def get_ram_usage():
"""Reports total and per-kernel RAM usage.
Returns:
A dict of the form {
usage: int,
limit: int,
kernels: A dict mapping kernel UUIDs to ints (memory usage in bytes),
}
"""
free, limit = 0, 0
with open('/proc/meminfo', 'r') as f:
lines = f.readlines()
line = [x for x in lines if 'MemAvailable:' in x]
if line:
free = int(line[0].split()[1]) * 1024
line = [x for x in lines if 'MemTotal:' in x]
if line:
limit = int(line[0].split()[1]) * 1024
usage = limit - free
kernels = {}
ps = subprocess.check_output(
['ps', '--ppid',
str(os.getpid()), '-wwo', 'rss cmd', '--no-header']).decode('utf-8')
for proc in ps.split('\n')[:-1]:
proc = proc.strip().split(' ', 1)
if len(proc) != 2:
continue
if not re.match(_cmd_regex, proc[1]):
continue
kernel_id = re.sub(_cmd_regex, r'\1', proc[1])
kernels[kernel_id] = int(proc[0]) * 1024
return {'usage': usage, 'limit': limit, 'kernels': kernels}
|
<commit_before><commit_msg>Add library for monitoring RAM/GPU Memory
PiperOrigin-RevId: 208935123<commit_after>
|
"""Methods for tracking resource consumption of Colab kernels."""
import csv
import os
import re
import subprocess
from distutils import spawn
_cmd_regex = re.compile(r'.+kernel-(.+)\.json.*')
def get_gpu_usage():
"""Reports total and per-kernel GPU memory usage.
Returns:
A dict of the form {
usage: int,
limit: int,
kernels: A dict mapping kernel UUIDs to ints (memory usage in bytes),
}
"""
gpu_memory_path = '/var/colab/gpu-memory'
kernels = {}
usage = 0
limit = 0
if os.path.exists(gpu_memory_path):
with open(gpu_memory_path) as f:
reader = csv.DictReader(f.readlines(), delimiter=' ')
for row in reader:
kernels[row['kernel_id']] = int(row['gpu_mem(MiB)']) * 1024 * 1024
if spawn.find_executable('nvidia-smi') is not None:
ns = subprocess.check_output([
'nvidia-smi', '--query-gpu=memory.used,memory.total',
'--format=csv,nounits,noheader'
]).decode('utf-8')
r = csv.reader([ns])
row = next(r)
usage = int(row[0]) * 1024 * 1024
limit = int(row[1]) * 1024 * 1024
return {'usage': usage, 'limit': limit, 'kernels': kernels}
def get_ram_usage():
"""Reports total and per-kernel RAM usage.
Returns:
A dict of the form {
usage: int,
limit: int,
kernels: A dict mapping kernel UUIDs to ints (memory usage in bytes),
}
"""
free, limit = 0, 0
with open('/proc/meminfo', 'r') as f:
lines = f.readlines()
line = [x for x in lines if 'MemAvailable:' in x]
if line:
free = int(line[0].split()[1]) * 1024
line = [x for x in lines if 'MemTotal:' in x]
if line:
limit = int(line[0].split()[1]) * 1024
usage = limit - free
kernels = {}
ps = subprocess.check_output(
['ps', '--ppid',
str(os.getpid()), '-wwo', 'rss cmd', '--no-header']).decode('utf-8')
for proc in ps.split('\n')[:-1]:
proc = proc.strip().split(' ', 1)
if len(proc) != 2:
continue
if not re.match(_cmd_regex, proc[1]):
continue
kernel_id = re.sub(_cmd_regex, r'\1', proc[1])
kernels[kernel_id] = int(proc[0]) * 1024
return {'usage': usage, 'limit': limit, 'kernels': kernels}
|
Add library for monitoring RAM/GPU Memory
PiperOrigin-RevId: 208935123"""Methods for tracking resource consumption of Colab kernels."""
import csv
import os
import re
import subprocess
from distutils import spawn
_cmd_regex = re.compile(r'.+kernel-(.+)\.json.*')
def get_gpu_usage():
"""Reports total and per-kernel GPU memory usage.
Returns:
A dict of the form {
usage: int,
limit: int,
kernels: A dict mapping kernel UUIDs to ints (memory usage in bytes),
}
"""
gpu_memory_path = '/var/colab/gpu-memory'
kernels = {}
usage = 0
limit = 0
if os.path.exists(gpu_memory_path):
with open(gpu_memory_path) as f:
reader = csv.DictReader(f.readlines(), delimiter=' ')
for row in reader:
kernels[row['kernel_id']] = int(row['gpu_mem(MiB)']) * 1024 * 1024
if spawn.find_executable('nvidia-smi') is not None:
ns = subprocess.check_output([
'nvidia-smi', '--query-gpu=memory.used,memory.total',
'--format=csv,nounits,noheader'
]).decode('utf-8')
r = csv.reader([ns])
row = next(r)
usage = int(row[0]) * 1024 * 1024
limit = int(row[1]) * 1024 * 1024
return {'usage': usage, 'limit': limit, 'kernels': kernels}
def get_ram_usage():
"""Reports total and per-kernel RAM usage.
Returns:
A dict of the form {
usage: int,
limit: int,
kernels: A dict mapping kernel UUIDs to ints (memory usage in bytes),
}
"""
free, limit = 0, 0
with open('/proc/meminfo', 'r') as f:
lines = f.readlines()
line = [x for x in lines if 'MemAvailable:' in x]
if line:
free = int(line[0].split()[1]) * 1024
line = [x for x in lines if 'MemTotal:' in x]
if line:
limit = int(line[0].split()[1]) * 1024
usage = limit - free
kernels = {}
ps = subprocess.check_output(
['ps', '--ppid',
str(os.getpid()), '-wwo', 'rss cmd', '--no-header']).decode('utf-8')
for proc in ps.split('\n')[:-1]:
proc = proc.strip().split(' ', 1)
if len(proc) != 2:
continue
if not re.match(_cmd_regex, proc[1]):
continue
kernel_id = re.sub(_cmd_regex, r'\1', proc[1])
kernels[kernel_id] = int(proc[0]) * 1024
return {'usage': usage, 'limit': limit, 'kernels': kernels}
|
<commit_before><commit_msg>Add library for monitoring RAM/GPU Memory
PiperOrigin-RevId: 208935123<commit_after>"""Methods for tracking resource consumption of Colab kernels."""
import csv
import os
import re
import subprocess
from distutils import spawn
_cmd_regex = re.compile(r'.+kernel-(.+)\.json.*')
def get_gpu_usage():
"""Reports total and per-kernel GPU memory usage.
Returns:
A dict of the form {
usage: int,
limit: int,
kernels: A dict mapping kernel UUIDs to ints (memory usage in bytes),
}
"""
gpu_memory_path = '/var/colab/gpu-memory'
kernels = {}
usage = 0
limit = 0
if os.path.exists(gpu_memory_path):
with open(gpu_memory_path) as f:
reader = csv.DictReader(f.readlines(), delimiter=' ')
for row in reader:
kernels[row['kernel_id']] = int(row['gpu_mem(MiB)']) * 1024 * 1024
if spawn.find_executable('nvidia-smi') is not None:
ns = subprocess.check_output([
'nvidia-smi', '--query-gpu=memory.used,memory.total',
'--format=csv,nounits,noheader'
]).decode('utf-8')
r = csv.reader([ns])
row = next(r)
usage = int(row[0]) * 1024 * 1024
limit = int(row[1]) * 1024 * 1024
return {'usage': usage, 'limit': limit, 'kernels': kernels}
def get_ram_usage():
"""Reports total and per-kernel RAM usage.
Returns:
A dict of the form {
usage: int,
limit: int,
kernels: A dict mapping kernel UUIDs to ints (memory usage in bytes),
}
"""
free, limit = 0, 0
with open('/proc/meminfo', 'r') as f:
lines = f.readlines()
line = [x for x in lines if 'MemAvailable:' in x]
if line:
free = int(line[0].split()[1]) * 1024
line = [x for x in lines if 'MemTotal:' in x]
if line:
limit = int(line[0].split()[1]) * 1024
usage = limit - free
kernels = {}
ps = subprocess.check_output(
['ps', '--ppid',
str(os.getpid()), '-wwo', 'rss cmd', '--no-header']).decode('utf-8')
for proc in ps.split('\n')[:-1]:
proc = proc.strip().split(' ', 1)
if len(proc) != 2:
continue
if not re.match(_cmd_regex, proc[1]):
continue
kernel_id = re.sub(_cmd_regex, r'\1', proc[1])
kernels[kernel_id] = int(proc[0]) * 1024
return {'usage': usage, 'limit': limit, 'kernels': kernels}
|
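A quick usage sketch for the two helpers above; illustrative only, since in Colab this module is loaded by the server extension rather than run as a script.

if __name__ == '__main__':
    ram = get_ram_usage()
    print('RAM: {:.1f} / {:.1f} GiB'.format(ram['usage'] / 2.0 ** 30,
                                            ram['limit'] / 2.0 ** 30))
    for kernel_id, rss in ram['kernels'].items():
        print('  kernel {}: {:.1f} MiB'.format(kernel_id, rss / 2.0 ** 20))
    gpu = get_gpu_usage()
    if gpu['limit']:
        print('GPU: {} / {} MiB'.format(gpu['usage'] // 2 ** 20, gpu['limit'] // 2 ** 20))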
|
5231979e72624de6c0cbb26b8d531ff0ed046131
|
rftest/topo-4sw-4host-mn2.py
|
rftest/topo-4sw-4host-mn2.py
|
"""Custom topology example
author: Marcelo Nascimento (marcelon@cpqd.com.br)
Four switches connected in mesh topology plus a host for each switch:
h1 --- sA ---- sB --- h2
        | \    |
        |  \   |
        |   \  |
        |    \ |
h3 --- sC ---- sD --- h4
Adding the 'topos' dict with a key/value pair to generate our newly defined
topology enables one to pass in '--topo=mytopo' from the command line.
"""
from mininet.topo import Topo
class RFTopo( Topo ):
"RouteFlow Demo Setup"
def __init__( self, enable_all = True ):
"Create custom topo."
# Add default members to class.
Topo.__init__(self)
# Set Node IDs for hosts and switches
h1 = self.addHost( 'h1' )
h2 = self.addHost( 'h2' )
h3 = self.addHost( 'h3' )
h4 = self.addHost( 'h4' )
sA = self.addSwitch( 's1' )
sB = self.addSwitch( 's2' )
sC = self.addSwitch( 's3' )
sD = self.addSwitch( 's4' )
# Add edges
self.addLink( h1, sA )
self.addLink( h2, sB )
self.addLink( h3, sC )
self.addLink( h4, sD )
self.addLink( sA, sB )
self.addLink( sB, sD )
self.addLink( sD, sC )
self.addLink( sC, sA )
self.addLink( sA, sD )
topos = { 'rftopo': ( lambda: RFTopo() ) }
|
Add mininet2 topology file for rftest2
|
Add mininet2 topology file for rftest2
Signed-off-by: Joe Stringer <da02367898403910c66f5d62c3a247888772ce8a@gmail.com>
|
Python
|
apache-2.0
|
rsanger/RouteFlow,raphaelvrosa/RouteFlow,c3m3gyanesh/RouteFlow-OpenConfig,ralph-mikera/RouteFlow-1,ralph-mikera/RouteFlow-1,routeflow/RouteFlow,c3m3gyanesh/RouteFlow-OpenConfig,srijanmishra/RouteFlow,ralph-mikera/RouteFlow-1,srijanmishra/RouteFlow,arazmj/RouteFlow,raphaelvrosa/RouteFlow,routeflow/RouteFlow,CPqD/RouteFlow,srijanmishra/RouteFlow,c3m3gyanesh/RouteFlow-OpenConfig,arazmj/RouteFlow,raphaelvrosa/RouteFlow,rsanger/RouteFlow,ralph-mikera/RouteFlow-1,arazmj/RouteFlow,raphaelvrosa/RouteFlow,rsanger/RouteFlow,CPqD/RouteFlow,arazmj/RouteFlow,routeflow/RouteFlow,c3m3gyanesh/RouteFlow-OpenConfig,CPqD/RouteFlow,CPqD/RouteFlow,rsanger/RouteFlow,raphaelvrosa/RouteFlow,routeflow/RouteFlow,c3m3gyanesh/RouteFlow-OpenConfig,srijanmishra/RouteFlow,arazmj/RouteFlow,CPqD/RouteFlow,srijanmishra/RouteFlow,arazmj/RouteFlow,ralph-mikera/RouteFlow-1,rsanger/RouteFlow,raphaelvrosa/RouteFlow,routeflow/RouteFlow,ralph-mikera/RouteFlow-1,c3m3gyanesh/RouteFlow-OpenConfig,rsanger/RouteFlow
|
Add mininet2 topology file for rftest2
Signed-off-by: Joe Stringer <da02367898403910c66f5d62c3a247888772ce8a@gmail.com>
|
"""Custom topology example
author: Marcelo Nascimento (marcelon@cpqd.com.br)
Four switches connected in mesh topology plus a host for each switch:
h1 --- sA ---- sB --- h2
        | \    |
        |  \   |
        |   \  |
        |    \ |
h3 --- sC ---- sD --- h4
Adding the 'topos' dict with a key/value pair to generate our newly defined
topology enables one to pass in '--topo=mytopo' from the command line.
"""
from mininet.topo import Topo
class RFTopo( Topo ):
"RouteFlow Demo Setup"
def __init__( self, enable_all = True ):
"Create custom topo."
# Add default members to class.
Topo.__init__(self)
# Set Node IDs for hosts and switches
h1 = self.addHost( 'h1' )
h2 = self.addHost( 'h2' )
h3 = self.addHost( 'h3' )
h4 = self.addHost( 'h4' )
sA = self.addSwitch( 's1' )
sB = self.addSwitch( 's2' )
sC = self.addSwitch( 's3' )
sD = self.addSwitch( 's4' )
# Add edges
self.addLink( h1, sA )
self.addLink( h2, sB )
self.addLink( h3, sC )
self.addLink( h4, sD )
self.addLink( sA, sB )
self.addLink( sB, sD )
self.addLink( sD, sC )
self.addLink( sC, sA )
self.addLink( sA, sD )
topos = { 'rftopo': ( lambda: RFTopo() ) }
|
<commit_before><commit_msg>Add mininet2 topology file for rftest2
Signed-off-by: Joe Stringer <da02367898403910c66f5d62c3a247888772ce8a@gmail.com><commit_after>
|
"""Custom topology example
author: Marcelo Nascimento (marcelon@cpqd.com.br)
Four switches connected in mesh topology plus a host for each switch:
h1 --- sA ---- sB --- h2
        | \    |
        |  \   |
        |   \  |
        |    \ |
h3 --- sC ---- sD --- h4
Adding the 'topos' dict with a key/value pair to generate our newly defined
topology enables one to pass in '--topo=mytopo' from the command line.
"""
from mininet.topo import Topo
class RFTopo( Topo ):
"RouteFlow Demo Setup"
def __init__( self, enable_all = True ):
"Create custom topo."
# Add default members to class.
Topo.__init__(self)
# Set Node IDs for hosts and switches
h1 = self.addHost( 'h1' )
h2 = self.addHost( 'h2' )
h3 = self.addHost( 'h3' )
h4 = self.addHost( 'h4' )
sA = self.addSwitch( 's1' )
sB = self.addSwitch( 's2' )
sC = self.addSwitch( 's3' )
sD = self.addSwitch( 's4' )
# Add edges
self.addLink( h1, sA )
self.addLink( h2, sB )
self.addLink( h3, sC )
self.addLink( h4, sD )
self.addLink( sA, sB )
self.addLink( sB, sD )
self.addLink( sD, sC )
self.addLink( sC, sA )
self.addLink( sA, sD )
topos = { 'rftopo': ( lambda: RFTopo() ) }
|
Add mininet2 topology file for rftest2
Signed-off-by: Joe Stringer <da02367898403910c66f5d62c3a247888772ce8a@gmail.com>"""Custom topology example
author: Marcelo Nascimento (marcelon@cpqd.com.br)
Four switches connected in mesh topology plus a host for each switch:
h1 --- sA ---- sB --- h2
        | \    |
        |  \   |
        |   \  |
        |    \ |
h3 --- sC ---- sD --- h4
Adding the 'topos' dict with a key/value pair to generate our newly defined
topology enables one to pass in '--topo=mytopo' from the command line.
"""
from mininet.topo import Topo
class RFTopo( Topo ):
"RouteFlow Demo Setup"
def __init__( self, enable_all = True ):
"Create custom topo."
# Add default members to class.
Topo.__init__(self)
# Set Node IDs for hosts and switches
h1 = self.addHost( 'h1' )
h2 = self.addHost( 'h2' )
h3 = self.addHost( 'h3' )
h4 = self.addHost( 'h4' )
sA = self.addSwitch( 's1' )
sB = self.addSwitch( 's2' )
sC = self.addSwitch( 's3' )
sD = self.addSwitch( 's4' )
# Add edges
self.addLink( h1, sA )
self.addLink( h2, sB )
self.addLink( h3, sC )
self.addLink( h4, sD )
self.addLink( sA, sB )
self.addLink( sB, sD )
self.addLink( sD, sC )
self.addLink( sC, sA )
self.addLink( sA, sD )
topos = { 'rftopo': ( lambda: RFTopo() ) }
|
<commit_before><commit_msg>Add mininet2 topology file for rftest2
Signed-off-by: Joe Stringer <da02367898403910c66f5d62c3a247888772ce8a@gmail.com><commit_after>"""Custom topology example
author: Marcelo Nascimento (marcelon@cpqd.com.br)
Four switches connected in mesh topology plus a host for each switch:
h1 --- sA ---- sB --- h2
        |  \    |
        |   \   |
        |    \  |
        |     \ |
h3 --- sC ---- sD --- h4
Adding the 'topos' dict with a key/value pair to generate our newly defined
topology enables one to pass in '--topo=mytopo' from the command line.
"""
from mininet.topo import Topo
class RFTopo( Topo ):
"RouteFlow Demo Setup"
def __init__( self, enable_all = True ):
"Create custom topo."
# Add default members to class.
Topo.__init__(self)
# Set Node IDs for hosts and switches
h1 = self.addHost( 'h1' )
h2 = self.addHost( 'h2' )
h3 = self.addHost( 'h3' )
h4 = self.addHost( 'h4' )
sA = self.addSwitch( 's1' )
sB = self.addSwitch( 's2' )
sC = self.addSwitch( 's3' )
sD = self.addSwitch( 's4' )
# Add edges
self.addLink( h1, sA )
self.addLink( h2, sB )
self.addLink( h3, sC )
self.addLink( h4, sD )
self.addLink( sA, sB )
self.addLink( sB, sD )
self.addLink( sD, sC )
self.addLink( sC, sA )
self.addLink( sA, sD )
topos = { 'rftopo': ( lambda: RFTopo() ) }
|
|
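The topology above is normally loaded on the Mininet host with something like "sudo mn --custom <path-to-this-file> --topo rftopo --controller=remote". The sketch below drives the same topology from Python instead; it is illustrative only and assumes Mininet is installed, that it runs with root privileges, and that the file above is importable as rftest2_topo (a hypothetical module name).

from mininet.net import Mininet
from mininet.node import RemoteController
from mininet.cli import CLI
from rftest2_topo import RFTopo  # hypothetical module name for the file above

net = Mininet(topo=RFTopo(), controller=RemoteController)
net.start()
net.pingAll()  # full reachability depends on the RouteFlow controller having installed routes
CLI(net)
net.stop()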
bde2509df82a901a1530bc0189a4f1f5443375c6
|
tests/test_completion_skin_rainmeter_section.py
|
tests/test_completion_skin_rainmeter_section.py
|
import sys
from unittest import TestCase
rainmeter_section = sys.modules["Rainmeter.completion.skin.rainmeter_section"]
class TestFunctions(TestCase):
def test_completion_skin_rainmeter_section_with_not_rainmeter_should_return_none(self):
"""
        The given section is 'Different', but we are completing inside the Rainmeter section,
        so only 'Rainmeter' is allowed
"""
complete = rainmeter_section.SkinRainmeterSectionAutoComplete()
value_completion = complete.get_value_context_completion(None, None, None, None, "Different", None, None)
self.assertEqual(value_completion, None)
|
Add initial test for skin rainmeter section
|
Add initial test for skin rainmeter section
|
Python
|
mit
|
thatsIch/sublime-rainmeter
|
Add initial test for skin rainmeter section
|
import sys
from unittest import TestCase
rainmeter_section = sys.modules["Rainmeter.completion.skin.rainmeter_section"]
class TestFunctions(TestCase):
def test_completion_skin_rainmeter_section_with_not_rainmeter_should_return_none(self):
"""
        The given section is 'Different', but we are completing inside the Rainmeter section,
        so only 'Rainmeter' is allowed
"""
complete = rainmeter_section.SkinRainmeterSectionAutoComplete()
value_completion = complete.get_value_context_completion(None, None, None, None, "Different", None, None)
self.assertEqual(value_completion, None)
|
<commit_before><commit_msg>Add initial test for skin rainmeter section<commit_after>
|
import sys
from unittest import TestCase
rainmeter_section = sys.modules["Rainmeter.completion.skin.rainmeter_section"]
class TestFunctions(TestCase):
def test_completion_skin_rainmeter_section_with_not_rainmeter_should_return_none(self):
"""
        The given section is 'Different', but we are completing inside the Rainmeter section,
        so only 'Rainmeter' is allowed
"""
complete = rainmeter_section.SkinRainmeterSectionAutoComplete()
value_completion = complete.get_value_context_completion(None, None, None, None, "Different", None, None)
self.assertEqual(value_completion, None)
|
Add initial test for skin rainmeter sectionimport sys
from unittest import TestCase
rainmeter_section = sys.modules["Rainmeter.completion.skin.rainmeter_section"]
class TestFunctions(TestCase):
def test_completion_skin_rainmeter_section_with_not_rainmeter_should_return_none(self):
"""
        The given section is 'Different', but we are completing inside the Rainmeter section,
        so only 'Rainmeter' is allowed
"""
complete = rainmeter_section.SkinRainmeterSectionAutoComplete()
value_completion = complete.get_value_context_completion(None, None, None, None, "Different", None, None)
self.assertEqual(value_completion, None)
|
<commit_before><commit_msg>Add initial test for skin rainmeter section<commit_after>import sys
from unittest import TestCase
rainmeter_section = sys.modules["Rainmeter.completion.skin.rainmeter_section"]
class TestFunctions(TestCase):
def test_completion_skin_rainmeter_section_with_not_rainmeter_should_return_none(self):
"""
        The given section is 'Different', but we are completing inside the Rainmeter section,
        so only 'Rainmeter' is allowed
"""
complete = rainmeter_section.SkinRainmeterSectionAutoComplete()
value_completion = complete.get_value_context_completion(None, None, None, None, "Different", None, None)
self.assertEqual(value_completion, None)
|
|
a0924b2a68d05ad778e5b792aace2dbac7bd6a7c
|
tests/test_encrypt.py
|
tests/test_encrypt.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from future.utils import bytes_to_native_str as n
from base64 import b64decode
import os
import boto3
import pytest
from figgypy.utils import kms_encrypt
@pytest.mark.skipif(os.environ.get('INTEGRATION') is None,
reason="credentials are required")
class TestEncryptIntegration(object):
def test_kms_encrypt(self):
key = 'alias/figgypy-test'
secret = 'test password 1234567890 !@#$%^&*()'
client = boto3.client('kms')
encrypted = kms_encrypt(secret, key)
dec_res = client.decrypt(CiphertextBlob=b64decode(encrypted))
decrypted = n(dec_res['Plaintext'])
assert decrypted == secret
|
Create integration test to verify KMS encryption
|
Create integration test to verify KMS encryption
|
Python
|
mit
|
theherk/figgypy
|
Create integration test to verify KMS encryption
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from future.utils import bytes_to_native_str as n
from base64 import b64decode
import os
import boto3
import pytest
from figgypy.utils import kms_encrypt
@pytest.mark.skipif(os.environ.get('INTEGRATION') is None,
reason="credentials are required")
class TestEncryptIntegration(object):
def test_kms_encrypt(self):
key = 'alias/figgypy-test'
secret = 'test password 1234567890 !@#$%^&*()'
client = boto3.client('kms')
encrypted = kms_encrypt(secret, key)
dec_res = client.decrypt(CiphertextBlob=b64decode(encrypted))
decrypted = n(dec_res['Plaintext'])
assert decrypted == secret
|
<commit_before><commit_msg>Create integration test to verify KMS encryption<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from future.utils import bytes_to_native_str as n
from base64 import b64decode
import os
import boto3
import pytest
from figgypy.utils import kms_encrypt
@pytest.mark.skipif(os.environ.get('INTEGRATION') is None,
reason="credentials are required")
class TestEncryptIntegration(object):
def test_kms_encrypt(self):
key = 'alias/figgypy-test'
secret = 'test password 1234567890 !@#$%^&*()'
client = boto3.client('kms')
encrypted = kms_encrypt(secret, key)
dec_res = client.decrypt(CiphertextBlob=b64decode(encrypted))
decrypted = n(dec_res['Plaintext'])
assert decrypted == secret
|
Create integration test to verify KMS encryption# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from future.utils import bytes_to_native_str as n
from base64 import b64decode
import os
import boto3
import pytest
from figgypy.utils import kms_encrypt
@pytest.mark.skipif(os.environ.get('INTEGRATION') is None,
reason="credentials are required")
class TestEncryptIntegration(object):
def test_kms_encrypt(self):
key = 'alias/figgypy-test'
secret = 'test password 1234567890 !@#$%^&*()'
client = boto3.client('kms')
encrypted = kms_encrypt(secret, key)
dec_res = client.decrypt(CiphertextBlob=b64decode(encrypted))
decrypted = n(dec_res['Plaintext'])
assert decrypted == secret
|
<commit_before><commit_msg>Create integration test to verify KMS encryption<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from future.utils import bytes_to_native_str as n
from base64 import b64decode
import os
import boto3
import pytest
from figgypy.utils import kms_encrypt
@pytest.mark.skipif(os.environ.get('INTEGRATION') is None,
reason="credentials are required")
class TestEncryptIntegration(object):
def test_kms_encrypt(self):
key = 'alias/figgypy-test'
secret = 'test password 1234567890 !@#$%^&*()'
client = boto3.client('kms')
encrypted = kms_encrypt(secret, key)
dec_res = client.decrypt(CiphertextBlob=b64decode(encrypted))
decrypted = n(dec_res['Plaintext'])
assert decrypted == secret
|
|
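The test above only runs when the INTEGRATION environment variable is set (for example INTEGRATION=1 pytest tests/test_encrypt.py) and real AWS credentials are available. A minimal manual round trip looks like the sketch below; it assumes the same KMS key alias the test uses, alias/figgypy-test, exists in the account.

from base64 import b64decode
import boto3
from figgypy.utils import kms_encrypt

ciphertext = kms_encrypt('example secret', 'alias/figgypy-test')   # returns base64 text, per the test
plaintext = boto3.client('kms').decrypt(CiphertextBlob=b64decode(ciphertext))['Plaintext']
print(plaintext)  # b'example secret' on Python 3, 'example secret' on Python 2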
5ca43c4c1e357042a371cf876f4a55259fdebf2b
|
src/epiweb/apps/survey/management.py
|
src/epiweb/apps/survey/management.py
|
from django.dispatch import dispatcher
from django.db.models import signals
from epiweb.apps.survey import models
def post_syncdb(sender, **kwargs):
app = kwargs['app']
created_models = kwargs['created_models']
if (app == models) and (models.Survey in created_models):
survey = models.Survey()
survey.title = 'Dummy Survey'
survey.definition = ''
survey.active = True
survey.save()
signals.post_syncdb.connect(post_syncdb)
|
Add dummy data to Survey model.
|
Add dummy data to Survey model.
|
Python
|
agpl-3.0
|
ISIFoundation/influenzanet-website,ISIFoundation/influenzanet-website,ISIFoundation/influenzanet-website,ISIFoundation/influenzanet-website,ISIFoundation/influenzanet-website,ISIFoundation/influenzanet-website,ISIFoundation/influenzanet-website
|
Add dummy data to Survey model.
|
from django.dispatch import dispatcher
from django.db.models import signals
from epiweb.apps.survey import models
def post_syncdb(sender, **kwargs):
app = kwargs['app']
created_models = kwargs['created_models']
if (app == models) and (models.Survey in created_models):
survey = models.Survey()
survey.title = 'Dummy Survey'
survey.definition = ''
survey.active = True
survey.save()
signals.post_syncdb.connect(post_syncdb)
|
<commit_before><commit_msg>Add dummy data to Survey model.<commit_after>
|
from django.dispatch import dispatcher
from django.db.models import signals
from epiweb.apps.survey import models
def post_syncdb(sender, **kwargs):
app = kwargs['app']
created_models = kwargs['created_models']
if (app == models) and (models.Survey in created_models):
survey = models.Survey()
survey.title = 'Dummy Survey'
survey.definition = ''
survey.active = True
survey.save()
signals.post_syncdb.connect(post_syncdb)
|
Add dummy data to Survey model.from django.dispatch import dispatcher
from django.db.models import signals
from epiweb.apps.survey import models
def post_syncdb(sender, **kwargs):
app = kwargs['app']
created_models = kwargs['created_models']
if (app == models) and (models.Survey in created_models):
survey = models.Survey()
survey.title = 'Dummy Survey'
survey.definition = ''
survey.active = True
survey.save()
signals.post_syncdb.connect(post_syncdb)
|
<commit_before><commit_msg>Add dummy data to Survey model.<commit_after>from django.dispatch import dispatcher
from django.db.models import signals
from epiweb.apps.survey import models
def post_syncdb(sender, **kwargs):
app = kwargs['app']
created_models = kwargs['created_models']
if (app == models) and (models.Survey in created_models):
survey = models.Survey()
survey.title = 'Dummy Survey'
survey.definition = ''
survey.active = True
survey.save()
signals.post_syncdb.connect(post_syncdb)
|
|
e75aecc3e8606559fa722f7ffb081e2df258b60f
|
py/search-in-rotated-sorted-array.py
|
py/search-in-rotated-sorted-array.py
|
class Solution(object):
def search(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
if len(nums) == 1:
return 0 if nums[0] == target else -1
if target >= nums[0]:
L, U = -1, len(nums)
while L + 1 < U:
mid = L + (U - L) / 2
if nums[mid] > target or nums[mid] < nums[0]:
U = mid
else:
L = mid
return L if nums[L] == target else -1
elif target <= nums[-1]:
L, U = -1, len(nums)
while L + 1 < U:
mid = L + (U - L) / 2
if nums[mid] <= target or nums[mid] > nums[-1]:
L = mid
else:
U = mid
return L if nums[L] == target else -1
else:
return -1
|
Add py solution for Search in Rotated Sorted Array
|
Add py solution for Search in Rotated Sorted Array
Search in Rotated Sorted Array: https://leetcode.com/problems/search-in-rotated-sorted-array/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for Search in Rotated Sorted Array
Search in Rotated Sorted Array: https://leetcode.com/problems/search-in-rotated-sorted-array/
|
class Solution(object):
def search(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
if len(nums) == 1:
return 0 if nums[0] == target else -1
if target >= nums[0]:
L, U = -1, len(nums)
while L + 1 < U:
mid = L + (U - L) / 2
if nums[mid] > target or nums[mid] < nums[0]:
U = mid
else:
L = mid
return L if nums[L] == target else -1
elif target <= nums[-1]:
L, U = -1, len(nums)
while L + 1 < U:
mid = L + (U - L) / 2
if nums[mid] <= target or nums[mid] > nums[-1]:
L = mid
else:
U = mid
return L if nums[L] == target else -1
else:
return -1
|
<commit_before><commit_msg>Add py solution for Search in Rotated Sorted Array
Search in Rotated Sorted Array: https://leetcode.com/problems/search-in-rotated-sorted-array/<commit_after>
|
class Solution(object):
def search(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
if len(nums) == 1:
return 0 if nums[0] == target else -1
if target >= nums[0]:
L, U = -1, len(nums)
while L + 1 < U:
mid = L + (U - L) / 2
if nums[mid] > target or nums[mid] < nums[0]:
U = mid
else:
L = mid
return L if nums[L] == target else -1
elif target <= nums[-1]:
L, U = -1, len(nums)
while L + 1 < U:
mid = L + (U - L) / 2
if nums[mid] <= target or nums[mid] > nums[-1]:
L = mid
else:
U = mid
return L if nums[L] == target else -1
else:
return -1
|
Add py solution for Search in Rotated Sorted Array
Search in Rotated Sorted Array: https://leetcode.com/problems/search-in-rotated-sorted-array/class Solution(object):
def search(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
if len(nums) == 1:
return 0 if nums[0] == target else -1
if target >= nums[0]:
L, U = -1, len(nums)
while L + 1 < U:
mid = L + (U - L) / 2
if nums[mid] > target or nums[mid] < nums[0]:
U = mid
else:
L = mid
return L if nums[L] == target else -1
elif target <= nums[-1]:
L, U = -1, len(nums)
while L + 1 < U:
mid = L + (U - L) / 2
if nums[mid] <= target or nums[mid] > nums[-1]:
L = mid
else:
U = mid
return L if nums[L] == target else -1
else:
return -1
|
<commit_before><commit_msg>Add py solution for Search in Rotated Sorted Array
Search in Rotated Sorted Array: https://leetcode.com/problems/search-in-rotated-sorted-array/<commit_after>class Solution(object):
def search(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
if len(nums) == 1:
return 0 if nums[0] == target else -1
if target >= nums[0]:
L, U = -1, len(nums)
while L + 1 < U:
mid = L + (U - L) / 2
if nums[mid] > target or nums[mid] < nums[0]:
U = mid
else:
L = mid
return L if nums[L] == target else -1
elif target <= nums[-1]:
L, U = -1, len(nums)
while L + 1 < U:
mid = L + (U - L) / 2
if nums[mid] <= target or nums[mid] > nums[-1]:
L = mid
else:
U = mid
return L if nums[L] == target else -1
else:
return -1
|
|
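The two branches above binary-search the left run (values >= nums[0]) and the right run (values <= nums[-1]) of the rotated array; note that mid = L + (U - L) / 2 relies on Python 2 integer division. A quick illustrative check:

s = Solution()
nums = [4, 5, 6, 7, 0, 1, 2]
assert s.search(nums, 0) == 4   # found in the right (smaller) run
assert s.search(nums, 4) == 0   # found in the left (larger) run
assert s.search(nums, 3) == -1  # value not present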
4bac6dab8168170cca42f70f2c13dad5467d3cbb
|
ffdc/plugins/date_time_utils.py
|
ffdc/plugins/date_time_utils.py
|
#!/usr/bin/env python
r"""
This module contains functions having to do with date time filter.
"""
from datetime import datetime
def convert_string_dateime(date_str, date_format, desired_format):
r"""
Return a date time formatted from a string datetime.
Description of arguments(s):
date_str Date time string e.g 2021072418161
or list ["2021072418161", "20210723163401"]
date_format Date time pattern of the string date time
e.g '%Y%m%d%H%M%S'
desired_format User define format e.g '%m/%d/%Y - %H:%M:%S'
"""
if isinstance(date_str, list):
tmp_date = []
for date in date_str:
tmp_date.append(datetime.strptime(date, date_format).strftime(desired_format))
return tmp_date
else:
return datetime.strptime(date_str, date_format).strftime(desired_format)
|
Add date time util module
|
Plugin: Add date time util module
Change-Id: I6ab1d8bb8df63b8590ce7800e60415d90ea3bad3
Signed-off-by: George Keishing <bef0a9ecac45fb57611777c8270153994e13fd2e@in.ibm.com>
|
Python
|
apache-2.0
|
openbmc/openbmc-test-automation,openbmc/openbmc-test-automation
|
Plugin: Add date time util module
Change-Id: I6ab1d8bb8df63b8590ce7800e60415d90ea3bad3
Signed-off-by: George Keishing <bef0a9ecac45fb57611777c8270153994e13fd2e@in.ibm.com>
|
#!/usr/bin/env python
r"""
This module contains functions having to do with date time filter.
"""
from datetime import datetime
def convert_string_dateime(date_str, date_format, desired_format):
r"""
Return a date time formatted from a string datetime.
Description of arguments(s):
date_str Date time string e.g 2021072418161
or list ["2021072418161", "20210723163401"]
date_format Date time pattern of the string date time
e.g '%Y%m%d%H%M%S'
desired_format User define format e.g '%m/%d/%Y - %H:%M:%S'
"""
if isinstance(date_str, list):
tmp_date = []
for date in date_str:
tmp_date.append(datetime.strptime(date, date_format).strftime(desired_format))
return tmp_date
else:
return datetime.strptime(date_str, date_format).strftime(desired_format)
|
<commit_before><commit_msg>Plugin: Add date time util module
Change-Id: I6ab1d8bb8df63b8590ce7800e60415d90ea3bad3
Signed-off-by: George Keishing <bef0a9ecac45fb57611777c8270153994e13fd2e@in.ibm.com><commit_after>
|
#!/usr/bin/env python
r"""
This module contains functions having to do with date time filter.
"""
from datetime import datetime
def convert_string_dateime(date_str, date_format, desired_format):
r"""
Return a date time formatted from a string datetime.
Description of arguments(s):
date_str Date time string e.g 2021072418161
or list ["2021072418161", "20210723163401"]
date_format Date time pattern of the string date time
e.g '%Y%m%d%H%M%S'
desired_format User define format e.g '%m/%d/%Y - %H:%M:%S'
"""
if isinstance(date_str, list):
tmp_date = []
for date in date_str:
tmp_date.append(datetime.strptime(date, date_format).strftime(desired_format))
return tmp_date
else:
return datetime.strptime(date_str, date_format).strftime(desired_format)
|
Plugin: Add date time util module
Change-Id: I6ab1d8bb8df63b8590ce7800e60415d90ea3bad3
Signed-off-by: George Keishing <bef0a9ecac45fb57611777c8270153994e13fd2e@in.ibm.com>#!/usr/bin/env python
r"""
This module contains functions having to do with date time filter.
"""
from datetime import datetime
def convert_string_dateime(date_str, date_format, desired_format):
r"""
Return a date time formatted from a string datetime.
Description of arguments(s):
date_str Date time string e.g 2021072418161
or list ["2021072418161", "20210723163401"]
date_format Date time pattern of the string date time
e.g '%Y%m%d%H%M%S'
desired_format User define format e.g '%m/%d/%Y - %H:%M:%S'
"""
if isinstance(date_str, list):
tmp_date = []
for date in date_str:
tmp_date.append(datetime.strptime(date, date_format).strftime(desired_format))
return tmp_date
else:
return datetime.strptime(date_str, date_format).strftime(desired_format)
|
<commit_before><commit_msg>Plugin: Add date time util module
Change-Id: I6ab1d8bb8df63b8590ce7800e60415d90ea3bad3
Signed-off-by: George Keishing <bef0a9ecac45fb57611777c8270153994e13fd2e@in.ibm.com><commit_after>#!/usr/bin/env python
r"""
This module contains functions having to do with date time filter.
"""
from datetime import datetime
def convert_string_dateime(date_str, date_format, desired_format):
r"""
Return a date time formatted from a string datetime.
Description of arguments(s):
date_str Date time string e.g 2021072418161
or list ["2021072418161", "20210723163401"]
date_format Date time pattern of the string date time
e.g '%Y%m%d%H%M%S'
desired_format User define format e.g '%m/%d/%Y - %H:%M:%S'
"""
if isinstance(date_str, list):
tmp_date = []
for date in date_str:
tmp_date.append(datetime.strptime(date, date_format).strftime(desired_format))
return tmp_date
else:
return datetime.strptime(date_str, date_format).strftime(desired_format)
|
|
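A small usage sketch of the helper above, using the import path implied by the file location and a full 14-digit timestamp that actually matches the '%Y%m%d%H%M%S' pattern:

from ffdc.plugins.date_time_utils import convert_string_dateime

print(convert_string_dateime('20210724181610', '%Y%m%d%H%M%S', '%m/%d/%Y - %H:%M:%S'))
# 07/24/2021 - 18:16:10
print(convert_string_dateime(['20210724181610', '20210723163401'],
                             '%Y%m%d%H%M%S', '%m/%d/%Y - %H:%M:%S'))
# ['07/24/2021 - 18:16:10', '07/23/2021 - 16:34:01']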
c0041317494eab24ec7eef92eb7716d0b00be8fa
|
api/management/commands/generate_certificate.py
|
api/management/commands/generate_certificate.py
|
# coding=utf8
from django.core.management.base import BaseCommand, CommandError
from certificates.generator import *
from api.models import Event
class Command(BaseCommand):
args = '<event-id> [<event-id>, ...]'
help = """
Generates or regenerates a certificate for a given event.
"""
def handle(self, *args, **options):
if not args:
self.stderr.write('Please provide at least one event ID.')
for event_id in args:
self.stdout.write('Regenerating the certificate for event ID %s...' % event_id)
try:
event = Event.objects.get(pk=int(event_id))
except Event.DoesNotExist:
self.stderr.write('Event ID "%s" does not exist.' % event_id)
continue
path = generate_certificate_for(event.pk, event.certificate_file_name(), event.name_for_certificate)
if path:
                self.stdout.write('Certificate for event ID %s generated successfully: %s' % (event_id, path))
else:
self.stderr.write('An error occurred while generating the certificate.')
|
Add a command for manual certificate regeneration
|
Add a command for manual certificate regeneration
Sometimes we may need to test or regenerate a certificate manually, in
case the procedure failed the first time.
|
Python
|
mit
|
codeeu/coding-events,codeeu/coding-events,codeeu/coding-events,codeeu/coding-events,codeeu/coding-events
|
Add a command for manual certificate regeneration
Sometimes we may need to test or regenerate a certificate manually, in
case the procedure failed the first time.
|
# coding=utf8
from django.core.management.base import BaseCommand, CommandError
from certificates.generator import *
from api.models import Event
class Command(BaseCommand):
args = '<event-id> [<event-id>, ...]'
help = """
Generates or regenerates a certificate for a given event.
"""
def handle(self, *args, **options):
if not args:
self.stderr.write('Please provide at least one event ID.')
for event_id in args:
self.stdout.write('Regenerating the certificate for event ID %s...' % event_id)
try:
event = Event.objects.get(pk=int(event_id))
except Event.DoesNotExist:
self.stderr.write('Event ID "%s" does not exist.' % event_id)
continue
path = generate_certificate_for(event.pk, event.certificate_file_name(), event.name_for_certificate)
if path:
                self.stdout.write('Certificate for event ID %s generated successfully: %s' % (event_id, path))
else:
self.stderr.write('An error occurred while generating the certificate.')
|
<commit_before><commit_msg>Add a command for manual certificate regeneration
Sometimes we may need to test or regenerate a certificate manually, in
case the procedure failed the first time.<commit_after>
|
# coding=utf8
from django.core.management.base import BaseCommand, CommandError
from certificates.generator import *
from api.models import Event
class Command(BaseCommand):
args = '<event-id> [<event-id>, ...]'
help = """
Generates or regenerates a certificate for a given event.
"""
def handle(self, *args, **options):
if not args:
self.stderr.write('Please provide at least one event ID.')
for event_id in args:
self.stdout.write('Regenerating the certificate for event ID %s...' % event_id)
try:
event = Event.objects.get(pk=int(event_id))
except Event.DoesNotExist:
self.stderr.write('Event ID "%s" does not exist.' % event_id)
continue
path = generate_certificate_for(event.pk, event.certificate_file_name(), event.name_for_certificate)
if path:
                self.stdout.write('Certificate for event ID %s generated successfully: %s' % (event_id, path))
else:
self.stderr.write('An error occurred while generating the certificate.')
|
Add a command for manual certificate regeneration
Sometimes we may need to test or regenerate a certificate manually, in
case the procedure failed the first time.# coding=utf8
from django.core.management.base import BaseCommand, CommandError
from certificates.generator import *
from api.models import Event
class Command(BaseCommand):
args = '<event-id> [<event-id>, ...]'
help = """
Generates or regenerates a certificate for a given event.
"""
def handle(self, *args, **options):
if not args:
self.stderr.write('Please provide at least one event ID.')
for event_id in args:
self.stdout.write('Regenerating the certificate for event ID %s...' % event_id)
try:
event = Event.objects.get(pk=int(event_id))
except Event.DoesNotExist:
self.stderr.write('Event ID "%s" does not exist.' % event_id)
continue
path = generate_certificate_for(event.pk, event.certificate_file_name(), event.name_for_certificate)
if path:
                self.stdout.write('Certificate for event ID %s generated successfully: %s' % (event_id, path))
else:
self.stderr.write('An error occurred while generating the certificate.')
|
<commit_before><commit_msg>Add a command for manual certificate regeneration
Sometimes we may need to test or regenerate a certificate manually, in
case the procedure failed the first time.<commit_after># coding=utf8
from django.core.management.base import BaseCommand, CommandError
from certificates.generator import *
from api.models import Event
class Command(BaseCommand):
args = '<event-id> [<event-id>, ...]'
help = """
Generates or regenerates a certificate for a given event.
"""
def handle(self, *args, **options):
if not args:
self.stderr.write('Please provide at least one event ID.')
for event_id in args:
self.stdout.write('Regenerating the certificate for event ID %s...' % event_id)
try:
event = Event.objects.get(pk=int(event_id))
except Event.DoesNotExist:
self.stderr.write('Event ID "%s" does not exist.' % event_id)
continue
path = generate_certificate_for(event.pk, event.certificate_file_name(), event.name_for_certificate)
if path:
                self.stdout.write('Certificate for event ID %s generated successfully: %s' % (event_id, path))
else:
self.stderr.write('An error occurred while generating the certificate.')
|
|
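As an old-style args-based management command, it is invoked from the shell as python manage.py generate_certificate <event-id> [<event-id> ...]. A hedged programmatic equivalent, assuming a configured Django settings module; the event IDs 12 and 34 are placeholders:

from django.core.management import call_command

call_command('generate_certificate', '12', '34')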
fd332ab34d1380d103b89dae942fbf2ca4334749
|
scripts/find_missing_miro_records.py
|
scripts/find_missing_miro_records.py
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
"""
This is a diagnostic script for finding Miro records that have been
pushed into DynamoDB but aren't available in the API -- which may be an
indicator that the pipeline is leaking somewhere.
"""
import pprint
import sys
import boto3
import requests
def api_miro_ids():
"""
Generates the Miro IDs for records that are available in the API.
"""
page = 1
while True:
r = requests.get(
'https://api.wellcomecollection.org/catalogue/v0/works',
params={'includes': 'identifiers', 'pageSize': 100, 'page': page}
)
if not r.json()['results']:
break
for work in r.json()['results']:
identifiers = work['identifiers']
miro_ids = [i for i in identifiers if i['source'] == 'Miro']
if miro_ids:
yield miro_ids[0]['value']
page += 1
def get_records(table):
"""
Retrieve all the records in a DynamoDB table.
"""
kwargs = {}
while True:
resp = table.scan(**kwargs)
yield from resp['Items']
# DynamoDB results are paginated, with the ``LastEvaluatedKey`` in
# the response defining a parameter to be passed into the next page,
# as the start of the next response. When it's no longer present,
# we're at the end of the table. For more details:
# http://boto3.readthedocs.io/en/latest/reference/services/dynamodb.html#DynamoDB.Table.scan
try:
kwargs['ExclusiveStartKey'] = resp['LastEvaluatedKey']
except KeyError:
break
def main():
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table('MiroData')
    api_ids = list(api_miro_ids())
dynamodb_records = [t['MiroID'] for t in get_records(table)]
missing = set(dynamodb_records) - set(api_ids)
if missing:
pprint.pprint(missing)
return 1
else:
return 0
if __name__ == '__main__':
sys.exit(main())
|
Check in my script for finding missing Miro IDs
|
Check in my script for finding missing Miro IDs
|
Python
|
mit
|
wellcometrust/platform-api,wellcometrust/platform-api,wellcometrust/platform-api,wellcometrust/platform-api
|
Check in my script for finding missing Miro IDs
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
"""
This is a diagnostic script for finding Miro records that have been
pushed into DynamoDB but aren't available in the API -- which may be an
indicator that the pipeline is leaking somewhere.
"""
import pprint
import sys
import boto3
import requests
def api_miro_ids():
"""
Generates the Miro IDs for records that are available in the API.
"""
page = 1
while True:
r = requests.get(
'https://api.wellcomecollection.org/catalogue/v0/works',
params={'includes': 'identifiers', 'pageSize': 100, 'page': page}
)
if not r.json()['results']:
break
for work in r.json()['results']:
identifiers = work['identifiers']
miro_ids = [i for i in identifiers if i['source'] == 'Miro']
if miro_ids:
yield miro_ids[0]['value']
page += 1
def get_records(table):
"""
Retrieve all the records in a DynamoDB table.
"""
kwargs = {}
while True:
resp = table.scan(**kwargs)
yield from resp['Items']
# DynamoDB results are paginated, with the ``LastEvaluatedKey`` in
# the response defining a parameter to be passed into the next page,
# as the start of the next response. When it's no longer present,
# we're at the end of the table. For more details:
# http://boto3.readthedocs.io/en/latest/reference/services/dynamodb.html#DynamoDB.Table.scan
try:
kwargs['ExclusiveStartKey'] = resp['LastEvaluatedKey']
except KeyError:
break
def main():
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table('MiroData')
    api_ids = list(api_miro_ids())
dynamodb_records = [t['MiroID'] for t in get_records(table)]
missing = set(dynamodb_records) - set(api_ids)
if missing:
pprint.pprint(missing)
return 1
else:
return 0
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>Check in my script for finding missing Miro IDs<commit_after>
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
"""
This is a diagnostic script for finding Miro records that have been
pushed into DynamoDB but aren't available in the API -- which may be an
indicator that the pipeline is leaking somewhere.
"""
import pprint
import sys
import boto3
import requests
def api_miro_ids():
"""
Generates the Miro IDs for records that are available in the API.
"""
page = 1
while True:
r = requests.get(
'https://api.wellcomecollection.org/catalogue/v0/works',
params={'includes': 'identifiers', 'pageSize': 100, 'page': page}
)
if not r.json()['results']:
break
for work in r.json()['results']:
identifiers = work['identifiers']
miro_ids = [i for i in identifiers if i['source'] == 'Miro']
if miro_ids:
yield miro_ids[0]['value']
page += 1
def get_records(table):
"""
Retrieve all the records in a DynamoDB table.
"""
kwargs = {}
while True:
resp = table.scan(**kwargs)
yield from resp['Items']
# DynamoDB results are paginated, with the ``LastEvaluatedKey`` in
# the response defining a parameter to be passed into the next page,
# as the start of the next response. When it's no longer present,
# we're at the end of the table. For more details:
# http://boto3.readthedocs.io/en/latest/reference/services/dynamodb.html#DynamoDB.Table.scan
try:
kwargs['ExclusiveStartKey'] = resp['LastEvaluatedKey']
except KeyError:
break
def main():
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table('MiroData')
    api_ids = list(api_miro_ids())
dynamodb_records = [t['MiroID'] for t in get_records(table)]
missing = set(dynamodb_records) - set(api_ids)
if missing:
pprint.pprint(missing)
return 1
else:
return 0
if __name__ == '__main__':
sys.exit(main())
|
Check in my script for finding missing Miro IDs#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
"""
This is a diagnostic script for finding Miro records that have been
pushed into DynamoDB but aren't available in the API -- which may be an
indicator that the pipeline is leaking somewhere.
"""
import pprint
import sys
import boto3
import requests
def api_miro_ids():
"""
Generates the Miro IDs for records that are available in the API.
"""
page = 1
while True:
r = requests.get(
'https://api.wellcomecollection.org/catalogue/v0/works',
params={'includes': 'identifiers', 'pageSize': 100, 'page': page}
)
if not r.json()['results']:
break
for work in r.json()['results']:
identifiers = work['identifiers']
miro_ids = [i for i in identifiers if i['source'] == 'Miro']
if miro_ids:
yield miro_ids[0]['value']
page += 1
def get_records(table):
"""
Retrieve all the records in a DynamoDB table.
"""
kwargs = {}
while True:
resp = table.scan(**kwargs)
yield from resp['Items']
# DynamoDB results are paginated, with the ``LastEvaluatedKey`` in
# the response defining a parameter to be passed into the next page,
# as the start of the next response. When it's no longer present,
# we're at the end of the table. For more details:
# http://boto3.readthedocs.io/en/latest/reference/services/dynamodb.html#DynamoDB.Table.scan
try:
kwargs['ExclusiveStartKey'] = resp['LastEvaluatedKey']
except KeyError:
break
def main():
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table('MiroData')
    api_ids = list(api_miro_ids())
dynamodb_records = [t['MiroID'] for t in get_records(table)]
missing = set(dynamodb_records) - set(api_ids)
if missing:
pprint.pprint(missing)
return 1
else:
return 0
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>Check in my script for finding missing Miro IDs<commit_after>#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
"""
This is a diagnostic script for finding Miro records that have been
pushed into DynamoDB but aren't available in the API -- which may be an
indicator that the pipeline is leaking somewhere.
"""
import pprint
import sys
import boto3
import requests
def api_miro_ids():
"""
Generates the Miro IDs for records that are available in the API.
"""
page = 1
while True:
r = requests.get(
'https://api.wellcomecollection.org/catalogue/v0/works',
params={'includes': 'identifiers', 'pageSize': 100, 'page': page}
)
if not r.json()['results']:
break
for work in r.json()['results']:
identifiers = work['identifiers']
miro_ids = [i for i in identifiers if i['source'] == 'Miro']
if miro_ids:
yield miro_ids[0]['value']
page += 1
def get_records(table):
"""
Retrieve all the records in a DynamoDB table.
"""
kwargs = {}
while True:
resp = table.scan(**kwargs)
yield from resp['Items']
# DynamoDB results are paginated, with the ``LastEvaluatedKey`` in
# the response defining a parameter to be passed into the next page,
# as the start of the next response. When it's no longer present,
# we're at the end of the table. For more details:
# http://boto3.readthedocs.io/en/latest/reference/services/dynamodb.html#DynamoDB.Table.scan
try:
kwargs['ExclusiveStartKey'] = resp['LastEvaluatedKey']
except KeyError:
break
def main():
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table('MiroData')
    api_ids = list(api_miro_ids())
dynamodb_records = [t['MiroID'] for t in get_records(table)]
missing = set(dynamodb_records) - set(api_ids)
if missing:
pprint.pprint(missing)
return 1
else:
return 0
if __name__ == '__main__':
sys.exit(main())
|
|
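Stripped of the AWS calls, the reconciliation in main() is a plain set difference between the IDs stored in DynamoDB and the IDs the catalogue API exposes; with stand-in data it reduces to:

dynamodb_records = ['V0001234', 'V0001235', 'V0001236']  # illustrative Miro IDs
api_ids = ['V0001234', 'V0001236']
missing = set(dynamodb_records) - set(api_ids)
print(sorted(missing))  # ['V0001235'] -- records that reached DynamoDB but never surfaced in the API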
0a4dc6c864387b2a70d85279623523003700affb
|
py509/utils.py
|
py509/utils.py
|
def tree(node, _depth=1):
"""Print a tree.
Sometimes it's useful to print datastructures as a tree. This function prints
out a pretty tree with root `node`. A tree is represented as a :class:`dict`,
whose keys are node names and values are :class:`dict` objects for sub-trees
and :class:`None` for terminals.
:param dict node: The root of the tree to print.
"""
current = 0
length = len(node.keys())
tee_joint = '\xe2\x94\x9c\xe2\x94\x80\xe2\x94\x80'
elbow_joint = '\xe2\x94\x94\xe2\x94\x80\xe2\x94\x80'
for key, value in node.iteritems():
current += 1
if current == length:
yield ' {space} {key}'.format(space=elbow_joint, key=key)
else:
yield ' {space} {key}'.format(space=tee_joint, key=key)
if value:
for e in tree(value, _depth=_depth + 1):
yield (' | ' if current != length else ' ') + e
# XXX: Currently, pyOpenSSL doesn't expose any nice OpenSSL.crypto.X509Store
# functions for us to use to take a *real* store as an input.
def assemble_chain(leaf, store):
"""Assemble the trust chain.
:param OpenSSL.crypto.X509 leaf: The leaf certificate from which to build the
chain.
:param list[OpenSSL.crypto.X509] store: A list of certificates to use to
resolve the chain.
:return: The trust chain.
:rtype: list[OpenSSL.crypto.X509]
"""
chain = [leaf]
current = leaf
while current.get_issuer().CN != current.get_subject().CN:
for cert in store:
if cert.get_subject().CN == current.get_issuer().CN:
chain.append(cert)
current = cert
chain.reverse()
return chain
|
Add a few utilities for printing results
|
Add a few utilities for printing results
1) Copy over the ``tree`` function from sholsapp/powertools repository.
2) Add a ``assemble_chain`` function for pretty printing verification
results.
|
Python
|
apache-2.0
|
sholsapp/py509
|
Add a few utilities for printing results
1) Copy over the ``tree`` function from sholsapp/powertools repository.
2) Add a ``assemble_chain`` function for pretty printing verification
results.
|
def tree(node, _depth=1):
"""Print a tree.
Sometimes it's useful to print datastructures as a tree. This function prints
out a pretty tree with root `node`. A tree is represented as a :class:`dict`,
whose keys are node names and values are :class:`dict` objects for sub-trees
and :class:`None` for terminals.
:param dict node: The root of the tree to print.
"""
current = 0
length = len(node.keys())
tee_joint = '\xe2\x94\x9c\xe2\x94\x80\xe2\x94\x80'
elbow_joint = '\xe2\x94\x94\xe2\x94\x80\xe2\x94\x80'
for key, value in node.iteritems():
current += 1
if current == length:
yield ' {space} {key}'.format(space=elbow_joint, key=key)
else:
yield ' {space} {key}'.format(space=tee_joint, key=key)
if value:
for e in tree(value, _depth=_depth + 1):
yield (' | ' if current != length else ' ') + e
# XXX: Currently, pyOpenSSL doesn't expose any nice OpenSSL.crypto.X509Store
# functions for us to use to take a *real* store as an input.
def assemble_chain(leaf, store):
"""Assemble the trust chain.
:param OpenSSL.crypto.X509 leaf: The leaf certificate from which to build the
chain.
:param list[OpenSSL.crypto.X509] store: A list of certificates to use to
resolve the chain.
:return: The trust chain.
:rtype: list[OpenSSL.crypto.X509]
"""
chain = [leaf]
current = leaf
while current.get_issuer().CN != current.get_subject().CN:
for cert in store:
if cert.get_subject().CN == current.get_issuer().CN:
chain.append(cert)
current = cert
chain.reverse()
return chain
|
<commit_before><commit_msg>Add a few utilities for printing results
1) Copy over the ``tree`` function from sholsapp/powertools repository.
2) Add a ``assemble_chain`` function for pretty printing verification
results.<commit_after>
|
def tree(node, _depth=1):
"""Print a tree.
Sometimes it's useful to print datastructures as a tree. This function prints
out a pretty tree with root `node`. A tree is represented as a :class:`dict`,
whose keys are node names and values are :class:`dict` objects for sub-trees
and :class:`None` for terminals.
:param dict node: The root of the tree to print.
"""
current = 0
length = len(node.keys())
tee_joint = '\xe2\x94\x9c\xe2\x94\x80\xe2\x94\x80'
elbow_joint = '\xe2\x94\x94\xe2\x94\x80\xe2\x94\x80'
for key, value in node.iteritems():
current += 1
if current == length:
yield ' {space} {key}'.format(space=elbow_joint, key=key)
else:
yield ' {space} {key}'.format(space=tee_joint, key=key)
if value:
for e in tree(value, _depth=_depth + 1):
yield (' | ' if current != length else ' ') + e
# XXX: Currently, pyOpenSSL doesn't expose any nice OpenSSL.crypto.X509Store
# functions for us to use to take a *real* store as an input.
def assemble_chain(leaf, store):
"""Assemble the trust chain.
:param OpenSSL.crypto.X509 leaf: The leaf certificate from which to build the
chain.
:param list[OpenSSL.crypto.X509] store: A list of certificates to use to
resolve the chain.
:return: The trust chain.
:rtype: list[OpenSSL.crypto.X509]
"""
chain = [leaf]
current = leaf
while current.get_issuer().CN != current.get_subject().CN:
for cert in store:
if cert.get_subject().CN == current.get_issuer().CN:
chain.append(cert)
current = cert
chain.reverse()
return chain
|
Add a few utilities for printing results
1) Copy over the ``tree`` function from sholsapp/powertools repository.
2) Add a ``assemble_chain`` function for pretty printing verification
results.def tree(node, _depth=1):
"""Print a tree.
Sometimes it's useful to print datastructures as a tree. This function prints
out a pretty tree with root `node`. A tree is represented as a :class:`dict`,
whose keys are node names and values are :class:`dict` objects for sub-trees
and :class:`None` for terminals.
:param dict node: The root of the tree to print.
"""
current = 0
length = len(node.keys())
tee_joint = '\xe2\x94\x9c\xe2\x94\x80\xe2\x94\x80'
elbow_joint = '\xe2\x94\x94\xe2\x94\x80\xe2\x94\x80'
for key, value in node.iteritems():
current += 1
if current == length:
yield ' {space} {key}'.format(space=elbow_joint, key=key)
else:
yield ' {space} {key}'.format(space=tee_joint, key=key)
if value:
for e in tree(value, _depth=_depth + 1):
yield (' | ' if current != length else ' ') + e
# XXX: Currently, pyOpenSSL doesn't expose any nice OpenSSL.crypto.X509Store
# functions for us to use to take a *real* store as an input.
def assemble_chain(leaf, store):
"""Assemble the trust chain.
:param OpenSSL.crypto.X509 leaf: The leaf certificate from which to build the
chain.
:param list[OpenSSL.crypto.X509] store: A list of certificates to use to
resolve the chain.
:return: The trust chain.
:rtype: list[OpenSSL.crypto.X509]
"""
chain = [leaf]
current = leaf
while current.get_issuer().CN != current.get_subject().CN:
for cert in store:
if cert.get_subject().CN == current.get_issuer().CN:
chain.append(cert)
current = cert
chain.reverse()
return chain
|
<commit_before><commit_msg>Add a few utilities for printing results
1) Copy over the ``tree`` function from sholsapp/powertools repository.
2) Add a ``assemble_chain`` function for pretty printing verification
results.<commit_after>def tree(node, _depth=1):
"""Print a tree.
Sometimes it's useful to print datastructures as a tree. This function prints
out a pretty tree with root `node`. A tree is represented as a :class:`dict`,
whose keys are node names and values are :class:`dict` objects for sub-trees
and :class:`None` for terminals.
:param dict node: The root of the tree to print.
"""
current = 0
length = len(node.keys())
tee_joint = '\xe2\x94\x9c\xe2\x94\x80\xe2\x94\x80'
elbow_joint = '\xe2\x94\x94\xe2\x94\x80\xe2\x94\x80'
for key, value in node.iteritems():
current += 1
if current == length:
yield ' {space} {key}'.format(space=elbow_joint, key=key)
else:
yield ' {space} {key}'.format(space=tee_joint, key=key)
if value:
for e in tree(value, _depth=_depth + 1):
yield (' | ' if current != length else ' ') + e
# XXX: Currently, pyOpenSSL doesn't expose any nice OpenSSL.crypto.X509Store
# functions for us to use to take a *real* store as an input.
def assemble_chain(leaf, store):
"""Assemble the trust chain.
:param OpenSSL.crypto.X509 leaf: The leaf certificate from which to build the
chain.
:param list[OpenSSL.crypto.X509] store: A list of certificates to use to
resolve the chain.
:return: The trust chain.
:rtype: list[OpenSSL.crypto.X509]
"""
chain = [leaf]
current = leaf
while current.get_issuer().CN != current.get_subject().CN:
for cert in store:
if cert.get_subject().CN == current.get_issuer().CN:
chain.append(cert)
current = cert
chain.reverse()
return chain
|
|
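assemble_chain needs real OpenSSL.crypto.X509 objects, but tree() accepts any nested dict of the shape described in its docstring. A minimal sketch (Python 2, since the generator uses dict.iteritems and byte-string branch characters); the names are illustrative only:

ca_tree = {'root-ca': {'intermediate-ca': {'leaf-cert': None}}, 'other-root-ca': None}
for line in tree(ca_tree):
    print line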
02ad46a4463afdedc87aabd14b75fc9968b8ea64
|
src/waldur_mastermind/marketplace/management/commands/import_marketplace_orders.py
|
src/waldur_mastermind/marketplace/management/commands/import_marketplace_orders.py
|
from django.core.management.base import BaseCommand
from waldur_core.core.models import User
from waldur_mastermind.marketplace.models import Order, OrderItem, Resource
class Command(BaseCommand):
help = """Create marketplace order for each resource if it does not yet exist."""
def handle(self, *args, **options):
default_user = User.objects.filter(is_staff=True).first()
existing_resources = OrderItem.objects.exclude(resource_id=None)\
.values_list('resource_id', flat=True).distinct()
missing_resources = Resource.objects.exclude(id__in=existing_resources)
for resource in missing_resources:
order = Order.objects.create(
created=resource.created,
modified=resource.modified,
created_by=default_user,
approved_by=default_user,
approved_at=resource.created,
project=resource.project,
state=Order.States.DONE,
)
OrderItem.objects.create(
order=order,
resource=resource,
offering=resource.offering,
attributes=resource.attributes,
limits=resource.limits,
plan=resource.plan,
state=OrderItem.States.DONE,
)
count = missing_resources.count()
if count == 0:
self.stdout.write(self.style.SUCCESS('There are no resources without orders.'))
        elif count == 1:
self.stdout.write(self.style.SUCCESS('%s order has been created.' % count))
else:
self.stdout.write(self.style.SUCCESS('%s orders have been created.' % count))
|
Implement management command to import orders for existing resources.
|
Implement management command to import orders for existing resources.
|
Python
|
mit
|
opennode/nodeconductor-assembly-waldur,opennode/nodeconductor-assembly-waldur,opennode/waldur-mastermind,opennode/waldur-mastermind,opennode/waldur-mastermind,opennode/waldur-mastermind,opennode/nodeconductor-assembly-waldur
|
Implement management command to import orders for existing resources.
|
from django.core.management.base import BaseCommand
from waldur_core.core.models import User
from waldur_mastermind.marketplace.models import Order, OrderItem, Resource
class Command(BaseCommand):
help = """Create marketplace order for each resource if it does not yet exist."""
def handle(self, *args, **options):
default_user = User.objects.filter(is_staff=True).first()
existing_resources = OrderItem.objects.exclude(resource_id=None)\
.values_list('resource_id', flat=True).distinct()
missing_resources = Resource.objects.exclude(id__in=existing_resources)
for resource in missing_resources:
order = Order.objects.create(
created=resource.created,
modified=resource.modified,
created_by=default_user,
approved_by=default_user,
approved_at=resource.created,
project=resource.project,
state=Order.States.DONE,
)
OrderItem.objects.create(
order=order,
resource=resource,
offering=resource.offering,
attributes=resource.attributes,
limits=resource.limits,
plan=resource.plan,
state=OrderItem.States.DONE,
)
count = missing_resources.count()
if count == 0:
self.stdout.write(self.style.SUCCESS('There are no resources without orders.'))
        elif count == 1:
self.stdout.write(self.style.SUCCESS('%s order has been created.' % count))
else:
self.stdout.write(self.style.SUCCESS('%s orders have been created.' % count))
|
<commit_before><commit_msg>Implement management command to import orders for existing resources.<commit_after>
|
from django.core.management.base import BaseCommand
from waldur_core.core.models import User
from waldur_mastermind.marketplace.models import Order, OrderItem, Resource
class Command(BaseCommand):
help = """Create marketplace order for each resource if it does not yet exist."""
def handle(self, *args, **options):
default_user = User.objects.filter(is_staff=True).first()
existing_resources = OrderItem.objects.exclude(resource_id=None)\
.values_list('resource_id', flat=True).distinct()
missing_resources = Resource.objects.exclude(id__in=existing_resources)
for resource in missing_resources:
order = Order.objects.create(
created=resource.created,
modified=resource.modified,
created_by=default_user,
approved_by=default_user,
approved_at=resource.created,
project=resource.project,
state=Order.States.DONE,
)
OrderItem.objects.create(
order=order,
resource=resource,
offering=resource.offering,
attributes=resource.attributes,
limits=resource.limits,
plan=resource.plan,
state=OrderItem.States.DONE,
)
count = missing_resources.count()
if count == 0:
self.stdout.write(self.style.SUCCESS('There are no resources without orders.'))
        elif count == 1:
self.stdout.write(self.style.SUCCESS('%s order has been created.' % count))
else:
self.stdout.write(self.style.SUCCESS('%s orders have been created.' % count))
|
Implement management command to import orders for existing resources.from django.core.management.base import BaseCommand
from waldur_core.core.models import User
from waldur_mastermind.marketplace.models import Order, OrderItem, Resource
class Command(BaseCommand):
help = """Create marketplace order for each resource if it does not yet exist."""
def handle(self, *args, **options):
default_user = User.objects.filter(is_staff=True).first()
existing_resources = OrderItem.objects.exclude(resource_id=None)\
.values_list('resource_id', flat=True).distinct()
missing_resources = Resource.objects.exclude(id__in=existing_resources)
for resource in missing_resources:
order = Order.objects.create(
created=resource.created,
modified=resource.modified,
created_by=default_user,
approved_by=default_user,
approved_at=resource.created,
project=resource.project,
state=Order.States.DONE,
)
OrderItem.objects.create(
order=order,
resource=resource,
offering=resource.offering,
attributes=resource.attributes,
limits=resource.limits,
plan=resource.plan,
state=OrderItem.States.DONE,
)
count = missing_resources.count()
if count == 0:
self.stdout.write(self.style.SUCCESS('There are no resources without orders.'))
        elif count == 1:
self.stdout.write(self.style.SUCCESS('%s order has been created.' % count))
else:
self.stdout.write(self.style.SUCCESS('%s orders have been created.' % count))
|
<commit_before><commit_msg>Implement management command to import orders for existing resources.<commit_after>from django.core.management.base import BaseCommand
from waldur_core.core.models import User
from waldur_mastermind.marketplace.models import Order, OrderItem, Resource
class Command(BaseCommand):
help = """Create marketplace order for each resource if it does not yet exist."""
def handle(self, *args, **options):
default_user = User.objects.filter(is_staff=True).first()
existing_resources = OrderItem.objects.exclude(resource_id=None)\
.values_list('resource_id', flat=True).distinct()
missing_resources = Resource.objects.exclude(id__in=existing_resources)
for resource in missing_resources:
order = Order.objects.create(
created=resource.created,
modified=resource.modified,
created_by=default_user,
approved_by=default_user,
approved_at=resource.created,
project=resource.project,
state=Order.States.DONE,
)
OrderItem.objects.create(
order=order,
resource=resource,
offering=resource.offering,
attributes=resource.attributes,
limits=resource.limits,
plan=resource.plan,
state=OrderItem.States.DONE,
)
count = missing_resources.count()
if count == 0:
self.stdout.write(self.style.SUCCESS('There are no resources without orders.'))
        elif count == 1:
self.stdout.write(self.style.SUCCESS('%s order has been created.' % count))
else:
self.stdout.write(self.style.SUCCESS('%s orders have been created.' % count))
|
|
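The command is registered by its file name, so it can be run via Django's manage.py (python manage.py import_marketplace_orders) or, where the waldur console script is installed, as waldur import_marketplace_orders. A hedged programmatic sketch, assuming a configured Django environment:

from django.core.management import call_command
from waldur_mastermind.marketplace.models import OrderItem

call_command('import_marketplace_orders')
print(OrderItem.objects.exclude(resource_id=None).values_list('resource_id', flat=True).distinct().count())
# afterwards every marketplace resource should be referenced by at least one order item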
6844fb64dd855f9d05d93b7e5b2dc84e467b53e1
|
arc_distance/arc_distance_theano.py
|
arc_distance/arc_distance_theano.py
|
# Authors: Frederic Bastien
# License: MIT
import theano
import theano.tensor as tensor
def arc_distance_theano_alloc_prepare(dtype='float64'):
"""
Calculates the pairwise arc distance between all points in vector a and b.
"""
a = tensor.matrix(dtype=str(dtype))
b = tensor.matrix(dtype=str(dtype))
    # Theano doesn't implement all cases of tile, so we do the equivalent with alloc.
#theta_1 = tensor.tile(a[:, 0], (b.shape[0], 1)).T
theta_1 = tensor.alloc(a[:, 0], b.shape[0], b.shape[0]).T
phi_1 = tensor.alloc(a[:, 1], b.shape[0], b.shape[0]).T
theta_2 = tensor.alloc(b[:, 0], a.shape[0], a.shape[0])
phi_2 = tensor.alloc(b[:, 1], a.shape[0], a.shape[0])
temp = (tensor.sin((theta_2 - theta_1) / 2)**2
+
tensor.cos(theta_1) * tensor.cos(theta_2)
* tensor.sin((phi_2 - phi_1) / 2)**2)
distance_matrix = 2 * (tensor.arctan2(tensor.sqrt(temp),
tensor.sqrt(1 - temp)))
name = "arc_distance_theano_alloc"
rval = theano.function([a, b],
distance_matrix,
name=name)
rval.__name__ = name
return rval
def arc_distance_theano_broadcast_prepare(dtype='float64'):
"""
Calculates the pairwise arc distance between all points in vector a and b.
"""
a = tensor.matrix(dtype=str(dtype))
b = tensor.matrix(dtype=str(dtype))
theta_1 = a[:, 0][None, :]
theta_2 = b[:, 0][None, :]
phi_1 = a[:, 1][:, None]
phi_2 = b[:, 1][None, :]
temp = (tensor.sin((theta_2 - theta_1) / 2)**2
+
tensor.cos(theta_1) * tensor.cos(theta_2)
* tensor.sin((phi_2 - phi_1) / 2)**2)
distance_matrix = 2 * (tensor.arctan2(tensor.sqrt(temp),
tensor.sqrt(1 - temp)))
name = "arc_distance_theano_broadcast"
rval = theano.function([a, b],
distance_matrix,
name=name)
rval.__name__ = name
return rval
benchmarks = (
arc_distance_theano_alloc_prepare('float64'),
arc_distance_theano_broadcast_prepare('float64'),
)
|
Add arc distance for theano.
|
Add arc distance for theano.
|
Python
|
mit
|
numfocus/python-benchmarks,numfocus/python-benchmarks
|
Add arc distance for theano.
|
# Authors: Frederic Bastien
# License: MIT
import theano
import theano.tensor as tensor
def arc_distance_theano_alloc_prepare(dtype='float64'):
"""
Calculates the pairwise arc distance between all points in vector a and b.
"""
a = tensor.matrix(dtype=str(dtype))
b = tensor.matrix(dtype=str(dtype))
    # Theano doesn't implement all cases of tile, so we do the equivalent with alloc.
#theta_1 = tensor.tile(a[:, 0], (b.shape[0], 1)).T
theta_1 = tensor.alloc(a[:, 0], b.shape[0], b.shape[0]).T
phi_1 = tensor.alloc(a[:, 1], b.shape[0], b.shape[0]).T
theta_2 = tensor.alloc(b[:, 0], a.shape[0], a.shape[0])
phi_2 = tensor.alloc(b[:, 1], a.shape[0], a.shape[0])
temp = (tensor.sin((theta_2 - theta_1) / 2)**2
+
tensor.cos(theta_1) * tensor.cos(theta_2)
* tensor.sin((phi_2 - phi_1) / 2)**2)
distance_matrix = 2 * (tensor.arctan2(tensor.sqrt(temp),
tensor.sqrt(1 - temp)))
name = "arc_distance_theano_alloc"
rval = theano.function([a, b],
distance_matrix,
name=name)
rval.__name__ = name
return rval
def arc_distance_theano_broadcast_prepare(dtype='float64'):
"""
Calculates the pairwise arc distance between all points in vector a and b.
"""
a = tensor.matrix(dtype=str(dtype))
b = tensor.matrix(dtype=str(dtype))
theta_1 = a[:, 0][None, :]
theta_2 = b[:, 0][None, :]
phi_1 = a[:, 1][:, None]
phi_2 = b[:, 1][None, :]
temp = (tensor.sin((theta_2 - theta_1) / 2)**2
+
tensor.cos(theta_1) * tensor.cos(theta_2)
* tensor.sin((phi_2 - phi_1) / 2)**2)
distance_matrix = 2 * (tensor.arctan2(tensor.sqrt(temp),
tensor.sqrt(1 - temp)))
name = "arc_distance_theano_broadcast"
rval = theano.function([a, b],
distance_matrix,
name=name)
rval.__name__ = name
return rval
benchmarks = (
arc_distance_theano_alloc_prepare('float64'),
arc_distance_theano_broadcast_prepare('float64'),
)
|
<commit_before><commit_msg>Add arc distance for theano.<commit_after>
|
# Authors: Frederic Bastien
# License: MIT
import theano
import theano.tensor as tensor
def arc_distance_theano_alloc_prepare(dtype='float64'):
"""
Calculates the pairwise arc distance between all points in vector a and b.
"""
a = tensor.matrix(dtype=str(dtype))
b = tensor.matrix(dtype=str(dtype))
    # Theano doesn't implement all cases of tile, so we do the equivalent with alloc.
#theta_1 = tensor.tile(a[:, 0], (b.shape[0], 1)).T
theta_1 = tensor.alloc(a[:, 0], b.shape[0], b.shape[0]).T
phi_1 = tensor.alloc(a[:, 1], b.shape[0], b.shape[0]).T
theta_2 = tensor.alloc(b[:, 0], a.shape[0], a.shape[0])
phi_2 = tensor.alloc(b[:, 1], a.shape[0], a.shape[0])
temp = (tensor.sin((theta_2 - theta_1) / 2)**2
+
tensor.cos(theta_1) * tensor.cos(theta_2)
* tensor.sin((phi_2 - phi_1) / 2)**2)
distance_matrix = 2 * (tensor.arctan2(tensor.sqrt(temp),
tensor.sqrt(1 - temp)))
name = "arc_distance_theano_alloc"
rval = theano.function([a, b],
distance_matrix,
name=name)
rval.__name__ = name
return rval
def arc_distance_theano_broadcast_prepare(dtype='float64'):
"""
Calculates the pairwise arc distance between all points in vector a and b.
"""
a = tensor.matrix(dtype=str(dtype))
b = tensor.matrix(dtype=str(dtype))
theta_1 = a[:, 0][None, :]
theta_2 = b[:, 0][None, :]
phi_1 = a[:, 1][:, None]
phi_2 = b[:, 1][None, :]
temp = (tensor.sin((theta_2 - theta_1) / 2)**2
+
tensor.cos(theta_1) * tensor.cos(theta_2)
* tensor.sin((phi_2 - phi_1) / 2)**2)
distance_matrix = 2 * (tensor.arctan2(tensor.sqrt(temp),
tensor.sqrt(1 - temp)))
name = "arc_distance_theano_broadcast"
rval = theano.function([a, b],
distance_matrix,
name=name)
rval.__name__ = name
return rval
benchmarks = (
arc_distance_theano_alloc_prepare('float64'),
arc_distance_theano_broadcast_prepare('float64'),
)
|
Add arc distance for theano.# Authors: Frederic Bastien
# License: MIT
import theano
import theano.tensor as tensor
def arc_distance_theano_alloc_prepare(dtype='float64'):
"""
Calculates the pairwise arc distance between all points in vector a and b.
"""
a = tensor.matrix(dtype=str(dtype))
b = tensor.matrix(dtype=str(dtype))
# Theano don't implement all case of tile, so we do the equivalent with alloc.
#theta_1 = tensor.tile(a[:, 0], (b.shape[0], 1)).T
theta_1 = tensor.alloc(a[:, 0], b.shape[0], b.shape[0]).T
phi_1 = tensor.alloc(a[:, 1], b.shape[0], b.shape[0]).T
theta_2 = tensor.alloc(b[:, 0], a.shape[0], a.shape[0])
phi_2 = tensor.alloc(b[:, 1], a.shape[0], a.shape[0])
temp = (tensor.sin((theta_2 - theta_1) / 2)**2
+
tensor.cos(theta_1) * tensor.cos(theta_2)
* tensor.sin((phi_2 - phi_1) / 2)**2)
distance_matrix = 2 * (tensor.arctan2(tensor.sqrt(temp),
tensor.sqrt(1 - temp)))
name = "arc_distance_theano_alloc"
rval = theano.function([a, b],
distance_matrix,
name=name)
rval.__name__ = name
return rval
def arc_distance_theano_broadcast_prepare(dtype='float64'):
"""
Calculates the pairwise arc distance between all points in vector a and b.
"""
a = tensor.matrix(dtype=str(dtype))
b = tensor.matrix(dtype=str(dtype))
theta_1 = a[:, 0][None, :]
theta_2 = b[:, 0][None, :]
phi_1 = a[:, 1][:, None]
phi_2 = b[:, 1][None, :]
temp = (tensor.sin((theta_2 - theta_1) / 2)**2
+
tensor.cos(theta_1) * tensor.cos(theta_2)
* tensor.sin((phi_2 - phi_1) / 2)**2)
distance_matrix = 2 * (tensor.arctan2(tensor.sqrt(temp),
tensor.sqrt(1 - temp)))
name = "arc_distance_theano_broadcast"
rval = theano.function([a, b],
distance_matrix,
name=name)
rval.__name__ = name
return rval
benchmarks = (
arc_distance_theano_alloc_prepare('float64'),
arc_distance_theano_broadcast_prepare('float64'),
)
|
<commit_before><commit_msg>Add arc distance for theano.<commit_after># Authors: Frederic Bastien
# License: MIT
import theano
import theano.tensor as tensor
def arc_distance_theano_alloc_prepare(dtype='float64'):
"""
Calculates the pairwise arc distance between all points in vector a and b.
"""
a = tensor.matrix(dtype=str(dtype))
b = tensor.matrix(dtype=str(dtype))
# Theano don't implement all case of tile, so we do the equivalent with alloc.
#theta_1 = tensor.tile(a[:, 0], (b.shape[0], 1)).T
theta_1 = tensor.alloc(a[:, 0], b.shape[0], b.shape[0]).T
phi_1 = tensor.alloc(a[:, 1], b.shape[0], b.shape[0]).T
theta_2 = tensor.alloc(b[:, 0], a.shape[0], a.shape[0])
phi_2 = tensor.alloc(b[:, 1], a.shape[0], a.shape[0])
temp = (tensor.sin((theta_2 - theta_1) / 2)**2
+
tensor.cos(theta_1) * tensor.cos(theta_2)
* tensor.sin((phi_2 - phi_1) / 2)**2)
distance_matrix = 2 * (tensor.arctan2(tensor.sqrt(temp),
tensor.sqrt(1 - temp)))
name = "arc_distance_theano_alloc"
rval = theano.function([a, b],
distance_matrix,
name=name)
rval.__name__ = name
return rval
def arc_distance_theano_broadcast_prepare(dtype='float64'):
"""
Calculates the pairwise arc distance between all points in vector a and b.
"""
a = tensor.matrix(dtype=str(dtype))
b = tensor.matrix(dtype=str(dtype))
theta_1 = a[:, 0][None, :]
theta_2 = b[:, 0][None, :]
phi_1 = a[:, 1][:, None]
phi_2 = b[:, 1][None, :]
temp = (tensor.sin((theta_2 - theta_1) / 2)**2
+
tensor.cos(theta_1) * tensor.cos(theta_2)
* tensor.sin((phi_2 - phi_1) / 2)**2)
distance_matrix = 2 * (tensor.arctan2(tensor.sqrt(temp),
tensor.sqrt(1 - temp)))
name = "arc_distance_theano_broadcast"
rval = theano.function([a, b],
distance_matrix,
name=name)
rval.__name__ = name
return rval
benchmarks = (
arc_distance_theano_alloc_prepare('float64'),
arc_distance_theano_broadcast_prepare('float64'),
)
|
|
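For reference, a minimal NumPy sketch of the same haversine-style pairwise arc-distance formula used in the record above; the function name and the random test points are illustrative only and are not part of the original benchmark.

import numpy as np

def arc_distance_numpy(a, b):
    # a is (n, 2) and b is (m, 2); column 0 holds theta and column 1 holds phi,
    # both in radians. Broadcasting (n, 1) against (1, m) yields the full
    # (n, m) pairwise distance matrix.
    theta_1 = a[:, 0][:, None]
    phi_1 = a[:, 1][:, None]
    theta_2 = b[:, 0][None, :]
    phi_2 = b[:, 1][None, :]
    temp = (np.sin((theta_2 - theta_1) / 2) ** 2
            + np.cos(theta_1) * np.cos(theta_2)
            * np.sin((phi_2 - phi_1) / 2) ** 2)
    return 2 * np.arctan2(np.sqrt(temp), np.sqrt(1 - temp))

rng = np.random.default_rng(0)
print(arc_distance_numpy(rng.random((3, 2)), rng.random((4, 2))).shape)  # (3, 4)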
10b8a2e6fe104fa7bdbb2498733430f16254c545
|
src/longest_common_prefix.py
|
src/longest_common_prefix.py
|
class Solution:
# @param {string []} strs
# @return {string}
def longestCommonPrefix(self, strs):
if not strs:
return ""
longestIndex = 0
foundNotMatched = False
for index in range(0, len(strs[0])):
if not foundNotMatched:
longestIndex = index
char = strs[0][index]
for i,str in enumerate(strs):
if index >= len(str) or str[index] != char:
foundNotMatched = True
break
if foundNotMatched:
return strs[0][:longestIndex]
else:
return strs[0][:longestIndex+1]
if __name__ == '__main__':
test_list = [[], ['a'], ['a','b'], ['aa','aa'], ['aa', 'a']]
result_list = ['','a','','aa','a']
success = True
solution = Solution()
for i, s in enumerate(test_list):
result = solution.longestCommonPrefix(s)
if result != result_list[i]:
success = False
print s
print 'Expected value ',result_list[i]
print 'Actual value ',result
if success:
print 'All the tests passed'
else:
print 'Please fix the failed test'
|
Solve the longest common prefix
|
Solve the longest common prefix
|
Python
|
mit
|
chancyWu/leetcode
|
Solve the longest common prefix
|
class Solution:
# @param {string []} strs
# @return {string}
def longestCommonPrefix(self, strs):
if not strs:
return ""
longestIndex = 0
foundNotMatched = False
for index in range(0, len(strs[0])):
if not foundNotMatched:
longestIndex = index
char = strs[0][index]
for i,str in enumerate(strs):
if index >= len(str) or str[index] != char:
foundNotMatched = True
break
if foundNotMatched:
return strs[0][:longestIndex]
else:
return strs[0][:longestIndex+1]
if __name__ == '__main__':
test_list = [[], ['a'], ['a','b'], ['aa','aa'], ['aa', 'a']]
result_list = ['','a','','aa','a']
success = True
solution = Solution()
for i, s in enumerate(test_list):
result = solution.longestCommonPrefix(s)
if result != result_list[i]:
success = False
print s
print 'Expected value ',result_list[i]
print 'Actual value ',result
if success:
print 'All the tests passed'
else:
print 'Please fix the failed test'
|
<commit_before><commit_msg>Solve the longest common prefix<commit_after>
|
class Solution:
# @param {string []} strs
# @return {string}
def longestCommonPrefix(self, strs):
if not strs:
return ""
longestIndex = 0
foundNotMatched = False
for index in range(0, len(strs[0])):
if not foundNotMatched:
longestIndex = index
char = strs[0][index]
for i,str in enumerate(strs):
if index >= len(str) or str[index] != char:
foundNotMatched = True
break
if foundNotMatched:
return strs[0][:longestIndex]
else:
return strs[0][:longestIndex+1]
if __name__ == '__main__':
test_list = [[], ['a'], ['a','b'], ['aa','aa'], ['aa', 'a']]
result_list = ['','a','','aa','a']
success = True
solution = Solution()
for i, s in enumerate(test_list):
result = solution.longestCommonPrefix(s)
if result != result_list[i]:
success = False
print s
print 'Expected value ',result_list[i]
print 'Actual value ',result
if success:
print 'All the tests passed'
else:
print 'Please fix the failed test'
|
Solve the longest common prefixclass Solution:
# @param {string []} strs
# @return {string}
def longestCommonPrefix(self, strs):
if not strs:
return ""
longestIndex = 0
foundNotMatched = False
for index in range(0, len(strs[0])):
if not foundNotMatched:
longestIndex = index
char = strs[0][index]
for i,str in enumerate(strs):
if index >= len(str) or str[index] != char:
foundNotMatched = True
break
if foundNotMatched:
return strs[0][:longestIndex]
else:
return strs[0][:longestIndex+1]
if __name__ == '__main__':
test_list = [[], ['a'], ['a','b'], ['aa','aa'], ['aa', 'a']]
result_list = ['','a','','aa','a']
success = True
solution = Solution()
for i, s in enumerate(test_list):
result = solution.longestCommonPrefix(s)
if result != result_list[i]:
success = False
print s
print 'Expected value ',result_list[i]
print 'Actual value ',result
if success:
print 'All the tests passed'
else:
print 'Please fix the failed test'
|
<commit_before><commit_msg>Solve the longest common prefix<commit_after>class Solution:
# @param {string []} strs
# @return {string}
def longestCommonPrefix(self, strs):
if not strs:
return ""
longestIndex = 0
foundNotMatched = False
for index in range(0, len(strs[0])):
if not foundNotMatched:
longestIndex = index
char = strs[0][index]
for i,str in enumerate(strs):
if index >= len(str) or str[index] != char:
foundNotMatched = True
break
if foundNotMatched:
return strs[0][:longestIndex]
else:
return strs[0][:longestIndex+1]
if __name__ == '__main__':
test_list = [[], ['a'], ['a','b'], ['aa','aa'], ['aa', 'a']]
result_list = ['','a','','aa','a']
success = True
solution = Solution()
for i, s in enumerate(test_list):
result = solution.longestCommonPrefix(s)
if result != result_list[i]:
success = False
print s
print 'Expected value ',result_list[i]
print 'Actual value ',result
if success:
print 'All the tests passed'
else:
print 'Please fix the failed test'
|
|
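A shorter Python 3 equivalent of the character-by-character prefix scan above can lean on the standard library; the function name and the assertions below are illustrative, not part of the record.

import os.path

def longest_common_prefix(strs):
    # os.path.commonprefix compares character by character, so it returns
    # the longest common string prefix (and "" for an empty list).
    return os.path.commonprefix(strs)

assert longest_common_prefix([]) == ""
assert longest_common_prefix(["aa", "a"]) == "a"
assert longest_common_prefix(["a", "b"]) == ""
assert longest_common_prefix(["flower", "flow", "flight"]) == "fl"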
4a182b614da8ced1650d0d81f20624120031e5b0
|
timetracker/testing_utils.py
|
timetracker/testing_utils.py
|
import logging
from timetracker import models
def create_activity(title='Test Title', start_time=None, end_time=None,
logger=None):
"""Create an `Activity` instance for testing purposes.
The fields are given default values so that instances can be easily
created for testing purposes.
Args:
title (str): The title of the activity.
start_time (datetime,optional): The time the activity started.
Defaults to the current time if not provided.
end_time (datetime,optional): The time the activity ended.
Defaults to `None` if not provided.
Returns:
Activity: An `Activity` instance with the given parameters.
"""
logger = logger or logging.getLogger(__name__)
kwargs = {
'title': title,
}
if start_time is not None:
kwargs['start_time'] = start_time
if end_time is not None:
kwargs['end_time'] = end_time
logger.debug("Creating new test activity with params title: %s, "
"start_time: %s, end_time: %s", title, start_time, end_time)
return models.Activity.objects.create(**kwargs)
|
Add testing utility to create activities.
|
Add testing utility to create activities.
|
Python
|
mit
|
cdriehuys/django-timetracker
|
Add testing utility to create activities.
|
import logging
from timetracker import models
def create_activity(title='Test Title', start_time=None, end_time=None,
logger=None):
"""Create an `Activity` instance for testing purposes.
The fields are given default values so that instances can be easily
created for testing purposes.
Args:
title (str): The title of the activity.
start_time (datetime,optional): The time the activity started.
Defaults to the current time if not provided.
end_time (datetime,optional): The time the activity ended.
Defaults to `None` if not provided.
Returns:
Activity: An `Activity` instance with the given parameters.
"""
logger = logger or logging.getLogger(__name__)
kwargs = {
'title': title,
}
if start_time is not None:
kwargs['start_time'] = start_time
if end_time is not None:
kwargs['end_time'] = end_time
logger.debug("Creating new test activity with params title: %s, "
"start_time: %s, end_time: %s", title, start_time, end_time)
return models.Activity.objects.create(**kwargs)
|
<commit_before><commit_msg>Add testing utility to create activities.<commit_after>
|
import logging
from timetracker import models
def create_activity(title='Test Title', start_time=None, end_time=None,
logger=None):
"""Create an `Activity` instance for testing purposes.
The fields are given default values so that instances can be easily
created for testing purposes.
Args:
title (str): The title of the activity.
start_time (datetime,optional): The time the activity started.
Defaults to the current time if not provided.
end_time (datetime,optional): The time the activity ended.
Defaults to `None` if not provided.
Returns:
Activity: An `Activity` instance with the given parameters.
"""
logger = logger or logging.getLogger(__name__)
kwargs = {
'title': title,
}
if start_time is not None:
kwargs['start_time'] = start_time
if end_time is not None:
kwargs['end_time'] = end_time
logger.debug("Creating new test activity with params title: %s, "
"start_time: %s, end_time: %s", title, start_time, end_time)
return models.Activity.objects.create(**kwargs)
|
Add testing utility to create activities.import logging
from timetracker import models
def create_activity(title='Test Title', start_time=None, end_time=None,
logger=None):
"""Create an `Activity` instance for testing purposes.
The fields are given default values so that instances can be easily
created for testing purposes.
Args:
title (str): The title of the activity.
start_time (datetime,optional): The time the activity started.
Defaults to the current time if not provided.
end_time (datetime,optional): The time the activity ended.
Defaults to `None` if not provided.
Returns:
Activity: An `Activity` instance with the given parameters.
"""
logger = logger or logging.getLogger(__name__)
kwargs = {
'title': title,
}
if start_time is not None:
kwargs['start_time'] = start_time
if end_time is not None:
kwargs['end_time'] = end_time
logger.debug("Creating new test activity with params title: %s, "
"start_time: %s, end_time: %s", title, start_time, end_time)
return models.Activity.objects.create(**kwargs)
|
<commit_before><commit_msg>Add testing utility to create activities.<commit_after>import logging
from timetracker import models
def create_activity(title='Test Title', start_time=None, end_time=None,
logger=None):
"""Create an `Activity` instance for testing purposes.
The fields are given default values so that instances can be easily
created for testing purposes.
Args:
title (str): The title of the activity.
start_time (datetime,optional): The time the activity started.
Defaults to the current time if not provided.
end_time (datetime,optional): The time the activity ended.
Defaults to `None` if not provided.
Returns:
Activity: An `Activity` instance with the given parameters.
"""
logger = logger or logging.getLogger(__name__)
kwargs = {
'title': title,
}
if start_time is not None:
kwargs['start_time'] = start_time
if end_time is not None:
kwargs['end_time'] = end_time
logger.debug("Creating new test activity with params title: %s, "
"start_time: %s, end_time: %s", title, start_time, end_time)
return models.Activity.objects.create(**kwargs)
|
|
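The factory above only forwards the keyword arguments that were explicitly supplied, so the model's own defaults still apply. Below is a self-contained sketch of that pattern, with a dataclass standing in for the Django model; all names are illustrative.

from dataclasses import dataclass, field
from datetime import datetime
from typing import Optional

@dataclass
class FakeActivity:
    title: str
    start_time: datetime = field(default_factory=datetime.now)
    end_time: Optional[datetime] = None

def create_fake_activity(title="Test Title", start_time=None, end_time=None):
    kwargs = {"title": title}
    if start_time is not None:
        kwargs["start_time"] = start_time   # only override the default when asked
    if end_time is not None:
        kwargs["end_time"] = end_time
    return FakeActivity(**kwargs)

print(create_fake_activity())                               # start_time falls back to "now"
print(create_fake_activity(end_time=datetime(2020, 1, 1)))  # explicit end_time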
b1edcbe02e2e1b2c54fd96c994e2c83e27e9b7b9
|
test/client/local_recognizer_test.py
|
test/client/local_recognizer_test.py
|
import unittest
import os
from speech_recognition import WavFile
from mycroft.client.speech.listener import RecognizerLoop
__author__ = 'seanfitz'
DATA_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), "data")
class LocalRecognizerTest(unittest.TestCase):
def setUp(self):
self.recognizer = RecognizerLoop.create_mycroft_recognizer(16000,
"en-us")
def testRecognizerWrapper(self):
source = WavFile(os.path.join(DATA_DIR, "hey_mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
source = WavFile(os.path.join(DATA_DIR, "mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
def testRecognitionInLongerUtterance(self):
source = WavFile(os.path.join(DATA_DIR, "weather_mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
|
import unittest
import os
from speech_recognition import WavFile
from mycroft.client.speech.listener import RecognizerLoop
__author__ = 'seanfitz'
DATA_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), "data")
class LocalRecognizerTest(unittest.TestCase):
def setUp(self):
rl = RecognizerLoop()
self.recognizer = RecognizerLoop.create_mycroft_recognizer(rl,
16000,
"en-us")
def testRecognizerWrapper(self):
source = WavFile(os.path.join(DATA_DIR, "hey_mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
source = WavFile(os.path.join(DATA_DIR, "mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
def testRecognitionInLongerUtterance(self):
source = WavFile(os.path.join(DATA_DIR, "weather_mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
|
Fix init of local recognizer
|
Fix init of local recognizer
|
Python
|
apache-2.0
|
MycroftAI/mycroft-core,aatchison/mycroft-core,linuxipho/mycroft-core,forslund/mycroft-core,Dark5ide/mycroft-core,linuxipho/mycroft-core,forslund/mycroft-core,MycroftAI/mycroft-core,Dark5ide/mycroft-core,aatchison/mycroft-core
|
import unittest
import os
from speech_recognition import WavFile
from mycroft.client.speech.listener import RecognizerLoop
__author__ = 'seanfitz'
DATA_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), "data")
class LocalRecognizerTest(unittest.TestCase):
def setUp(self):
self.recognizer = RecognizerLoop.create_mycroft_recognizer(16000,
"en-us")
def testRecognizerWrapper(self):
source = WavFile(os.path.join(DATA_DIR, "hey_mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
source = WavFile(os.path.join(DATA_DIR, "mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
def testRecognitionInLongerUtterance(self):
source = WavFile(os.path.join(DATA_DIR, "weather_mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
Fix init of local recognizer
|
import unittest
import os
from speech_recognition import WavFile
from mycroft.client.speech.listener import RecognizerLoop
__author__ = 'seanfitz'
DATA_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), "data")
class LocalRecognizerTest(unittest.TestCase):
def setUp(self):
rl = RecognizerLoop()
self.recognizer = RecognizerLoop.create_mycroft_recognizer(rl,
16000,
"en-us")
def testRecognizerWrapper(self):
source = WavFile(os.path.join(DATA_DIR, "hey_mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
source = WavFile(os.path.join(DATA_DIR, "mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
def testRecognitionInLongerUtterance(self):
source = WavFile(os.path.join(DATA_DIR, "weather_mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
|
<commit_before>import unittest
import os
from speech_recognition import WavFile
from mycroft.client.speech.listener import RecognizerLoop
__author__ = 'seanfitz'
DATA_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), "data")
class LocalRecognizerTest(unittest.TestCase):
def setUp(self):
self.recognizer = RecognizerLoop.create_mycroft_recognizer(16000,
"en-us")
def testRecognizerWrapper(self):
source = WavFile(os.path.join(DATA_DIR, "hey_mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
source = WavFile(os.path.join(DATA_DIR, "mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
def testRecognitionInLongerUtterance(self):
source = WavFile(os.path.join(DATA_DIR, "weather_mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
<commit_msg>Fix init of local recognizer<commit_after>
|
import unittest
import os
from speech_recognition import WavFile
from mycroft.client.speech.listener import RecognizerLoop
__author__ = 'seanfitz'
DATA_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), "data")
class LocalRecognizerTest(unittest.TestCase):
def setUp(self):
rl = RecognizerLoop()
self.recognizer = RecognizerLoop.create_mycroft_recognizer(rl,
16000,
"en-us")
def testRecognizerWrapper(self):
source = WavFile(os.path.join(DATA_DIR, "hey_mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
source = WavFile(os.path.join(DATA_DIR, "mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
def testRecognitionInLongerUtterance(self):
source = WavFile(os.path.join(DATA_DIR, "weather_mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
|
import unittest
import os
from speech_recognition import WavFile
from mycroft.client.speech.listener import RecognizerLoop
__author__ = 'seanfitz'
DATA_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), "data")
class LocalRecognizerTest(unittest.TestCase):
def setUp(self):
self.recognizer = RecognizerLoop.create_mycroft_recognizer(16000,
"en-us")
def testRecognizerWrapper(self):
source = WavFile(os.path.join(DATA_DIR, "hey_mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
source = WavFile(os.path.join(DATA_DIR, "mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
def testRecognitionInLongerUtterance(self):
source = WavFile(os.path.join(DATA_DIR, "weather_mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
Fix init of local recognizerimport unittest
import os
from speech_recognition import WavFile
from mycroft.client.speech.listener import RecognizerLoop
__author__ = 'seanfitz'
DATA_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), "data")
class LocalRecognizerTest(unittest.TestCase):
def setUp(self):
rl = RecognizerLoop()
self.recognizer = RecognizerLoop.create_mycroft_recognizer(rl,
16000,
"en-us")
def testRecognizerWrapper(self):
source = WavFile(os.path.join(DATA_DIR, "hey_mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
source = WavFile(os.path.join(DATA_DIR, "mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
def testRecognitionInLongerUtterance(self):
source = WavFile(os.path.join(DATA_DIR, "weather_mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
|
<commit_before>import unittest
import os
from speech_recognition import WavFile
from mycroft.client.speech.listener import RecognizerLoop
__author__ = 'seanfitz'
DATA_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), "data")
class LocalRecognizerTest(unittest.TestCase):
def setUp(self):
self.recognizer = RecognizerLoop.create_mycroft_recognizer(16000,
"en-us")
def testRecognizerWrapper(self):
source = WavFile(os.path.join(DATA_DIR, "hey_mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
source = WavFile(os.path.join(DATA_DIR, "mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
def testRecognitionInLongerUtterance(self):
source = WavFile(os.path.join(DATA_DIR, "weather_mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
<commit_msg>Fix init of local recognizer<commit_after>import unittest
import os
from speech_recognition import WavFile
from mycroft.client.speech.listener import RecognizerLoop
__author__ = 'seanfitz'
DATA_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), "data")
class LocalRecognizerTest(unittest.TestCase):
def setUp(self):
rl = RecognizerLoop()
self.recognizer = RecognizerLoop.create_mycroft_recognizer(rl,
16000,
"en-us")
def testRecognizerWrapper(self):
source = WavFile(os.path.join(DATA_DIR, "hey_mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
source = WavFile(os.path.join(DATA_DIR, "mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
def testRecognitionInLongerUtterance(self):
source = WavFile(os.path.join(DATA_DIR, "weather_mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
|
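The fix above comes down to how Python resolves method calls: invoking a method through the class requires passing the instance explicitly, which is what the added RecognizerLoop() argument provides. A standalone illustration with made-up names:

class Loop:
    def create_recognizer(self, rate, lang):
        return (rate, lang)

loop = Loop()
# Calling through the instance binds `self` automatically; calling through
# the class needs the instance passed as the first argument.
assert loop.create_recognizer(16000, "en-us") == Loop.create_recognizer(loop, 16000, "en-us")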
630ea6ba267673819d5a3f4b980276039f4773b4
|
support/jenkins/buildAllModules.py
|
support/jenkins/buildAllModules.py
|
import os
from subprocess import call
modules = os.listdir("../../modules")
cmd = ["cmake"]
cmd.append("-DGHOUL_USE_DEVIL=OFF")
for m in modules:
cmd.append("-DOPENSPACE_MODULE_" + m.upper() + "=ON")
cmd.append(".")
call(cmd)
|
Add python script to make jenkins build all modules
|
Add python script to make jenkins build all modules
|
Python
|
mit
|
OpenSpace/OpenSpace,OpenSpace/OpenSpace,OpenSpace/OpenSpace,OpenSpace/OpenSpace
|
Add python script to make jenkins build all modules
|
import os
from subprocess import call
modules = os.listdir("../../modules")
cmd = ["cmake"]
cmd.append("-DGHOUL_USE_DEVIL=OFF")
for m in modules:
cmd.append("-DOPENSPACE_MODULE_" + m.upper() + "=ON")
cmd.append(".")
call(cmd)
|
<commit_before><commit_msg>Add python script to make jenkins build all modules<commit_after>
|
import os
from subprocess import call
modules = os.listdir("../../modules")
cmd = ["cmake"]
cmd.append("-DGHOUL_USE_DEVIL=OFF")
for m in modules:
cmd.append("-DOPENSPACE_MODULE_" + m.upper() + "=ON")
cmd.append(".")
call(cmd)
|
Add python script to make jenkins build all modulesimport os
from subprocess import call
modules = os.listdir("../../modules")
cmd = ["cmake"]
cmd.append("-DGHOUL_USE_DEVIL=OFF")
for m in modules:
cmd.append("-DOPENSPACE_MODULE_" + m.upper() + "=ON")
cmd.append(".")
call(cmd)
|
<commit_before><commit_msg>Add python script to make jenkins build all modules<commit_after>import os
from subprocess import call
modules = os.listdir("../../modules")
cmd = ["cmake"]
cmd.append("-DGHOUL_USE_DEVIL=OFF")
for m in modules:
cmd.append("-DOPENSPACE_MODULE_" + m.upper() + "=ON")
cmd.append(".")
call(cmd)
|
|
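A slightly defensive variant of the same "enable every module" idea, skipping stray files in the modules directory and failing the job when cmake fails; the paths and flag names are taken from the record and assumed rather than verified against a real checkout.

import os
import subprocess

modules_dir = os.path.join("..", "..", "modules")
cmd = ["cmake", "-DGHOUL_USE_DEVIL=OFF"]
cmd += ["-DOPENSPACE_MODULE_{}=ON".format(m.upper())
        for m in sorted(os.listdir(modules_dir))
        if os.path.isdir(os.path.join(modules_dir, m))]
cmd.append(".")
subprocess.check_call(cmd)  # raises CalledProcessError on a non-zero exit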
ce672e705056c4343e51b9e7d69446fc5f3ef58f
|
talks_keeper/forms.py
|
talks_keeper/forms.py
|
from django import forms
from .models import Label, Talk
class TalkForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(TalkForm, self).__init__(*args, **kwargs)
labels = Label.objects.all()
for label_ in labels:
self.fields.update({
'label_{}'.format(label_.id): forms.BooleanField(
label=label_.name,
required=False,
)})
class Meta:
model = Talk
exclude = ['company']
def save(self):
talk = super(TalkForm, self).save()
for label_ in Label.objects.all():
if self.cleaned_data['label_{}'.format(label_.id)]:
label_.talks.add(talk)
|
Add TalkForm (to use color labels)
|
Add TalkForm (to use color labels)
|
Python
|
mit
|
samitnuk/talks_keeper,samitnuk/talks_keeper,samitnuk/talks_keeper
|
Add TalkForm (to use color labels)
|
from django import forms
from .models import Label, Talk
class TalkForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(TalkForm, self).__init__(*args, **kwargs)
labels = Label.objects.all()
for label_ in labels:
self.fields.update({
'label_{}'.format(label_.id): forms.BooleanField(
label=label_.name,
required=False,
)})
class Meta:
model = Talk
exclude = ['company']
def save(self):
talk = super(TalkForm, self).save()
for label_ in Label.objects.all():
if self.cleaned_data['label_{}'.format(label_.id)]:
label_.talks.add(talk)
|
<commit_before><commit_msg>Add TalkForm (to use color labels)<commit_after>
|
from django import forms
from .models import Label, Talk
class TalkForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(TalkForm, self).__init__(*args, **kwargs)
labels = Label.objects.all()
for label_ in labels:
self.fields.update({
'label_{}'.format(label_.id): forms.BooleanField(
label=label_.name,
required=False,
)})
class Meta:
model = Talk
exclude = ['company']
def save(self):
talk = super(TalkForm, self).save()
for label_ in Label.objects.all():
if self.cleaned_data['label_{}'.format(label_.id)]:
label_.talks.add(talk)
|
Add TalkForm (to use color labels)from django import forms
from .models import Label, Talk
class TalkForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(TalkForm, self).__init__(*args, **kwargs)
labels = Label.objects.all()
for label_ in labels:
self.fields.update({
'label_{}'.format(label_.id): forms.BooleanField(
label=label_.name,
required=False,
)})
class Meta:
model = Talk
exclude = ['company']
def save(self):
talk = super(TalkForm, self).save()
for label_ in Label.objects.all():
if self.cleaned_data['label_{}'.format(label_.id)]:
label_.talks.add(talk)
|
<commit_before><commit_msg>Add TalkForm (to use color labels)<commit_after>from django import forms
from .models import Label, Talk
class TalkForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(TalkForm, self).__init__(*args, **kwargs)
labels = Label.objects.all()
for label_ in labels:
self.fields.update({
'label_{}'.format(label_.id): forms.BooleanField(
label=label_.name,
required=False,
)})
class Meta:
model = Talk
exclude = ['company']
def save(self):
talk = super(TalkForm, self).save()
for label_ in Label.objects.all():
if self.cleaned_data['label_{}'.format(label_.id)]:
label_.talks.add(talk)
|
|
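The form above builds one boolean field per label, keyed "label_<id>", and save() maps the checked boxes back to Label objects (note it does not return the saved instance, unlike the stock ModelForm.save()). A plain-Python sketch of that key scheme, using illustrative data only:

labels = [{"id": 1, "name": "urgent"}, {"id": 2, "name": "follow-up"}]

# What __init__ builds: one boolean entry per label, keyed by id.
fields = {"label_{}".format(l["id"]): l["name"] for l in labels}
print(fields)  # {'label_1': 'urgent', 'label_2': 'follow-up'}

# What save() consumes: the submitted booleans, mapped back to labels.
cleaned_data = {"label_1": True, "label_2": False}
selected = [l["name"] for l in labels if cleaned_data["label_{}".format(l["id"])]]
print(selected)  # ['urgent']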
c3aae42475966e8dbd71b8f2e16da5cbf81cf3cc
|
src/mmw/apps/modeling/migrations/0016_old_scenarios.py
|
src/mmw/apps/modeling/migrations/0016_old_scenarios.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def clear_old_scenario_results(apps, schema_editor):
Scenario = apps.get_model('modeling', 'Scenario')
for scenario in Scenario.objects.all():
if 'pc_modified' not in scenario.results or 'pc_unmodified' not in scenario.results:
scenario.results = "[]"
scenario.modification_hash = ""
scenario.save()
class Migration(migrations.Migration):
dependencies = [
('modeling', '0015_remove_scenario_census'),
]
operations = [
migrations.RunPython(clear_old_scenario_results)
]
|
Remove Incompatible Results from Scenarios
|
Remove Incompatible Results from Scenarios
This patch provides a data migration which removes incompatible results
from scenarios created prior to
https://github.com/WikiWatershed/model-my-watershed/pull/716.
Connects #935
|
Python
|
apache-2.0
|
lliss/model-my-watershed,lliss/model-my-watershed,kdeloach/model-my-watershed,lliss/model-my-watershed,WikiWatershed/model-my-watershed,kdeloach/model-my-watershed,WikiWatershed/model-my-watershed,project-icp/bee-pollinator-app,kdeloach/model-my-watershed,project-icp/bee-pollinator-app,lliss/model-my-watershed,lliss/model-my-watershed,WikiWatershed/model-my-watershed,project-icp/bee-pollinator-app,kdeloach/model-my-watershed,kdeloach/model-my-watershed,WikiWatershed/model-my-watershed,project-icp/bee-pollinator-app,WikiWatershed/model-my-watershed
|
Remove Incompatible Results from Scenarios
This patch provides a data migration which removes incompatible results
from scenarios created prior to
https://github.com/WikiWatershed/model-my-watershed/pull/716.
Connects #935
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def clear_old_scenario_results(apps, schema_editor):
Scenario = apps.get_model('modeling', 'Scenario')
for scenario in Scenario.objects.all():
if 'pc_modified' not in scenario.results or 'pc_unmodified' not in scenario.results:
scenario.results = "[]"
scenario.modification_hash = ""
scenario.save()
class Migration(migrations.Migration):
dependencies = [
('modeling', '0015_remove_scenario_census'),
]
operations = [
migrations.RunPython(clear_old_scenario_results)
]
|
<commit_before><commit_msg>Remove Incompatible Results from Scenarios
This patch provides a data migration which removes incompatible results
from scenarios created prior to
https://github.com/WikiWatershed/model-my-watershed/pull/716.
Connects #935<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def clear_old_scenario_results(apps, schema_editor):
Scenario = apps.get_model('modeling', 'Scenario')
for scenario in Scenario.objects.all():
if 'pc_modified' not in scenario.results or 'pc_unmodified' not in scenario.results:
scenario.results = "[]"
scenario.modification_hash = ""
scenario.save()
class Migration(migrations.Migration):
dependencies = [
('modeling', '0015_remove_scenario_census'),
]
operations = [
migrations.RunPython(clear_old_scenario_results)
]
|
Remove Incompatible Results from Scenarios
This patch provides a data migration which removes incompatible results
from scenarios created prior to
https://github.com/WikiWatershed/model-my-watershed/pull/716.
Connects #935# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def clear_old_scenario_results(apps, schema_editor):
Scenario = apps.get_model('modeling', 'Scenario')
for scenario in Scenario.objects.all():
if 'pc_modified' not in scenario.results or 'pc_unmodified' not in scenario.results:
scenario.results = "[]"
scenario.modification_hash = ""
scenario.save()
class Migration(migrations.Migration):
dependencies = [
('modeling', '0015_remove_scenario_census'),
]
operations = [
migrations.RunPython(clear_old_scenario_results)
]
|
<commit_before><commit_msg>Remove Incompatible Results from Scenarios
This patch provides a data migration which removes incompatible results
from scenarios created prior to
https://github.com/WikiWatershed/model-my-watershed/pull/716.
Connects #935<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def clear_old_scenario_results(apps, schema_editor):
Scenario = apps.get_model('modeling', 'Scenario')
for scenario in Scenario.objects.all():
if 'pc_modified' not in scenario.results or 'pc_unmodified' not in scenario.results:
scenario.results = "[]"
scenario.modification_hash = ""
scenario.save()
class Migration(migrations.Migration):
dependencies = [
('modeling', '0015_remove_scenario_census'),
]
operations = [
migrations.RunPython(clear_old_scenario_results)
]
|
|
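A hedged sketch of the same RunPython data-migration pattern with an explicit no-op reverse step, so the migration can also be unapplied; the app, model, and dependency names follow the record above and are otherwise assumed.

from django.db import migrations

def clear_results(apps, schema_editor):
    # Use the historical model via apps.get_model, never a direct import,
    # inside data migrations.
    Scenario = apps.get_model('modeling', 'Scenario')
    for scenario in Scenario.objects.all():
        if 'pc_modified' not in scenario.results or 'pc_unmodified' not in scenario.results:
            scenario.results = "[]"
            scenario.modification_hash = ""
            scenario.save()

class Migration(migrations.Migration):
    dependencies = [('modeling', '0015_remove_scenario_census')]
    operations = [
        migrations.RunPython(clear_results, migrations.RunPython.noop),
    ]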
843f689fd76344aa6921b94576a92d4ff7bba609
|
test/load_unload/TestLoadUnload.py
|
test/load_unload/TestLoadUnload.py
|
"""
Test that breakpoint by symbol name works correctly dlopen'ing a dynamic lib.
"""
import os, time
import unittest
import lldb
import lldbtest
class TestClassTypes(lldbtest.TestBase):
mydir = "load_unload"
def test_dead_strip(self):
"""Test breakpoint by name works correctly with dlopen'ing."""
res = self.res
exe = os.path.join(os.getcwd(), "a.out")
self.ci.HandleCommand("file " + exe, res)
self.assertTrue(res.Succeeded())
# Break by function name a_function (not yet loaded).
self.ci.HandleCommand("breakpoint set -n a_function", res)
self.assertTrue(res.Succeeded())
self.assertTrue(res.GetOutput().startswith(
"Breakpoint created: 1: name = 'a_function', locations = 0 "
"(pending)"
))
self.ci.HandleCommand("run", res)
time.sleep(0.1)
self.assertTrue(res.Succeeded())
# The stop reason of the thread should be breakpoint and at a_function.
self.ci.HandleCommand("thread list", res)
output = res.GetOutput()
self.assertTrue(res.Succeeded())
self.assertTrue(output.find('state is Stopped') > 0 and
output.find('a_function') > 0 and
output.find('a.c:14') > 0 and
output.find('stop reason = breakpoint') > 0)
# The breakpoint should have a hit count of 1.
self.ci.HandleCommand("breakpoint list", res)
self.assertTrue(res.Succeeded())
self.assertTrue(res.GetOutput().find(' resolved, hit count = 1') > 0)
self.ci.HandleCommand("continue", res)
self.assertTrue(res.Succeeded())
# # We should stop agaian at a_function.
# # The stop reason of the thread should be breakpoint and at a_function.
# self.ci.HandleCommand("thread list", res)
# output = res.GetOutput()
# self.assertTrue(res.Succeeded())
# self.assertTrue(output.find('state is Stopped') > 0 and
# output.find('a_function') > 0 and
# output.find('a.c:14') > 0 and
# output.find('stop reason = breakpoint') > 0)
# # The breakpoint should have a hit count of 2.
# self.ci.HandleCommand("breakpoint list", res)
# self.assertTrue(res.Succeeded())
# self.assertTrue(res.GetOutput().find(' resolved, hit count = 2') > 0)
# self.ci.HandleCommand("continue", res)
# self.assertTrue(res.Succeeded())
if __name__ == '__main__':
lldb.SBDebugger.Initialize()
unittest.main()
lldb.SBDebugger.Terminate()
|
Test that breakpoint by symbol name works correctly dlopen'ing a dynamic lib.
|
Test that breakpoint by symbol name works correctly dlopen'ing a dynamic lib.
git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@107812 91177308-0d34-0410-b5e6-96231b3b80d8
|
Python
|
apache-2.0
|
llvm-mirror/lldb,apple/swift-lldb,apple/swift-lldb,apple/swift-lldb,apple/swift-lldb,llvm-mirror/lldb,apple/swift-lldb,apple/swift-lldb,llvm-mirror/lldb,llvm-mirror/lldb,llvm-mirror/lldb
|
Test that breakpoint by symbol name works correctly dlopen'ing a dynamic lib.
git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@107812 91177308-0d34-0410-b5e6-96231b3b80d8
|
"""
Test that breakpoint by symbol name works correctly dlopen'ing a dynamic lib.
"""
import os, time
import unittest
import lldb
import lldbtest
class TestClassTypes(lldbtest.TestBase):
mydir = "load_unload"
def test_dead_strip(self):
"""Test breakpoint by name works correctly with dlopen'ing."""
res = self.res
exe = os.path.join(os.getcwd(), "a.out")
self.ci.HandleCommand("file " + exe, res)
self.assertTrue(res.Succeeded())
# Break by function name a_function (not yet loaded).
self.ci.HandleCommand("breakpoint set -n a_function", res)
self.assertTrue(res.Succeeded())
self.assertTrue(res.GetOutput().startswith(
"Breakpoint created: 1: name = 'a_function', locations = 0 "
"(pending)"
))
self.ci.HandleCommand("run", res)
time.sleep(0.1)
self.assertTrue(res.Succeeded())
# The stop reason of the thread should be breakpoint and at a_function.
self.ci.HandleCommand("thread list", res)
output = res.GetOutput()
self.assertTrue(res.Succeeded())
self.assertTrue(output.find('state is Stopped') > 0 and
output.find('a_function') > 0 and
output.find('a.c:14') > 0 and
output.find('stop reason = breakpoint') > 0)
# The breakpoint should have a hit count of 1.
self.ci.HandleCommand("breakpoint list", res)
self.assertTrue(res.Succeeded())
self.assertTrue(res.GetOutput().find(' resolved, hit count = 1') > 0)
self.ci.HandleCommand("continue", res)
self.assertTrue(res.Succeeded())
# # We should stop agaian at a_function.
# # The stop reason of the thread should be breakpoint and at a_function.
# self.ci.HandleCommand("thread list", res)
# output = res.GetOutput()
# self.assertTrue(res.Succeeded())
# self.assertTrue(output.find('state is Stopped') > 0 and
# output.find('a_function') > 0 and
# output.find('a.c:14') > 0 and
# output.find('stop reason = breakpoint') > 0)
# # The breakpoint should have a hit count of 2.
# self.ci.HandleCommand("breakpoint list", res)
# self.assertTrue(res.Succeeded())
# self.assertTrue(res.GetOutput().find(' resolved, hit count = 2') > 0)
# self.ci.HandleCommand("continue", res)
# self.assertTrue(res.Succeeded())
if __name__ == '__main__':
lldb.SBDebugger.Initialize()
unittest.main()
lldb.SBDebugger.Terminate()
|
<commit_before><commit_msg>Test that breakpoint by symbol name works correctly dlopen'ing a dynamic lib.
git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@107812 91177308-0d34-0410-b5e6-96231b3b80d8<commit_after>
|
"""
Test that breakpoint by symbol name works correctly dlopen'ing a dynamic lib.
"""
import os, time
import unittest
import lldb
import lldbtest
class TestClassTypes(lldbtest.TestBase):
mydir = "load_unload"
def test_dead_strip(self):
"""Test breakpoint by name works correctly with dlopen'ing."""
res = self.res
exe = os.path.join(os.getcwd(), "a.out")
self.ci.HandleCommand("file " + exe, res)
self.assertTrue(res.Succeeded())
# Break by function name a_function (not yet loaded).
self.ci.HandleCommand("breakpoint set -n a_function", res)
self.assertTrue(res.Succeeded())
self.assertTrue(res.GetOutput().startswith(
"Breakpoint created: 1: name = 'a_function', locations = 0 "
"(pending)"
))
self.ci.HandleCommand("run", res)
time.sleep(0.1)
self.assertTrue(res.Succeeded())
# The stop reason of the thread should be breakpoint and at a_function.
self.ci.HandleCommand("thread list", res)
output = res.GetOutput()
self.assertTrue(res.Succeeded())
self.assertTrue(output.find('state is Stopped') > 0 and
output.find('a_function') > 0 and
output.find('a.c:14') > 0 and
output.find('stop reason = breakpoint') > 0)
# The breakpoint should have a hit count of 1.
self.ci.HandleCommand("breakpoint list", res)
self.assertTrue(res.Succeeded())
self.assertTrue(res.GetOutput().find(' resolved, hit count = 1') > 0)
self.ci.HandleCommand("continue", res)
self.assertTrue(res.Succeeded())
# # We should stop agaian at a_function.
# # The stop reason of the thread should be breakpoint and at a_function.
# self.ci.HandleCommand("thread list", res)
# output = res.GetOutput()
# self.assertTrue(res.Succeeded())
# self.assertTrue(output.find('state is Stopped') > 0 and
# output.find('a_function') > 0 and
# output.find('a.c:14') > 0 and
# output.find('stop reason = breakpoint') > 0)
# # The breakpoint should have a hit count of 2.
# self.ci.HandleCommand("breakpoint list", res)
# self.assertTrue(res.Succeeded())
# self.assertTrue(res.GetOutput().find(' resolved, hit count = 2') > 0)
# self.ci.HandleCommand("continue", res)
# self.assertTrue(res.Succeeded())
if __name__ == '__main__':
lldb.SBDebugger.Initialize()
unittest.main()
lldb.SBDebugger.Terminate()
|
Test that breakpoint by symbol name works correctly dlopen'ing a dynamic lib.
git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@107812 91177308-0d34-0410-b5e6-96231b3b80d8"""
Test that breakpoint by symbol name works correctly dlopen'ing a dynamic lib.
"""
import os, time
import unittest
import lldb
import lldbtest
class TestClassTypes(lldbtest.TestBase):
mydir = "load_unload"
def test_dead_strip(self):
"""Test breakpoint by name works correctly with dlopen'ing."""
res = self.res
exe = os.path.join(os.getcwd(), "a.out")
self.ci.HandleCommand("file " + exe, res)
self.assertTrue(res.Succeeded())
# Break by function name a_function (not yet loaded).
self.ci.HandleCommand("breakpoint set -n a_function", res)
self.assertTrue(res.Succeeded())
self.assertTrue(res.GetOutput().startswith(
"Breakpoint created: 1: name = 'a_function', locations = 0 "
"(pending)"
))
self.ci.HandleCommand("run", res)
time.sleep(0.1)
self.assertTrue(res.Succeeded())
# The stop reason of the thread should be breakpoint and at a_function.
self.ci.HandleCommand("thread list", res)
output = res.GetOutput()
self.assertTrue(res.Succeeded())
self.assertTrue(output.find('state is Stopped') > 0 and
output.find('a_function') > 0 and
output.find('a.c:14') > 0 and
output.find('stop reason = breakpoint') > 0)
# The breakpoint should have a hit count of 1.
self.ci.HandleCommand("breakpoint list", res)
self.assertTrue(res.Succeeded())
self.assertTrue(res.GetOutput().find(' resolved, hit count = 1') > 0)
self.ci.HandleCommand("continue", res)
self.assertTrue(res.Succeeded())
# # We should stop agaian at a_function.
# # The stop reason of the thread should be breakpoint and at a_function.
# self.ci.HandleCommand("thread list", res)
# output = res.GetOutput()
# self.assertTrue(res.Succeeded())
# self.assertTrue(output.find('state is Stopped') > 0 and
# output.find('a_function') > 0 and
# output.find('a.c:14') > 0 and
# output.find('stop reason = breakpoint') > 0)
# # The breakpoint should have a hit count of 2.
# self.ci.HandleCommand("breakpoint list", res)
# self.assertTrue(res.Succeeded())
# self.assertTrue(res.GetOutput().find(' resolved, hit count = 2') > 0)
# self.ci.HandleCommand("continue", res)
# self.assertTrue(res.Succeeded())
if __name__ == '__main__':
lldb.SBDebugger.Initialize()
unittest.main()
lldb.SBDebugger.Terminate()
|
<commit_before><commit_msg>Test that breakpoint by symbol name works correctly dlopen'ing a dynamic lib.
git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@107812 91177308-0d34-0410-b5e6-96231b3b80d8<commit_after>"""
Test that breakpoint by symbol name works correctly dlopen'ing a dynamic lib.
"""
import os, time
import unittest
import lldb
import lldbtest
class TestClassTypes(lldbtest.TestBase):
mydir = "load_unload"
def test_dead_strip(self):
"""Test breakpoint by name works correctly with dlopen'ing."""
res = self.res
exe = os.path.join(os.getcwd(), "a.out")
self.ci.HandleCommand("file " + exe, res)
self.assertTrue(res.Succeeded())
# Break by function name a_function (not yet loaded).
self.ci.HandleCommand("breakpoint set -n a_function", res)
self.assertTrue(res.Succeeded())
self.assertTrue(res.GetOutput().startswith(
"Breakpoint created: 1: name = 'a_function', locations = 0 "
"(pending)"
))
self.ci.HandleCommand("run", res)
time.sleep(0.1)
self.assertTrue(res.Succeeded())
# The stop reason of the thread should be breakpoint and at a_function.
self.ci.HandleCommand("thread list", res)
output = res.GetOutput()
self.assertTrue(res.Succeeded())
self.assertTrue(output.find('state is Stopped') > 0 and
output.find('a_function') > 0 and
output.find('a.c:14') > 0 and
output.find('stop reason = breakpoint') > 0)
# The breakpoint should have a hit count of 1.
self.ci.HandleCommand("breakpoint list", res)
self.assertTrue(res.Succeeded())
self.assertTrue(res.GetOutput().find(' resolved, hit count = 1') > 0)
self.ci.HandleCommand("continue", res)
self.assertTrue(res.Succeeded())
# # We should stop agaian at a_function.
# # The stop reason of the thread should be breakpoint and at a_function.
# self.ci.HandleCommand("thread list", res)
# output = res.GetOutput()
# self.assertTrue(res.Succeeded())
# self.assertTrue(output.find('state is Stopped') > 0 and
# output.find('a_function') > 0 and
# output.find('a.c:14') > 0 and
# output.find('stop reason = breakpoint') > 0)
# # The breakpoint should have a hit count of 2.
# self.ci.HandleCommand("breakpoint list", res)
# self.assertTrue(res.Succeeded())
# self.assertTrue(res.GetOutput().find(' resolved, hit count = 2') > 0)
# self.ci.HandleCommand("continue", res)
# self.assertTrue(res.Succeeded())
if __name__ == '__main__':
lldb.SBDebugger.Initialize()
unittest.main()
lldb.SBDebugger.Terminate()
|
|
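The test above repeats the same multi-substring check on the command output several times. A small, lldb-free helper makes that pattern explicit; everything here is illustrative and uses only the standard library.

import unittest

def contains_all(text, *needles):
    return all(needle in text for needle in needles)

class OutputCheckTest(unittest.TestCase):
    def test_stop_output(self):
        output = "state is Stopped at a_function (a.c:14), stop reason = breakpoint"
        self.assertTrue(contains_all(output, "state is Stopped", "a_function",
                                     "a.c:14", "stop reason = breakpoint"))
        self.assertFalse(contains_all(output, "hit count = 2"))

if __name__ == '__main__':
    unittest.main()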
32779ab1f6c2658771e69e58f8d8a451b23043b0
|
var/spack/repos/builtin/packages/r-filehash/package.py
|
var/spack/repos/builtin/packages/r-filehash/package.py
|
from spack import *
class RFilehash(Package):
"""Implements a simple key-value style database where character string keys are associated with data values that are stored on the disk. A simple interface is provided for inserting, retrieving, and deleting data from the database. Utilities are provided that allow 'filehash' databases to be treated much like environments and lists are already used in R. These utilities are provided to encourage interactive and exploratory analysis on large datasets. Three different file formats for representing the database are currently available and new formats can easily be incorporated by third parties for use in the 'filehash' framework."""
homepage = 'https://cran.r-project.org/'
url = "https://cran.r-project.org/src/contrib/filehash_2.3.tar.gz"
version('2.3', '01fffafe09b148ccadc9814c103bdc2f', expand=False)
extends('R')
def install(self, spec, prefix):
R('CMD', 'INSTALL', '--library=%s' % self.module.r_lib_dir, '%s' % self.stage.archive_file)
|
Add r-filehash to test version naming.
|
Add r-filehash to test version naming.
|
Python
|
lgpl-2.1
|
krafczyk/spack,mfherbst/spack,TheTimmy/spack,TheTimmy/spack,iulian787/spack,skosukhin/spack,krafczyk/spack,TheTimmy/spack,LLNL/spack,iulian787/spack,mfherbst/spack,lgarren/spack,skosukhin/spack,iulian787/spack,matthiasdiener/spack,lgarren/spack,matthiasdiener/spack,LLNL/spack,lgarren/spack,matthiasdiener/spack,krafczyk/spack,lgarren/spack,tmerrick1/spack,LLNL/spack,lgarren/spack,skosukhin/spack,tmerrick1/spack,LLNL/spack,krafczyk/spack,tmerrick1/spack,EmreAtes/spack,TheTimmy/spack,LLNL/spack,TheTimmy/spack,matthiasdiener/spack,EmreAtes/spack,tmerrick1/spack,matthiasdiener/spack,mfherbst/spack,krafczyk/spack,skosukhin/spack,EmreAtes/spack,mfherbst/spack,EmreAtes/spack,tmerrick1/spack,iulian787/spack,skosukhin/spack,mfherbst/spack,EmreAtes/spack,iulian787/spack
|
Add r-filehash to test version naming.
|
from spack import *
class RFilehash(Package):
"""Implements a simple key-value style database where character string keys are associated with data values that are stored on the disk. A simple interface is provided for inserting, retrieving, and deleting data from the database. Utilities are provided that allow 'filehash' databases to be treated much like environments and lists are already used in R. These utilities are provided to encourage interactive and exploratory analysis on large datasets. Three different file formats for representing the database are currently available and new formats can easily be incorporated by third parties for use in the 'filehash' framework."""
homepage = 'https://cran.r-project.org/'
url = "https://cran.r-project.org/src/contrib/filehash_2.3.tar.gz"
version('2.3', '01fffafe09b148ccadc9814c103bdc2f', expand=False)
extends('R')
def install(self, spec, prefix):
R('CMD', 'INSTALL', '--library=%s' % self.module.r_lib_dir, '%s' % self.stage.archive_file)
|
<commit_before><commit_msg>Add r-filehash to test version naming.<commit_after>
|
from spack import *
class RFilehash(Package):
"""Implements a simple key-value style database where character string keys are associated with data values that are stored on the disk. A simple interface is provided for inserting, retrieving, and deleting data from the database. Utilities are provided that allow 'filehash' databases to be treated much like environments and lists are already used in R. These utilities are provided to encourage interactive and exploratory analysis on large datasets. Three different file formats for representing the database are currently available and new formats can easily be incorporated by third parties for use in the 'filehash' framework."""
homepage = 'https://cran.r-project.org/'
url = "https://cran.r-project.org/src/contrib/filehash_2.3.tar.gz"
version('2.3', '01fffafe09b148ccadc9814c103bdc2f', expand=False)
extends('R')
def install(self, spec, prefix):
R('CMD', 'INSTALL', '--library=%s' % self.module.r_lib_dir, '%s' % self.stage.archive_file)
|
Add r-filehash to test version naming.from spack import *
class RFilehash(Package):
"""Implements a simple key-value style database where character string keys are associated with data values that are stored on the disk. A simple interface is provided for inserting, retrieving, and deleting data from the database. Utilities are provided that allow 'filehash' databases to be treated much like environments and lists are already used in R. These utilities are provided to encourage interactive and exploratory analysis on large datasets. Three different file formats for representing the database are currently available and new formats can easily be incorporated by third parties for use in the 'filehash' framework."""
homepage = 'https://cran.r-project.org/'
url = "https://cran.r-project.org/src/contrib/filehash_2.3.tar.gz"
version('2.3', '01fffafe09b148ccadc9814c103bdc2f', expand=False)
extends('R')
def install(self, spec, prefix):
R('CMD', 'INSTALL', '--library=%s' % self.module.r_lib_dir, '%s' % self.stage.archive_file)
|
<commit_before><commit_msg>Add r-filehash to test version naming.<commit_after>from spack import *
class RFilehash(Package):
"""Implements a simple key-value style database where character string keys are associated with data values that are stored on the disk. A simple interface is provided for inserting, retrieving, and deleting data from the database. Utilities are provided that allow 'filehash' databases to be treated much like environments and lists are already used in R. These utilities are provided to encourage interactive and exploratory analysis on large datasets. Three different file formats for representing the database are currently available and new formats can easily be incorporated by third parties for use in the 'filehash' framework."""
homepage = 'https://cran.r-project.org/'
url = "https://cran.r-project.org/src/contrib/filehash_2.3.tar.gz"
version('2.3', '01fffafe09b148ccadc9814c103bdc2f', expand=False)
extends('R')
def install(self, spec, prefix):
R('CMD', 'INSTALL', '--library=%s' % self.module.r_lib_dir, '%s' % self.stage.archive_file)
|
|
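One small aside on the install() call above: wrapping a value in '%s' % only matters when it is not already a string, so (assuming stage.archive_file is a plain str path) the archive argument could be passed directly. A tiny illustration with a made-up path:

archive_file = "/tmp/filehash_2.3.tar.gz"   # illustrative path only
assert '%s' % archive_file == archive_file  # formatting is a no-op for str values

args = ['CMD', 'INSTALL', '--library=%s' % '/opt/r_libs', archive_file]
print(args)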
463f907eb2b8c4cb1cebfed44ec8da0f76c4f7bb
|
cabby/data/metagraph/utils.py
|
cabby/data/metagraph/utils.py
|
# coding=utf-8
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Utils for metagraph construction'''
import networkx as nx
import pandas as pd
def convert_pandas_df_to_metagraph(df, source_column, target_column,
source_metadata_columns, target_metadata_columns,
edge_attribute_columns):
'''Convert a pandas dataframe to a networkx multigraph
Arguments:
df: pandas dataframe
source_column: column in df to use as source node label
target_column: column in df to use as target node label
source_metadata_columns: for source nodes, dict from column name to
metadata field name
target_metadata_columns: for target nodes, dict from column name to
metadata field name
edge_attribute_columns: list of columns in df to use as edge attributes
Returns:
g: networkx Graph holding all graph data.
'''
g = nx.convert_matrix.from_pandas_edgelist(df,
source=source_column,
target=target_column,
edge_attr=edge_attribute_columns,
create_using=nx.classes.multidigraph.MultiDiGraph)
for metadata_column, metadata_name in source_metadata_columns.items():
attribute_dict = dict(zip(df[source_column], df[metadata_column]))
nx.set_node_attributes(g, attribute_dict, metadata_name)
for metadata_column, metadata_name in source_metadata_columns.items():
attribute_dict = dict(zip(df[target_column], df[metadata_column]))
nx.set_node_attributes(g, attribute_dict, metadata_name)
return g
|
Add KG graph building util.
|
Add KG graph building util.
|
Python
|
apache-2.0
|
googleinterns/cabby,googleinterns/cabby,googleinterns/cabby,googleinterns/cabby
|
Add KG graph building util.
|
# coding=utf-8
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Utils for metagraph construction'''
import networkx as nx
import pandas as pd
def convert_pandas_df_to_metagraph(df, source_column, target_column,
source_metadata_columns, target_metadata_columns,
edge_attribute_columns):
'''Convert a pandas dataframe to a networkx multigraph
Arguments:
df: pandas dataframe
source_column: column in df to use as source node label
target_column: column in df to use as target node label
source_metadata_columns: for source nodes, dict from column name to
metadata field name
target_metadata_columns: for target nodes, dict from column name to
metadata field name
edge_attribute_columns: list of columns in df to use as edge attributes
Returns:
g: networkx Graph holding all graph data.
'''
g = nx.convert_matrix.from_pandas_edgelist(df,
source=source_column,
target=target_column,
edge_attr=edge_attribute_columns,
create_using=nx.classes.multidigraph.MultiDiGraph)
for metadata_column, metadata_name in source_metadata_columns.items():
attribute_dict = dict(zip(df[source_column], df[metadata_column]))
nx.set_node_attributes(g, attribute_dict, metadata_name)
  for metadata_column, metadata_name in target_metadata_columns.items():
attribute_dict = dict(zip(df[target_column], df[metadata_column]))
nx.set_node_attributes(g, attribute_dict, metadata_name)
return g
|
<commit_before><commit_msg>Add KG graph building util.<commit_after>
|
# coding=utf-8
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Utils for metagraph construction'''
import networkx as nx
import pandas as pd
def convert_pandas_df_to_metagraph(df, source_column, target_column,
source_metadata_columns, target_metadata_columns,
edge_attribute_columns):
'''Convert a pandas dataframe to a networkx multigraph
Arguments:
df: pandas dataframe
source_column: column in df to use as source node label
target_column: column in df to use as target node label
source_metadata_columns: for source nodes, dict from column name to
metadata field name
target_metadata_columns: for target nodes, dict from column name to
metadata field name
edge_attribute_columns: list of columns in df to use as edge attributes
Returns:
g: networkx Graph holding all graph data.
'''
g = nx.convert_matrix.from_pandas_edgelist(df,
source=source_column,
target=target_column,
edge_attr=edge_attribute_columns,
create_using=nx.classes.multidigraph.MultiDiGraph)
for metadata_column, metadata_name in source_metadata_columns.items():
attribute_dict = dict(zip(df[source_column], df[metadata_column]))
nx.set_node_attributes(g, attribute_dict, metadata_name)
  for metadata_column, metadata_name in target_metadata_columns.items():
attribute_dict = dict(zip(df[target_column], df[metadata_column]))
nx.set_node_attributes(g, attribute_dict, metadata_name)
return g
|
Add KG graph building util.# coding=utf-8
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Utils for metagraph construction'''
import networkx as nx
import pandas as pd
def convert_pandas_df_to_metagraph(df, source_column, target_column,
source_metadata_columns, target_metadata_columns,
edge_attribute_columns):
'''Convert a pandas dataframe to a networkx multigraph
Arguments:
df: pandas dataframe
source_column: column in df to use as source node label
target_column: column in df to use as target node label
source_metadata_columns: for source nodes, dict from column name to
metadata field name
target_metadata_columns: for target nodes, dict from column name to
metadata field name
edge_attribute_columns: list of columns in df to use as edge attributes
Returns:
g: networkx Graph holding all graph data.
'''
g = nx.convert_matrix.from_pandas_edgelist(df,
source=source_column,
target=target_column,
edge_attr=edge_attribute_columns,
create_using=nx.classes.multidigraph.MultiDiGraph)
for metadata_column, metadata_name in source_metadata_columns.items():
attribute_dict = dict(zip(df[source_column], df[metadata_column]))
nx.set_node_attributes(g, attribute_dict, metadata_name)
  for metadata_column, metadata_name in target_metadata_columns.items():
attribute_dict = dict(zip(df[target_column], df[metadata_column]))
nx.set_node_attributes(g, attribute_dict, metadata_name)
return g
|
<commit_before><commit_msg>Add KG graph building util.<commit_after># coding=utf-8
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Utils for metagraph construction'''
import networkx as nx
import pandas as pd
def convert_pandas_df_to_metagraph(df, source_column, target_column,
source_metadata_columns, target_metadata_columns,
edge_attribute_columns):
'''Convert a pandas dataframe to a networkx multigraph
Arguments:
df: pandas dataframe
source_column: column in df to use as source node label
target_column: column in df to use as target node label
source_metadata_columns: for source nodes, dict from column name to
metadata field name
target_metadata_columns: for target nodes, dict from column name to
metadata field name
edge_attribute_columns: list of columns in df to use as edge attributes
Returns:
g: networkx Graph holding all graph data.
'''
g = nx.convert_matrix.from_pandas_edgelist(df,
source=source_column,
target=target_column,
edge_attr=edge_attribute_columns,
create_using=nx.classes.multidigraph.MultiDiGraph)
for metadata_column, metadata_name in source_metadata_columns.items():
attribute_dict = dict(zip(df[source_column], df[metadata_column]))
nx.set_node_attributes(g, attribute_dict, metadata_name)
  for metadata_column, metadata_name in target_metadata_columns.items():
attribute_dict = dict(zip(df[target_column], df[metadata_column]))
nx.set_node_attributes(g, attribute_dict, metadata_name)
return g
|
|
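A minimal usage sketch for the conversion helper above, run on a hand-made DataFrame. The column names and values are invented for illustration and the import assumes the module path shown in this record.
import pandas as pd
from cabby.data.metagraph.utils import convert_pandas_df_to_metagraph

# Three edges between points of interest and streets, with node and edge metadata.
df = pd.DataFrame({
    'poi': ['Cafe A', 'Cafe A', 'Shop B'],
    'street': ['Main St', 'Main St', 'Oak Ave'],
    'poi_type': ['cafe', 'cafe', 'shop'],
    'street_type': ['street', 'street', 'avenue'],
    'relation': ['located_in', 'near', 'located_in'],
})

g = convert_pandas_df_to_metagraph(
    df,
    source_column='poi',
    target_column='street',
    source_metadata_columns={'poi_type': 'type'},
    target_metadata_columns={'street_type': 'type'},
    edge_attribute_columns=['relation'])

print(g.number_of_nodes(), g.number_of_edges())  # 4 nodes, 3 edges (parallel edges are kept)
print(g.nodes['Cafe A'])                         # {'type': 'cafe'}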
e10e890ceadf166db76d92f5d6407683d1c30ba2
|
jarbas/core/tests/test_loadsuppliers_command.py
|
jarbas/core/tests/test_loadsuppliers_command.py
|
from datetime import datetime
from django.test import TestCase
from jarbas.core.management.commands.loadsuppliers import Command
class TestStaticMethods(TestCase):
def setUp(self):
self.command = Command()
def test_to_float(self):
self.assertEqual(self.command.to_float(1), 1.0)
self.assertEqual(self.command.to_float('abc'), None)
def test_to_email(self):
expected = 'jane@example.com'
self.assertEqual(self.command.to_email('abc'), None)
self.assertEqual(self.command.to_email('jane@example.com'), expected)
def test_get_file_name(self):
expected = '1970-01-01-ahoy.xz'
with self.settings(AMAZON_S3_SUPPLIERS_DATE='1970-01-01'):
self.assertEqual(self.command.get_file_name('ahoy'), expected)
def test_to_date(self):
expected = '1991-07-22'
self.assertEqual(self.command.to_date('22/7/91'), expected)
self.assertEqual(self.command.to_date('22/13/91'), None)
self.assertEqual(self.command.to_date('aa/7/91'), None)
|
Add tests for loadsuppliers static methods
|
Add tests for loadsuppliers static methods
|
Python
|
mit
|
Guilhermeslucas/jarbas,datasciencebr/serenata-de-amor,datasciencebr/jarbas,Guilhermeslucas/jarbas,datasciencebr/jarbas,marcusrehm/serenata-de-amor,marcusrehm/serenata-de-amor,datasciencebr/serenata-de-amor,datasciencebr/jarbas,rogeriochaves/jarbas,rogeriochaves/jarbas,rogeriochaves/jarbas,marcusrehm/serenata-de-amor,Guilhermeslucas/jarbas,datasciencebr/jarbas,Guilhermeslucas/jarbas,marcusrehm/serenata-de-amor,rogeriochaves/jarbas
|
Add tests for loadsuppliers static methods
|
from datetime import datetime
from django.test import TestCase
from jarbas.core.management.commands.loadsuppliers import Command
class TestStaticMethods(TestCase):
def setUp(self):
self.command = Command()
def test_to_float(self):
self.assertEqual(self.command.to_float(1), 1.0)
self.assertEqual(self.command.to_float('abc'), None)
def test_to_email(self):
expected = 'jane@example.com'
self.assertEqual(self.command.to_email('abc'), None)
self.assertEqual(self.command.to_email('jane@example.com'), expected)
def test_get_file_name(self):
expected = '1970-01-01-ahoy.xz'
with self.settings(AMAZON_S3_SUPPLIERS_DATE='1970-01-01'):
self.assertEqual(self.command.get_file_name('ahoy'), expected)
def test_to_date(self):
expected = '1991-07-22'
self.assertEqual(self.command.to_date('22/7/91'), expected)
self.assertEqual(self.command.to_date('22/13/91'), None)
self.assertEqual(self.command.to_date('aa/7/91'), None)
|
<commit_before><commit_msg>Add tests for loadsuppliers static methods<commit_after>
|
from datetime import datetime
from django.test import TestCase
from jarbas.core.management.commands.loadsuppliers import Command
class TestStaticMethods(TestCase):
def setUp(self):
self.command = Command()
def test_to_float(self):
self.assertEqual(self.command.to_float(1), 1.0)
self.assertEqual(self.command.to_float('abc'), None)
def test_to_email(self):
expected = 'jane@example.com'
self.assertEqual(self.command.to_email('abc'), None)
self.assertEqual(self.command.to_email('jane@example.com'), expected)
def test_get_file_name(self):
expected = '1970-01-01-ahoy.xz'
with self.settings(AMAZON_S3_SUPPLIERS_DATE='1970-01-01'):
self.assertEqual(self.command.get_file_name('ahoy'), expected)
def test_to_date(self):
expected = '1991-07-22'
self.assertEqual(self.command.to_date('22/7/91'), expected)
self.assertEqual(self.command.to_date('22/13/91'), None)
self.assertEqual(self.command.to_date('aa/7/91'), None)
|
Add tests for loadsuppliers static methodsfrom datetime import datetime
from django.test import TestCase
from jarbas.core.management.commands.loadsuppliers import Command
class TestStaticMethods(TestCase):
def setUp(self):
self.command = Command()
def test_to_float(self):
self.assertEqual(self.command.to_float(1), 1.0)
self.assertEqual(self.command.to_float('abc'), None)
def test_to_email(self):
expected = 'jane@example.com'
self.assertEqual(self.command.to_email('abc'), None)
self.assertEqual(self.command.to_email('jane@example.com'), expected)
def test_get_file_name(self):
expected = '1970-01-01-ahoy.xz'
with self.settings(AMAZON_S3_SUPPLIERS_DATE='1970-01-01'):
self.assertEqual(self.command.get_file_name('ahoy'), expected)
def test_to_date(self):
expected = '1991-07-22'
self.assertEqual(self.command.to_date('22/7/91'), expected)
self.assertEqual(self.command.to_date('22/13/91'), None)
self.assertEqual(self.command.to_date('aa/7/91'), None)
|
<commit_before><commit_msg>Add tests for loadsuppliers static methods<commit_after>from datetime import datetime
from django.test import TestCase
from jarbas.core.management.commands.loadsuppliers import Command
class TestStaticMethods(TestCase):
def setUp(self):
self.command = Command()
def test_to_float(self):
self.assertEqual(self.command.to_float(1), 1.0)
self.assertEqual(self.command.to_float('abc'), None)
def test_to_email(self):
expected = 'jane@example.com'
self.assertEqual(self.command.to_email('abc'), None)
self.assertEqual(self.command.to_email('jane@example.com'), expected)
def test_get_file_name(self):
expected = '1970-01-01-ahoy.xz'
with self.settings(AMAZON_S3_SUPPLIERS_DATE='1970-01-01'):
self.assertEqual(self.command.get_file_name('ahoy'), expected)
def test_to_date(self):
expected = '1991-07-22'
self.assertEqual(self.command.to_date('22/7/91'), expected)
self.assertEqual(self.command.to_date('22/13/91'), None)
self.assertEqual(self.command.to_date('aa/7/91'), None)
|
|
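The tests above pin down the behaviour of a handful of static helpers on the loadsuppliers Command. The snippet below is a stand-alone, illustrative sketch of functions with that behaviour, not the real jarbas implementation; get_file_name is omitted because it depends on Django settings.
# Illustrative only: one way the helpers exercised above could be written.
import re
from datetime import datetime

def to_float(value):
    try:
        return float(value)
    except (TypeError, ValueError):
        return None

def to_email(value):
    # Very small sanity check, not full RFC 5322 validation.
    return value if re.match(r'^[^@\s]+@[^@\s]+\.[^@\s]+$', value or '') else None

def to_date(value):
    try:
        return datetime.strptime(value, '%d/%m/%y').strftime('%Y-%m-%d')
    except ValueError:
        return None

assert to_float(1) == 1.0 and to_float('abc') is None
assert to_email('jane@example.com') == 'jane@example.com' and to_email('abc') is None
assert to_date('22/7/91') == '1991-07-22' and to_date('22/13/91') is None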
18982181574bc69457d792848c028c3efd09b543
|
tests/core/test_decorator.py
|
tests/core/test_decorator.py
|
from fastats.core.decorator import fs
def square(x):
return x * x
@fs
def cube(x):
return x * x * x
@fs
def func(x):
a = square(x)
return a / 2
def test_fs_decorated_functions_as_kwargs_to_another():
assert square(2) == 4.0
assert square(3) == 9.0
assert cube(3) == 27.0
assert cube(4) == 64.0
assert func(6) == 18.0
assert func(4) == 8.0
assert func(6, square=cube) == 108.0
assert func(4, square=cube) == 32.0
if __name__ == '__main__':
import pytest
pytest.main([__file__])
|
Add test for decorated kwargs to decorated function
|
Add test for decorated kwargs to decorated function
|
Python
|
mit
|
fastats/fastats,dwillmer/fastats
|
Add test for decorated kwargs to decorated function
|
from fastats.core.decorator import fs
def square(x):
return x * x
@fs
def cube(x):
return x * x * x
@fs
def func(x):
a = square(x)
return a / 2
def test_fs_decorated_functions_as_kwargs_to_another():
assert square(2) == 4.0
assert square(3) == 9.0
assert cube(3) == 27.0
assert cube(4) == 64.0
assert func(6) == 18.0
assert func(4) == 8.0
assert func(6, square=cube) == 108.0
assert func(4, square=cube) == 32.0
if __name__ == '__main__':
import pytest
pytest.main([__file__])
|
<commit_before><commit_msg>Add test for decorated kwargs to decorated function<commit_after>
|
from fastats.core.decorator import fs
def square(x):
return x * x
@fs
def cube(x):
return x * x * x
@fs
def func(x):
a = square(x)
return a / 2
def test_fs_decorated_functions_as_kwargs_to_another():
assert square(2) == 4.0
assert square(3) == 9.0
assert cube(3) == 27.0
assert cube(4) == 64.0
assert func(6) == 18.0
assert func(4) == 8.0
assert func(6, square=cube) == 108.0
assert func(4, square=cube) == 32.0
if __name__ == '__main__':
import pytest
pytest.main([__file__])
|
Add test for decorated kwargs to decorated function
from fastats.core.decorator import fs
def square(x):
return x * x
@fs
def cube(x):
return x * x * x
@fs
def func(x):
a = square(x)
return a / 2
def test_fs_decorated_functions_as_kwargs_to_another():
assert square(2) == 4.0
assert square(3) == 9.0
assert cube(3) == 27.0
assert cube(4) == 64.0
assert func(6) == 18.0
assert func(4) == 8.0
assert func(6, square=cube) == 108.0
assert func(4, square=cube) == 32.0
if __name__ == '__main__':
import pytest
pytest.main([__file__])
|
<commit_before><commit_msg>Add test for decorated kwargs to decorated function<commit_after>
from fastats.core.decorator import fs
def square(x):
return x * x
@fs
def cube(x):
return x * x * x
@fs
def func(x):
a = square(x)
return a / 2
def test_fs_decorated_functions_as_kwargs_to_another():
assert square(2) == 4.0
assert square(3) == 9.0
assert cube(3) == 27.0
assert cube(4) == 64.0
assert func(6) == 18.0
assert func(4) == 8.0
assert func(6, square=cube) == 108.0
assert func(4, square=cube) == 32.0
if __name__ == '__main__':
import pytest
pytest.main([__file__])
|
|
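The test depends on one behaviour of the fs decorator: an inner function that a decorated function calls can be swapped out via a keyword argument. The sketch below is a toy re-implementation of just that observable behaviour; fastats itself does this with AST rewriting and JIT compilation, so this is not its actual mechanism.
# Toy illustration of the "swap an inner function via kwargs" idea tested above.
import functools

def fs_sketch(func):
    @functools.wraps(func)
    def wrapper(*args, **replacements):
        saved = {}
        for name, replacement in replacements.items():
            saved[name] = func.__globals__.get(name)
            func.__globals__[name] = replacement  # temporarily rebind the inner function
        try:
            return func(*args)
        finally:
            for name, original in saved.items():
                func.__globals__[name] = original
    return wrapper

def square(x):
    return x * x

def cube(x):
    return x * x * x

@fs_sketch
def func(x):
    return square(x) / 2

assert func(6) == 18.0
assert func(6, square=cube) == 108.0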
68dd135e43e78968796b9967f83361b5225961c0
|
l10n_br_sale/models/sale-l10n_br_sale_service.py
|
l10n_br_sale/models/sale-l10n_br_sale_service.py
|
# Copyright (C) 2014 Renato Lima - Akretion
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import _, api, models
from openerp.exceptions import Warning as UserError
class SaleOrder(models.Model):
_inherit = "sale.order"
@api.model
def _prepare_invoice(self, order, lines):
result = super(SaleOrder, self)._prepare_invoice(order, lines)
result["fiscal_type"] = self.env.context.get("fiscal_type")
return result
@api.model
def _make_invoice(self, order, lines):
context = dict(self.env.context)
obj_invoice_line = self.env["account.invoice.line"]
lines_service = []
lines_product = []
inv_id_product = 0
inv_id_service = 0
def call_make_invoice(self, lines):
self = self.with_context(context)
return super(SaleOrder, self)._make_invoice(order, lines)
if not order.fiscal_category_id.property_journal:
raise UserError(
_("Error !"),
_(
"There is no journal defined for this company in Fiscal "
"Category: %s Company: %s"
)
% (order.fiscal_category_id.name, order.company_id.name),
)
for inv_line in obj_invoice_line.browse(lines):
if inv_line.product_id.fiscal_type == "service":
lines_service.append(inv_line.id)
elif inv_line.product_id.fiscal_type == "product":
lines_product.append(inv_line.id)
if lines_product:
context["fiscal_type"] = "product"
inv_id_product = call_make_invoice(self, lines_product)
if lines_service:
context["fiscal_type"] = "service"
inv_id_service = call_make_invoice(self, lines_service)
if inv_id_product and inv_id_service:
self._cr.execute(
"insert into sale_order_invoice_rel "
"(order_id,invoice_id) values (%s,%s)",
(order.id, inv_id_service),
)
inv_id = inv_id_product or inv_id_service
return inv_id
|
Move models from l10n_br_sale_service to l10n_br_sale.
|
[12.0][MIG][WIP] Move models from l10n_br_sale_service to l10n_br_sale.
|
Python
|
agpl-3.0
|
OCA/l10n-brazil,akretion/l10n-brazil,akretion/l10n-brazil,akretion/l10n-brazil,OCA/l10n-brazil,OCA/l10n-brazil
|
[12.0][MIG][WIP] Move models from l10n_br_sale_service to l10n_br_sale.
|
# Copyright (C) 2014 Renato Lima - Akretion
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import _, api, models
from openerp.exceptions import Warning as UserError
class SaleOrder(models.Model):
_inherit = "sale.order"
@api.model
def _prepare_invoice(self, order, lines):
result = super(SaleOrder, self)._prepare_invoice(order, lines)
result["fiscal_type"] = self.env.context.get("fiscal_type")
return result
@api.model
def _make_invoice(self, order, lines):
context = dict(self.env.context)
obj_invoice_line = self.env["account.invoice.line"]
lines_service = []
lines_product = []
inv_id_product = 0
inv_id_service = 0
def call_make_invoice(self, lines):
self = self.with_context(context)
return super(SaleOrder, self)._make_invoice(order, lines)
if not order.fiscal_category_id.property_journal:
raise UserError(
_("Error !"),
_(
"There is no journal defined for this company in Fiscal "
"Category: %s Company: %s"
)
% (order.fiscal_category_id.name, order.company_id.name),
)
for inv_line in obj_invoice_line.browse(lines):
if inv_line.product_id.fiscal_type == "service":
lines_service.append(inv_line.id)
elif inv_line.product_id.fiscal_type == "product":
lines_product.append(inv_line.id)
if lines_product:
context["fiscal_type"] = "product"
inv_id_product = call_make_invoice(self, lines_product)
if lines_service:
context["fiscal_type"] = "service"
inv_id_service = call_make_invoice(self, lines_service)
if inv_id_product and inv_id_service:
self._cr.execute(
"insert into sale_order_invoice_rel "
"(order_id,invoice_id) values (%s,%s)",
(order.id, inv_id_service),
)
inv_id = inv_id_product or inv_id_service
return inv_id
|
<commit_before><commit_msg>[12.0][MIG][WIP] Move models from l10n_br_sale_service to l10n_br_sale.<commit_after>
|
# Copyright (C) 2014 Renato Lima - Akretion
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import _, api, models
from openerp.exceptions import Warning as UserError
class SaleOrder(models.Model):
_inherit = "sale.order"
@api.model
def _prepare_invoice(self, order, lines):
result = super(SaleOrder, self)._prepare_invoice(order, lines)
result["fiscal_type"] = self.env.context.get("fiscal_type")
return result
@api.model
def _make_invoice(self, order, lines):
context = dict(self.env.context)
obj_invoice_line = self.env["account.invoice.line"]
lines_service = []
lines_product = []
inv_id_product = 0
inv_id_service = 0
def call_make_invoice(self, lines):
self = self.with_context(context)
return super(SaleOrder, self)._make_invoice(order, lines)
if not order.fiscal_category_id.property_journal:
raise UserError(
_("Error !"),
_(
"There is no journal defined for this company in Fiscal "
"Category: %s Company: %s"
)
% (order.fiscal_category_id.name, order.company_id.name),
)
for inv_line in obj_invoice_line.browse(lines):
if inv_line.product_id.fiscal_type == "service":
lines_service.append(inv_line.id)
elif inv_line.product_id.fiscal_type == "product":
lines_product.append(inv_line.id)
if lines_product:
context["fiscal_type"] = "product"
inv_id_product = call_make_invoice(self, lines_product)
if lines_service:
context["fiscal_type"] = "service"
inv_id_service = call_make_invoice(self, lines_service)
if inv_id_product and inv_id_service:
self._cr.execute(
"insert into sale_order_invoice_rel "
"(order_id,invoice_id) values (%s,%s)",
(order.id, inv_id_service),
)
inv_id = inv_id_product or inv_id_service
return inv_id
|
[12.0][MIG][WIP] Move models from l10n_br_sale_service to l10n_br_sale.# Copyright (C) 2014 Renato Lima - Akretion
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import _, api, models
from openerp.exceptions import Warning as UserError
class SaleOrder(models.Model):
_inherit = "sale.order"
@api.model
def _prepare_invoice(self, order, lines):
result = super(SaleOrder, self)._prepare_invoice(order, lines)
result["fiscal_type"] = self.env.context.get("fiscal_type")
return result
@api.model
def _make_invoice(self, order, lines):
context = dict(self.env.context)
obj_invoice_line = self.env["account.invoice.line"]
lines_service = []
lines_product = []
inv_id_product = 0
inv_id_service = 0
def call_make_invoice(self, lines):
self = self.with_context(context)
return super(SaleOrder, self)._make_invoice(order, lines)
if not order.fiscal_category_id.property_journal:
raise UserError(
_("Error !"),
_(
"There is no journal defined for this company in Fiscal "
"Category: %s Company: %s"
)
% (order.fiscal_category_id.name, order.company_id.name),
)
for inv_line in obj_invoice_line.browse(lines):
if inv_line.product_id.fiscal_type == "service":
lines_service.append(inv_line.id)
elif inv_line.product_id.fiscal_type == "product":
lines_product.append(inv_line.id)
if lines_product:
context["fiscal_type"] = "product"
inv_id_product = call_make_invoice(self, lines_product)
if lines_service:
context["fiscal_type"] = "service"
inv_id_service = call_make_invoice(self, lines_service)
if inv_id_product and inv_id_service:
self._cr.execute(
"insert into sale_order_invoice_rel "
"(order_id,invoice_id) values (%s,%s)",
(order.id, inv_id_service),
)
inv_id = inv_id_product or inv_id_service
return inv_id
|
<commit_before><commit_msg>[12.0][MIG][WIP] Move models from l10n_br_sale_service to l10n_br_sale.<commit_after># Copyright (C) 2014 Renato Lima - Akretion
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import _, api, models
from openerp.exceptions import Warning as UserError
class SaleOrder(models.Model):
_inherit = "sale.order"
@api.model
def _prepare_invoice(self, order, lines):
result = super(SaleOrder, self)._prepare_invoice(order, lines)
result["fiscal_type"] = self.env.context.get("fiscal_type")
return result
@api.model
def _make_invoice(self, order, lines):
context = dict(self.env.context)
obj_invoice_line = self.env["account.invoice.line"]
lines_service = []
lines_product = []
inv_id_product = 0
inv_id_service = 0
def call_make_invoice(self, lines):
self = self.with_context(context)
return super(SaleOrder, self)._make_invoice(order, lines)
if not order.fiscal_category_id.property_journal:
raise UserError(
_("Error !"),
_(
"There is no journal defined for this company in Fiscal "
"Category: %s Company: %s"
)
% (order.fiscal_category_id.name, order.company_id.name),
)
for inv_line in obj_invoice_line.browse(lines):
if inv_line.product_id.fiscal_type == "service":
lines_service.append(inv_line.id)
elif inv_line.product_id.fiscal_type == "product":
lines_product.append(inv_line.id)
if lines_product:
context["fiscal_type"] = "product"
inv_id_product = call_make_invoice(self, lines_product)
if lines_service:
context["fiscal_type"] = "service"
inv_id_service = call_make_invoice(self, lines_service)
if inv_id_product and inv_id_service:
self._cr.execute(
"insert into sale_order_invoice_rel "
"(order_id,invoice_id) values (%s,%s)",
(order.id, inv_id_service),
)
inv_id = inv_id_product or inv_id_service
return inv_id
|
|
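Stripped of the Odoo plumbing, the core of _make_invoice above is a partition step: group the invoice lines by fiscal type and build one invoice per non-empty group. The framework-free sketch below shows only that grouping idea with made-up line dictionaries.
from collections import defaultdict

def split_lines_by_fiscal_type(lines):
    groups = defaultdict(list)
    for line in lines:
        groups[line['fiscal_type']].append(line)
    return groups

lines = [
    {'name': 'Support hours', 'fiscal_type': 'service'},
    {'name': 'Router', 'fiscal_type': 'product'},
    {'name': 'Installation', 'fiscal_type': 'service'},
]

groups = split_lines_by_fiscal_type(lines)
invoices = [{'fiscal_type': ftype, 'lines': group} for ftype, group in groups.items()]
assert len(invoices) == 2  # one product invoice, one service invoice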
aadbb5bc69aff6cc1bdffbc85da6499ab16f3e2c
|
tests/test_assembly_graph.py
|
tests/test_assembly_graph.py
|
import pytest # noqa
from phasm.assembly_graph import (AssemblyGraph, remove_tips, node_path_edges,
remove_transitive_edges, clean_graph)
def test_tip_removal():
g = AssemblyGraph()
g.add_edges_from(node_path_edges(['v1', 'v2', 'v3', 'v4', 'v5', 'v6', 'v7',
'v8', 'v9']))
g.add_edges_from([
('v2', 'vt1'),
('vt1', 'vt2'),
('vt2', 'vt3')
])
num_tip_edges = remove_tips(g)
num_isolated_nodes = clean_graph(g)
edges = set(g.edges())
assert ('v2', 'vt1') not in edges
assert ('vt1', 'vt2') not in edges
assert ('vt2', 'vt3') not in edges
assert ('v8', 'v9') in edges
assert num_tip_edges == 3
assert num_isolated_nodes == 3
def test_transitive_reduction():
g = AssemblyGraph()
g.add_edges_from(node_path_edges(['v1', 'v2', 'v3', 'v4', 'v5', 'v6', 'v7',
'v8', 'v9', 'v10', 'v11', 'v12']))
for u, v, data in g.edges_iter(data=True):
data['weight'] = 1
g.add_edges_from([
('v2', 'v4', {'weight': 3}),
('v8', 'v11', {'weight': 5})
])
edges_to_remove = remove_transitive_edges(g)
g.remove_edges_from(edges_to_remove)
edges = set(g.edges())
assert ('v2', 'v4') not in edges
assert ('v8', 'v11') in edges
assert len(edges_to_remove) == 1
|
Add tests for a few assembly graph operations
|
Add tests for a few assembly graph operations
|
Python
|
mit
|
AbeelLab/phasm,AbeelLab/phasm
|
Add tests for a few assembly graph operations
|
import pytest # noqa
from phasm.assembly_graph import (AssemblyGraph, remove_tips, node_path_edges,
remove_transitive_edges, clean_graph)
def test_tip_removal():
g = AssemblyGraph()
g.add_edges_from(node_path_edges(['v1', 'v2', 'v3', 'v4', 'v5', 'v6', 'v7',
'v8', 'v9']))
g.add_edges_from([
('v2', 'vt1'),
('vt1', 'vt2'),
('vt2', 'vt3')
])
num_tip_edges = remove_tips(g)
num_isolated_nodes = clean_graph(g)
edges = set(g.edges())
assert ('v2', 'vt1') not in edges
assert ('vt1', 'vt2') not in edges
assert ('vt2', 'vt3') not in edges
assert ('v8', 'v9') in edges
assert num_tip_edges == 3
assert num_isolated_nodes == 3
def test_transitive_reduction():
g = AssemblyGraph()
g.add_edges_from(node_path_edges(['v1', 'v2', 'v3', 'v4', 'v5', 'v6', 'v7',
'v8', 'v9', 'v10', 'v11', 'v12']))
for u, v, data in g.edges_iter(data=True):
data['weight'] = 1
g.add_edges_from([
('v2', 'v4', {'weight': 3}),
('v8', 'v11', {'weight': 5})
])
edges_to_remove = remove_transitive_edges(g)
g.remove_edges_from(edges_to_remove)
edges = set(g.edges())
assert ('v2', 'v4') not in edges
assert ('v8', 'v11') in edges
assert len(edges_to_remove) == 1
|
<commit_before><commit_msg>Add tests for a few assembly graph operations<commit_after>
|
import pytest # noqa
from phasm.assembly_graph import (AssemblyGraph, remove_tips, node_path_edges,
remove_transitive_edges, clean_graph)
def test_tip_removal():
g = AssemblyGraph()
g.add_edges_from(node_path_edges(['v1', 'v2', 'v3', 'v4', 'v5', 'v6', 'v7',
'v8', 'v9']))
g.add_edges_from([
('v2', 'vt1'),
('vt1', 'vt2'),
('vt2', 'vt3')
])
num_tip_edges = remove_tips(g)
num_isolated_nodes = clean_graph(g)
edges = set(g.edges())
assert ('v2', 'vt1') not in edges
assert ('vt1', 'vt2') not in edges
assert ('vt2', 'vt3') not in edges
assert ('v8', 'v9') in edges
assert num_tip_edges == 3
assert num_isolated_nodes == 3
def test_transitive_reduction():
g = AssemblyGraph()
g.add_edges_from(node_path_edges(['v1', 'v2', 'v3', 'v4', 'v5', 'v6', 'v7',
'v8', 'v9', 'v10', 'v11', 'v12']))
for u, v, data in g.edges_iter(data=True):
data['weight'] = 1
g.add_edges_from([
('v2', 'v4', {'weight': 3}),
('v8', 'v11', {'weight': 5})
])
edges_to_remove = remove_transitive_edges(g)
g.remove_edges_from(edges_to_remove)
edges = set(g.edges())
assert ('v2', 'v4') not in edges
assert ('v8', 'v11') in edges
assert len(edges_to_remove) == 1
|
Add tests for a few assembly graph operationsimport pytest # noqa
from phasm.assembly_graph import (AssemblyGraph, remove_tips, node_path_edges,
remove_transitive_edges, clean_graph)
def test_tip_removal():
g = AssemblyGraph()
g.add_edges_from(node_path_edges(['v1', 'v2', 'v3', 'v4', 'v5', 'v6', 'v7',
'v8', 'v9']))
g.add_edges_from([
('v2', 'vt1'),
('vt1', 'vt2'),
('vt2', 'vt3')
])
num_tip_edges = remove_tips(g)
num_isolated_nodes = clean_graph(g)
edges = set(g.edges())
assert ('v2', 'vt1') not in edges
assert ('vt1', 'vt2') not in edges
assert ('vt2', 'vt3') not in edges
assert ('v8', 'v9') in edges
assert num_tip_edges == 3
assert num_isolated_nodes == 3
def test_transitive_reduction():
g = AssemblyGraph()
g.add_edges_from(node_path_edges(['v1', 'v2', 'v3', 'v4', 'v5', 'v6', 'v7',
'v8', 'v9', 'v10', 'v11', 'v12']))
for u, v, data in g.edges_iter(data=True):
data['weight'] = 1
g.add_edges_from([
('v2', 'v4', {'weight': 3}),
('v8', 'v11', {'weight': 5})
])
edges_to_remove = remove_transitive_edges(g)
g.remove_edges_from(edges_to_remove)
edges = set(g.edges())
assert ('v2', 'v4') not in edges
assert ('v8', 'v11') in edges
assert len(edges_to_remove) == 1
|
<commit_before><commit_msg>Add tests for a few assembly graph operations<commit_after>import pytest # noqa
from phasm.assembly_graph import (AssemblyGraph, remove_tips, node_path_edges,
remove_transitive_edges, clean_graph)
def test_tip_removal():
g = AssemblyGraph()
g.add_edges_from(node_path_edges(['v1', 'v2', 'v3', 'v4', 'v5', 'v6', 'v7',
'v8', 'v9']))
g.add_edges_from([
('v2', 'vt1'),
('vt1', 'vt2'),
('vt2', 'vt3')
])
num_tip_edges = remove_tips(g)
num_isolated_nodes = clean_graph(g)
edges = set(g.edges())
assert ('v2', 'vt1') not in edges
assert ('vt1', 'vt2') not in edges
assert ('vt2', 'vt3') not in edges
assert ('v8', 'v9') in edges
assert num_tip_edges == 3
assert num_isolated_nodes == 3
def test_transitive_reduction():
g = AssemblyGraph()
g.add_edges_from(node_path_edges(['v1', 'v2', 'v3', 'v4', 'v5', 'v6', 'v7',
'v8', 'v9', 'v10', 'v11', 'v12']))
for u, v, data in g.edges_iter(data=True):
data['weight'] = 1
g.add_edges_from([
('v2', 'v4', {'weight': 3}),
('v8', 'v11', {'weight': 5})
])
edges_to_remove = remove_transitive_edges(g)
g.remove_edges_from(edges_to_remove)
edges = set(g.edges())
assert ('v2', 'v4') not in edges
assert ('v8', 'v11') in edges
assert len(edges_to_remove) == 1
|
|
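The tests rely on node_path_edges to spell out a chain of nodes. A plausible stand-alone version is just the consecutive-pairs helper below; the real phasm helper may differ, so treat this as a sketch.
import networkx as nx

def node_path_edges_sketch(nodes):
    """Yield (u, v) pairs that chain the given nodes into a simple path."""
    return zip(nodes, nodes[1:])

g = nx.DiGraph()
g.add_edges_from(node_path_edges_sketch(['v1', 'v2', 'v3', 'v4']))
assert set(g.edges()) == {('v1', 'v2'), ('v2', 'v3'), ('v3', 'v4')}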
2664406698c9f39527f005a3546740e2763c7d1f
|
benchmarks/expand4_sage.py
|
benchmarks/expand4_sage.py
|
print "import..."
from timeit import default_timer as clock
from sage.all import var
var("x")
e = 1
print "constructing expression..."
for i in range(1, 351):
e *= (i+x)**3
print "running benchmark..."
t1 = clock()
f = e.expand()
t2 = clock()
print "Total time:", t2-t1, "s"
|
Add Sage benchmark for expand4
|
Add Sage benchmark for expand4
|
Python
|
mit
|
symengine/symengine.py,symengine/symengine.py,bjodah/symengine.py,symengine/symengine.py,bjodah/symengine.py,bjodah/symengine.py
|
Add Sage benchmark for expand4
|
print "import..."
from timeit import default_timer as clock
from sage.all import var
var("x")
e = 1
print "constructing expression..."
for i in range(1, 351):
e *= (i+x)**3
print "running benchmark..."
t1 = clock()
f = e.expand()
t2 = clock()
print "Total time:", t2-t1, "s"
|
<commit_before><commit_msg>Add Sage benchmark for expand4<commit_after>
|
print "import..."
from timeit import default_timer as clock
from sage.all import var
var("x")
e = 1
print "constructing expression..."
for i in range(1, 351):
e *= (i+x)**3
print "running benchmark..."
t1 = clock()
f = e.expand()
t2 = clock()
print "Total time:", t2-t1, "s"
|
Add Sage benchmark for expand4print "import..."
from timeit import default_timer as clock
from sage.all import var
var("x")
e = 1
print "constructing expression..."
for i in range(1, 351):
e *= (i+x)**3
print "running benchmark..."
t1 = clock()
f = e.expand()
t2 = clock()
print "Total time:", t2-t1, "s"
|
<commit_before><commit_msg>Add Sage benchmark for expand4<commit_after>print "import..."
from timeit import default_timer as clock
from sage.all import var
var("x")
e = 1
print "constructing expression..."
for i in range(1, 351):
e *= (i+x)**3
print "running benchmark..."
t1 = clock()
f = e.expand()
t2 = clock()
print "Total time:", t2-t1, "s"
|
|
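Since this benchmark lives in the symengine.py repository, a natural counterpart is the same expression expanded with symengine itself. The sketch below assumes the Python 3 bindings with a Symbol class and an expand method on expressions, so the Sage timing above has a direct point of comparison.
from timeit import default_timer as clock
from symengine import Symbol

x = Symbol("x")
e = 1
print("constructing expression...")
for i in range(1, 351):
    e *= (i + x)**3

print("running benchmark...")
t1 = clock()
f = e.expand()
t2 = clock()
print("Total time:", t2 - t1, "s")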
1cd217777c1c0f9643fe203c68cdd091e00fc909
|
tests/test_validate_export.py
|
tests/test_validate_export.py
|
import Bio.Phylo
from io import StringIO
from pathlib import Path
import pytest
import sys
# we assume (and assert) that this script is running from the tests/ directory
sys.path.append(str(Path(__file__).parent.parent.parent))
from augur.export_v2 import convert_tree_to_json_structure
from augur.validate import ValidateError
from augur.validate_export import ensure_no_duplicate_names
class TestValidateExport():
def test_export_without_duplicate_names(self):
# Create a tree with unique tip names.
tree = Bio.Phylo.read(StringIO("root(A, internal(B, C))"), "newick")
metadata = {"A": {}, "B": {}, "C": {}, "root": {}, "internal": {}}
root = convert_tree_to_json_structure(tree.root, metadata)
ensure_no_duplicate_names(root, ValidateError)
def test_export_with_duplicate_names(self):
# Create a tree with duplicate tip names.
tree = Bio.Phylo.read(StringIO("root(A, internal(B, B))"), "newick")
metadata = {"A": {}, "B": {}, "root": {}, "internal": {}}
root = convert_tree_to_json_structure(tree.root, metadata)
with pytest.raises(ValidateError):
ensure_no_duplicate_names(root, ValidateError)
|
Add unit tests for ensure_no_duplicate_names
|
Add unit tests for ensure_no_duplicate_names
Adds minimal tests for validation of no duplicate names. Instead of
mocking up the tree JSON directly, these tests build a minimal BioPython
tree and use augur's own tree-to-JSON function to make the more verbose
JSON structure expected by the validation function.
|
Python
|
agpl-3.0
|
nextstrain/augur,nextstrain/augur,nextstrain/augur,blab/nextstrain-augur
|
Add unit tests for ensure_no_duplicate_names
Adds minimal tests for validation of no duplicate names. Instead of
mocking up the tree JSON directly, these tests build a minimal BioPython
tree and use augur's own tree-to-JSON function to make the more verbose
JSON structure expected by the validation function.
|
import Bio.Phylo
from io import StringIO
from pathlib import Path
import pytest
import sys
# we assume (and assert) that this script is running from the tests/ directory
sys.path.append(str(Path(__file__).parent.parent.parent))
from augur.export_v2 import convert_tree_to_json_structure
from augur.validate import ValidateError
from augur.validate_export import ensure_no_duplicate_names
class TestValidateExport():
def test_export_without_duplicate_names(self):
# Create a tree with unique tip names.
tree = Bio.Phylo.read(StringIO("root(A, internal(B, C))"), "newick")
metadata = {"A": {}, "B": {}, "C": {}, "root": {}, "internal": {}}
root = convert_tree_to_json_structure(tree.root, metadata)
ensure_no_duplicate_names(root, ValidateError)
def test_export_with_duplicate_names(self):
# Create a tree with duplicate tip names.
tree = Bio.Phylo.read(StringIO("root(A, internal(B, B))"), "newick")
metadata = {"A": {}, "B": {}, "root": {}, "internal": {}}
root = convert_tree_to_json_structure(tree.root, metadata)
with pytest.raises(ValidateError):
ensure_no_duplicate_names(root, ValidateError)
|
<commit_before><commit_msg>Add unit tests for ensure_no_duplicate_names
Adds minimal tests for validation of no duplicate names. Instead of
mocking up the tree JSON directly, these tests build a minimal BioPython
tree and use augur's own tree-to-JSON function to make the more verbose
JSON structure expected by the validation function.<commit_after>
|
import Bio.Phylo
from io import StringIO
from pathlib import Path
import pytest
import sys
# we assume (and assert) that this script is running from the tests/ directory
sys.path.append(str(Path(__file__).parent.parent.parent))
from augur.export_v2 import convert_tree_to_json_structure
from augur.validate import ValidateError
from augur.validate_export import ensure_no_duplicate_names
class TestValidateExport():
def test_export_without_duplicate_names(self):
# Create a tree with unique tip names.
tree = Bio.Phylo.read(StringIO("root(A, internal(B, C))"), "newick")
metadata = {"A": {}, "B": {}, "C": {}, "root": {}, "internal": {}}
root = convert_tree_to_json_structure(tree.root, metadata)
ensure_no_duplicate_names(root, ValidateError)
def test_export_with_duplicate_names(self):
# Create a tree with duplicate tip names.
tree = Bio.Phylo.read(StringIO("root(A, internal(B, B))"), "newick")
metadata = {"A": {}, "B": {}, "root": {}, "internal": {}}
root = convert_tree_to_json_structure(tree.root, metadata)
with pytest.raises(ValidateError):
ensure_no_duplicate_names(root, ValidateError)
|
Add unit tests for ensure_no_duplicate_names
Adds minimal tests for validation of no duplicate names. Instead of
mocking up the tree JSON directly, these tests build a minimal BioPython
tree and use augur's own tree-to-JSON function to make the more verbose
JSON structure expected by the validation function.import Bio.Phylo
from io import StringIO
from pathlib import Path
import pytest
import sys
# we assume (and assert) that this script is running from the tests/ directory
sys.path.append(str(Path(__file__).parent.parent.parent))
from augur.export_v2 import convert_tree_to_json_structure
from augur.validate import ValidateError
from augur.validate_export import ensure_no_duplicate_names
class TestValidateExport():
def test_export_without_duplicate_names(self):
# Create a tree with unique tip names.
tree = Bio.Phylo.read(StringIO("root(A, internal(B, C))"), "newick")
metadata = {"A": {}, "B": {}, "C": {}, "root": {}, "internal": {}}
root = convert_tree_to_json_structure(tree.root, metadata)
ensure_no_duplicate_names(root, ValidateError)
def test_export_with_duplicate_names(self):
# Create a tree with duplicate tip names.
tree = Bio.Phylo.read(StringIO("root(A, internal(B, B))"), "newick")
metadata = {"A": {}, "B": {}, "root": {}, "internal": {}}
root = convert_tree_to_json_structure(tree.root, metadata)
with pytest.raises(ValidateError):
ensure_no_duplicate_names(root, ValidateError)
|
<commit_before><commit_msg>Add unit tests for ensure_no_duplicate_names
Adds minimal tests for validation of no duplicate names. Instead of
mocking up the tree JSON directly, these tests build a minimal BioPython
tree and use augur's own tree-to-JSON function to make the more verbose
JSON structure expected by the validation function.<commit_after>import Bio.Phylo
from io import StringIO
from pathlib import Path
import pytest
import sys
# we assume (and assert) that this script is running from the tests/ directory
sys.path.append(str(Path(__file__).parent.parent.parent))
from augur.export_v2 import convert_tree_to_json_structure
from augur.validate import ValidateError
from augur.validate_export import ensure_no_duplicate_names
class TestValidateExport():
def test_export_without_duplicate_names(self):
# Create a tree with unique tip names.
tree = Bio.Phylo.read(StringIO("root(A, internal(B, C))"), "newick")
metadata = {"A": {}, "B": {}, "C": {}, "root": {}, "internal": {}}
root = convert_tree_to_json_structure(tree.root, metadata)
ensure_no_duplicate_names(root, ValidateError)
def test_export_with_duplicate_names(self):
# Create a tree with duplicate tip names.
tree = Bio.Phylo.read(StringIO("root(A, internal(B, B))"), "newick")
metadata = {"A": {}, "B": {}, "root": {}, "internal": {}}
root = convert_tree_to_json_structure(tree.root, metadata)
with pytest.raises(ValidateError):
ensure_no_duplicate_names(root, ValidateError)
|
|
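For context, the validation exercised above only needs to walk the exported tree and compare node names. The sketch below shows that idea on a plain dictionary tree in the name/children shape produced by convert_tree_to_json_structure; the real augur implementation may differ in details.
# Minimal duplicate-name walk over an exported tree JSON node (illustrative only).
def ensure_no_duplicate_names_sketch(root, ExceptionClass):
    seen = set()
    stack = [root]
    while stack:
        node = stack.pop()
        name = node["name"]
        if name in seen:
            raise ExceptionClass("Duplicate name in tree: {}".format(name))
        seen.add(name)
        stack.extend(node.get("children", []))

tree = {"name": "root", "children": [
    {"name": "A"},
    {"name": "internal", "children": [{"name": "B"}, {"name": "B"}]},
]}
try:
    ensure_no_duplicate_names_sketch(tree, ValueError)
except ValueError as error:
    print(error)  # Duplicate name in tree: B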
dc7e5a15eaba0a64c59afcb1d885f32347d95ebb
|
bluebottle/notifications/tests/test_effects.py
|
bluebottle/notifications/tests/test_effects.py
|
from django.core import mail
from bluebottle.events.messages import EventClosedOwnerMessage
from bluebottle.events.tests.factories import EventFactory
from bluebottle.notifications.effects import NotificationEffect
from bluebottle.test.factory_models.accounts import BlueBottleUserFactory
from bluebottle.test.utils import BluebottleTestCase
class NotificationEffectsTestCase(BluebottleTestCase):
def test_notification_effect(self):
user = BlueBottleUserFactory.create(
email='faal@haas.nl'
)
event = EventFactory.create(
title='Bound to fail',
owner=user
)
subject = 'Your event "Bound to fail" has been closed'
effect = NotificationEffect(EventClosedOwnerMessage)(event)
self.assertEqual(unicode(effect), 'Message {} to faal@haas.nl'.format(subject))
effect.execute()
self.assertEqual(mail.outbox[0].subject, subject)
|
Add effect test for notifications
|
Add effect test for notifications
|
Python
|
bsd-3-clause
|
onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle
|
Add effect test for notifications
|
from django.core import mail
from bluebottle.events.messages import EventClosedOwnerMessage
from bluebottle.events.tests.factories import EventFactory
from bluebottle.notifications.effects import NotificationEffect
from bluebottle.test.factory_models.accounts import BlueBottleUserFactory
from bluebottle.test.utils import BluebottleTestCase
class NotificationEffectsTestCase(BluebottleTestCase):
def test_notification_effect(self):
user = BlueBottleUserFactory.create(
email='faal@haas.nl'
)
event = EventFactory.create(
title='Bound to fail',
owner=user
)
subject = 'Your event "Bound to fail" has been closed'
effect = NotificationEffect(EventClosedOwnerMessage)(event)
self.assertEqual(unicode(effect), 'Message {} to faal@haas.nl'.format(subject))
effect.execute()
self.assertEqual(mail.outbox[0].subject, subject)
|
<commit_before><commit_msg>Add effect test for notifications<commit_after>
|
from django.core import mail
from bluebottle.events.messages import EventClosedOwnerMessage
from bluebottle.events.tests.factories import EventFactory
from bluebottle.notifications.effects import NotificationEffect
from bluebottle.test.factory_models.accounts import BlueBottleUserFactory
from bluebottle.test.utils import BluebottleTestCase
class NotificationEffectsTestCase(BluebottleTestCase):
def test_notification_effect(self):
user = BlueBottleUserFactory.create(
email='faal@haas.nl'
)
event = EventFactory.create(
title='Bound to fail',
owner=user
)
subject = 'Your event "Bound to fail" has been closed'
effect = NotificationEffect(EventClosedOwnerMessage)(event)
self.assertEqual(unicode(effect), 'Message {} to faal@haas.nl'.format(subject))
effect.execute()
self.assertEqual(mail.outbox[0].subject, subject)
|
Add effect test for notificationsfrom django.core import mail
from bluebottle.events.messages import EventClosedOwnerMessage
from bluebottle.events.tests.factories import EventFactory
from bluebottle.notifications.effects import NotificationEffect
from bluebottle.test.factory_models.accounts import BlueBottleUserFactory
from bluebottle.test.utils import BluebottleTestCase
class NotificationEffectsTestCase(BluebottleTestCase):
def test_notification_effect(self):
user = BlueBottleUserFactory.create(
email='faal@haas.nl'
)
event = EventFactory.create(
title='Bound to fail',
owner=user
)
subject = 'Your event "Bound to fail" has been closed'
effect = NotificationEffect(EventClosedOwnerMessage)(event)
self.assertEqual(unicode(effect), 'Message {} to faal@haas.nl'.format(subject))
effect.execute()
self.assertEqual(mail.outbox[0].subject, subject)
|
<commit_before><commit_msg>Add effect test for notifications<commit_after>from django.core import mail
from bluebottle.events.messages import EventClosedOwnerMessage
from bluebottle.events.tests.factories import EventFactory
from bluebottle.notifications.effects import NotificationEffect
from bluebottle.test.factory_models.accounts import BlueBottleUserFactory
from bluebottle.test.utils import BluebottleTestCase
class NotificationEffectsTestCase(BluebottleTestCase):
def test_notification_effect(self):
user = BlueBottleUserFactory.create(
email='faal@haas.nl'
)
event = EventFactory.create(
title='Bound to fail',
owner=user
)
subject = 'Your event "Bound to fail" has been closed'
effect = NotificationEffect(EventClosedOwnerMessage)(event)
self.assertEqual(unicode(effect), 'Message {} to faal@haas.nl'.format(subject))
effect.execute()
self.assertEqual(mail.outbox[0].subject, subject)
|
|
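The test relies on two behaviours of the effect object: it renders as a human-readable message line and it sends the mail when executed. The framework-free sketch below mimics only those two behaviours with made-up classes and a list standing in for the mail outbox; it is not the bluebottle API.
class ClosedMessageSketch:
    @staticmethod
    def subject(obj):
        return 'Your event "{}" has been closed'.format(obj['title'])

def notification_effect_sketch(message_class):
    def bind(obj):
        class Effect:
            def __str__(self):
                return 'Message {} to {}'.format(message_class.subject(obj), obj['owner'])

            def execute(self):
                outbox.append(message_class.subject(obj))
        return Effect()
    return bind

outbox = []
event = {'title': 'Bound to fail', 'owner': 'faal@haas.nl'}
effect = notification_effect_sketch(ClosedMessageSketch)(event)
assert str(effect) == 'Message Your event "Bound to fail" has been closed to faal@haas.nl'
effect.execute()
assert outbox == ['Your event "Bound to fail" has been closed']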
c9b46dc1421ff56c45aab25cb849957a5c62ec9c
|
misc/cpp-support/iwyu-output-paths-normalizer.py
|
misc/cpp-support/iwyu-output-paths-normalizer.py
|
#!/usr/bin/python3
import fileinput
import re
import os.path
envisionRootDir = "/fast/Envision"
currentFile = "" # This is the file that needs its includes adjusted
currentFileRegex = re.compile(r'^(/.*) should (?:add|remove) these lines:$')
currentDir = ""
includeRegex = re.compile(r'^#include "(.+)".*$') # We purposefully do not touch lines that indicate removals: ^- #include.*$
endOfSourceFile = re.compile(r'^---$')
# Computes an absolute path for an include
# Note that IWYU suggests includes with broken paths so we need to search for them
def findFullPath(include):
parentDir = currentDir
while not os.path.isfile(parentDir + '/' + include):
parentDir = os.path.dirname(parentDir)
assert parentDir
return parentDir + '/' + include
# Makes the include path as relative as possible
def fixPath(include):
assert currentFile
fullPath = findFullPath(include)
# We get the common prefix and take its directory to avoid things like "Envision/Co"
commonPrefix = os.path.dirname(os.path.commonprefix([currentDir, fullPath]))
if commonPrefix == envisionRootDir:
return fullPath[len(commonPrefix)+1:] # +1 to include the /
else:
return os.path.relpath(fullPath, currentDir)
for line in fileinput.input():
line = line.strip()
if endOfSourceFile.match(line):
currentFile = ""
print(line)
continue
match = currentFileRegex.match(line)
if match:
currentFile = match.group(1)
assert os.path.isfile(currentFile)
currentDir = os.path.dirname(currentFile)
print(line)
else:
match = includeRegex.match(line)
if match:
print('#include "'+fixPath(match.group(1)) + '"')
else:
print(line)
|
Add a python script that normalizes paths in the output of the include-what-you-use tool
|
Add a python script that normalizes paths in the output of the include-what-you-use tool
|
Python
|
bsd-3-clause
|
mgalbier/Envision,dimitar-asenov/Envision,Vaishal-shah/Envision,lukedirtwalker/Envision,Vaishal-shah/Envision,mgalbier/Envision,lukedirtwalker/Envision,mgalbier/Envision,dimitar-asenov/Envision,dimitar-asenov/Envision,Vaishal-shah/Envision,mgalbier/Envision,Vaishal-shah/Envision,lukedirtwalker/Envision,dimitar-asenov/Envision,lukedirtwalker/Envision,mgalbier/Envision,dimitar-asenov/Envision,Vaishal-shah/Envision,dimitar-asenov/Envision,lukedirtwalker/Envision,Vaishal-shah/Envision,mgalbier/Envision,lukedirtwalker/Envision
|
Add a python script that normalizes paths in the output of the include-what-you-use tool
|
#!/usr/bin/python3
import fileinput
import re
import os.path
envisionRootDir = "/fast/Envision"
currentFile = "" # This is the file that needs its includes adjusted
currentFileRegex = re.compile(r'^(/.*) should (?:add|remove) these lines:$')
currentDir = ""
includeRegex = re.compile(r'^#include "(.+)".*$') # We purposefully do not touch lines that indicate removals: ^- #include.*$
endOfSourceFile = re.compile(r'^---$')
# Computes an absolute path for an include
# Note that IWYU suggests includes with broken paths so we need to search for them
def findFullPath(include):
parentDir = currentDir
while not os.path.isfile(parentDir + '/' + include):
parentDir = os.path.dirname(parentDir)
assert parentDir
return parentDir + '/' + include
# Makes the include path as relative as possible
def fixPath(include):
assert currentFile
fullPath = findFullPath(include)
# We get the common prefix and take its directory to avoid things like "Envision/Co"
commonPrefix = os.path.dirname(os.path.commonprefix([currentDir, fullPath]))
if commonPrefix == envisionRootDir:
return fullPath[len(commonPrefix)+1:] # +1 to include the /
else:
return os.path.relpath(fullPath, currentDir)
for line in fileinput.input():
line = line.strip()
if endOfSourceFile.match(line):
currentFile = ""
print(line)
continue
match = currentFileRegex.match(line)
if match:
currentFile = match.group(1)
assert os.path.isfile(currentFile)
currentDir = os.path.dirname(currentFile)
print(line)
else:
match = includeRegex.match(line)
if match:
print('#include "'+fixPath(match.group(1)) + '"')
else:
print(line)
|
<commit_before><commit_msg>Add a python script that normalizes paths in the output of the include-what-you-use tool<commit_after>
|
#!/usr/bin/python3
import fileinput
import re
import os.path
envisionRootDir = "/fast/Envision"
currentFile = "" # This is the file that needs its includes adjusted
currentFileRegex = re.compile(r'^(/.*) should (?:add|remove) these lines:$')
currentDir = ""
includeRegex = re.compile(r'^#include "(.+)".*$') # We purposefully do not touch lines that indicate removals: ^- #include.*$
endOfSourceFile = re.compile(r'^---$')
# Computes an absolute path for an include
# Note that IWYU suggests includes with broken paths so we need to search for them
def findFullPath(include):
parentDir = currentDir
while not os.path.isfile(parentDir + '/' + include):
parentDir = os.path.dirname(parentDir)
assert parentDir
return parentDir + '/' + include
# Makes the include path as relative as possible
def fixPath(include):
assert currentFile
fullPath = findFullPath(include)
# We get the common prefix and take its directory to avoid things like "Envision/Co"
commonPrefix = os.path.dirname(os.path.commonprefix([currentDir, fullPath]))
if commonPrefix == envisionRootDir:
return fullPath[len(commonPrefix)+1:] # +1 to include the /
else:
return os.path.relpath(fullPath, currentDir)
for line in fileinput.input():
line = line.strip()
if endOfSourceFile.match(line):
currentFile = ""
print(line)
continue
match = currentFileRegex.match(line)
if match:
currentFile = match.group(1)
assert os.path.isfile(currentFile)
currentDir = os.path.dirname(currentFile)
print(line)
else:
match = includeRegex.match(line)
if match:
print('#include "'+fixPath(match.group(1)) + '"')
else:
print(line)
|
Add a python script that normalizes paths in the output of the include-what-you-use tool#!/usr/bin/python3
import fileinput
import re
import os.path
envisionRootDir = "/fast/Envision"
currentFile = "" # This is the file that needs its includes adjusted
currentFileRegex = re.compile(r'^(/.*) should (?:add|remove) these lines:$')
currentDir = ""
includeRegex = re.compile(r'^#include "(.+)".*$') # We purposefully do not touch lines that indicate removals: ^- #include.*$
endOfSourceFile = re.compile(r'^---$')
# Computes an absolute path for an include
# Note that IWYU suggests includes with broken paths so we need to search for them
def findFullPath(include):
parentDir = currentDir
while not os.path.isfile(parentDir + '/' + include):
parentDir = os.path.dirname(parentDir)
assert parentDir
return parentDir + '/' + include
# Makes the include path as relative as possible
def fixPath(include):
assert currentFile
fullPath = findFullPath(include)
# We get the common prefix and take its directory to avoid things like "Envision/Co"
commonPrefix = os.path.dirname(os.path.commonprefix([currentDir, fullPath]))
if commonPrefix == envisionRootDir:
return fullPath[len(commonPrefix)+1:] # +1 to include the /
else:
return os.path.relpath(fullPath, currentDir)
for line in fileinput.input():
line = line.strip()
if endOfSourceFile.match(line):
currentFile = ""
print(line)
continue
match = currentFileRegex.match(line)
if match:
currentFile = match.group(1)
assert os.path.isfile(currentFile)
currentDir = os.path.dirname(currentFile)
print(line)
else:
match = includeRegex.match(line)
if match:
print('#include "'+fixPath(match.group(1)) + '"')
else:
print(line)
|
<commit_before><commit_msg>Add a python script that normalizes paths in the output of the include-what-you-use tool<commit_after>#!/usr/bin/python3
import fileinput
import re
import os.path
envisionRootDir = "/fast/Envision"
currentFile = "" # This is the file that needs its includes adjusted
currentFileRegex = re.compile(r'^(/.*) should (?:add|remove) these lines:$')
currentDir = ""
includeRegex = re.compile(r'^#include "(.+)".*$') # We purposefully do not touch lines that indicate removals: ^- #include.*$
endOfSourceFile = re.compile(r'^---$')
# Computes an absolute path for an include
# Note that IWYU suggests includes with broken paths so we need to search for them
def findFullPath(include):
parentDir = currentDir
while not os.path.isfile(parentDir + '/' + include):
parentDir = os.path.dirname(parentDir)
assert parentDir
return parentDir + '/' + include
# Makes the include path as relative as possible
def fixPath(include):
assert currentFile
fullPath = findFullPath(include)
# We get the common prefix and take its directory to avoid things like "Envision/Co"
commonPrefix = os.path.dirname(os.path.commonprefix([currentDir, fullPath]))
if commonPrefix == envisionRootDir:
return fullPath[len(commonPrefix)+1:] # +1 to include the /
else:
return os.path.relpath(fullPath, currentDir)
for line in fileinput.input():
line = line.strip()
if endOfSourceFile.match(line):
currentFile = ""
print(line)
continue
match = currentFileRegex.match(line)
if match:
currentFile = match.group(1)
assert os.path.isfile(currentFile)
currentDir = os.path.dirname(currentFile)
print(line)
else:
match = includeRegex.match(line)
if match:
print('#include "'+fixPath(match.group(1)) + '"')
else:
print(line)
|
|
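A small, self-contained demonstration of the path-relativisation rule the script applies: an include that resolves inside the project root becomes root-relative, everything else becomes relative to the including file. The paths below are made up for illustration.
import os.path

envision_root = "/fast/Envision"

def fix_path_sketch(full_path, current_dir):
    common = os.path.dirname(os.path.commonprefix([current_dir, full_path]))
    if common == envision_root:
        return full_path[len(common) + 1:]
    return os.path.relpath(full_path, current_dir)

current_dir = "/fast/Envision/OOModel/src"
print(fix_path_sketch("/fast/Envision/Core/src/Core.h", current_dir))
# -> Core/src/Core.h (root-relative, because the common prefix is the project root)
print(fix_path_sketch("/fast/Envision/OOModel/src/elements/Expression.h", current_dir))
# -> elements/Expression.h (plain relative path within the same plugin)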
e107f3d6b895997c1ba2edb930dff30a93a89754
|
tools/templates/build_docs.py
|
tools/templates/build_docs.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Example api reference docs generation script.
This script generates API reference docs for the reference doc generator.
$> pip install -U git+https://github.com/tensorflow/docs
$> python build_docs.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import tensorflow_docs
from tensorflow_docs.api_generator import generate_lib
from tensorflow_docs.api_generator import public_api
PROJECT_SHORT_NAME = 'tfdocs'
PROJECT_FULL_NAME = 'TensorFlow Docs'
CODE_URL_PREFIX = 'https://github.com/tensorflow/docs/tree/master/tools/tensorflow_docs'
FLAGS = flags.FLAGS
flags.DEFINE_string(
'output_dir',
default='/tmp/generated_docs',
help='Where to write the resulting docs to.')
def main(argv):
if argv[1:]:
raise ValueError('Unrecognized arguments: {}'.format(argv[1:]))
doc_generator = generate_lib.DocGenerator(
root_title=PROJECT_FULL_NAME,
# Replace `tensorflow_docs` with your module, here.
py_modules=[(PROJECT_SHORT_NAME, tensorflow_docs)],
code_url_prefix=CODE_URL_PREFIX,
# This callback cleans up a lot of aliases caused by internal imports.
callbacks=[public_api.local_definitions_filter])
doc_generator.build(FLAGS.output_dir)
print('Output docs to: ', FLAGS.output_dir)
if __name__ == '__main__':
app.run(main)
|
Add example doc generator script.
|
Add example doc generator script.
PiperOrigin-RevId: 244258412
|
Python
|
apache-2.0
|
tensorflow/docs,tensorflow/docs,tensorflow/docs
|
Add example doc generator script.
PiperOrigin-RevId: 244258412
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Example api reference docs generation script.
This script generates API reference docs for the reference doc generator.
$> pip install -U git+https://github.com/tensorflow/docs
$> python build_docs.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import tensorflow_docs
from tensorflow_docs.api_generator import generate_lib
from tensorflow_docs.api_generator import public_api
PROJECT_SHORT_NAME = 'tfdocs'
PROJECT_FULL_NAME = 'TensorFlow Docs'
CODE_URL_PREFIX = 'https://github.com/tensorflow/docs/tree/master/tools/tensorflow_docs'
FLAGS = flags.FLAGS
flags.DEFINE_string(
'output_dir',
default='/tmp/generated_docs',
help='Where to write the resulting docs to.')
def main(argv):
if argv[1:]:
raise ValueError('Unrecognized arguments: {}'.format(argv[1:]))
doc_generator = generate_lib.DocGenerator(
root_title=PROJECT_FULL_NAME,
      # Replace `tensorflow_docs` with your module here.
py_modules=[(PROJECT_SHORT_NAME, tensorflow_docs)],
code_url_prefix=CODE_URL_PREFIX,
# This callback cleans up a lot of aliases caused by internal imports.
callbacks=[public_api.local_definitions_filter])
doc_generator.build(FLAGS.output_dir)
print('Output docs to: ', FLAGS.output_dir)
if __name__ == '__main__':
app.run(main)
|
<commit_before><commit_msg>Add example doc generator script.
PiperOrigin-RevId: 244258412<commit_after>
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Example api reference docs generation script.
This script generates API reference docs for the reference doc generator.
$> pip install -U git+https://github.com/tensorflow/docs
$> python build_docs.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import tensorflow_docs
from tensorflow_docs.api_generator import generate_lib
from tensorflow_docs.api_generator import public_api
PROJECT_SHORT_NAME = 'tfdocs'
PROJECT_FULL_NAME = 'TensorFlow Docs'
CODE_URL_PREFIX = 'https://github.com/tensorflow/docs/tree/master/tools/tensorflow_docs'
FLAGS = flags.FLAGS
flags.DEFINE_string(
'output_dir',
default='/tmp/generated_docs',
help='Where to write the resulting docs to.')
def main(argv):
if argv[1:]:
raise ValueError('Unrecognized arguments: {}'.format(argv[1:]))
doc_generator = generate_lib.DocGenerator(
root_title=PROJECT_FULL_NAME,
      # Replace `tensorflow_docs` with your module here.
py_modules=[(PROJECT_SHORT_NAME, tensorflow_docs)],
code_url_prefix=CODE_URL_PREFIX,
# This callback cleans up a lot of aliases caused by internal imports.
callbacks=[public_api.local_definitions_filter])
doc_generator.build(FLAGS.output_dir)
print('Output docs to: ', FLAGS.output_dir)
if __name__ == '__main__':
app.run(main)
|
Add example doc generator script.
PiperOrigin-RevId: 244258412# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Example api reference docs generation script.
This script generates API reference docs for the reference doc generator.
$> pip install -U git+https://github.com/tensorflow/docs
$> python build_docs.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import tensorflow_docs
from tensorflow_docs.api_generator import generate_lib
from tensorflow_docs.api_generator import public_api
PROJECT_SHORT_NAME = 'tfdocs'
PROJECT_FULL_NAME = 'TensorFlow Docs'
CODE_URL_PREFIX = 'https://github.com/tensorflow/docs/tree/master/tools/tensorflow_docs'
FLAGS = flags.FLAGS
flags.DEFINE_string(
'output_dir',
default='/tmp/generated_docs',
help='Where to write the resulting docs to.')
def main(argv):
if argv[1:]:
raise ValueError('Unrecognized arguments: {}'.format(argv[1:]))
doc_generator = generate_lib.DocGenerator(
root_title=PROJECT_FULL_NAME,
      # Replace `tensorflow_docs` with your module here.
py_modules=[(PROJECT_SHORT_NAME, tensorflow_docs)],
code_url_prefix=CODE_URL_PREFIX,
# This callback cleans up a lot of aliases caused by internal imports.
callbacks=[public_api.local_definitions_filter])
doc_generator.build(FLAGS.output_dir)
print('Output docs to: ', FLAGS.output_dir)
if __name__ == '__main__':
app.run(main)
|
<commit_before><commit_msg>Add example doc generator script.
PiperOrigin-RevId: 244258412<commit_after># Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Example api reference docs generation script.
This script generates API reference docs for the reference doc generator.
$> pip install -U git+https://github.com/tensorflow/docs
$> python build_docs.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import tensorflow_docs
from tensorflow_docs.api_generator import generate_lib
from tensorflow_docs.api_generator import public_api
PROJECT_SHORT_NAME = 'tfdocs'
PROJECT_FULL_NAME = 'TensorFlow Docs'
CODE_URL_PREFIX = 'https://github.com/tensorflow/docs/tree/master/tools/tensorflow_docs'
FLAGS = flags.FLAGS
flags.DEFINE_string(
'output_dir',
default='/tmp/generated_docs',
help='Where to write the resulting docs to.')
def main(argv):
if argv[1:]:
raise ValueError('Unrecognized arguments: {}'.format(argv[1:]))
doc_generator = generate_lib.DocGenerator(
root_title=PROJECT_FULL_NAME,
      # Replace `tensorflow_docs` with your module here.
py_modules=[(PROJECT_SHORT_NAME, tensorflow_docs)],
code_url_prefix=CODE_URL_PREFIX,
# This callback cleans up a lot of aliases caused by internal imports.
callbacks=[public_api.local_definitions_filter])
doc_generator.build(FLAGS.output_dir)
print('Output docs to: ', FLAGS.output_dir)
if __name__ == '__main__':
app.run(main)
|
|
1d7f8caf0dd5aa28d626d5ae6b45f8d50566f39b
|
moksha/wsgiapp.py
|
moksha/wsgiapp.py
|
# This file is part of Moksha.
#
# Moksha is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Moksha is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Moksha. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2008, Red Hat, Inc.
# Authors: Luke Macken <lmacken@redhat.com>
from pylons.wsgiapp import PylonsApp
class MokshaApp(PylonsApp):
""" Moksha WSGI Application.
This class handles resolving and dispatching to moksha applications.
It is instantiated and utilized by the
:class:`moksha.middleware.MokshaMiddleware`.
"""
def resolve(self, environ, start_response):
""" Uses dispatching information found in
``environ['wsgiorg.routing_args']`` to retrieve the application
name and return the controller instance from the appropriate
moksha application.
"""
# Update the Routes config object in case we're using Routes
# (Do we even need/want this routes configuration?)
#config = request_config()
#config.redirect = self.redirect_to
# http://www.wsgi.org/wsgi/Specifications/routing_args
match = environ['wsgiorg.routing_args'][1]
#environ['pylons.routes_dict'] = match
app = match['url'].split('/')[1]
return environ['moksha.plugins'].get(app)
|
Add the Moksha WSGI Application.
|
Add the Moksha WSGI Application.
This class handles resolving and dispatching to moksha applications.
It is instantiated and utilized by the MokshaMiddleware.
|
Python
|
apache-2.0
|
mokshaproject/moksha,lmacken/moksha,lmacken/moksha,pombredanne/moksha,ralphbean/moksha,pombredanne/moksha,mokshaproject/moksha,mokshaproject/moksha,lmacken/moksha,mokshaproject/moksha,pombredanne/moksha,pombredanne/moksha,ralphbean/moksha,ralphbean/moksha
|
Add the Moksha WSGI Application.
This class handles resolving and dispatching to moksha applications.
It is instantiated and utilized by the MokshaMiddleware.
|
# This file is part of Moksha.
#
# Moksha is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Moksha is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Moksha. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2008, Red Hat, Inc.
# Authors: Luke Macken <lmacken@redhat.com>
from pylons.wsgiapp import PylonsApp
class MokshaApp(PylonsApp):
""" Moksha WSGI Application.
This class handles resolving and dispatching to moksha applications.
It is instantiated and utilized by the
:class:`moksha.middleware.MokshaMiddleware`.
"""
def resolve(self, environ, start_response):
""" Uses dispatching information found in
``environ['wsgiorg.routing_args']`` to retrieve the application
name and return the controller instance from the appropriate
moksha application.
"""
# Update the Routes config object in case we're using Routes
# (Do we even need/want this routes configuration?)
#config = request_config()
#config.redirect = self.redirect_to
# http://www.wsgi.org/wsgi/Specifications/routing_args
match = environ['wsgiorg.routing_args'][1]
#environ['pylons.routes_dict'] = match
app = match['url'].split('/')[1]
return environ['moksha.plugins'].get(app)
|
<commit_before><commit_msg>Add the Moksha WSGI Application.
This class handles resolving and dispatching to moksha applications.
It is instantiated and utilized by the MokshaMiddleware.<commit_after>
|
# This file is part of Moksha.
#
# Moksha is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Moksha is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Moksha. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2008, Red Hat, Inc.
# Authors: Luke Macken <lmacken@redhat.com>
from pylons.wsgiapp import PylonsApp
class MokshaApp(PylonsApp):
""" Moksha WSGI Application.
This class handles resolving and dispatching to moksha applications.
It is instantiated and utilized by the
:class:`moksha.middleware.MokshaMiddleware`.
"""
def resolve(self, environ, start_response):
""" Uses dispatching information found in
``environ['wsgiorg.routing_args']`` to retrieve the application
name and return the controller instance from the appropriate
moksha application.
"""
# Update the Routes config object in case we're using Routes
# (Do we even need/want this routes configuration?)
#config = request_config()
#config.redirect = self.redirect_to
# http://www.wsgi.org/wsgi/Specifications/routing_args
match = environ['wsgiorg.routing_args'][1]
#environ['pylons.routes_dict'] = match
app = match['url'].split('/')[1]
return environ['moksha.plugins'].get(app)
|
Add the Moksha WSGI Application.
This class handles resolving and dispatching to moksha applications.
It is instantiated and utilized by the MokshaMiddleware.# This file is part of Moksha.
#
# Moksha is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Moksha is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Moksha. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2008, Red Hat, Inc.
# Authors: Luke Macken <lmacken@redhat.com>
from pylons.wsgiapp import PylonsApp
class MokshaApp(PylonsApp):
""" Moksha WSGI Application.
This class handles resolving and dispatching to moksha applications.
It is instantiated and utilized by the
:class:`moksha.middleware.MokshaMiddleware`.
"""
def resolve(self, environ, start_response):
""" Uses dispatching information found in
``environ['wsgiorg.routing_args']`` to retrieve the application
name and return the controller instance from the appropriate
moksha application.
"""
# Update the Routes config object in case we're using Routes
# (Do we even need/want this routes configuration?)
#config = request_config()
#config.redirect = self.redirect_to
# http://www.wsgi.org/wsgi/Specifications/routing_args
match = environ['wsgiorg.routing_args'][1]
#environ['pylons.routes_dict'] = match
app = match['url'].split('/')[1]
return environ['moksha.plugins'].get(app)
|
<commit_before><commit_msg>Add the Moksha WSGI Application.
This class handles resolving and dispatching to moksha applications.
It is instantiated and utilized by the MokshaMiddleware.<commit_after># This file is part of Moksha.
#
# Moksha is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Moksha is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Moksha. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2008, Red Hat, Inc.
# Authors: Luke Macken <lmacken@redhat.com>
from pylons.wsgiapp import PylonsApp
class MokshaApp(PylonsApp):
""" Moksha WSGI Application.
This class handles resolving and dispatching to moksha applications.
It is instantiated and utilized by the
:class:`moksha.middleware.MokshaMiddleware`.
"""
def resolve(self, environ, start_response):
""" Uses dispatching information found in
``environ['wsgiorg.routing_args']`` to retrieve the application
name and return the controller instance from the appropriate
moksha application.
"""
# Update the Routes config object in case we're using Routes
# (Do we even need/want this routes configuration?)
#config = request_config()
#config.redirect = self.redirect_to
# http://www.wsgi.org/wsgi/Specifications/routing_args
match = environ['wsgiorg.routing_args'][1]
#environ['pylons.routes_dict'] = match
app = match['url'].split('/')[1]
return environ['moksha.plugins'].get(app)
|
|
7471efd41006cb9344da23d65c7d605835324d37
|
scenarios/update_replace_missed_cleanup_delete.py
|
scenarios/update_replace_missed_cleanup_delete.py
|
def check_resource_count(expected_count):
test.assertEqual(expected_count, len(reality.all_resources()))
def check_c_count(expected_count):
test.assertEqual(expected_count,
len(reality.resources_by_logical_name('C')))
example_template = Template({
'A': RsrcDef({'a': 'initial'}, []),
'B': RsrcDef({}, []),
'C': RsrcDef({'!a': GetAtt('A', 'a')}, ['B']),
'D': RsrcDef({'c': GetRes('C')}, []),
'E': RsrcDef({'ca': GetAtt('C', '!a')}, []),
})
engine.create_stack('foo', example_template)
engine.noop(5)
engine.call(verify, example_template)
example_template_shrunk = Template({
'A': RsrcDef({'a': 'updated'}, []),
'B': RsrcDef({}, []),
'C': RsrcDef({'!a': GetAtt('A', 'a')}, ['B']),
'D': RsrcDef({'c': GetRes('C')}, []),
'E': RsrcDef({'ca': GetAtt('C', '!a')}, []),
})
engine.update_stack('foo', example_template_shrunk)
engine.noop(7)
engine.delete_stack('foo')
engine.call(check_c_count, 2)
engine.noop(11)
engine.call(verify, Template({}))
|
Add a test for issuing a delete during an update with replacement
|
Add a test for issuing a delete during an update with replacement
|
Python
|
apache-2.0
|
zaneb/heat-convergence-prototype
|
Add a test for issuing a delete during an update with replacement
|
def check_resource_count(expected_count):
test.assertEqual(expected_count, len(reality.all_resources()))
def check_c_count(expected_count):
test.assertEqual(expected_count,
len(reality.resources_by_logical_name('C')))
example_template = Template({
'A': RsrcDef({'a': 'initial'}, []),
'B': RsrcDef({}, []),
'C': RsrcDef({'!a': GetAtt('A', 'a')}, ['B']),
'D': RsrcDef({'c': GetRes('C')}, []),
'E': RsrcDef({'ca': GetAtt('C', '!a')}, []),
})
engine.create_stack('foo', example_template)
engine.noop(5)
engine.call(verify, example_template)
example_template_shrunk = Template({
'A': RsrcDef({'a': 'updated'}, []),
'B': RsrcDef({}, []),
'C': RsrcDef({'!a': GetAtt('A', 'a')}, ['B']),
'D': RsrcDef({'c': GetRes('C')}, []),
'E': RsrcDef({'ca': GetAtt('C', '!a')}, []),
})
engine.update_stack('foo', example_template_shrunk)
engine.noop(7)
engine.delete_stack('foo')
engine.call(check_c_count, 2)
engine.noop(11)
engine.call(verify, Template({}))
|
<commit_before><commit_msg>Add a test for issuing a delete during an update with replacement<commit_after>
|
def check_resource_count(expected_count):
test.assertEqual(expected_count, len(reality.all_resources()))
def check_c_count(expected_count):
test.assertEqual(expected_count,
len(reality.resources_by_logical_name('C')))
example_template = Template({
'A': RsrcDef({'a': 'initial'}, []),
'B': RsrcDef({}, []),
'C': RsrcDef({'!a': GetAtt('A', 'a')}, ['B']),
'D': RsrcDef({'c': GetRes('C')}, []),
'E': RsrcDef({'ca': GetAtt('C', '!a')}, []),
})
engine.create_stack('foo', example_template)
engine.noop(5)
engine.call(verify, example_template)
example_template_shrunk = Template({
'A': RsrcDef({'a': 'updated'}, []),
'B': RsrcDef({}, []),
'C': RsrcDef({'!a': GetAtt('A', 'a')}, ['B']),
'D': RsrcDef({'c': GetRes('C')}, []),
'E': RsrcDef({'ca': GetAtt('C', '!a')}, []),
})
engine.update_stack('foo', example_template_shrunk)
engine.noop(7)
engine.delete_stack('foo')
engine.call(check_c_count, 2)
engine.noop(11)
engine.call(verify, Template({}))
|
Add a test for issuing a delete during an update with replacementdef check_resource_count(expected_count):
test.assertEqual(expected_count, len(reality.all_resources()))
def check_c_count(expected_count):
test.assertEqual(expected_count,
len(reality.resources_by_logical_name('C')))
example_template = Template({
'A': RsrcDef({'a': 'initial'}, []),
'B': RsrcDef({}, []),
'C': RsrcDef({'!a': GetAtt('A', 'a')}, ['B']),
'D': RsrcDef({'c': GetRes('C')}, []),
'E': RsrcDef({'ca': GetAtt('C', '!a')}, []),
})
engine.create_stack('foo', example_template)
engine.noop(5)
engine.call(verify, example_template)
example_template_shrunk = Template({
'A': RsrcDef({'a': 'updated'}, []),
'B': RsrcDef({}, []),
'C': RsrcDef({'!a': GetAtt('A', 'a')}, ['B']),
'D': RsrcDef({'c': GetRes('C')}, []),
'E': RsrcDef({'ca': GetAtt('C', '!a')}, []),
})
engine.update_stack('foo', example_template_shrunk)
engine.noop(7)
engine.delete_stack('foo')
engine.call(check_c_count, 2)
engine.noop(11)
engine.call(verify, Template({}))
|
<commit_before><commit_msg>Add a test for issuing a delete during an update with replacement<commit_after>def check_resource_count(expected_count):
test.assertEqual(expected_count, len(reality.all_resources()))
def check_c_count(expected_count):
test.assertEqual(expected_count,
len(reality.resources_by_logical_name('C')))
example_template = Template({
'A': RsrcDef({'a': 'initial'}, []),
'B': RsrcDef({}, []),
'C': RsrcDef({'!a': GetAtt('A', 'a')}, ['B']),
'D': RsrcDef({'c': GetRes('C')}, []),
'E': RsrcDef({'ca': GetAtt('C', '!a')}, []),
})
engine.create_stack('foo', example_template)
engine.noop(5)
engine.call(verify, example_template)
example_template_shrunk = Template({
'A': RsrcDef({'a': 'updated'}, []),
'B': RsrcDef({}, []),
'C': RsrcDef({'!a': GetAtt('A', 'a')}, ['B']),
'D': RsrcDef({'c': GetRes('C')}, []),
'E': RsrcDef({'ca': GetAtt('C', '!a')}, []),
})
engine.update_stack('foo', example_template_shrunk)
engine.noop(7)
engine.delete_stack('foo')
engine.call(check_c_count, 2)
engine.noop(11)
engine.call(verify, Template({}))
|
|
7ac7583d714b15bb3e24bba1c5972bd1d0c235f2
|
python/receive.py
|
python/receive.py
|
#!/usr/bin/env python
import pika
# Host on which RabbitMQ is running.
HOST = 'localhost'
# Name of the queue.
QUEUE = 'pages'
# Specifies what to do after consuming messages from RabbitMQ.
def handler(channel, method, properties, body):
print '-> Handled: [%s]' % body
# Getting the connection using pika.
# Creating the channel.
connection = pika.BlockingConnection(pika.ConnectionParameters(host=HOST))
channel = connection.channel()
print '* Handling messages...'
# The consumption is defined.
# Notice the 'handler' as first argument.
channel.basic_consume(handler, queue=QUEUE, no_ack=True)
# Starting the consumption.
channel.start_consuming()
|
Add consumer written in Python
|
Add consumer written in Python
|
Python
|
apache-2.0
|
jovannypcg/rabbitmq_usage,jovannypcg/rabbitmq_usage
|
Add consumer written in Python
|
#!/usr/bin/env python
import pika
# Host on which RabbitMQ is running.
HOST = 'localhost'
# Name of the queue.
QUEUE = 'pages'
# Specifies what to do after consuming messages from RabbitMQ.
def handler(channel, method, properties, body):
print '-> Handled: [%s]' % body
# Getting the connection using pika.
# Creating the channel.
connection = pika.BlockingConnection(pika.ConnectionParameters(host=HOST))
channel = connection.channel()
print '* Handling messages...'
# The consumption is defined.
# Notice the 'handler' as first argument.
channel.basic_consume(handler, queue=QUEUE, no_ack=True)
# Starting the consumption.
channel.start_consuming()
|
<commit_before><commit_msg>Add consumer written in Python<commit_after>
|
#!/usr/bin/env python
import pika
# Host on which RabbitMQ is running.
HOST = 'localhost'
# Name of the queue.
QUEUE = 'pages'
# Specifies what to do after consuming messages from RabbitMQ.
def handler(channel, method, properties, body):
print '-> Handled: [%s]' % body
# Getting the connection using pika.
# Creating the channel.
connection = pika.BlockingConnection(pika.ConnectionParameters(host=HOST))
channel = connection.channel()
print '* Handling messages...'
# The consumption is defined.
# Notice the 'handler' as first argument.
channel.basic_consume(handler, queue=QUEUE, no_ack=True)
# Starting the consumption.
channel.start_consuming()
|
Add consumer written in Python#!/usr/bin/env python
import pika
# Host on which RabbitMQ is running.
HOST = 'localhost'
# Name of the queue.
QUEUE = 'pages'
# Specifies what to do after consuming messages from RabbitMQ.
def handler(channel, method, properties, body):
print '-> Handled: [%s]' % body
# Getting the connection using pika.
# Creating the channel.
connection = pika.BlockingConnection(pika.ConnectionParameters(host=HOST))
channel = connection.channel()
print '* Handling messages...'
# The consumption is defined.
# Notice the 'handler' as first argument.
channel.basic_consume(handler, queue=QUEUE, no_ack=True)
# Starting the consumption.
channel.start_consuming()
|
<commit_before><commit_msg>Add consumer written in Python<commit_after>#!/usr/bin/env python
import pika
# Host on which RabbitMQ is running.
HOST = 'localhost'
# Name of the queue.
QUEUE = 'pages'
# Specifies what to do after consuming messages from RabbitMQ.
def handler(channel, method, properties, body):
print '-> Handled: [%s]' % body
# Getting the connection using pika.
# Creating the channel.
connection = pika.BlockingConnection(pika.ConnectionParameters(host=HOST))
channel = connection.channel()
print '* Handling messages...'
# The consumption is defined.
# Notice the 'handler' as first argument.
channel.basic_consume(handler, queue=QUEUE, no_ack=True)
# Starting the consumption.
channel.start_consuming()
|
|
8f8812d6f5c2932dadc9e425d65bdc1e4cdaef45
|
admin/metrics/migrations/0002_auto_20170130_1356.py
|
admin/metrics/migrations/0002_auto_20170130_1356.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-01-30 19:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('metrics', '0001_initial'),
]
operations = [
migrations.DeleteModel(
name='OSFWebsiteStatistics',
),
]
|
Remove OSFWebsiteStatistics model in favor of using Keen data
|
Remove OSFWebsiteStatistics model in favor of using Keen data
|
Python
|
apache-2.0
|
monikagrabowska/osf.io,acshi/osf.io,leb2dg/osf.io,caseyrollins/osf.io,acshi/osf.io,leb2dg/osf.io,cslzchen/osf.io,acshi/osf.io,hmoco/osf.io,adlius/osf.io,cwisecarver/osf.io,TomBaxter/osf.io,CenterForOpenScience/osf.io,cwisecarver/osf.io,Johnetordoff/osf.io,adlius/osf.io,felliott/osf.io,cslzchen/osf.io,sloria/osf.io,brianjgeiger/osf.io,mfraezz/osf.io,icereval/osf.io,felliott/osf.io,caneruguz/osf.io,baylee-d/osf.io,Nesiehr/osf.io,sloria/osf.io,CenterForOpenScience/osf.io,Nesiehr/osf.io,monikagrabowska/osf.io,Nesiehr/osf.io,binoculars/osf.io,Johnetordoff/osf.io,chrisseto/osf.io,hmoco/osf.io,Johnetordoff/osf.io,laurenrevere/osf.io,leb2dg/osf.io,acshi/osf.io,crcresearch/osf.io,adlius/osf.io,pattisdr/osf.io,cwisecarver/osf.io,chrisseto/osf.io,mattclark/osf.io,hmoco/osf.io,HalcyonChimera/osf.io,caseyrollins/osf.io,caneruguz/osf.io,monikagrabowska/osf.io,baylee-d/osf.io,mfraezz/osf.io,crcresearch/osf.io,Nesiehr/osf.io,felliott/osf.io,HalcyonChimera/osf.io,sloria/osf.io,adlius/osf.io,laurenrevere/osf.io,leb2dg/osf.io,caneruguz/osf.io,brianjgeiger/osf.io,mfraezz/osf.io,binoculars/osf.io,caneruguz/osf.io,saradbowman/osf.io,aaxelb/osf.io,pattisdr/osf.io,icereval/osf.io,chennan47/osf.io,brianjgeiger/osf.io,binoculars/osf.io,icereval/osf.io,aaxelb/osf.io,chrisseto/osf.io,TomBaxter/osf.io,brianjgeiger/osf.io,saradbowman/osf.io,mattclark/osf.io,erinspace/osf.io,CenterForOpenScience/osf.io,hmoco/osf.io,Johnetordoff/osf.io,chennan47/osf.io,aaxelb/osf.io,erinspace/osf.io,cwisecarver/osf.io,HalcyonChimera/osf.io,baylee-d/osf.io,chennan47/osf.io,CenterForOpenScience/osf.io,cslzchen/osf.io,mattclark/osf.io,mfraezz/osf.io,acshi/osf.io,laurenrevere/osf.io,TomBaxter/osf.io,chrisseto/osf.io,HalcyonChimera/osf.io,pattisdr/osf.io,cslzchen/osf.io,caseyrollins/osf.io,felliott/osf.io,monikagrabowska/osf.io,monikagrabowska/osf.io,erinspace/osf.io,crcresearch/osf.io,aaxelb/osf.io
|
Remove OSFWebsiteStatistics model in favor of using Keen data
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-01-30 19:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('metrics', '0001_initial'),
]
operations = [
migrations.DeleteModel(
name='OSFWebsiteStatistics',
),
]
|
<commit_before><commit_msg>Remove OSFWebsiteStatistics model in favor of using Keen data<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-01-30 19:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('metrics', '0001_initial'),
]
operations = [
migrations.DeleteModel(
name='OSFWebsiteStatistics',
),
]
|
Remove OSFWebsiteStatistics model in favor of using Keen data# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-01-30 19:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('metrics', '0001_initial'),
]
operations = [
migrations.DeleteModel(
name='OSFWebsiteStatistics',
),
]
|
<commit_before><commit_msg>Remove OSFWebsiteStatistics model in favor of using Keen data<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-01-30 19:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('metrics', '0001_initial'),
]
operations = [
migrations.DeleteModel(
name='OSFWebsiteStatistics',
),
]
|
|
92b28dd2e6699423707604146de4a29656b8a8ba
|
runtests.py
|
runtests.py
|
#!/usr/bin/env python
import sys
import unittest
from os.path import dirname, abspath
import tests
def runtests(*test_args):
suite = unittest.TestLoader().loadTestsFromModule(tests)
result = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(bool(result.failures))
if __name__ == '__main__':
runtests(*sys.argv[1:])
|
#!/usr/bin/env python
import sys
import unittest
from os.path import dirname, abspath
import tests
def runtests(*test_args):
suite = unittest.TestLoader().loadTestsFromModule(tests)
result = unittest.TextTestRunner(verbosity=2).run(suite)
if result.failures:
sys.exit(1)
elif result.errors:
sys.exit(2)
sys.exit(0)
if __name__ == '__main__':
runtests(*sys.argv[1:])
|
Return the proper exit codes in event of failure or error
|
Return the proper exit codes in event of failure or error
|
Python
|
mit
|
zhang625272514/peewee,new-xiaji/peewee,jarrahwu/peewee,Dipsomaniac/peewee,new-xiaji/peewee,wenxer/peewee,bopo/peewee,Sunzhifeng/peewee,stas/peewee,mackjoner/peewee,new-xiaji/peewee,ronyb29/peewee,py4a/peewee,giserh/peewee,Dipsomaniac/peewee,wenxer/peewee,jnovinger/peewee,coreos/peewee,Dipsomaniac/peewee,d1hotpep/peewee,coleifer/peewee,teserak/peewee,coleifer/peewee,fuzeman/peewee,jarrahwu/peewee,coreos/peewee,Sunzhifeng/peewee,d1hotpep/peewee,softside/peewee,jarrahwu/peewee,fuzeman/peewee,zhang625272514/peewee,bopo/peewee,Sunzhifeng/peewee,lez/peewee,zhang625272514/peewee,coreos/peewee,ghukill/peewee,giserh/peewee,bopo/peewee,coleifer/peewee,jnovinger/peewee
|
#!/usr/bin/env python
import sys
import unittest
from os.path import dirname, abspath
import tests
def runtests(*test_args):
suite = unittest.TestLoader().loadTestsFromModule(tests)
result = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(bool(result.failures))
if __name__ == '__main__':
runtests(*sys.argv[1:])
Return the proper exit codes in event of failure or error
|
#!/usr/bin/env python
import sys
import unittest
from os.path import dirname, abspath
import tests
def runtests(*test_args):
suite = unittest.TestLoader().loadTestsFromModule(tests)
result = unittest.TextTestRunner(verbosity=2).run(suite)
if result.failures:
sys.exit(1)
elif result.errors:
sys.exit(2)
sys.exit(0)
if __name__ == '__main__':
runtests(*sys.argv[1:])
|
<commit_before>#!/usr/bin/env python
import sys
import unittest
from os.path import dirname, abspath
import tests
def runtests(*test_args):
suite = unittest.TestLoader().loadTestsFromModule(tests)
result = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(bool(result.failures))
if __name__ == '__main__':
runtests(*sys.argv[1:])
<commit_msg>Return the proper exit codes in event of failure or error<commit_after>
|
#!/usr/bin/env python
import sys
import unittest
from os.path import dirname, abspath
import tests
def runtests(*test_args):
suite = unittest.TestLoader().loadTestsFromModule(tests)
result = unittest.TextTestRunner(verbosity=2).run(suite)
if result.failures:
sys.exit(1)
elif result.errors:
sys.exit(2)
sys.exit(0)
if __name__ == '__main__':
runtests(*sys.argv[1:])
|
#!/usr/bin/env python
import sys
import unittest
from os.path import dirname, abspath
import tests
def runtests(*test_args):
suite = unittest.TestLoader().loadTestsFromModule(tests)
result = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(bool(result.failures))
if __name__ == '__main__':
runtests(*sys.argv[1:])
Return the proper exit codes in event of failure or error#!/usr/bin/env python
import sys
import unittest
from os.path import dirname, abspath
import tests
def runtests(*test_args):
suite = unittest.TestLoader().loadTestsFromModule(tests)
result = unittest.TextTestRunner(verbosity=2).run(suite)
if result.failures:
sys.exit(1)
elif result.errors:
sys.exit(2)
sys.exit(0)
if __name__ == '__main__':
runtests(*sys.argv[1:])
|
<commit_before>#!/usr/bin/env python
import sys
import unittest
from os.path import dirname, abspath
import tests
def runtests(*test_args):
suite = unittest.TestLoader().loadTestsFromModule(tests)
result = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(bool(result.failures))
if __name__ == '__main__':
runtests(*sys.argv[1:])
<commit_msg>Return the proper exit codes in event of failure or error<commit_after>#!/usr/bin/env python
import sys
import unittest
from os.path import dirname, abspath
import tests
def runtests(*test_args):
suite = unittest.TestLoader().loadTestsFromModule(tests)
result = unittest.TextTestRunner(verbosity=2).run(suite)
if result.failures:
sys.exit(1)
elif result.errors:
sys.exit(2)
sys.exit(0)
if __name__ == '__main__':
runtests(*sys.argv[1:])
|
a45102e7a22c6fe34e0e4cca26602e90f99f8ead
|
gotify/migrations/0002_poly_meta.py
|
gotify/migrations/0002_poly_meta.py
|
# Generated by Django 2.0.6 on 2018-06-26 07:18
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('gotify', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='emailnotifier',
options={'base_manager_name': 'objects'},
),
migrations.AlterModelOptions(
name='notifier',
options={'base_manager_name': 'objects'},
),
]
|
Add migration for gotify required by upgrade
|
Add migration for gotify required by upgrade
|
Python
|
mit
|
akx/gentry,akx/gentry,akx/gentry,akx/gentry
|
Add migration for gotify required by upgrade
|
# Generated by Django 2.0.6 on 2018-06-26 07:18
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('gotify', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='emailnotifier',
options={'base_manager_name': 'objects'},
),
migrations.AlterModelOptions(
name='notifier',
options={'base_manager_name': 'objects'},
),
]
|
<commit_before><commit_msg>Add migration for gotify required by upgrade<commit_after>
|
# Generated by Django 2.0.6 on 2018-06-26 07:18
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('gotify', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='emailnotifier',
options={'base_manager_name': 'objects'},
),
migrations.AlterModelOptions(
name='notifier',
options={'base_manager_name': 'objects'},
),
]
|
Add migration for gotify required by upgrade# Generated by Django 2.0.6 on 2018-06-26 07:18
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('gotify', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='emailnotifier',
options={'base_manager_name': 'objects'},
),
migrations.AlterModelOptions(
name='notifier',
options={'base_manager_name': 'objects'},
),
]
|
<commit_before><commit_msg>Add migration for gotify required by upgrade<commit_after># Generated by Django 2.0.6 on 2018-06-26 07:18
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('gotify', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='emailnotifier',
options={'base_manager_name': 'objects'},
),
migrations.AlterModelOptions(
name='notifier',
options={'base_manager_name': 'objects'},
),
]
|
|
f3acaa357a9b7e7e151e6bd42e08acbd9d817f25
|
twitch_chat_reader.py
|
twitch_chat_reader.py
|
import socket
import sys
server = "irc.twitch.tv"
password = "oauth:8nq3rymmy4zakeut4p90hx9axnpywj"
nickname = "twitchlosescheckers"
channel = "#twitchlosescheckers"
try:
irc = socket.socket()
except socket.error as msg:
irc = None
try:
irc.connect((server, 6667))
print "connected to %s" %(server)
except socket.error as msg:
irc.close()
irc = None
if irc is None:
print "could not open socket"
sys.exit()
irc.send('PASS ' + password + '\n')
irc.send('NICK ' + nickname + '\n')
irc.send('JOIN ' + channel + '\n')
while True:
twitch_activity = irc.recv(4096)
print twitch_activity
if "PING" in twitch_activity:
irc.send("PONG tmi.twitch.tv");
    # If the channel has received a twitch chat message
if "PRIVMSG" in twitch_activity:
# extract the contents of the message
i = twitch_activity[1:].find(":")
message = twitch_activity[i+2:]
irc.close()
|
Connect to and read twitch chat
|
Connect to and read twitch chat
|
Python
|
mit
|
mpoegel/TwitchPlaysCheckers
|
Connect to and read twitch chat
|
import socket
import sys
server = "irc.twitch.tv"
password = "oauth:8nq3rymmy4zakeut4p90hx9axnpywj"
nickname = "twitchlosescheckers"
channel = "#twitchlosescheckers"
try:
irc = socket.socket()
except socket.error as msg:
irc = None
try:
irc.connect((server, 6667))
print "connected to %s" %(server)
except socket.error as msg:
irc.close()
irc = None
if irc is None:
print "could not open socket"
sys.exit()
irc.send('PASS ' + password + '\n')
irc.send('NICK ' + nickname + '\n')
irc.send('JOIN ' + channel + '\n')
while True:
twitch_activity = irc.recv(4096)
print twitch_activity
if "PING" in twitch_activity:
irc.send("PONG tmi.twitch.tv");
    # If the channel has received a twitch chat message
if "PRIVMSG" in twitch_activity:
# extract the contents of the message
i = twitch_activity[1:].find(":")
message = twitch_activity[i+2:]
irc.close()
|
<commit_before><commit_msg>Connect to and read twitch chat<commit_after>
|
import socket
import sys
server = "irc.twitch.tv"
password = "oauth:8nq3rymmy4zakeut4p90hx9axnpywj"
nickname = "twitchlosescheckers"
channel = "#twitchlosescheckers"
try:
irc = socket.socket()
except socket.error as msg:
irc = None
try:
irc.connect((server, 6667))
print "connected to %s" %(server)
except socket.error as msg:
irc.close()
irc = None
if irc is None:
print "could not open socket"
sys.exit()
irc.send('PASS ' + password + '\n')
irc.send('NICK ' + nickname + '\n')
irc.send('JOIN ' + channel + '\n')
while True:
twitch_activity = irc.recv(4096)
print twitch_activity
if "PING" in twitch_activity:
irc.send("PONG tmi.twitch.tv");
    # If the channel has received a twitch chat message
if "PRIVMSG" in twitch_activity:
# extract the contents of the message
i = twitch_activity[1:].find(":")
message = twitch_activity[i+2:]
irc.close()
|
Connect to and read twitch chatimport socket
import sys
server = "irc.twitch.tv"
password = "oauth:8nq3rymmy4zakeut4p90hx9axnpywj"
nickname = "twitchlosescheckers"
channel = "#twitchlosescheckers"
try:
irc = socket.socket()
except socket.error as msg:
irc = None
try:
irc.connect((server, 6667))
print "connected to %s" %(server)
except socket.error as msg:
irc.close()
irc = None
if irc is None:
print "could not open socket"
sys.exit()
irc.send('PASS ' + password + '\n')
irc.send('NICK ' + nickname + '\n')
irc.send('JOIN ' + channel + '\n')
while True:
twitch_activity = irc.recv(4096)
print twitch_activity
if "PING" in twitch_activity:
irc.send("PONG tmi.twitch.tv");
    # If the channel has received a twitch chat message
if "PRIVMSG" in twitch_activity:
# extract the contents of the message
i = twitch_activity[1:].find(":")
message = twitch_activity[i+2:]
irc.close()
|
<commit_before><commit_msg>Connect to and read twitch chat<commit_after>import socket
import sys
server = "irc.twitch.tv"
password = "oauth:8nq3rymmy4zakeut4p90hx9axnpywj"
nickname = "twitchlosescheckers"
channel = "#twitchlosescheckers"
try:
irc = socket.socket()
except socket.error as msg:
irc = None
try:
irc.connect((server, 6667))
print "connected to %s" %(server)
except socket.error as msg:
irc.close()
irc = None
if irc is None:
print "could not open socket"
sys.exit()
irc.send('PASS ' + password + '\n')
irc.send('NICK ' + nickname + '\n')
irc.send('JOIN ' + channel + '\n')
while True:
twitch_activity = irc.recv(4096)
print twitch_activity
if "PING" in twitch_activity:
irc.send("PONG tmi.twitch.tv");
    # If the channel has received a twitch chat message
if "PRIVMSG" in twitch_activity:
# extract the contents of the message
i = twitch_activity[1:].find(":")
message = twitch_activity[i+2:]
irc.close()
|
|
69a00f52db2f1c48839d8035871937c3bf938503
|
AutoCopyrightEventListener.py
|
AutoCopyrightEventListener.py
|
#
# Copyright (c) 2012 by Lifted Studios. All Rights Reserved.
#
import sublime
import sublime_plugin
class AutoCopyrightEventListener(sublime_plugin.EventListener):
'''
Listener for application events.
'''
def on_pre_save(self, view):
'''
Called just before the view is saved.
'''
view.run_command('update_copyright')
|
Add event listener to automatically update the copyright.
|
Add event listener to automatically update the copyright.
|
Python
|
mit
|
lifted-studios/AutoCopyright,lifted-studios/AutoCopyright
|
Add event listener to automatically update the copyright.
|
#
# Copyright (c) 2012 by Lifted Studios. All Rights Reserved.
#
import sublime
import sublime_plugin
class AutoCopyrightEventListener(sublime_plugin.EventListener):
'''
Listener for application events.
'''
def on_pre_save(self, view):
'''
Called just before the view is saved.
'''
view.run_command('update_copyright')
|
<commit_before><commit_msg>Add event listener to automatically update the copyright.<commit_after>
|
#
# Copyright (c) 2012 by Lifted Studios. All Rights Reserved.
#
import sublime
import sublime_plugin
class AutoCopyrightEventListener(sublime_plugin.EventListener):
'''
Listener for application events.
'''
def on_pre_save(self, view):
'''
Called just before the view is saved.
'''
view.run_command('update_copyright')
|
Add event listener to automatically update the copyright.#
# Copyright (c) 2012 by Lifted Studios. All Rights Reserved.
#
import sublime
import sublime_plugin
class AutoCopyrightEventListener(sublime_plugin.EventListener):
'''
Listener for application events.
'''
def on_pre_save(self, view):
'''
Called just before the view is saved.
'''
view.run_command('update_copyright')
|
<commit_before><commit_msg>Add event listener to automatically update the copyright.<commit_after>#
# Copyright (c) 2012 by Lifted Studios. All Rights Reserved.
#
import sublime
import sublime_plugin
class AutoCopyrightEventListener(sublime_plugin.EventListener):
'''
Listener for application events.
'''
def on_pre_save(self, view):
'''
Called just before the view is saved.
'''
view.run_command('update_copyright')
|
|
00376feb1140d75b47c226ec6752daccfa9a24e5
|
doc/pyplots/cobsr_overhead.py
|
doc/pyplots/cobsr_overhead.py
|
from matplotlib import pyplot as plt
import numpy as np
from cobs import cobs
from cobs import cobsr
def cobsr_overhead_calc(num_bytes):
return 257./256 - (255./256)**num_bytes
def cobsr_overhead_measure(num_bytes):
# TODO: review value
NUM_TESTS = 10000
overhead = 0
for _i in xrange(NUM_TESTS):
output = cobsr.encode(np.random.bytes(num_bytes))
overhead += (len(output) - num_bytes)
return overhead / float(NUM_TESTS)
def cobs_overhead_measure(num_bytes):
# TODO: review value
NUM_TESTS = 10000
overhead = 0
for _i in xrange(NUM_TESTS):
output = cobs.encode(np.random.bytes(num_bytes))
overhead += (len(output) - num_bytes)
return overhead / float(NUM_TESTS)
fig = plt.figure()
ax1 = fig.add_subplot(111)
# x-range for plot
num_bytes_list = np.arange(1, 30)
# Calculate values and plot
# Measured values for COBS
#cobs_measured_overhead = [ cobs_overhead_measure(num_bytes) for num_bytes in num_bytes_list ]
#ax1.plot(num_bytes_list, cobs_measured_overhead, 'g.')
# Measured values for COBS/R
cobsr_measured_overhead = [ cobsr_overhead_measure(num_bytes) for num_bytes in num_bytes_list ]
ax1.plot(num_bytes_list, cobsr_measured_overhead, 'r.')
# Calculated values for COBS/R
cobsr_calc_overhead = [ cobsr_overhead_calc(num_bytes) for num_bytes in num_bytes_list ]
ax1.plot(num_bytes_list, cobsr_calc_overhead, 'b.')
ax1.set_xlabel('message length (bytes)')
ax1.set_xlim(min(num_bytes_list), max(num_bytes_list))
# Make the y-axis label and tick labels match the line color.
ax1.set_ylabel('encoding overhead (bytes)')
if 0:
ax1.set_ylabel('encoding overhead (bytes)', color='b')
for tl in ax1.get_yticklabels():
tl.set_color('b')
plt.show()
|
Add program to graph theoretical vs experimental COBS/R overhead
|
Add program to graph theoretical vs experimental COBS/R overhead
I wrote this a long time ago, but didn't put it into version control
until now.
|
Python
|
mit
|
cmcqueen/cobs-python,cmcqueen/cobs-python
|
Add program to graph theoretical vs experimental COBS/R overhead
I wrote this a long time ago, but didn't put it into version control
until now.
|
from matplotlib import pyplot as plt
import numpy as np
from cobs import cobs
from cobs import cobsr
def cobsr_overhead_calc(num_bytes):
return 257./256 - (255./256)**num_bytes
def cobsr_overhead_measure(num_bytes):
# TODO: review value
NUM_TESTS = 10000
overhead = 0
for _i in xrange(NUM_TESTS):
output = cobsr.encode(np.random.bytes(num_bytes))
overhead += (len(output) - num_bytes)
return overhead / float(NUM_TESTS)
def cobs_overhead_measure(num_bytes):
# TODO: review value
NUM_TESTS = 10000
overhead = 0
for _i in xrange(NUM_TESTS):
output = cobs.encode(np.random.bytes(num_bytes))
overhead += (len(output) - num_bytes)
return overhead / float(NUM_TESTS)
fig = plt.figure()
ax1 = fig.add_subplot(111)
# x-range for plot
num_bytes_list = np.arange(1, 30)
# Calculate values and plot
# Measured values for COBS
#cobs_measured_overhead = [ cobs_overhead_measure(num_bytes) for num_bytes in num_bytes_list ]
#ax1.plot(num_bytes_list, cobs_measured_overhead, 'g.')
# Measured values for COBS/R
cobsr_measured_overhead = [ cobsr_overhead_measure(num_bytes) for num_bytes in num_bytes_list ]
ax1.plot(num_bytes_list, cobsr_measured_overhead, 'r.')
# Calculated values for COBS/R
cobsr_calc_overhead = [ cobsr_overhead_calc(num_bytes) for num_bytes in num_bytes_list ]
ax1.plot(num_bytes_list, cobsr_calc_overhead, 'b.')
ax1.set_xlabel('message length (bytes)')
ax1.set_xlim(min(num_bytes_list), max(num_bytes_list))
# Make the y-axis label and tick labels match the line color.
ax1.set_ylabel('encoding overhead (bytes)')
if 0:
ax1.set_ylabel('encoding overhead (bytes)', color='b')
for tl in ax1.get_yticklabels():
tl.set_color('b')
plt.show()
|
<commit_before><commit_msg>Add program to graph theoretical vs experimental COBS/R overhead
I wrote this a long time ago, but didn't put it into version control
until now.<commit_after>
|
from matplotlib import pyplot as plt
import numpy as np
from cobs import cobs
from cobs import cobsr
def cobsr_overhead_calc(num_bytes):
return 257./256 - (255./256)**num_bytes
def cobsr_overhead_measure(num_bytes):
# TODO: review value
NUM_TESTS = 10000
overhead = 0
for _i in xrange(NUM_TESTS):
output = cobsr.encode(np.random.bytes(num_bytes))
overhead += (len(output) - num_bytes)
return overhead / float(NUM_TESTS)
def cobs_overhead_measure(num_bytes):
# TODO: review value
NUM_TESTS = 10000
overhead = 0
for _i in xrange(NUM_TESTS):
output = cobs.encode(np.random.bytes(num_bytes))
overhead += (len(output) - num_bytes)
return overhead / float(NUM_TESTS)
fig = plt.figure()
ax1 = fig.add_subplot(111)
# x-range for plot
num_bytes_list = np.arange(1, 30)
# Calculate values and plot
# Measured values for COBS
#cobs_measured_overhead = [ cobs_overhead_measure(num_bytes) for num_bytes in num_bytes_list ]
#ax1.plot(num_bytes_list, cobs_measured_overhead, 'g.')
# Measured values for COBS/R
cobsr_measured_overhead = [ cobsr_overhead_measure(num_bytes) for num_bytes in num_bytes_list ]
ax1.plot(num_bytes_list, cobsr_measured_overhead, 'r.')
# Calculated values for COBS/R
cobsr_calc_overhead = [ cobsr_overhead_calc(num_bytes) for num_bytes in num_bytes_list ]
ax1.plot(num_bytes_list, cobsr_calc_overhead, 'b.')
ax1.set_xlabel('message length (bytes)')
ax1.set_xlim(min(num_bytes_list), max(num_bytes_list))
# Make the y-axis label and tick labels match the line color.
ax1.set_ylabel('encoding overhead (bytes)')
if 0:
ax1.set_ylabel('encoding overhead (bytes)', color='b')
for tl in ax1.get_yticklabels():
tl.set_color('b')
plt.show()
|
Add program to graph theoretical vs experimental COBS/R overhead
I wrote this a long time ago, but didn't put it into version control
until now.
from matplotlib import pyplot as plt
import numpy as np
from cobs import cobs
from cobs import cobsr
def cobsr_overhead_calc(num_bytes):
return 257./256 - (255./256)**num_bytes
def cobsr_overhead_measure(num_bytes):
# TODO: review value
NUM_TESTS = 10000
overhead = 0
for _i in xrange(NUM_TESTS):
output = cobsr.encode(np.random.bytes(num_bytes))
overhead += (len(output) - num_bytes)
return overhead / float(NUM_TESTS)
def cobs_overhead_measure(num_bytes):
# TODO: review value
NUM_TESTS = 10000
overhead = 0
for _i in xrange(NUM_TESTS):
output = cobs.encode(np.random.bytes(num_bytes))
overhead += (len(output) - num_bytes)
return overhead / float(NUM_TESTS)
fig = plt.figure()
ax1 = fig.add_subplot(111)
# x-range for plot
num_bytes_list = np.arange(1, 30)
# Calculate values and plot
# Measured values for COBS
#cobs_measured_overhead = [ cobs_overhead_measure(num_bytes) for num_bytes in num_bytes_list ]
#ax1.plot(num_bytes_list, cobs_measured_overhead, 'g.')
# Measured values for COBS/R
cobsr_measured_overhead = [ cobsr_overhead_measure(num_bytes) for num_bytes in num_bytes_list ]
ax1.plot(num_bytes_list, cobsr_measured_overhead, 'r.')
# Calculated values for COBS/R
cobsr_calc_overhead = [ cobsr_overhead_calc(num_bytes) for num_bytes in num_bytes_list ]
ax1.plot(num_bytes_list, cobsr_calc_overhead, 'b.')
ax1.set_xlabel('message length (bytes)')
ax1.set_xlim(min(num_bytes_list), max(num_bytes_list))
# Make the y-axis label and tick labels match the line color.
ax1.set_ylabel('encoding overhead (bytes)')
if 0:
ax1.set_ylabel('encoding overhead (bytes)', color='b')
for tl in ax1.get_yticklabels():
tl.set_color('b')
plt.show()
|
<commit_before><commit_msg>Add program to graph theoretical vs experimental COBS/R overhead
I wrote this a long time ago, but didn't put it into version control
until now.<commit_after>
from matplotlib import pyplot as plt
import numpy as np
from cobs import cobs
from cobs import cobsr
def cobsr_overhead_calc(num_bytes):
return 257./256 - (255./256)**num_bytes
def cobsr_overhead_measure(num_bytes):
# TODO: review value
NUM_TESTS = 10000
overhead = 0
for _i in xrange(NUM_TESTS):
output = cobsr.encode(np.random.bytes(num_bytes))
overhead += (len(output) - num_bytes)
return overhead / float(NUM_TESTS)
def cobs_overhead_measure(num_bytes):
# TODO: review value
NUM_TESTS = 10000
overhead = 0
for _i in xrange(NUM_TESTS):
output = cobs.encode(np.random.bytes(num_bytes))
overhead += (len(output) - num_bytes)
return overhead / float(NUM_TESTS)
fig = plt.figure()
ax1 = fig.add_subplot(111)
# x-range for plot
num_bytes_list = np.arange(1, 30)
# Calculate values and plot
# Measured values for COBS
#cobs_measured_overhead = [ cobs_overhead_measure(num_bytes) for num_bytes in num_bytes_list ]
#ax1.plot(num_bytes_list, cobs_measured_overhead, 'g.')
# Measured values for COBS/R
cobsr_measured_overhead = [ cobsr_overhead_measure(num_bytes) for num_bytes in num_bytes_list ]
ax1.plot(num_bytes_list, cobsr_measured_overhead, 'r.')
# Calculated values for COBS/R
cobsr_calc_overhead = [ cobsr_overhead_calc(num_bytes) for num_bytes in num_bytes_list ]
ax1.plot(num_bytes_list, cobsr_calc_overhead, 'b.')
ax1.set_xlabel('message length (bytes)')
ax1.set_xlim(min(num_bytes_list), max(num_bytes_list))
# Make the y-axis label and tick labels match the line color.
ax1.set_ylabel('encoding overhead (bytes)')
if 0:
ax1.set_ylabel('encoding overhead (bytes)', color='b')
for tl in ax1.get_yticklabels():
tl.set_color('b')
plt.show()
|
|
3ca17ed50d1124d6590d7f4c172eeefbfaeb49dc
|
strassengezwitscher/strassengezwitscher/tests.py
|
strassengezwitscher/strassengezwitscher/tests.py
|
# pylint: disable=invalid-name,too-many-public-methods
from django.core.urlresolvers import reverse
from django.test import TestCase
class StrassengezwitscherTests(TestCase):
def test_serves_angular_tag(self):
response = self.client.get(reverse('index'))
self.assertEqual(response.status_code, 200)
self.assertIn(b'<sg-app>', response.content)
self.assertIn(b'</sg-app>', response.content)
|
Test serving of frontend page.
|
Test serving of frontend page.
|
Python
|
mit
|
Strassengezwitscher/Strassengezwitscher,Strassengezwitscher/Strassengezwitscher,Strassengezwitscher/Strassengezwitscher,Strassengezwitscher/Strassengezwitscher,Strassengezwitscher/Strassengezwitscher
|
Test serving of frontend page.
|
# pylint: disable=invalid-name,too-many-public-methods
from django.core.urlresolvers import reverse
from django.test import TestCase
class StrassengezwitscherTests(TestCase):
def test_serves_angular_tag(self):
response = self.client.get(reverse('index'))
self.assertEqual(response.status_code, 200)
self.assertIn(b'<sg-app>', response.content)
self.assertIn(b'</sg-app>', response.content)
|
<commit_before><commit_msg>Test serving of frontend page.<commit_after>
|
# pylint: disable=invalid-name,too-many-public-methods
from django.core.urlresolvers import reverse
from django.test import TestCase
class StrassengezwitscherTests(TestCase):
def test_serves_angular_tag(self):
response = self.client.get(reverse('index'))
self.assertEqual(response.status_code, 200)
self.assertIn(b'<sg-app>', response.content)
self.assertIn(b'</sg-app>', response.content)
|
Test serving of frontend page.# pylint: disable=invalid-name,too-many-public-methods
from django.core.urlresolvers import reverse
from django.test import TestCase
class StrassengezwitscherTests(TestCase):
def test_serves_angular_tag(self):
response = self.client.get(reverse('index'))
self.assertEqual(response.status_code, 200)
self.assertIn(b'<sg-app>', response.content)
self.assertIn(b'</sg-app>', response.content)
|
<commit_before><commit_msg>Test serving of frontend page.<commit_after># pylint: disable=invalid-name,too-many-public-methods
from django.core.urlresolvers import reverse
from django.test import TestCase
class StrassengezwitscherTests(TestCase):
def test_serves_angular_tag(self):
response = self.client.get(reverse('index'))
self.assertEqual(response.status_code, 200)
self.assertIn(b'<sg-app>', response.content)
self.assertIn(b'</sg-app>', response.content)
|
|
3aa25f590fcd1a59a7e5f9edde51e19cd0818d95
|
app/data.py
|
app/data.py
|
import json
import os
from typing import Dict
from app.util import cached_function
class Projects():
def __init__(self) -> None:
self.data: Dict[str, Dict[str, Dict[str, str]]] = {}
@staticmethod
def load() -> 'Projects':
current_directory = os.path.dirname(os.path.realpath(__file__))
path = os.path.join(current_directory, 'data', 'projects.json')
with open(path, 'r') as handle:
project_data = handle.read()
projects = Projects()
projects.data = json.loads(project_data)
return projects
@cached_function
def get_projects() -> Projects:
loaded_projects = Projects.load()
return loaded_projects
|
Add ability to load projects
|
Add ability to load projects
|
Python
|
mit
|
albertyw/albertyw.com,albertyw/albertyw.com,albertyw/albertyw.com,albertyw/albertyw.com,albertyw/albertyw.com
|
Add ability to load projects
|
import json
import os
from typing import Dict
from app.util import cached_function
class Projects():
def __init__(self) -> None:
self.data: Dict[str, Dict[str, Dict[str, str]]] = {}
@staticmethod
def load() -> 'Projects':
current_directory = os.path.dirname(os.path.realpath(__file__))
path = os.path.join(current_directory, 'data', 'projects.json')
with open(path, 'r') as handle:
project_data = handle.read()
projects = Projects()
projects.data = json.loads(project_data)
return projects
@cached_function
def get_projects() -> Projects:
loaded_projects = Projects.load()
return loaded_projects
|
<commit_before><commit_msg>Add ability to load projects<commit_after>
|
import json
import os
from typing import Dict
from app.util import cached_function
class Projects():
def __init__(self) -> None:
self.data: Dict[str, Dict[str, Dict[str, str]]] = {}
@staticmethod
def load() -> 'Projects':
current_directory = os.path.dirname(os.path.realpath(__file__))
path = os.path.join(current_directory, 'data', 'projects.json')
with open(path, 'r') as handle:
project_data = handle.read()
projects = Projects()
projects.data = json.loads(project_data)
return projects
@cached_function
def get_projects() -> Projects:
loaded_projects = Projects.load()
return loaded_projects
|
Add ability to load projectsimport json
import os
from typing import Dict
from app.util import cached_function
class Projects():
def __init__(self) -> None:
self.data: Dict[str, Dict[str, Dict[str, str]]] = {}
@staticmethod
def load() -> 'Projects':
current_directory = os.path.dirname(os.path.realpath(__file__))
path = os.path.join(current_directory, 'data', 'projects.json')
with open(path, 'r') as handle:
project_data = handle.read()
projects = Projects()
projects.data = json.loads(project_data)
return projects
@cached_function
def get_projects() -> Projects:
loaded_projects = Projects.load()
return loaded_projects
|
<commit_before><commit_msg>Add ability to load projects<commit_after>import json
import os
from typing import Dict
from app.util import cached_function
class Projects():
def __init__(self) -> None:
self.data: Dict[str, Dict[str, Dict[str, str]]] = {}
@staticmethod
def load() -> 'Projects':
current_directory = os.path.dirname(os.path.realpath(__file__))
path = os.path.join(current_directory, 'data', 'projects.json')
with open(path, 'r') as handle:
project_data = handle.read()
projects = Projects()
projects.data = json.loads(project_data)
return projects
@cached_function
def get_projects() -> Projects:
loaded_projects = Projects.load()
return loaded_projects
|
|
411f6dfb62f5aa6c91d35aeb6acb0d7246961849
|
examples/petstore/psflask.py
|
examples/petstore/psflask.py
|
import pickle
from flask import Flask, abort
app = Flask(__name__)
import petstore_impl
import petstore_server
store = petstore_impl.PetStore()
server = petstore_server.PetStore_server(store)
@app.route("/" + petstore_server.service_name + "/<args>")
def run_service(args):
try:
command, args, kwargs = pickle.loads(args.decode("base64"))
except Exception:
abort(400)
try:
method = getattr(server, command)
except AttributeError:
abort(404)
try:
output = True, method(*args, **kwargs)
except Exception as exc:
output = False, exc
return pickle.dumps(output)
if __name__ == "__main__":
app.run(host="0.0.0.0", port=8080)
|
Add Flask equivalent for psserver and sample_rpc
|
Add Flask equivalent for psserver and sample_rpc
|
Python
|
apache-2.0
|
datawire/adaptive
|
Add Flask equivalent for psserver and sample_rpc
|
import pickle
from flask import Flask, abort
app = Flask(__name__)
import petstore_impl
import petstore_server
store = petstore_impl.PetStore()
server = petstore_server.PetStore_server(store)
@app.route("/" + petstore_server.service_name + "/<args>")
def run_service(args):
try:
command, args, kwargs = pickle.loads(args.decode("base64"))
except Exception:
abort(400)
try:
method = getattr(server, command)
except AttributeError:
abort(404)
try:
output = True, method(*args, **kwargs)
except Exception as exc:
output = False, exc
return pickle.dumps(output)
if __name__ == "__main__":
app.run(host="0.0.0.0", port=8080)
|
<commit_before><commit_msg>Add Flask equivalent for psserver and sample_rpc<commit_after>
|
import pickle
from flask import Flask, abort
app = Flask(__name__)
import petstore_impl
import petstore_server
store = petstore_impl.PetStore()
server = petstore_server.PetStore_server(store)
@app.route("/" + petstore_server.service_name + "/<args>")
def run_service(args):
try:
command, args, kwargs = pickle.loads(args.decode("base64"))
except Exception:
abort(400)
try:
method = getattr(server, command)
except AttributeError:
abort(404)
try:
output = True, method(*args, **kwargs)
except Exception as exc:
output = False, exc
return pickle.dumps(output)
if __name__ == "__main__":
app.run(host="0.0.0.0", port=8080)
|
Add Flask equivalent for psserver and sample_rpcimport pickle
from flask import Flask, abort
app = Flask(__name__)
import petstore_impl
import petstore_server
store = petstore_impl.PetStore()
server = petstore_server.PetStore_server(store)
@app.route("/" + petstore_server.service_name + "/<args>")
def run_service(args):
try:
command, args, kwargs = pickle.loads(args.decode("base64"))
except Exception:
abort(400)
try:
method = getattr(server, command)
except AttributeError:
abort(404)
try:
output = True, method(*args, **kwargs)
except Exception as exc:
output = False, exc
return pickle.dumps(output)
if __name__ == "__main__":
app.run(host="0.0.0.0", port=8080)
|
<commit_before><commit_msg>Add Flask equivalent for psserver and sample_rpc<commit_after>import pickle
from flask import Flask, abort
app = Flask(__name__)
import petstore_impl
import petstore_server
store = petstore_impl.PetStore()
server = petstore_server.PetStore_server(store)
@app.route("/" + petstore_server.service_name + "/<args>")
def run_service(args):
try:
command, args, kwargs = pickle.loads(args.decode("base64"))
except Exception:
abort(400)
try:
method = getattr(server, command)
except AttributeError:
abort(404)
try:
output = True, method(*args, **kwargs)
except Exception as exc:
output = False, exc
return pickle.dumps(output)
if __name__ == "__main__":
app.run(host="0.0.0.0", port=8080)
|
|
f2871ab4a98669b59e14f6748b47cce6327bf824
|
examples/recurrent-memory.py
|
examples/recurrent-memory.py
|
#!/usr/bin/env python
import climate
import logging
import matplotlib.pyplot as plt
import numpy.random as rng
import theanets
climate.enable_default_logging()
TIME = 10
BATCH_SIZE = 32
e = theanets.Experiment(
theanets.recurrent.Autoencoder,
layers=(3, 100, 3),
recurrent_error_start=TIME - 1,
batch_size=BATCH_SIZE)
def generate():
r = rng.randn(TIME, BATCH_SIZE, 3).astype('f')
r[-1] = r[0]
return [r]
batch = generate()
logging.info('data batches: %s', batch[0].shape)
e.train(generate)
target = batch[0][-1]
predict = e.network.predict(batch[0])[-1]
vm = max(abs(target).max(), abs(predict).max())
ax = plt.subplot(211)
ax.set_frame_on(False)
for loc, spine in ax.spines.items():
spine.set_color('none')
ax.imshow(target.T, cmap='gray', vmin=-vm, vmax=vm)
ax.set_xticks([])
ax.set_yticks([])
ax = plt.subplot(212)
ax.set_frame_on(False)
for loc, spine in ax.spines.items():
spine.set_color('none')
ax.imshow(predict.T, cmap='gray', vmin=-vm, vmax=vm)
ax.set_yticks([])
plt.show()
|
Add example for "memory" recurrent task.
|
Add example for "memory" recurrent task.
|
Python
|
mit
|
lmjohns3/theanets,devdoer/theanets,chrinide/theanets
|
Add example for "memory" recurrent task.
|
#!/usr/bin/env python
import climate
import logging
import matplotlib.pyplot as plt
import numpy.random as rng
import theanets
climate.enable_default_logging()
TIME = 10
BATCH_SIZE = 32
e = theanets.Experiment(
theanets.recurrent.Autoencoder,
layers=(3, 100, 3),
recurrent_error_start=TIME - 1,
batch_size=BATCH_SIZE)
def generate():
r = rng.randn(TIME, BATCH_SIZE, 3).astype('f')
r[-1] = r[0]
return [r]
batch = generate()
logging.info('data batches: %s', batch[0].shape)
e.train(generate)
target = batch[0][-1]
predict = e.network.predict(batch[0])[-1]
vm = max(abs(target).max(), abs(predict).max())
ax = plt.subplot(211)
ax.set_frame_on(False)
for loc, spine in ax.spines.items():
spine.set_color('none')
ax.imshow(target.T, cmap='gray', vmin=-vm, vmax=vm)
ax.set_xticks([])
ax.set_yticks([])
ax = plt.subplot(212)
ax.set_frame_on(False)
for loc, spine in ax.spines.items():
spine.set_color('none')
ax.imshow(predict.T, cmap='gray', vmin=-vm, vmax=vm)
ax.set_yticks([])
plt.show()
|
<commit_before><commit_msg>Add example for "memory" recurrent task.<commit_after>
|
#!/usr/bin/env python
import climate
import logging
import matplotlib.pyplot as plt
import numpy.random as rng
import theanets
climate.enable_default_logging()
TIME = 10
BATCH_SIZE = 32
e = theanets.Experiment(
theanets.recurrent.Autoencoder,
layers=(3, 100, 3),
recurrent_error_start=TIME - 1,
batch_size=BATCH_SIZE)
def generate():
r = rng.randn(TIME, BATCH_SIZE, 3).astype('f')
r[-1] = r[0]
return [r]
batch = generate()
logging.info('data batches: %s', batch[0].shape)
e.train(generate)
target = batch[0][-1]
predict = e.network.predict(batch[0])[-1]
vm = max(abs(target).max(), abs(predict).max())
ax = plt.subplot(211)
ax.set_frame_on(False)
for loc, spine in ax.spines.items():
spine.set_color('none')
ax.imshow(target.T, cmap='gray', vmin=-vm, vmax=vm)
ax.set_xticks([])
ax.set_yticks([])
ax = plt.subplot(212)
ax.set_frame_on(False)
for loc, spine in ax.spines.items():
spine.set_color('none')
ax.imshow(predict.T, cmap='gray', vmin=-vm, vmax=vm)
ax.set_yticks([])
plt.show()
|
Add example for "memory" recurrent task.#!/usr/bin/env python
import climate
import logging
import matplotlib.pyplot as plt
import numpy.random as rng
import theanets
climate.enable_default_logging()
TIME = 10
BATCH_SIZE = 32
e = theanets.Experiment(
theanets.recurrent.Autoencoder,
layers=(3, 100, 3),
recurrent_error_start=TIME - 1,
batch_size=BATCH_SIZE)
def generate():
r = rng.randn(TIME, BATCH_SIZE, 3).astype('f')
r[-1] = r[0]
return [r]
batch = generate()
logging.info('data batches: %s', batch[0].shape)
e.train(generate)
target = batch[0][-1]
predict = e.network.predict(batch[0])[-1]
vm = max(abs(target).max(), abs(predict).max())
ax = plt.subplot(211)
ax.set_frame_on(False)
for loc, spine in ax.spines.items():
spine.set_color('none')
ax.imshow(target.T, cmap='gray', vmin=-vm, vmax=vm)
ax.set_xticks([])
ax.set_yticks([])
ax = plt.subplot(212)
ax.set_frame_on(False)
for loc, spine in ax.spines.items():
spine.set_color('none')
ax.imshow(predict.T, cmap='gray', vmin=-vm, vmax=vm)
ax.set_yticks([])
plt.show()
|
<commit_before><commit_msg>Add example for "memory" recurrent task.<commit_after>#!/usr/bin/env python
import climate
import logging
import matplotlib.pyplot as plt
import numpy.random as rng
import theanets
climate.enable_default_logging()
TIME = 10
BATCH_SIZE = 32
e = theanets.Experiment(
theanets.recurrent.Autoencoder,
layers=(3, 100, 3),
recurrent_error_start=TIME - 1,
batch_size=BATCH_SIZE)
def generate():
r = rng.randn(TIME, BATCH_SIZE, 3).astype('f')
r[-1] = r[0]
return [r]
batch = generate()
logging.info('data batches: %s', batch[0].shape)
e.train(generate)
target = batch[0][-1]
predict = e.network.predict(batch[0])[-1]
vm = max(abs(target).max(), abs(predict).max())
ax = plt.subplot(211)
ax.set_frame_on(False)
for loc, spine in ax.spines.items():
spine.set_color('none')
ax.imshow(target.T, cmap='gray', vmin=-vm, vmax=vm)
ax.set_xticks([])
ax.set_yticks([])
ax = plt.subplot(212)
ax.set_frame_on(False)
for loc, spine in ax.spines.items():
spine.set_color('none')
ax.imshow(predict.T, cmap='gray', vmin=-vm, vmax=vm)
ax.set_yticks([])
plt.show()
|
|
a888a6456da32fc35b94015f0f9de8372ad960a7
|
examples/example_baselines.py
|
examples/example_baselines.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
from teamscale_client import TeamscaleClient
from teamscale_client.constants import AssessmentMetricColors
from teamscale_client.data import Baseline
TEAMSCALE_URL = "http://localhost:8080"
USERNAME = "admin"
PASSWORD = "admin"
PROJECT_NAME = "test"
if __name__ == '__main__':
client = TeamscaleClient(TEAMSCALE_URL, USERNAME, PASSWORD, PROJECT_NAME)
baseline = Baseline("Test Baseline", "This is a test description", datetime.datetime.now())
client.add_baseline(baseline)
baselines = client.get_baselines()
print([str(baseline) for baseline in baselines])
|
Add example for adding and listing baselines
|
Add example for adding and listing baselines
|
Python
|
apache-2.0
|
cqse/teamscale-client-python
|
Add example for adding and listing baselines
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
from teamscale_client import TeamscaleClient
from teamscale_client.constants import AssessmentMetricColors
from teamscale_client.data import Baseline
TEAMSCALE_URL = "http://localhost:8080"
USERNAME = "admin"
PASSWORD = "admin"
PROJECT_NAME = "test"
if __name__ == '__main__':
client = TeamscaleClient(TEAMSCALE_URL, USERNAME, PASSWORD, PROJECT_NAME)
baseline = Baseline("Test Baseline", "This is a test description", datetime.datetime.now())
client.add_baseline(baseline)
baselines = client.get_baselines()
print([str(baseline) for baseline in baselines])
|
<commit_before><commit_msg>Add example for adding and listing baselines<commit_after>
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
from teamscale_client import TeamscaleClient
from teamscale_client.constants import AssessmentMetricColors
from teamscale_client.data import Baseline
TEAMSCALE_URL = "http://localhost:8080"
USERNAME = "admin"
PASSWORD = "admin"
PROJECT_NAME = "test"
if __name__ == '__main__':
client = TeamscaleClient(TEAMSCALE_URL, USERNAME, PASSWORD, PROJECT_NAME)
baseline = Baseline("Test Baseline", "This is a test description", datetime.datetime.now())
client.add_baseline(baseline)
baselines = client.get_baselines()
print([str(baseline) for baseline in baselines])
|
Add example for adding and listing baselinesfrom __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
from teamscale_client import TeamscaleClient
from teamscale_client.constants import AssessmentMetricColors
from teamscale_client.data import Baseline
TEAMSCALE_URL = "http://localhost:8080"
USERNAME = "admin"
PASSWORD = "admin"
PROJECT_NAME = "test"
if __name__ == '__main__':
client = TeamscaleClient(TEAMSCALE_URL, USERNAME, PASSWORD, PROJECT_NAME)
baseline = Baseline("Test Baseline", "This is a test description", datetime.datetime.now())
client.add_baseline(baseline)
baselines = client.get_baselines()
print([str(baseline) for baseline in baselines])
|
<commit_before><commit_msg>Add example for adding and listing baselines<commit_after>from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
from teamscale_client import TeamscaleClient
from teamscale_client.constants import AssessmentMetricColors
from teamscale_client.data import Baseline
TEAMSCALE_URL = "http://localhost:8080"
USERNAME = "admin"
PASSWORD = "admin"
PROJECT_NAME = "test"
if __name__ == '__main__':
client = TeamscaleClient(TEAMSCALE_URL, USERNAME, PASSWORD, PROJECT_NAME)
baseline = Baseline("Test Baseline", "This is a test description", datetime.datetime.now())
client.add_baseline(baseline)
baselines = client.get_baselines()
print([str(baseline) for baseline in baselines])
|
|
1ed78df61ebe32f6d1edac490e5d07e8c0558451
|
flake8/tests/test_reporter.py
|
flake8/tests/test_reporter.py
|
from __future__ import with_statement
import errno
import unittest
try:
from unittest import mock
except ImportError:
import mock # < PY33
from flake8 import reporter
def ioerror_report_factory(errno_code):
class IOErrorBaseQReport(reporter.BaseQReport):
def _process_main(self):
raise IOError(errno_code, 'Fake bad pipe exception')
options = mock.MagicMock()
options.jobs = 2
return IOErrorBaseQReport(options)
class TestBaseQReport(unittest.TestCase):
def test_does_not_raise_a_bad_pipe_ioerror(self):
"""Test that no EPIPE IOError exception is re-raised or leaked."""
report = ioerror_report_factory(errno.EPIPE)
try:
report.process_main()
except IOError:
self.fail('BaseQReport.process_main raised an IOError for EPIPE'
' but it should have caught this exception.')
def test_raises_a_enoent_ioerror(self):
"""Test that an ENOENT IOError exception is re-raised."""
report = ioerror_report_factory(errno.ENOENT)
self.assertRaises(IOError, report.process_main)
|
Add a regression test for EPIPE IOErrors
|
Add a regression test for EPIPE IOErrors
This should prevent bug 69 from regressing in the future and provides a
framework for testing the addition of new errnos to the ignore list.
|
Python
|
mit
|
wdv4758h/flake8
|
Add a regression test for EPIPE IOErrors
This should prevent bug 69 from regressing in the future and provides a
framework for testing the addition of new errnos to the ignore list.
|
from __future__ import with_statement
import errno
import unittest
try:
from unittest import mock
except ImportError:
import mock # < PY33
from flake8 import reporter
def ioerror_report_factory(errno_code):
class IOErrorBaseQReport(reporter.BaseQReport):
def _process_main(self):
raise IOError(errno_code, 'Fake bad pipe exception')
options = mock.MagicMock()
options.jobs = 2
return IOErrorBaseQReport(options)
class TestBaseQReport(unittest.TestCase):
def test_does_not_raise_a_bad_pipe_ioerror(self):
"""Test that no EPIPE IOError exception is re-raised or leaked."""
report = ioerror_report_factory(errno.EPIPE)
try:
report.process_main()
except IOError:
self.fail('BaseQReport.process_main raised an IOError for EPIPE'
' but it should have caught this exception.')
def test_raises_a_enoent_ioerror(self):
"""Test that an ENOENT IOError exception is re-raised."""
report = ioerror_report_factory(errno.ENOENT)
self.assertRaises(IOError, report.process_main)
|
<commit_before><commit_msg>Add a regression test for EPIPE IOErrors
This should prevent bug 69 from regressing in the future and provides a
framework for testing the addition of new errnos to the ignore list.<commit_after>
|
from __future__ import with_statement
import errno
import unittest
try:
from unittest import mock
except ImportError:
import mock # < PY33
from flake8 import reporter
def ioerror_report_factory(errno_code):
class IOErrorBaseQReport(reporter.BaseQReport):
def _process_main(self):
raise IOError(errno_code, 'Fake bad pipe exception')
options = mock.MagicMock()
options.jobs = 2
return IOErrorBaseQReport(options)
class TestBaseQReport(unittest.TestCase):
def test_does_not_raise_a_bad_pipe_ioerror(self):
"""Test that no EPIPE IOError exception is re-raised or leaked."""
report = ioerror_report_factory(errno.EPIPE)
try:
report.process_main()
except IOError:
self.fail('BaseQReport.process_main raised an IOError for EPIPE'
' but it should have caught this exception.')
def test_raises_a_enoent_ioerror(self):
"""Test that an ENOENT IOError exception is re-raised."""
report = ioerror_report_factory(errno.ENOENT)
self.assertRaises(IOError, report.process_main)
|
Add a regression test for EPIPE IOErrors
This should prevent bug 69 from regressing in the future and provides a
framework for testing the addition of new errnos to the ignore list.from __future__ import with_statement
import errno
import unittest
try:
from unittest import mock
except ImportError:
import mock # < PY33
from flake8 import reporter
def ioerror_report_factory(errno_code):
class IOErrorBaseQReport(reporter.BaseQReport):
def _process_main(self):
raise IOError(errno_code, 'Fake bad pipe exception')
options = mock.MagicMock()
options.jobs = 2
return IOErrorBaseQReport(options)
class TestBaseQReport(unittest.TestCase):
def test_does_not_raise_a_bad_pipe_ioerror(self):
"""Test that no EPIPE IOError exception is re-raised or leaked."""
report = ioerror_report_factory(errno.EPIPE)
try:
report.process_main()
except IOError:
self.fail('BaseQReport.process_main raised an IOError for EPIPE'
' but it should have caught this exception.')
def test_raises_a_enoent_ioerror(self):
"""Test that an ENOENT IOError exception is re-raised."""
report = ioerror_report_factory(errno.ENOENT)
self.assertRaises(IOError, report.process_main)
|
<commit_before><commit_msg>Add a regression test for EPIPE IOErrors
This should prevent bug 69 from regressing in the future and provides a
framework for testing the addition of new errnos to the ignore list.<commit_after>from __future__ import with_statement
import errno
import unittest
try:
from unittest import mock
except ImportError:
import mock # < PY33
from flake8 import reporter
def ioerror_report_factory(errno_code):
class IOErrorBaseQReport(reporter.BaseQReport):
def _process_main(self):
raise IOError(errno_code, 'Fake bad pipe exception')
options = mock.MagicMock()
options.jobs = 2
return IOErrorBaseQReport(options)
class TestBaseQReport(unittest.TestCase):
def test_does_not_raise_a_bad_pipe_ioerror(self):
"""Test that no EPIPE IOError exception is re-raised or leaked."""
report = ioerror_report_factory(errno.EPIPE)
try:
report.process_main()
except IOError:
self.fail('BaseQReport.process_main raised an IOError for EPIPE'
' but it should have caught this exception.')
def test_raises_a_enoent_ioerror(self):
"""Test that an ENOENT IOError exception is re-raised."""
report = ioerror_report_factory(errno.ENOENT)
self.assertRaises(IOError, report.process_main)
|
|
f1abb5dbfb6fb5f13e574565954d147810df5aa0
|
plugins/qrcode.py
|
plugins/qrcode.py
|
# -*- encoding:utf8 -*-
import csv
import cStringIO
import settings
from flask import url_for
from model.googledrive import GoogleDrive
from plugins.base.responsebase import IResponseBase
class QRCode(IResponseBase):
def hear_regex(self, **kwargs):
return "^(QRコード|QRCode)$"
def response(self, **kwargs):
drive_kwargs = {
'document_id': settings.QRCODE_DOCUMENT,
'export_type': 'text/csv'
}
content = GoogleDrive().retrieve_content(**drive_kwargs)
f = cStringIO.StringIO(content)
reader = csv.reader(f, delimiter=',')
next(reader)
attachments = []
for row in reader:
try:
ids_num = int(row[0])
description = row[1]
attachment = {
"fallback": description,
"text": description,
"image_url": url_for('qrcode.qr', id=ids_num, _external=True),
"color": "#6698C8"
}
attachments.append(attachment)
except:
continue
return {
"text": "QRコードです n_n",
"attachments": attachments
}
|
Add new response to show QRcode whose original data is placed in GoogleDrive.
|
Add new response to show QRcode whose original data is placed in GoogleDrive.
|
Python
|
mit
|
supistar/Botnyan
|
Add new response to show QRcode whose original data is placed in GoogleDrive.
|
# -*- encoding:utf8 -*-
import csv
import cStringIO
import settings
from flask import url_for
from model.googledrive import GoogleDrive
from plugins.base.responsebase import IResponseBase
class QRCode(IResponseBase):
def hear_regex(self, **kwargs):
return "^(QRコード|QRCode)$"
def response(self, **kwargs):
drive_kwargs = {
'document_id': settings.QRCODE_DOCUMENT,
'export_type': 'text/csv'
}
content = GoogleDrive().retrieve_content(**drive_kwargs)
f = cStringIO.StringIO(content)
reader = csv.reader(f, delimiter=',')
next(reader)
attachments = []
for row in reader:
try:
ids_num = int(row[0])
description = row[1]
attachment = {
"fallback": description,
"text": description,
"image_url": url_for('qrcode.qr', id=ids_num, _external=True),
"color": "#6698C8"
}
attachments.append(attachment)
except:
continue
return {
"text": "QRコードです n_n",
"attachments": attachments
}
|
<commit_before><commit_msg>Add new response to show QRcode whose original data is placed in GoogleDrive.<commit_after>
|
# -*- encoding:utf8 -*-
import csv
import cStringIO
import settings
from flask import url_for
from model.googledrive import GoogleDrive
from plugins.base.responsebase import IResponseBase
class QRCode(IResponseBase):
def hear_regex(self, **kwargs):
return "^(QRコード|QRCode)$"
def response(self, **kwargs):
drive_kwargs = {
'document_id': settings.QRCODE_DOCUMENT,
'export_type': 'text/csv'
}
content = GoogleDrive().retrieve_content(**drive_kwargs)
f = cStringIO.StringIO(content)
reader = csv.reader(f, delimiter=',')
next(reader)
attachments = []
for row in reader:
try:
ids_num = int(row[0])
description = row[1]
attachment = {
"fallback": description,
"text": description,
"image_url": url_for('qrcode.qr', id=ids_num, _external=True),
"color": "#6698C8"
}
attachments.append(attachment)
except:
continue
return {
"text": "QRコードです n_n",
"attachments": attachments
}
|
Add new response to show QRcode whose original data is placed in GoogleDrive.# -*- encoding:utf8 -*-
import csv
import cStringIO
import settings
from flask import url_for
from model.googledrive import GoogleDrive
from plugins.base.responsebase import IResponseBase
class QRCode(IResponseBase):
def hear_regex(self, **kwargs):
return "^(QRコード|QRCode)$"
def response(self, **kwargs):
drive_kwargs = {
'document_id': settings.QRCODE_DOCUMENT,
'export_type': 'text/csv'
}
content = GoogleDrive().retrieve_content(**drive_kwargs)
f = cStringIO.StringIO(content)
reader = csv.reader(f, delimiter=',')
next(reader)
attachments = []
for row in reader:
try:
ids_num = int(row[0])
description = row[1]
attachment = {
"fallback": description,
"text": description,
"image_url": url_for('qrcode.qr', id=ids_num, _external=True),
"color": "#6698C8"
}
attachments.append(attachment)
except:
continue
return {
"text": "QRコードです n_n",
"attachments": attachments
}
|
<commit_before><commit_msg>Add new response to show QRcode whose original data is placed in GoogleDrive.<commit_after># -*- encoding:utf8 -*-
import csv
import cStringIO
import settings
from flask import url_for
from model.googledrive import GoogleDrive
from plugins.base.responsebase import IResponseBase
class QRCode(IResponseBase):
def hear_regex(self, **kwargs):
return "^(QRコード|QRCode)$"
def response(self, **kwargs):
drive_kwargs = {
'document_id': settings.QRCODE_DOCUMENT,
'export_type': 'text/csv'
}
content = GoogleDrive().retrieve_content(**drive_kwargs)
f = cStringIO.StringIO(content)
reader = csv.reader(f, delimiter=',')
next(reader)
attachments = []
for row in reader:
try:
ids_num = int(row[0])
description = row[1]
attachment = {
"fallback": description,
"text": description,
"image_url": url_for('qrcode.qr', id=ids_num, _external=True),
"color": "#6698C8"
}
attachments.append(attachment)
except:
continue
return {
"text": "QRコードです n_n",
"attachments": attachments
}
|
|
6d70b86b89d682d6bd83fcfc96aef6a582d00101
|
readthedocs/rtd_tests/tests/test_core_management.py
|
readthedocs/rtd_tests/tests/test_core_management.py
|
from StringIO import StringIO
from django.test import TestCase
from mock import patch
from core.management.commands import run_docker
from projects.models import Project
from builds.models import Version
class TestRunDocker(TestCase):
'''Test run_docker command with good input and output'''
fixtures = ['test_data']
def setUp(self):
self.project = Project.objects.get(slug='pip')
self.version = Version(slug='foo', verbose_name='foobar')
self.project.versions.add(self.version)
def _get_input(self, files=None):
return ('{"project": {"id": 6, "name": "Pip", "slug": "pip"},'
'"id": 71, "type": "tag", "identifier": "437fb316fbbdba1acdd22e07dbe7c4809ffd97e6",'
'"verbose_name": "stable", "slug": "stable"}')
def _docker_build(data):
if isinstance(data, Version):
return {'html': (0, 'DOCKER PASS', '')}
else:
return {'html': (1, '', 'DOCKER FAIL')}
def test_stdin(self):
'''Test docker build command'''
def _input(_, files=None):
return '{"test": "foobar"}'
with patch.object(run_docker.Command, '_get_input', _input):
cmd = run_docker.Command()
assert cmd._get_input() == '{"test": "foobar"}'
@patch.object(run_docker.Command, '_get_input', _get_input)
@patch('projects.tasks.docker_build', _docker_build)
@patch('sys.stdout', new_callable=StringIO)
def test_good_input(self, mock_output):
'''Test docker build command'''
cmd = run_docker.Command()
self.assertEqual(cmd._get_input(), self._get_input())
cmd.handle()
self.assertEqual(
mock_output.getvalue(),
'{"html": [0, "DOCKER PASS", ""]}\n'
)
@patch('projects.tasks.docker_build', _docker_build)
def test_bad_input(self):
'''Test docker build command'''
with patch.object(run_docker.Command, '_get_input') as mock_input:
with patch('sys.stdout', new_callable=StringIO) as mock_output:
mock_input.return_value = 'BAD JSON'
cmd = run_docker.Command()
cmd.handle()
self.assertEqual(
mock_output.getvalue(),
('{"doc_builder": '
'[-1, "", "ValueError: No JSON object could be decoded"]}'
'\n')
)
|
Add tests for run docker mgmt command
|
Add tests for run docker mgmt command
|
Python
|
mit
|
asampat3090/readthedocs.org,atsuyim/readthedocs.org,hach-que/readthedocs.org,CedarLogic/readthedocs.org,davidfischer/readthedocs.org,wijerasa/readthedocs.org,agjohnson/readthedocs.org,cgourlay/readthedocs.org,LukasBoersma/readthedocs.org,tddv/readthedocs.org,d0ugal/readthedocs.org,raven47git/readthedocs.org,emawind84/readthedocs.org,wanghaven/readthedocs.org,sid-kap/readthedocs.org,rtfd/readthedocs.org,atsuyim/readthedocs.org,fujita-shintaro/readthedocs.org,clarkperkins/readthedocs.org,wanghaven/readthedocs.org,Tazer/readthedocs.org,singingwolfboy/readthedocs.org,d0ugal/readthedocs.org,clarkperkins/readthedocs.org,kenshinthebattosai/readthedocs.org,wijerasa/readthedocs.org,dirn/readthedocs.org,wanghaven/readthedocs.org,attakei/readthedocs-oauth,Tazer/readthedocs.org,VishvajitP/readthedocs.org,michaelmcandrew/readthedocs.org,pombredanne/readthedocs.org,kenshinthebattosai/readthedocs.org,emawind84/readthedocs.org,Carreau/readthedocs.org,cgourlay/readthedocs.org,KamranMackey/readthedocs.org,KamranMackey/readthedocs.org,raven47git/readthedocs.org,CedarLogic/readthedocs.org,singingwolfboy/readthedocs.org,tddv/readthedocs.org,emawind84/readthedocs.org,nikolas/readthedocs.org,sunnyzwh/readthedocs.org,Carreau/readthedocs.org,SteveViss/readthedocs.org,CedarLogic/readthedocs.org,KamranMackey/readthedocs.org,d0ugal/readthedocs.org,titiushko/readthedocs.org,asampat3090/readthedocs.org,sid-kap/readthedocs.org,techtonik/readthedocs.org,LukasBoersma/readthedocs.org,espdev/readthedocs.org,rtfd/readthedocs.org,gjtorikian/readthedocs.org,hach-que/readthedocs.org,mhils/readthedocs.org,michaelmcandrew/readthedocs.org,titiushko/readthedocs.org,dirn/readthedocs.org,techtonik/readthedocs.org,nikolas/readthedocs.org,takluyver/readthedocs.org,davidfischer/readthedocs.org,sils1297/readthedocs.org,safwanrahman/readthedocs.org,safwanrahman/readthedocs.org,emawind84/readthedocs.org,atsuyim/readthedocs.org,Tazer/readthedocs.org,soulshake/readthedocs.org,agjohnson/readthedocs.org,hach-que/readthedocs.org,VishvajitP/readthedocs.org,Tazer/readthedocs.org,laplaceliu/readthedocs.org,clarkperkins/readthedocs.org,pombredanne/readthedocs.org,gjtorikian/readthedocs.org,hach-que/readthedocs.org,GovReady/readthedocs.org,kdkeyser/readthedocs.org,istresearch/readthedocs.org,fujita-shintaro/readthedocs.org,techtonik/readthedocs.org,mhils/readthedocs.org,soulshake/readthedocs.org,KamranMackey/readthedocs.org,kenwang76/readthedocs.org,GovReady/readthedocs.org,rtfd/readthedocs.org,royalwang/readthedocs.org,espdev/readthedocs.org,singingwolfboy/readthedocs.org,agjohnson/readthedocs.org,royalwang/readthedocs.org,safwanrahman/readthedocs.org,gjtorikian/readthedocs.org,safwanrahman/readthedocs.org,mrshoki/readthedocs.org,nikolas/readthedocs.org,cgourlay/readthedocs.org,stevepiercy/readthedocs.org,fujita-shintaro/readthedocs.org,soulshake/readthedocs.org,kenwang76/readthedocs.org,michaelmcandrew/readthedocs.org,cgourlay/readthedocs.org,kenwang76/readthedocs.org,LukasBoersma/readthedocs.org,kdkeyser/readthedocs.org,titiushko/readthedocs.org,pombredanne/readthedocs.org,singingwolfboy/readthedocs.org,istresearch/readthedocs.org,mrshoki/readthedocs.org,dirn/readthedocs.org,jerel/readthedocs.org,davidfischer/readthedocs.org,rtfd/readthedocs.org,asampat3090/readthedocs.org,jerel/readthedocs.org,LukasBoersma/readthedocs.org,michaelmcandrew/readthedocs.org,attakei/readthedocs-oauth,wijerasa/readthedocs.org,d0ugal/readthedocs.org,tddv/readthedocs.org,sid-kap/readthedocs.org,istresearch/readthedocs.org,mrshoki/readthedocs.org,istresearch/readthedocs.o
rg,kenshinthebattosai/readthedocs.org,espdev/readthedocs.org,royalwang/readthedocs.org,mrshoki/readthedocs.org,laplaceliu/readthedocs.org,attakei/readthedocs-oauth,soulshake/readthedocs.org,kenwang76/readthedocs.org,SteveViss/readthedocs.org,Carreau/readthedocs.org,jerel/readthedocs.org,Carreau/readthedocs.org,takluyver/readthedocs.org,sils1297/readthedocs.org,nikolas/readthedocs.org,takluyver/readthedocs.org,GovReady/readthedocs.org,stevepiercy/readthedocs.org,kenshinthebattosai/readthedocs.org,stevepiercy/readthedocs.org,CedarLogic/readthedocs.org,titiushko/readthedocs.org,laplaceliu/readthedocs.org,atsuyim/readthedocs.org,jerel/readthedocs.org,fujita-shintaro/readthedocs.org,attakei/readthedocs-oauth,wijerasa/readthedocs.org,sid-kap/readthedocs.org,espdev/readthedocs.org,davidfischer/readthedocs.org,VishvajitP/readthedocs.org,raven47git/readthedocs.org,mhils/readthedocs.org,wanghaven/readthedocs.org,dirn/readthedocs.org,espdev/readthedocs.org,gjtorikian/readthedocs.org,sunnyzwh/readthedocs.org,royalwang/readthedocs.org,SteveViss/readthedocs.org,sunnyzwh/readthedocs.org,asampat3090/readthedocs.org,GovReady/readthedocs.org,techtonik/readthedocs.org,kdkeyser/readthedocs.org,clarkperkins/readthedocs.org,laplaceliu/readthedocs.org,sils1297/readthedocs.org,takluyver/readthedocs.org,kdkeyser/readthedocs.org,SteveViss/readthedocs.org,mhils/readthedocs.org,sils1297/readthedocs.org,stevepiercy/readthedocs.org,sunnyzwh/readthedocs.org,agjohnson/readthedocs.org,VishvajitP/readthedocs.org,raven47git/readthedocs.org
|
Add tests for run docker mgmt command
|
from StringIO import StringIO
from django.test import TestCase
from mock import patch
from core.management.commands import run_docker
from projects.models import Project
from builds.models import Version
class TestRunDocker(TestCase):
'''Test run_docker command with good input and output'''
fixtures = ['test_data']
def setUp(self):
self.project = Project.objects.get(slug='pip')
self.version = Version(slug='foo', verbose_name='foobar')
self.project.versions.add(self.version)
def _get_input(self, files=None):
return ('{"project": {"id": 6, "name": "Pip", "slug": "pip"},'
'"id": 71, "type": "tag", "identifier": "437fb316fbbdba1acdd22e07dbe7c4809ffd97e6",'
'"verbose_name": "stable", "slug": "stable"}')
def _docker_build(data):
if isinstance(data, Version):
return {'html': (0, 'DOCKER PASS', '')}
else:
return {'html': (1, '', 'DOCKER FAIL')}
def test_stdin(self):
'''Test docker build command'''
def _input(_, files=None):
return '{"test": "foobar"}'
with patch.object(run_docker.Command, '_get_input', _input):
cmd = run_docker.Command()
assert cmd._get_input() == '{"test": "foobar"}'
@patch.object(run_docker.Command, '_get_input', _get_input)
@patch('projects.tasks.docker_build', _docker_build)
@patch('sys.stdout', new_callable=StringIO)
def test_good_input(self, mock_output):
'''Test docker build command'''
cmd = run_docker.Command()
self.assertEqual(cmd._get_input(), self._get_input())
cmd.handle()
self.assertEqual(
mock_output.getvalue(),
'{"html": [0, "DOCKER PASS", ""]}\n'
)
@patch('projects.tasks.docker_build', _docker_build)
def test_bad_input(self):
'''Test docker build command'''
with patch.object(run_docker.Command, '_get_input') as mock_input:
with patch('sys.stdout', new_callable=StringIO) as mock_output:
mock_input.return_value = 'BAD JSON'
cmd = run_docker.Command()
cmd.handle()
self.assertEqual(
mock_output.getvalue(),
('{"doc_builder": '
'[-1, "", "ValueError: No JSON object could be decoded"]}'
'\n')
)
|
<commit_before><commit_msg>Add tests for run docker mgmt command<commit_after>
|
from StringIO import StringIO
from django.test import TestCase
from mock import patch
from core.management.commands import run_docker
from projects.models import Project
from builds.models import Version
class TestRunDocker(TestCase):
'''Test run_docker command with good input and output'''
fixtures = ['test_data']
def setUp(self):
self.project = Project.objects.get(slug='pip')
self.version = Version(slug='foo', verbose_name='foobar')
self.project.versions.add(self.version)
def _get_input(self, files=None):
return ('{"project": {"id": 6, "name": "Pip", "slug": "pip"},'
'"id": 71, "type": "tag", "identifier": "437fb316fbbdba1acdd22e07dbe7c4809ffd97e6",'
'"verbose_name": "stable", "slug": "stable"}')
def _docker_build(data):
if isinstance(data, Version):
return {'html': (0, 'DOCKER PASS', '')}
else:
return {'html': (1, '', 'DOCKER FAIL')}
def test_stdin(self):
'''Test docker build command'''
def _input(_, files=None):
return '{"test": "foobar"}'
with patch.object(run_docker.Command, '_get_input', _input):
cmd = run_docker.Command()
assert cmd._get_input() == '{"test": "foobar"}'
@patch.object(run_docker.Command, '_get_input', _get_input)
@patch('projects.tasks.docker_build', _docker_build)
@patch('sys.stdout', new_callable=StringIO)
def test_good_input(self, mock_output):
'''Test docker build command'''
cmd = run_docker.Command()
self.assertEqual(cmd._get_input(), self._get_input())
cmd.handle()
self.assertEqual(
mock_output.getvalue(),
'{"html": [0, "DOCKER PASS", ""]}\n'
)
@patch('projects.tasks.docker_build', _docker_build)
def test_bad_input(self):
'''Test docker build command'''
with patch.object(run_docker.Command, '_get_input') as mock_input:
with patch('sys.stdout', new_callable=StringIO) as mock_output:
mock_input.return_value = 'BAD JSON'
cmd = run_docker.Command()
cmd.handle()
self.assertEqual(
mock_output.getvalue(),
('{"doc_builder": '
'[-1, "", "ValueError: No JSON object could be decoded"]}'
'\n')
)
|
Add tests for run docker mgmt commandfrom StringIO import StringIO
from django.test import TestCase
from mock import patch
from core.management.commands import run_docker
from projects.models import Project
from builds.models import Version
class TestRunDocker(TestCase):
'''Test run_docker command with good input and output'''
fixtures = ['test_data']
def setUp(self):
self.project = Project.objects.get(slug='pip')
self.version = Version(slug='foo', verbose_name='foobar')
self.project.versions.add(self.version)
def _get_input(self, files=None):
return ('{"project": {"id": 6, "name": "Pip", "slug": "pip"},'
'"id": 71, "type": "tag", "identifier": "437fb316fbbdba1acdd22e07dbe7c4809ffd97e6",'
'"verbose_name": "stable", "slug": "stable"}')
def _docker_build(data):
if isinstance(data, Version):
return {'html': (0, 'DOCKER PASS', '')}
else:
return {'html': (1, '', 'DOCKER FAIL')}
def test_stdin(self):
'''Test docker build command'''
def _input(_, files=None):
return '{"test": "foobar"}'
with patch.object(run_docker.Command, '_get_input', _input):
cmd = run_docker.Command()
assert cmd._get_input() == '{"test": "foobar"}'
@patch.object(run_docker.Command, '_get_input', _get_input)
@patch('projects.tasks.docker_build', _docker_build)
@patch('sys.stdout', new_callable=StringIO)
def test_good_input(self, mock_output):
'''Test docker build command'''
cmd = run_docker.Command()
self.assertEqual(cmd._get_input(), self._get_input())
cmd.handle()
self.assertEqual(
mock_output.getvalue(),
'{"html": [0, "DOCKER PASS", ""]}\n'
)
@patch('projects.tasks.docker_build', _docker_build)
def test_bad_input(self):
'''Test docker build command'''
with patch.object(run_docker.Command, '_get_input') as mock_input:
with patch('sys.stdout', new_callable=StringIO) as mock_output:
mock_input.return_value = 'BAD JSON'
cmd = run_docker.Command()
cmd.handle()
self.assertEqual(
mock_output.getvalue(),
('{"doc_builder": '
'[-1, "", "ValueError: No JSON object could be decoded"]}'
'\n')
)
|
<commit_before><commit_msg>Add tests for run docker mgmt command<commit_after>from StringIO import StringIO
from django.test import TestCase
from mock import patch
from core.management.commands import run_docker
from projects.models import Project
from builds.models import Version
class TestRunDocker(TestCase):
'''Test run_docker command with good input and output'''
fixtures = ['test_data']
def setUp(self):
self.project = Project.objects.get(slug='pip')
self.version = Version(slug='foo', verbose_name='foobar')
self.project.versions.add(self.version)
def _get_input(self, files=None):
return ('{"project": {"id": 6, "name": "Pip", "slug": "pip"},'
'"id": 71, "type": "tag", "identifier": "437fb316fbbdba1acdd22e07dbe7c4809ffd97e6",'
'"verbose_name": "stable", "slug": "stable"}')
def _docker_build(data):
if isinstance(data, Version):
return {'html': (0, 'DOCKER PASS', '')}
else:
return {'html': (1, '', 'DOCKER FAIL')}
def test_stdin(self):
'''Test docker build command'''
def _input(_, files=None):
return '{"test": "foobar"}'
with patch.object(run_docker.Command, '_get_input', _input):
cmd = run_docker.Command()
assert cmd._get_input() == '{"test": "foobar"}'
@patch.object(run_docker.Command, '_get_input', _get_input)
@patch('projects.tasks.docker_build', _docker_build)
@patch('sys.stdout', new_callable=StringIO)
def test_good_input(self, mock_output):
'''Test docker build command'''
cmd = run_docker.Command()
self.assertEqual(cmd._get_input(), self._get_input())
cmd.handle()
self.assertEqual(
mock_output.getvalue(),
'{"html": [0, "DOCKER PASS", ""]}\n'
)
@patch('projects.tasks.docker_build', _docker_build)
def test_bad_input(self):
'''Test docker build command'''
with patch.object(run_docker.Command, '_get_input') as mock_input:
with patch('sys.stdout', new_callable=StringIO) as mock_output:
mock_input.return_value = 'BAD JSON'
cmd = run_docker.Command()
cmd.handle()
self.assertEqual(
mock_output.getvalue(),
('{"doc_builder": '
'[-1, "", "ValueError: No JSON object could be decoded"]}'
'\n')
)
|
|
03f0d55c7a6a893a2cff66d7c1a17ea13b16c4c2
|
pvpython_setup.py
|
pvpython_setup.py
|
#!/bin/bash
die () {
echo >&2 "$@"
exit 1
}
[ "$#" -eq 1 ] || die "Please provide path to pvpython"
echo "Installing requests ..."
REQUESTS_DIR=`mktemp -d`
wget --no-check-certificate https://github.com/kennethreitz/requests/tarball/v2.8.1 -O - | tar xz -C $REQUESTS_DIR
pushd .
cd $REQUESTS_DIR/*requests*
$1 setup.py install
popd
rm -rf $REQUESTS_DIR
echo "Installing requests-toolbelt ..."
# Install setuptools
wget https://bootstrap.pypa.io/ez_setup.py -O - | $1
REQUESTS_TOOLBELT_DIR=`mktemp -d`
wget --no-check-certificate https://github.com/sigmavirus24/requests-toolbelt/tarball/0.4.0 -O - | tar xz -C $REQUESTS_TOOLBELT_DIR
pushd .
cd $REQUESTS_TOOLBELT_DIR/*requests-toolbelt*
$1 setup.py install
popd
rm -rf $REQUESTS_TOOLBELT_DIR
|
Add bash script to install required packages in pvpython
|
Add bash script to install required packages in pvpython
|
Python
|
apache-2.0
|
Kitware/HPCCloud-deploy,Kitware/HPCCloud-deploy,Kitware/HPCCloud-deploy
|
Add bash script to install required packages in pvpython
|
#!/bin/bash
die () {
echo >&2 "$@"
exit 1
}
[ "$#" -eq 1 ] || die "Please provide path to pvpython"
echo "Installing requests ..."
REQUESTS_DIR=`mktemp -d`
wget --no-check-certificate https://github.com/kennethreitz/requests/tarball/v2.8.1 -O - | tar xz -C $REQUESTS_DIR
pushd .
cd $REQUESTS_DIR/*requests*
$1 setup.py install
popd
rm -rf $REQUESTS_DIR
echo "Installing requests-toolbelt ..."
# Install setuptools
wget https://bootstrap.pypa.io/ez_setup.py -O - | $1
REQUESTS_TOOLBELT_DIR=`mktemp -d`
wget --no-check-certificate https://github.com/sigmavirus24/requests-toolbelt/tarball/0.4.0 -O - | tar xz -C $REQUESTS_TOOLBELT_DIR
pushd .
cd $REQUESTS_TOOLBELT_DIR/*requests-toolbelt*
$1 setup.py install
popd
rm -rf $REQUESTS_TOOLBELT_DIR
|
<commit_before><commit_msg>Add bash script to install required packages in pvpython<commit_after>
|
#!/bin/bash
die () {
echo >&2 "$@"
exit 1
}
[ "$#" -eq 1 ] || die "Please provide path to pvpython"
echo "Installing requests ..."
REQUESTS_DIR=`mktemp -d`
wget --no-check-certificate https://github.com/kennethreitz/requests/tarball/v2.8.1 -O - | tar xz -C $REQUESTS_DIR
pushd .
cd $REQUESTS_DIR/*requests*
$1 setup.py install
popd
rm -rf $REQUESTS_DIR
echo "Installing requests-toolbelt ..."
# Install setuptools
wget https://bootstrap.pypa.io/ez_setup.py -O - | $1
REQUESTS_TOOLBELT_DIR=`mktemp -d`
wget --no-check-certificate https://github.com/sigmavirus24/requests-toolbelt/tarball/0.4.0 -O - | tar xz -C $REQUESTS_TOOLBELT_DIR
pushd .
cd $REQUESTS_TOOLBELT_DIR/*requests-toolbelt*
$1 setup.py install
popd
rm -rf $REQUESTS_TOOLBELT_DIR
|
Add bash script to install required packages in pvpython#!/bin/bash
die () {
echo >&2 "$@"
exit 1
}
[ "$#" -eq 1 ] || die "Please provide path to pvpython"
echo "Installing requests ..."
REQUESTS_DIR=`mktemp -d`
wget --no-check-certificate https://github.com/kennethreitz/requests/tarball/v2.8.1 -O - | tar xz -C $REQUESTS_DIR
pushd .
cd $REQUESTS_DIR/*requests*
$1 setup.py install
popd
rm -rf $REQUESTS_DIR
echo "Installing requests-toolbelt ..."
# Install setuptools
wget https://bootstrap.pypa.io/ez_setup.py -O - | $1
REQUESTS_TOOLBELT_DIR=`mktemp -d`
wget --no-check-certificate https://github.com/sigmavirus24/requests-toolbelt/tarball/0.4.0 -O - | tar xz -C $REQUESTS_TOOLBELT_DIR
pushd .
cd $REQUESTS_TOOLBELT_DIR/*requests-toolbelt*
$1 setup.py install
popd
rm -rf $REQUESTS_TOOLBELT_DIR
|
<commit_before><commit_msg>Add bash script to install required packages in pvpython<commit_after>#!/bin/bash
die () {
echo >&2 "$@"
exit 1
}
[ "$#" -eq 1 ] || die "Please provide path to pvpython"
echo "Installing requests ..."
REQUESTS_DIR=`mktemp -d`
wget --no-check-certificate https://github.com/kennethreitz/requests/tarball/v2.8.1 -O - | tar xz -C $REQUESTS_DIR
pushd .
cd $REQUESTS_DIR/*requests*
$1 setup.py install
popd
rm -rf $REQUESTS_DIR
echo "Installing requests-toolbelt ..."
# Install setuptools
wget https://bootstrap.pypa.io/ez_setup.py -O - | $1
REQUESTS_TOOLBELT_DIR=`mktemp -d`
wget --no-check-certificate https://github.com/sigmavirus24/requests-toolbelt/tarball/0.4.0 -O - | tar xz -C $REQUESTS_TOOLBELT_DIR
pushd .
cd $REQUESTS_TOOLBELT_DIR/*requests-toolbelt*
$1 setup.py install
popd
rm -rf $REQUESTS_TOOLBELT_DIR
|
|
4fe36181a3a378fbaa80a5ddb1cfcb71aa6686a5
|
indra/tests/test_databases.py
|
indra/tests/test_databases.py
|
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
from indra.databases import get_identifiers_url
def test_chembl():
cid = '1229517'
assert get_identifiers_url('CHEMBL', cid) == \
'http://identifiers.org/chembl.compound/CHEMBL%s' % cid
assert get_identifiers_url('CHEMBL', 'CHEMBL%s' % cid) == \
'http://identifiers.org/chembl.compound/CHEMBL%s' % cid
def test_signor():
sid = 'SIGNOR-PF15'
assert get_identifiers_url('SIGNOR', sid) == \
'https://signor.uniroma2.it/relation_result.php?id=%s' % sid
|
Add some tests for db URLs
|
Add some tests for db URLs
|
Python
|
bsd-2-clause
|
sorgerlab/indra,johnbachman/belpy,sorgerlab/belpy,sorgerlab/belpy,pvtodorov/indra,johnbachman/indra,sorgerlab/indra,pvtodorov/indra,bgyori/indra,johnbachman/belpy,bgyori/indra,pvtodorov/indra,johnbachman/belpy,sorgerlab/indra,johnbachman/indra,pvtodorov/indra,bgyori/indra,sorgerlab/belpy,johnbachman/indra
|
Add some tests for db URLs
|
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
from indra.databases import get_identifiers_url
def test_chembl():
cid = '1229517'
assert get_identifiers_url('CHEMBL', cid) == \
'http://identifiers.org/chembl.compound/CHEMBL%s' % cid
assert get_identifiers_url('CHEMBL', 'CHEMBL%s' % cid) == \
'http://identifiers.org/chembl.compound/CHEMBL%s' % cid
def test_signor():
sid = 'SIGNOR-PF15'
assert get_identifiers_url('SIGNOR', sid) == \
'https://signor.uniroma2.it/relation_result.php?id=%s' % sid
|
<commit_before><commit_msg>Add some tests for db URLs<commit_after>
|
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
from indra.databases import get_identifiers_url
def test_chembl():
cid = '1229517'
assert get_identifiers_url('CHEMBL', cid) == \
'http://identifiers.org/chembl.compound/CHEMBL%s' % cid
assert get_identifiers_url('CHEMBL', 'CHEMBL%s' % cid) == \
'http://identifiers.org/chembl.compound/CHEMBL%s' % cid
def test_signor():
sid = 'SIGNOR-PF15'
assert get_identifiers_url('SIGNOR', sid) == \
'https://signor.uniroma2.it/relation_result.php?id=%s' % sid
|
Add some tests for db URLsfrom __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
from indra.databases import get_identifiers_url
def test_chembl():
cid = '1229517'
assert get_identifiers_url('CHEMBL', cid) == \
'http://identifiers.org/chembl.compound/CHEMBL%s' % cid
assert get_identifiers_url('CHEMBL', 'CHEMBL%s' % cid) == \
'http://identifiers.org/chembl.compound/CHEMBL%s' % cid
def test_signor():
sid = 'SIGNOR-PF15'
assert get_identifiers_url('SIGNOR', sid) == \
'https://signor.uniroma2.it/relation_result.php?id=%s' % sid
|
<commit_before><commit_msg>Add some tests for db URLs<commit_after>from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
from indra.databases import get_identifiers_url
def test_chembl():
cid = '1229517'
assert get_identifiers_url('CHEMBL', cid) == \
'http://identifiers.org/chembl.compound/CHEMBL%s' % cid
assert get_identifiers_url('CHEMBL', 'CHEMBL%s' % cid) == \
'http://identifiers.org/chembl.compound/CHEMBL%s' % cid
def test_signor():
sid = 'SIGNOR-PF15'
assert get_identifiers_url('SIGNOR', sid) == \
'https://signor.uniroma2.it/relation_result.php?id=%s' % sid
|
|
2c229c36cc9f8b239bc6886e77aae55e08a38ee6
|
go_optouts/tests/utils.py
|
go_optouts/tests/utils.py
|
from zope.interface import implements
import treq
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
from vumi.tests.helpers import IHelper
class SiteHelper(object):
""" Helper for testing HTTP Sites.
:type site:
twisted.web.server.Site
:param site:
        Site to serve.
:type treq_kw:
Function
:param treq_kw:
Callback function for generating treq request arguments. Any keyword
arguments passed to the request helper methods are passed to this
callback and the returned dictionary is passed to the underlying treq
        request function. The default function simply returns the keyword
arguments as given.
"""
implements(IHelper)
def __init__(self, site, treq_kw=None):
self.site = site
self.server = None
self.url = None
self.treq_kw = treq_kw
if self.treq_kw is None:
self.treq_kw = lambda **kw: kw
@inlineCallbacks
def setup(self):
self.server = yield reactor.listenTCP(0, self.site)
addr = self.server.getHost()
self.url = "http://%s:%s" % (addr.host, addr.port)
@inlineCallbacks
def cleanup(self):
if self.server is not None:
yield self.server.loseConnection()
def _call(self, handler, path, **kw):
url = "%s%s" % (self.url, path)
kw = self.treq_kw(**kw)
return handler(url, persistent=False, **kw)
def get(self, path, **kw):
return self._call(treq.get, path, **kw)
def post(self, path, **kw):
return self._call(treq.post, path, **kw)
def put(self, path, **kw):
return self._call(treq.put, path, **kw)
def delete(self, path, **kw):
return self._call(treq.delete, path, **kw)
|
Add helper utility for testing HTTP sites.
|
Add helper utility for testing HTTP sites.
|
Python
|
bsd-3-clause
|
praekelt/go-optouts-api,praekelt/go-optouts-api
|
Add helper utility for testing HTTP sites.
|
from zope.interface import implements
import treq
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
from vumi.tests.helpers import IHelper
class SiteHelper(object):
""" Helper for testing HTTP Sites.
:type site:
twisted.web.server.Site
:param site:
        Site to serve.
:type treq_kw:
Function
:param treq_kw:
Callback function for generating treq request arguments. Any keyword
arguments passed to the request helper methods are passed to this
callback and the returned dictionary is passed to the underlying treq
        request function. The default function simply returns the keyword
arguments as given.
"""
implements(IHelper)
def __init__(self, site, treq_kw=None):
self.site = site
self.server = None
self.url = None
self.treq_kw = treq_kw
if self.treq_kw is None:
self.treq_kw = lambda **kw: kw
@inlineCallbacks
def setup(self):
self.server = yield reactor.listenTCP(0, self.site)
addr = self.server.getHost()
self.url = "http://%s:%s" % (addr.host, addr.port)
@inlineCallbacks
def cleanup(self):
if self.server is not None:
yield self.server.loseConnection()
def _call(self, handler, path, **kw):
url = "%s%s" % (self.url, path)
kw = self.treq_kw(**kw)
return handler(url, persistent=False, **kw)
def get(self, path, **kw):
return self._call(treq.get, path, **kw)
def post(self, path, **kw):
return self._call(treq.post, path, **kw)
def put(self, path, **kw):
return self._call(treq.put, path, **kw)
def delete(self, path, **kw):
return self._call(treq.delete, path, **kw)
|
<commit_before><commit_msg>Add helper utility for testing HTTP sites.<commit_after>
|
from zope.interface import implements
import treq
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
from vumi.tests.helpers import IHelper
class SiteHelper(object):
""" Helper for testing HTTP Sites.
:type site:
twisted.web.server.Site
:param site:
        Site to serve.
:type treq_kw:
Function
:param treq_kw:
Callback function for generating treq request arguments. Any keyword
arguments passed to the request helper methods are passed to this
callback and the returned dictionary is passed to the underlying treq
        request function. The default function simply returns the keyword
arguments as given.
"""
implements(IHelper)
def __init__(self, site, treq_kw=None):
self.site = site
self.server = None
self.url = None
self.treq_kw = treq_kw
if self.treq_kw is None:
self.treq_kw = lambda **kw: kw
@inlineCallbacks
def setup(self):
self.server = yield reactor.listenTCP(0, self.site)
addr = self.server.getHost()
self.url = "http://%s:%s" % (addr.host, addr.port)
@inlineCallbacks
def cleanup(self):
if self.server is not None:
yield self.server.loseConnection()
def _call(self, handler, path, **kw):
url = "%s%s" % (self.url, path)
kw = self.treq_kw(**kw)
return handler(url, persistent=False, **kw)
def get(self, path, **kw):
return self._call(treq.get, path, **kw)
def post(self, path, **kw):
return self._call(treq.post, path, **kw)
def put(self, path, **kw):
return self._call(treq.put, path, **kw)
def delete(self, path, **kw):
return self._call(treq.delete, path, **kw)
|
Add helper utility for testing HTTP sites.from zope.interface import implements
import treq
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
from vumi.tests.helpers import IHelper
class SiteHelper(object):
""" Helper for testing HTTP Sites.
:type site:
twisted.web.server.Site
:param site:
        Site to serve.
:type treq_kw:
Function
:param treq_kw:
Callback function for generating treq request arguments. Any keyword
arguments passed to the request helper methods are passed to this
callback and the returned dictionary is passed to the underlying treq
        request function. The default function simply returns the keyword
arguments as given.
"""
implements(IHelper)
def __init__(self, site, treq_kw=None):
self.site = site
self.server = None
self.url = None
self.treq_kw = treq_kw
if self.treq_kw is None:
self.treq_kw = lambda **kw: kw
@inlineCallbacks
def setup(self):
self.server = yield reactor.listenTCP(0, self.site)
addr = self.server.getHost()
self.url = "http://%s:%s" % (addr.host, addr.port)
@inlineCallbacks
def cleanup(self):
if self.server is not None:
yield self.server.loseConnection()
def _call(self, handler, path, **kw):
url = "%s%s" % (self.url, path)
kw = self.treq_kw(**kw)
return handler(url, persistent=False, **kw)
def get(self, path, **kw):
return self._call(treq.get, path, **kw)
def post(self, path, **kw):
return self._call(treq.post, path, **kw)
def put(self, path, **kw):
return self._call(treq.put, path, **kw)
def delete(self, path, **kw):
return self._call(treq.delete, path, **kw)
|
<commit_before><commit_msg>Add helper utility for testing HTTP sites.<commit_after>from zope.interface import implements
import treq
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
from vumi.tests.helpers import IHelper
class SiteHelper(object):
""" Helper for testing HTTP Sites.
:type site:
twisted.web.server.Site
:param site:
        Site to serve.
:type treq_kw:
Function
:param treq_kw:
Callback function for generating treq request arguments. Any keyword
arguments passed to the request helper methods are passed to this
callback and the returned dictionary is passed to the underlying treq
        request function. The default function simply returns the keyword
arguments as given.
"""
implements(IHelper)
def __init__(self, site, treq_kw=None):
self.site = site
self.server = None
self.url = None
self.treq_kw = treq_kw
if self.treq_kw is None:
self.treq_kw = lambda **kw: kw
@inlineCallbacks
def setup(self):
self.server = yield reactor.listenTCP(0, self.site)
addr = self.server.getHost()
self.url = "http://%s:%s" % (addr.host, addr.port)
@inlineCallbacks
def cleanup(self):
if self.server is not None:
yield self.server.loseConnection()
def _call(self, handler, path, **kw):
url = "%s%s" % (self.url, path)
kw = self.treq_kw(**kw)
return handler(url, persistent=False, **kw)
def get(self, path, **kw):
return self._call(treq.get, path, **kw)
def post(self, path, **kw):
return self._call(treq.post, path, **kw)
def put(self, path, **kw):
return self._call(treq.put, path, **kw)
def delete(self, path, **kw):
return self._call(treq.delete, path, **kw)
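As a quick illustration of how the helper above might be driven from a trial test case - a minimal sketch, not part of the commit; the static Data resource and the example test class are assumptions:
import treq
from twisted.internet.defer import inlineCallbacks
from twisted.trial.unittest import TestCase
from twisted.web.server import Site
from twisted.web.static import Data

class SiteHelperUsageExample(TestCase):
    @inlineCallbacks
    def test_get_index(self):
        # Serve one static resource on an ephemeral port via the helper.
        helper = SiteHelper(Site(Data(b"hello", "text/plain")))
        yield helper.setup()
        self.addCleanup(helper.cleanup)
        response = yield helper.get("/")
        body = yield treq.content(response)
        self.assertEqual(body, b"hello")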
|
|
8216ba599b6c33207f413381b755d8db25c01440
|
spacy/tests/en/tokenizer/test_text.py
|
spacy/tests/en/tokenizer/test_text.py
|
# coding: utf-8
"""Test that longer and mixed texts are tokenized correctly."""
from __future__ import unicode_literals
import pytest
def test_tokenizer_handles_long_text(en_tokenizer):
text = """Tributes pour in for late British Labour Party leader
Tributes poured in from around the world Thursday
to the late Labour Party leader John Smith, who died earlier from a massive
heart attack aged 55.
In Washington, the US State Department issued a statement regretting "the
untimely death" of the rapier-tongued Scottish barrister and parliamentarian.
"Mr. Smith, throughout his distinguished"""
tokens = en_tokenizer(text)
assert len(tokens) == 76
@pytest.mark.parametrize('text,length', [
("The U.S. Army likes Shock and Awe.", 8),
("U.N. regulations are not a part of their concern.", 10),
("“Isn't it?”", 6),
("""Yes! "I'd rather have a walk", Ms. Comble sighed. """, 15),
("""'Me too!', Mr. P. Delaware cried. """, 11),
("They ran about 10km.", 6),
# ("But then the 6,000-year ice age came...", 10)
])
def test_tokenizer_handles_cnts(en_tokenizer, text, length):
tokens = en_tokenizer(text)
assert len(tokens) == length
|
Add tests for longer and mixed English texts
|
Add tests for longer and mixed English texts
|
Python
|
mit
|
raphael0202/spaCy,banglakit/spaCy,honnibal/spaCy,oroszgy/spaCy.hu,aikramer2/spaCy,aikramer2/spaCy,banglakit/spaCy,raphael0202/spaCy,aikramer2/spaCy,Gregory-Howard/spaCy,Gregory-Howard/spaCy,aikramer2/spaCy,spacy-io/spaCy,spacy-io/spaCy,oroszgy/spaCy.hu,Gregory-Howard/spaCy,recognai/spaCy,banglakit/spaCy,banglakit/spaCy,honnibal/spaCy,banglakit/spaCy,recognai/spaCy,oroszgy/spaCy.hu,explosion/spaCy,recognai/spaCy,oroszgy/spaCy.hu,explosion/spaCy,explosion/spaCy,honnibal/spaCy,explosion/spaCy,banglakit/spaCy,spacy-io/spaCy,spacy-io/spaCy,raphael0202/spaCy,recognai/spaCy,recognai/spaCy,explosion/spaCy,aikramer2/spaCy,raphael0202/spaCy,raphael0202/spaCy,raphael0202/spaCy,Gregory-Howard/spaCy,explosion/spaCy,oroszgy/spaCy.hu,spacy-io/spaCy,honnibal/spaCy,aikramer2/spaCy,Gregory-Howard/spaCy,recognai/spaCy,oroszgy/spaCy.hu,spacy-io/spaCy,Gregory-Howard/spaCy
|
Add tests for longer and mixed English texts
|
# coding: utf-8
"""Test that longer and mixed texts are tokenized correctly."""
from __future__ import unicode_literals
import pytest
def test_tokenizer_handles_long_text(en_tokenizer):
text = """Tributes pour in for late British Labour Party leader
Tributes poured in from around the world Thursday
to the late Labour Party leader John Smith, who died earlier from a massive
heart attack aged 55.
In Washington, the US State Department issued a statement regretting "the
untimely death" of the rapier-tongued Scottish barrister and parliamentarian.
"Mr. Smith, throughout his distinguished"""
tokens = en_tokenizer(text)
assert len(tokens) == 76
@pytest.mark.parametrize('text,length', [
("The U.S. Army likes Shock and Awe.", 8),
("U.N. regulations are not a part of their concern.", 10),
("“Isn't it?”", 6),
("""Yes! "I'd rather have a walk", Ms. Comble sighed. """, 15),
("""'Me too!', Mr. P. Delaware cried. """, 11),
("They ran about 10km.", 6),
# ("But then the 6,000-year ice age came...", 10)
])
def test_tokenizer_handles_cnts(en_tokenizer, text, length):
tokens = en_tokenizer(text)
assert len(tokens) == length
|
<commit_before><commit_msg>Add tests for longer and mixed English texts<commit_after>
|
# coding: utf-8
"""Test that longer and mixed texts are tokenized correctly."""
from __future__ import unicode_literals
import pytest
def test_tokenizer_handles_long_text(en_tokenizer):
text = """Tributes pour in for late British Labour Party leader
Tributes poured in from around the world Thursday
to the late Labour Party leader John Smith, who died earlier from a massive
heart attack aged 55.
In Washington, the US State Department issued a statement regretting "the
untimely death" of the rapier-tongued Scottish barrister and parliamentarian.
"Mr. Smith, throughout his distinguished"""
tokens = en_tokenizer(text)
assert len(tokens) == 76
@pytest.mark.parametrize('text,length', [
("The U.S. Army likes Shock and Awe.", 8),
("U.N. regulations are not a part of their concern.", 10),
("“Isn't it?”", 6),
("""Yes! "I'd rather have a walk", Ms. Comble sighed. """, 15),
("""'Me too!', Mr. P. Delaware cried. """, 11),
("They ran about 10km.", 6),
# ("But then the 6,000-year ice age came...", 10)
])
def test_tokenizer_handles_cnts(en_tokenizer, text, length):
tokens = en_tokenizer(text)
assert len(tokens) == length
|
Add tests for longer and mixed English texts# coding: utf-8
"""Test that longer and mixed texts are tokenized correctly."""
from __future__ import unicode_literals
import pytest
def test_tokenizer_handles_long_text(en_tokenizer):
text = """Tributes pour in for late British Labour Party leader
Tributes poured in from around the world Thursday
to the late Labour Party leader John Smith, who died earlier from a massive
heart attack aged 55.
In Washington, the US State Department issued a statement regretting "the
untimely death" of the rapier-tongued Scottish barrister and parliamentarian.
"Mr. Smith, throughout his distinguished"""
tokens = en_tokenizer(text)
assert len(tokens) == 76
@pytest.mark.parametrize('text,length', [
("The U.S. Army likes Shock and Awe.", 8),
("U.N. regulations are not a part of their concern.", 10),
("“Isn't it?”", 6),
("""Yes! "I'd rather have a walk", Ms. Comble sighed. """, 15),
("""'Me too!', Mr. P. Delaware cried. """, 11),
("They ran about 10km.", 6),
# ("But then the 6,000-year ice age came...", 10)
])
def test_tokenizer_handles_cnts(en_tokenizer, text, length):
tokens = en_tokenizer(text)
assert len(tokens) == length
|
<commit_before><commit_msg>Add tests for longer and mixed English texts<commit_after># coding: utf-8
"""Test that longer and mixed texts are tokenized correctly."""
from __future__ import unicode_literals
import pytest
def test_tokenizer_handles_long_text(en_tokenizer):
text = """Tributes pour in for late British Labour Party leader
Tributes poured in from around the world Thursday
to the late Labour Party leader John Smith, who died earlier from a massive
heart attack aged 55.
In Washington, the US State Department issued a statement regretting "the
untimely death" of the rapier-tongued Scottish barrister and parliamentarian.
"Mr. Smith, throughout his distinguished"""
tokens = en_tokenizer(text)
assert len(tokens) == 76
@pytest.mark.parametrize('text,length', [
("The U.S. Army likes Shock and Awe.", 8),
("U.N. regulations are not a part of their concern.", 10),
("“Isn't it?”", 6),
("""Yes! "I'd rather have a walk", Ms. Comble sighed. """, 15),
("""'Me too!', Mr. P. Delaware cried. """, 11),
("They ran about 10km.", 6),
# ("But then the 6,000-year ice age came...", 10)
])
def test_tokenizer_handles_cnts(en_tokenizer, text, length):
tokens = en_tokenizer(text)
assert len(tokens) == length
|
|
f9354e295ff88c8084021bc7474a18a2a78bd5f4
|
apps/schools/migrations/0002_auto_20150519_2205.py
|
apps/schools/migrations/0002_auto_20150519_2205.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('schools', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='academicyear',
name='end_year',
field=models.IntegerField(default=0, blank=True),
),
migrations.AddField(
model_name='academicyear',
name='start_year',
field=models.IntegerField(default=0, blank=True),
),
]
|
Add start and end year in the academic year table and model - migrations file
|
Add start and end year in the academic year table and model - migrations file
|
Python
|
mit
|
klpdotorg/tada,klpdotorg/tada
|
Add start and end year in the academic year table and model - migrations file
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('schools', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='academicyear',
name='end_year',
field=models.IntegerField(default=0, blank=True),
),
migrations.AddField(
model_name='academicyear',
name='start_year',
field=models.IntegerField(default=0, blank=True),
),
]
|
<commit_before><commit_msg>Add start and end year in the academic year table and model - migrations file<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('schools', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='academicyear',
name='end_year',
field=models.IntegerField(default=0, blank=True),
),
migrations.AddField(
model_name='academicyear',
name='start_year',
field=models.IntegerField(default=0, blank=True),
),
]
|
Add start and end year in the academic year table and model - migrations file# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('schools', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='academicyear',
name='end_year',
field=models.IntegerField(default=0, blank=True),
),
migrations.AddField(
model_name='academicyear',
name='start_year',
field=models.IntegerField(default=0, blank=True),
),
]
|
<commit_before><commit_msg>Add start and end year in the academic year table and model - migrations file<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('schools', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='academicyear',
name='end_year',
field=models.IntegerField(default=0, blank=True),
),
migrations.AddField(
model_name='academicyear',
name='start_year',
field=models.IntegerField(default=0, blank=True),
),
]
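For reference, the model-side change this migration mirrors would look roughly like the following - a sketch only, since the AcademicYear model itself is not part of this record:
from django.db import models

class AcademicYear(models.Model):
    # Fields added by migration 0002 (other existing fields omitted).
    start_year = models.IntegerField(default=0, blank=True)
    end_year = models.IntegerField(default=0, blank=True)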
|
|
6eeeb98e4fab1edac2bf85b7a89cc5cdedead808
|
cibopath/github_api.py
|
cibopath/github_api.py
|
# -*- coding: utf-8 -*-
import base64
import logging
import json
import aiohttp
logger = logging.getLogger('cibopath')
API_ROOT = 'https://api.github.com/'
API_README = API_ROOT + 'repos/{user}/{repo}/readme'
API_COOKIECUTTER_JSON = (
API_ROOT + 'repos/{user}/{repo}/contents/cookiecutter.json'
)
async def get(client, url, *, headers=None):
if headers is None:
headers = {}
async with client.get(url, headers=headers) as response:
assert response.status == 200, response.reason + url
content = await response.read()
return content.decode()
async def get_cookiecutter_json(semaphore, client, user, repo):
headers = {'Accept': 'application/json'}
url = API_COOKIECUTTER_JSON.format(user=user, repo=repo)
async with semaphore:
with aiohttp.Timeout(10):
content = await get(client, url, headers=headers)
json_content = json.loads(content)
decoded = base64.b64decode(json_content['content']).decode()
return json.loads(decoded)
async def get_readme(semaphore, client, user, repo):
headers = {'Accept': 'application/vnd.github.V3.html+json'}
url = API_README.format(user=user, repo=repo)
async with semaphore:
with aiohttp.Timeout(10):
html_content = await get(client, url, headers=headers)
return html_content
async def get_template(semaphore, client, link):
user = link['user']
name = link['name']
repo = link['repo']
logger.debug('{}/{} GET'.format(user, repo))
try:
logger.debug('{}/{} JSON'.format(user, repo))
cookiecutter_json = await get_cookiecutter_json(
semaphore, client, user, repo
)
logger.debug('{}/{} README'.format(user, repo))
readme = await get_readme(semaphore, client, user, repo)
except AssertionError:
logger.debug('{}/{} FAIL'.format(user, repo))
return False
else:
logger.debug('{}/{} SUCCESS'.format(user, repo))
return name, user, repo, cookiecutter_json, readme
|
Implement async github api client
|
Implement async github api client
|
Python
|
bsd-3-clause
|
hackebrot/cibopath
|
Implement async github api client
|
# -*- coding: utf-8 -*-
import base64
import logging
import json
import aiohttp
logger = logging.getLogger('cibopath')
API_ROOT = 'https://api.github.com/'
API_README = API_ROOT + 'repos/{user}/{repo}/readme'
API_COOKIECUTTER_JSON = (
API_ROOT + 'repos/{user}/{repo}/contents/cookiecutter.json'
)
async def get(client, url, *, headers=None):
if headers is None:
headers = {}
async with client.get(url, headers=headers) as response:
assert response.status == 200, response.reason + url
content = await response.read()
return content.decode()
async def get_cookiecutter_json(semaphore, client, user, repo):
headers = {'Accept': 'application/json'}
url = API_COOKIECUTTER_JSON.format(user=user, repo=repo)
async with semaphore:
with aiohttp.Timeout(10):
content = await get(client, url, headers=headers)
json_content = json.loads(content)
decoded = base64.b64decode(json_content['content']).decode()
return json.loads(decoded)
async def get_readme(semaphore, client, user, repo):
headers = {'Accept': 'application/vnd.github.V3.html+json'}
url = API_README.format(user=user, repo=repo)
async with semaphore:
with aiohttp.Timeout(10):
html_content = await get(client, url, headers=headers)
return html_content
async def get_template(semaphore, client, link):
user = link['user']
name = link['name']
repo = link['repo']
logger.debug('{}/{} GET'.format(user, repo))
try:
logger.debug('{}/{} JSON'.format(user, repo))
cookiecutter_json = await get_cookiecutter_json(
semaphore, client, user, repo
)
logger.debug('{}/{} README'.format(user, repo))
readme = await get_readme(semaphore, client, user, repo)
except AssertionError:
logger.debug('{}/{} FAIL'.format(user, repo))
return False
else:
logger.debug('{}/{} SUCCESS'.format(user, repo))
return name, user, repo, cookiecutter_json, readme
|
<commit_before><commit_msg>Implement async github api client<commit_after>
|
# -*- coding: utf-8 -*-
import base64
import logging
import json
import aiohttp
logger = logging.getLogger('cibopath')
API_ROOT = 'https://api.github.com/'
API_README = API_ROOT + 'repos/{user}/{repo}/readme'
API_COOKIECUTTER_JSON = (
API_ROOT + 'repos/{user}/{repo}/contents/cookiecutter.json'
)
async def get(client, url, *, headers=None):
if headers is None:
headers = {}
async with client.get(url, headers=headers) as response:
assert response.status == 200, response.reason + url
content = await response.read()
return content.decode()
async def get_cookiecutter_json(semaphore, client, user, repo):
headers = {'Accept': 'application/json'}
url = API_COOKIECUTTER_JSON.format(user=user, repo=repo)
async with semaphore:
with aiohttp.Timeout(10):
content = await get(client, url, headers=headers)
json_content = json.loads(content)
decoded = base64.b64decode(json_content['content']).decode()
return json.loads(decoded)
async def get_readme(semaphore, client, user, repo):
headers = {'Accept': 'application/vnd.github.V3.html+json'}
url = API_README.format(user=user, repo=repo)
async with semaphore:
with aiohttp.Timeout(10):
html_content = await get(client, url, headers=headers)
return html_content
async def get_template(semaphore, client, link):
user = link['user']
name = link['name']
repo = link['repo']
logger.debug('{}/{} GET'.format(user, repo))
try:
logger.debug('{}/{} JSON'.format(user, repo))
cookiecutter_json = await get_cookiecutter_json(
semaphore, client, user, repo
)
logger.debug('{}/{} README'.format(user, repo))
readme = await get_readme(semaphore, client, user, repo)
except AssertionError:
logger.debug('{}/{} FAIL'.format(user, repo))
return False
else:
logger.debug('{}/{} SUCCESS'.format(user, repo))
return name, user, repo, cookiecutter_json, readme
|
Implement async github api client# -*- coding: utf-8 -*-
import base64
import logging
import json
import aiohttp
logger = logging.getLogger('cibopath')
API_ROOT = 'https://api.github.com/'
API_README = API_ROOT + 'repos/{user}/{repo}/readme'
API_COOKIECUTTER_JSON = (
API_ROOT + 'repos/{user}/{repo}/contents/cookiecutter.json'
)
async def get(client, url, *, headers=None):
if headers is None:
headers = {}
async with client.get(url, headers=headers) as response:
assert response.status == 200, response.reason + url
content = await response.read()
return content.decode()
async def get_cookiecutter_json(semaphore, client, user, repo):
headers = {'Accept': 'application/json'}
url = API_COOKIECUTTER_JSON.format(user=user, repo=repo)
async with semaphore:
with aiohttp.Timeout(10):
content = await get(client, url, headers=headers)
json_content = json.loads(content)
decoded = base64.b64decode(json_content['content']).decode()
return json.loads(decoded)
async def get_readme(semaphore, client, user, repo):
headers = {'Accept': 'application/vnd.github.V3.html+json'}
url = API_README.format(user=user, repo=repo)
async with semaphore:
with aiohttp.Timeout(10):
html_content = await get(client, url, headers=headers)
return html_content
async def get_template(semaphore, client, link):
user = link['user']
name = link['name']
repo = link['repo']
logger.debug('{}/{} GET'.format(user, repo))
try:
logger.debug('{}/{} JSON'.format(user, repo))
cookiecutter_json = await get_cookiecutter_json(
semaphore, client, user, repo
)
logger.debug('{}/{} README'.format(user, repo))
readme = await get_readme(semaphore, client, user, repo)
except AssertionError:
logger.debug('{}/{} FAIL'.format(user, repo))
return False
else:
logger.debug('{}/{} SUCCESS'.format(user, repo))
return name, user, repo, cookiecutter_json, readme
|
<commit_before><commit_msg>Implement async github api client<commit_after># -*- coding: utf-8 -*-
import base64
import logging
import json
import aiohttp
logger = logging.getLogger('cibopath')
API_ROOT = 'https://api.github.com/'
API_README = API_ROOT + 'repos/{user}/{repo}/readme'
API_COOKIECUTTER_JSON = (
API_ROOT + 'repos/{user}/{repo}/contents/cookiecutter.json'
)
async def get(client, url, *, headers=None):
if headers is None:
headers = {}
async with client.get(url, headers=headers) as response:
assert response.status == 200, response.reason + url
content = await response.read()
return content.decode()
async def get_cookiecutter_json(semaphore, client, user, repo):
headers = {'Accept': 'application/json'}
url = API_COOKIECUTTER_JSON.format(user=user, repo=repo)
async with semaphore:
with aiohttp.Timeout(10):
content = await get(client, url, headers=headers)
json_content = json.loads(content)
decoded = base64.b64decode(json_content['content']).decode()
return json.loads(decoded)
async def get_readme(semaphore, client, user, repo):
headers = {'Accept': 'application/vnd.github.V3.html+json'}
url = API_README.format(user=user, repo=repo)
async with semaphore:
with aiohttp.Timeout(10):
html_content = await get(client, url, headers=headers)
return html_content
async def get_template(semaphore, client, link):
user = link['user']
name = link['name']
repo = link['repo']
logger.debug('{}/{} GET'.format(user, repo))
try:
logger.debug('{}/{} JSON'.format(user, repo))
cookiecutter_json = await get_cookiecutter_json(
semaphore, client, user, repo
)
logger.debug('{}/{} README'.format(user, repo))
readme = await get_readme(semaphore, client, user, repo)
except AssertionError:
logger.debug('{}/{} FAIL'.format(user, repo))
return False
else:
logger.debug('{}/{} SUCCESS'.format(user, repo))
return name, user, repo, cookiecutter_json, readme
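A minimal driver for the client above - a sketch with assumed values; the link dict keys follow get_template, but the concrete user/repo and the concurrency limit are illustrative:
import asyncio
import aiohttp

async def fetch_one(link):
    # Allow up to five concurrent GitHub requests via the shared semaphore.
    semaphore = asyncio.Semaphore(5)
    async with aiohttp.ClientSession() as client:
        return await get_template(semaphore, client, link)

link = {'name': 'pypackage', 'user': 'audreyr', 'repo': 'cookiecutter-pypackage'}
loop = asyncio.get_event_loop()
template = loop.run_until_complete(fetch_one(link))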
|
|
bb4e705d646c2c6d45b3fb146c1c318d1fd917ff
|
test/unittests/util/test_plugins.py
|
test/unittests/util/test_plugins.py
|
# Copyright 2020 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from unittest import TestCase, mock
import mycroft.util.plugins as mycroft_plugins
def get_plug_mock(name):
load_mock = mock.Mock(name=name)
load_mock.name = name
plug_mock = mock.Mock(name=name)
plug_mock.name = name
plug_mock.load.return_value = load_mock
return plug_mock
def mock_iter_entry_points(plug_type):
"""Function to return mocked plugins."""
plugs = {
'mycroft.plugins.tts': [get_plug_mock('dummy'),
get_plug_mock('remote')],
'mycroft.plugins.stt': [get_plug_mock('dummy'),
get_plug_mock('deepspeech')]
}
return plugs.get(plug_type, [])
@mock.patch('mycroft.util.plugins.pkg_resources')
class TestPlugins(TestCase):
def test_load_existing(self, mock_pkg_res):
"""Ensure that plugin objects are returned if found."""
mock_pkg_res.iter_entry_points.side_effect = mock_iter_entry_points
# Load a couple of existing modules and verify that they're Ok
plug = mycroft_plugins.load_plugin('mycroft.plugins.tts', 'dummy')
self.assertEqual(plug.name, 'dummy')
plug = mycroft_plugins.load_plugin('mycroft.plugins.stt', 'deepspeech')
self.assertEqual(plug.name, 'deepspeech')
def test_load_nonexisting(self, mock_pkg_res):
"""Ensure that the return value is None when no plugin is found."""
mock_pkg_res.iter_entry_points.side_effect = mock_iter_entry_points
plug = mycroft_plugins.load_plugin('mycroft.plugins.tts', 'blah')
self.assertEqual(plug, None)
|
Add unittests for the plugin util
|
Add unittests for the plugin util
|
Python
|
apache-2.0
|
MycroftAI/mycroft-core,forslund/mycroft-core,MycroftAI/mycroft-core,forslund/mycroft-core
|
Add unittests for the plugin util
|
# Copyright 2020 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from unittest import TestCase, mock
import mycroft.util.plugins as mycroft_plugins
def get_plug_mock(name):
load_mock = mock.Mock(name=name)
load_mock.name = name
plug_mock = mock.Mock(name=name)
plug_mock.name = name
plug_mock.load.return_value = load_mock
return plug_mock
def mock_iter_entry_points(plug_type):
"""Function to return mocked plugins."""
plugs = {
'mycroft.plugins.tts': [get_plug_mock('dummy'),
get_plug_mock('remote')],
'mycroft.plugins.stt': [get_plug_mock('dummy'),
get_plug_mock('deepspeech')]
}
return plugs.get(plug_type, [])
@mock.patch('mycroft.util.plugins.pkg_resources')
class TestPlugins(TestCase):
def test_load_existing(self, mock_pkg_res):
"""Ensure that plugin objects are returned if found."""
mock_pkg_res.iter_entry_points.side_effect = mock_iter_entry_points
# Load a couple of existing modules and verify that they're Ok
plug = mycroft_plugins.load_plugin('mycroft.plugins.tts', 'dummy')
self.assertEqual(plug.name, 'dummy')
plug = mycroft_plugins.load_plugin('mycroft.plugins.stt', 'deepspeech')
self.assertEqual(plug.name, 'deepspeech')
def test_load_nonexisting(self, mock_pkg_res):
"""Ensure that the return value is None when no plugin is found."""
mock_pkg_res.iter_entry_points.side_effect = mock_iter_entry_points
plug = mycroft_plugins.load_plugin('mycroft.plugins.tts', 'blah')
self.assertEqual(plug, None)
|
<commit_before><commit_msg>Add unittests for the plugin util<commit_after>
|
# Copyright 2020 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from unittest import TestCase, mock
import mycroft.util.plugins as mycroft_plugins
def get_plug_mock(name):
load_mock = mock.Mock(name=name)
load_mock.name = name
plug_mock = mock.Mock(name=name)
plug_mock.name = name
plug_mock.load.return_value = load_mock
return plug_mock
def mock_iter_entry_points(plug_type):
"""Function to return mocked plugins."""
plugs = {
'mycroft.plugins.tts': [get_plug_mock('dummy'),
get_plug_mock('remote')],
'mycroft.plugins.stt': [get_plug_mock('dummy'),
get_plug_mock('deepspeech')]
}
return plugs.get(plug_type, [])
@mock.patch('mycroft.util.plugins.pkg_resources')
class TestPlugins(TestCase):
def test_load_existing(self, mock_pkg_res):
"""Ensure that plugin objects are returned if found."""
mock_pkg_res.iter_entry_points.side_effect = mock_iter_entry_points
# Load a couple of existing modules and verify that they're Ok
plug = mycroft_plugins.load_plugin('mycroft.plugins.tts', 'dummy')
self.assertEqual(plug.name, 'dummy')
plug = mycroft_plugins.load_plugin('mycroft.plugins.stt', 'deepspeech')
self.assertEqual(plug.name, 'deepspeech')
def test_load_nonexisting(self, mock_pkg_res):
"""Ensure that the return value is None when no plugin is found."""
mock_pkg_res.iter_entry_points.side_effect = mock_iter_entry_points
plug = mycroft_plugins.load_plugin('mycroft.plugins.tts', 'blah')
self.assertEqual(plug, None)
|
Add unittests for the plugin util# Copyright 2020 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from unittest import TestCase, mock
import mycroft.util.plugins as mycroft_plugins
def get_plug_mock(name):
load_mock = mock.Mock(name=name)
load_mock.name = name
plug_mock = mock.Mock(name=name)
plug_mock.name = name
plug_mock.load.return_value = load_mock
return plug_mock
def mock_iter_entry_points(plug_type):
"""Function to return mocked plugins."""
plugs = {
'mycroft.plugins.tts': [get_plug_mock('dummy'),
get_plug_mock('remote')],
'mycroft.plugins.stt': [get_plug_mock('dummy'),
get_plug_mock('deepspeech')]
}
return plugs.get(plug_type, [])
@mock.patch('mycroft.util.plugins.pkg_resources')
class TestPlugins(TestCase):
def test_load_existing(self, mock_pkg_res):
"""Ensure that plugin objects are returned if found."""
mock_pkg_res.iter_entry_points.side_effect = mock_iter_entry_points
# Load a couple of existing modules and verify that they're Ok
plug = mycroft_plugins.load_plugin('mycroft.plugins.tts', 'dummy')
self.assertEqual(plug.name, 'dummy')
plug = mycroft_plugins.load_plugin('mycroft.plugins.stt', 'deepspeech')
self.assertEqual(plug.name, 'deepspeech')
def test_load_nonexisting(self, mock_pkg_res):
"""Ensure that the return value is None when no plugin is found."""
mock_pkg_res.iter_entry_points.side_effect = mock_iter_entry_points
plug = mycroft_plugins.load_plugin('mycroft.plugins.tts', 'blah')
self.assertEqual(plug, None)
|
<commit_before><commit_msg>Add unittests for the plugin util<commit_after># Copyright 2020 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from unittest import TestCase, mock
import mycroft.util.plugins as mycroft_plugins
def get_plug_mock(name):
load_mock = mock.Mock(name=name)
load_mock.name = name
plug_mock = mock.Mock(name=name)
plug_mock.name = name
plug_mock.load.return_value = load_mock
return plug_mock
def mock_iter_entry_points(plug_type):
"""Function to return mocked plugins."""
plugs = {
'mycroft.plugins.tts': [get_plug_mock('dummy'),
get_plug_mock('remote')],
'mycroft.plugins.stt': [get_plug_mock('dummy'),
get_plug_mock('deepspeech')]
}
return plugs.get(plug_type, [])
@mock.patch('mycroft.util.plugins.pkg_resources')
class TestPlugins(TestCase):
def test_load_existing(self, mock_pkg_res):
"""Ensure that plugin objects are returned if found."""
mock_pkg_res.iter_entry_points.side_effect = mock_iter_entry_points
# Load a couple of existing modules and verify that they're Ok
plug = mycroft_plugins.load_plugin('mycroft.plugins.tts', 'dummy')
self.assertEqual(plug.name, 'dummy')
plug = mycroft_plugins.load_plugin('mycroft.plugins.stt', 'deepspeech')
self.assertEqual(plug.name, 'deepspeech')
def test_load_nonexisting(self, mock_pkg_res):
"""Ensure that the return value is None when no plugin is found."""
mock_pkg_res.iter_entry_points.side_effect = mock_iter_entry_points
plug = mycroft_plugins.load_plugin('mycroft.plugins.tts', 'blah')
self.assertEqual(plug, None)
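The tests above pin down the behaviour they expect from load_plugin; inferred from the mocks, the utility would look roughly like this (a sketch, not the actual mycroft.util.plugins implementation):
import pkg_resources

def load_plugin(plug_type, plug_name):
    # Scan the entry points registered for this plugin type and load the match.
    for entry_point in pkg_resources.iter_entry_points(plug_type):
        if entry_point.name == plug_name:
            return entry_point.load()
    return None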
|
|
ac1c8d10ac6106decf59bb20622f42aef670c8a4
|
wafer/management/commands/wafer_add_default_groups.py
|
wafer/management/commands/wafer_add_default_groups.py
|
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand
from django.contrib.auth.models import Group, Permission
class Command(BaseCommand):
help = "Add some useful default groups"
option_list = BaseCommand.option_list + tuple([
])
GROUPS = {
# Permissions are specified as (app, code_name) pairs
'Page Editors': (('pages', 'add_page'), ('pages', 'delete_page'),
('pages', 'change_page'), ('pages', 'add_file'),
('pages', 'delete_file'), ('pages', 'change_file')),
'Talk Mentors': (('talks', 'change_talk'),),
}
def add_wafer_groups(self):
# This creates the groups we need for page editor and talk mentor
# roles.
for wafer_group, permission_list in self.GROUPS.items():
group, created = Group.objects.all().get_or_create(name=wafer_group)
if not created:
print 'Using existing %s group' % wafer_group
for app, perm_code in permission_list:
try:
perm = Permission.objects.filter(
codename=perm_code, content_type__app_label=app).get()
except Permission.DoesNotExist:
print 'Unable to find permission %s' % perm_code
continue
except Permission.MultipleObjectsReturned:
print 'Non-unique permission %s' % perm_code
if perm not in group.permissions.all():
print 'Adding %s to %s' % (perm_code, wafer_group)
group.permissions.add(perm)
group.save()
def handle(self, *args, **options):
self.add_wafer_groups()
|
Add a management command to add the default groups we want
|
Add a management command to add the default groups we want
|
Python
|
isc
|
CTPUG/wafer,CarlFK/wafer,CarlFK/wafer,CTPUG/wafer,CTPUG/wafer,CarlFK/wafer,CTPUG/wafer,CarlFK/wafer
|
Add a management command to add the default groups we want
|
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand
from django.contrib.auth.models import Group, Permission
class Command(BaseCommand):
help = "Add some useful default groups"
option_list = BaseCommand.option_list + tuple([
])
GROUPS = {
# Permissions are specified as (app, code_name) pairs
'Page Editors': (('pages', 'add_page'), ('pages', 'delete_page'),
('pages', 'change_page'), ('pages', 'add_file'),
('pages', 'delete_file'), ('pages', 'change_file')),
'Talk Mentors': (('talks', 'change_talk'),),
}
def add_wafer_groups(self):
# This creates the groups we need for page editor and talk mentor
# roles.
for wafer_group, permission_list in self.GROUPS.items():
group, created = Group.objects.all().get_or_create(name=wafer_group)
if not created:
print 'Using existing %s group' % wafer_group
for app, perm_code in permission_list:
try:
perm = Permission.objects.filter(
codename=perm_code, content_type__app_label=app).get()
except Permission.DoesNotExist:
print 'Unable to find permission %s' % perm_code
continue
except Permission.MultipleObjectsReturned:
print 'Non-unique permission %s' % perm_code
if perm not in group.permissions.all():
print 'Adding %s to %s' % (perm_code, wafer_group)
group.permissions.add(perm)
group.save()
def handle(self, *args, **options):
self.add_wafer_groups()
|
<commit_before><commit_msg>Add a management command to add the default groups we want<commit_after>
|
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand
from django.contrib.auth.models import Group, Permission
class Command(BaseCommand):
help = "Add some useful default groups"
option_list = BaseCommand.option_list + tuple([
])
GROUPS = {
# Permissions are specified as (app, code_name) pairs
'Page Editors': (('pages', 'add_page'), ('pages', 'delete_page'),
('pages', 'change_page'), ('pages', 'add_file'),
('pages', 'delete_file'), ('pages', 'change_file')),
'Talk Mentors': (('talks', 'change_talk'),),
}
def add_wafer_groups(self):
# This creates the groups we need for page editor and talk mentor
# roles.
for wafer_group, permission_list in self.GROUPS.items():
group, created = Group.objects.all().get_or_create(name=wafer_group)
if not created:
print 'Using existing %s group' % wafer_group
for app, perm_code in permission_list:
try:
perm = Permission.objects.filter(
codename=perm_code, content_type__app_label=app).get()
except Permission.DoesNotExist:
print 'Unable to find permission %s' % perm_code
continue
except Permission.MultipleObjectsReturned:
print 'Non-unique permission %s' % perm_code
if perm not in group.permissions.all():
print 'Adding %s to %s' % (perm_code, wafer_group)
group.permissions.add(perm)
group.save()
def handle(self, *args, **options):
self.add_wafer_groups()
|
Add a management command to add the default groups we want# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand
from django.contrib.auth.models import Group, Permission
class Command(BaseCommand):
help = "Add some useful default groups"
option_list = BaseCommand.option_list + tuple([
])
GROUPS = {
# Permissions are specified as (app, code_name) pairs
'Page Editors': (('pages', 'add_page'), ('pages', 'delete_page'),
('pages', 'change_page'), ('pages', 'add_file'),
('pages', 'delete_file'), ('pages', 'change_file')),
'Talk Mentors': (('talks', 'change_talk'),),
}
def add_wafer_groups(self):
# This creates the groups we need for page editor and talk mentor
# roles.
for wafer_group, permission_list in self.GROUPS.items():
group, created = Group.objects.all().get_or_create(name=wafer_group)
if not created:
print 'Using existing %s group' % wafer_group
for app, perm_code in permission_list:
try:
perm = Permission.objects.filter(
codename=perm_code, content_type__app_label=app).get()
except Permission.DoesNotExist:
print 'Unable to find permission %s' % perm_code
continue
except Permission.MultipleObjectsReturned:
print 'Non-unique permission %s' % perm_code
if perm not in group.permissions.all():
print 'Adding %s to %s' % (perm_code, wafer_group)
group.permissions.add(perm)
group.save()
def handle(self, *args, **options):
self.add_wafer_groups()
|
<commit_before><commit_msg>Add a management command to add the default groups we want<commit_after># -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand
from django.contrib.auth.models import Group, Permission
class Command(BaseCommand):
help = "Add some useful default groups"
option_list = BaseCommand.option_list + tuple([
])
GROUPS = {
# Permissions are specified as (app, code_name) pairs
'Page Editors': (('pages', 'add_page'), ('pages', 'delete_page'),
('pages', 'change_page'), ('pages', 'add_file'),
('pages', 'delete_file'), ('pages', 'change_file')),
'Talk Mentors': (('talks', 'change_talk'),),
}
def add_wafer_groups(self):
# This creates the groups we need for page editor and talk mentor
# roles.
for wafer_group, permission_list in self.GROUPS.items():
group, created = Group.objects.all().get_or_create(name=wafer_group)
if not created:
print 'Using existing %s group' % wafer_group
for app, perm_code in permission_list:
try:
perm = Permission.objects.filter(
codename=perm_code, content_type__app_label=app).get()
except Permission.DoesNotExist:
print 'Unable to find permission %s' % perm_code
continue
except Permission.MultipleObjectsReturned:
print 'Non-unique permission %s' % perm_code
if perm not in group.permissions.all():
print 'Adding %s to %s' % (perm_code, wafer_group)
group.permissions.add(perm)
group.save()
def handle(self, *args, **options):
self.add_wafer_groups()
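Once installed, the command runs like any other Django management command (manage.py wafer_add_default_groups) or can be invoked programmatically, for example from a deployment script or test - a sketch that assumes Django settings are already configured:
from django.core.management import call_command

call_command('wafer_add_default_groups')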
|
|
5b298e30fd5251bd9bf2c154c267bd86f1bc03cc
|
libexec/check_shinken_mem.py
|
libexec/check_shinken_mem.py
|
#!/usr/bin/env python
# Author : David Hannequin <david.hannequin@gmail.com>
# Date : 29 Nov 2011
#
# Script init
#
import sys
import os
import argparse
import getopt
#
# Usage
#
def usage():
print 'Usage :'
print sys.argv[0] + ' -w <80> -c <90>'
print '-p --port : snmp port by default 161'
print ' -c (--critical) Critical thresholds (defaults : 90%)\n';
print ' -w (--warning) Warning thresholds (defaults : 80%)\n';
print ' -h (--help) Usage help\n';
#
# Main
#
def readLines(filename):
f = open(filename, "r")
lines = f.readlines()
return lines
def MemValues():
global memTotal, memCached, memFree
for line in readLines('/proc/meminfo'):
if line.split()[0] == 'MemTotal:':
memTotal = line.split()[1]
if line.split()[0] == 'MemFree:':
memFree = line.split()[1]
if line.split()[0] == 'Cached:':
memCached = line.split()[1]
def percentMem():
MemValues()
return (((int(memFree) + int(memCached)) * 100) / int(memTotal))
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "hwc:v", ["help", "warning", "critical"])
except getopt.GetoptError, err:
# print help information and exit:
print str(err)
usage()
sys.exit(2)
output = None
verbose = False
for o, a in opts:
if o == "-v":
verbose = True
elif o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("-w", "--warning"):
notification = a
elif o in ("-c", "--critical"):
notification = a
else :
assert False , "unknown options"
parser = argparse.ArgumentParser()
parser.add_argument('-w', '--warning', default = '80')
parser.add_argument('-c', '--critical', default = '90' )
args = parser.parse_args()
critical = args.critical
warning = args.warning
cmem = str(critical)
wmem = str(warning)
pmemFree = percentMem()
pmemUsage = 100 - pmemFree
pmemUsage = str(pmemUsage)
if pmemUsage >= cmem :
print 'CRITICAL - Memory usage : '+pmemUsage+'% |mem='+pmemUsage
sys.exit(2)
elif pmemUsage >= wmem :
print 'WARNING - Memory usage : '+pmemUsage+'% |mem='+pmemUsage
sys.exit(1)
else :
print 'OK - Memory usage : '+pmemUsage+'% |mem='+pmemUsage
sys.exit(0)
if __name__ == "__main__":
main()
|
Add shinken plugin to check memory usage
|
Add shinken plugin to check memory usage
|
Python
|
agpl-3.0
|
rledisez/shinken,staute/shinken_deb,peeyush-tm/shinken,h4wkmoon/shinken,fpeyre/shinken,savoirfairelinux/shinken,peeyush-tm/shinken,rledisez/shinken,Simage/shinken,ddurieux/alignak,kaji-project/shinken,rednach/krill,dfranco/shinken,fpeyre/shinken,lets-software/shinken,fpeyre/shinken,tal-nino/shinken,baloo/shinken,geektophe/shinken,xorpaul/shinken,Aimage/shinken,Simage/shinken,claneys/shinken,naparuba/shinken,KerkhoffTechnologies/shinken,h4wkmoon/shinken,fpeyre/shinken,dfranco/shinken,Aimage/shinken,dfranco/shinken,naparuba/shinken,staute/shinken_deb,kaji-project/shinken,titilambert/alignak,kaji-project/shinken,geektophe/shinken,gst/alignak,ddurieux/alignak,Simage/shinken,rednach/krill,KerkhoffTechnologies/shinken,titilambert/alignak,rednach/krill,xorpaul/shinken,savoirfairelinux/shinken,kaji-project/shinken,Aimage/shinken,rednach/krill,rednach/krill,tal-nino/shinken,tal-nino/shinken,mohierf/shinken,lets-software/shinken,xorpaul/shinken,geektophe/shinken,Aimage/shinken,tal-nino/shinken,KerkhoffTechnologies/shinken,dfranco/shinken,KerkhoffTechnologies/shinken,claneys/shinken,lets-software/shinken,Alignak-monitoring/alignak,h4wkmoon/shinken,staute/shinken_package,rledisez/shinken,baloo/shinken,mohierf/shinken,kaji-project/shinken,staute/shinken_deb,xorpaul/shinken,staute/shinken_package,dfranco/shinken,lets-software/shinken,savoirfairelinux/shinken,titilambert/alignak,baloo/shinken,naparuba/shinken,ddurieux/alignak,rednach/krill,savoirfairelinux/shinken,rledisez/shinken,peeyush-tm/shinken,baloo/shinken,xorpaul/shinken,KerkhoffTechnologies/shinken,gst/alignak,mohierf/shinken,fpeyre/shinken,naparuba/shinken,peeyush-tm/shinken,naparuba/shinken,fpeyre/shinken,h4wkmoon/shinken,Aimage/shinken,lets-software/shinken,geektophe/shinken,staute/shinken_package,mohierf/shinken,tal-nino/shinken,claneys/shinken,staute/shinken_package,dfranco/shinken,kaji-project/shinken,h4wkmoon/shinken,h4wkmoon/shinken,xorpaul/shinken,claneys/shinken,baloo/shinken,Aimage/shinken,ddurieux/alignak,mohierf/shinken,Simage/shinken,gst/alignak,Simage/shinken,ddurieux/alignak,mohierf/shinken,xorpaul/shinken,staute/shinken_deb,Alignak-monitoring/alignak,geektophe/shinken,titilambert/alignak,staute/shinken_deb,savoirfairelinux/shinken,claneys/shinken,kaji-project/shinken,gst/alignak,h4wkmoon/shinken,ddurieux/alignak,rledisez/shinken,naparuba/shinken,lets-software/shinken,peeyush-tm/shinken,Simage/shinken,geektophe/shinken,xorpaul/shinken,savoirfairelinux/shinken,KerkhoffTechnologies/shinken,peeyush-tm/shinken,tal-nino/shinken,staute/shinken_deb,claneys/shinken,h4wkmoon/shinken,baloo/shinken,rledisez/shinken,staute/shinken_package,staute/shinken_package
|
Add shinken plugin to check memory usage
|
#!/usr/bin/env python
# Author : David Hannequin <david.hannequin@gmail.com>
# Date : 29 Nov 2011
#
# Script init
#
import sys
import os
import argparse
import getopt
#
# Usage
#
def usage():
print 'Usage :'
print sys.argv[0] + ' -w <80> -c <90>'
print '-p --port : snmp port by default 161'
print ' -c (--critical) Critical thresholds (defaults : 90%)\n';
print ' -w (--warning) Warning thresholds (defaults : 80%)\n';
print ' -h (--help) Usage help\n';
#
# Main
#
def readLines(filename):
f = open(filename, "r")
lines = f.readlines()
return lines
def MemValues():
global memTotal, memCached, memFree
for line in readLines('/proc/meminfo'):
if line.split()[0] == 'MemTotal:':
memTotal = line.split()[1]
if line.split()[0] == 'MemFree:':
memFree = line.split()[1]
if line.split()[0] == 'Cached:':
memCached = line.split()[1]
def percentMem():
MemValues()
return (((int(memFree) + int(memCached)) * 100) / int(memTotal))
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "hwc:v", ["help", "warning", "critical"])
except getopt.GetoptError, err:
# print help information and exit:
print str(err)
usage()
sys.exit(2)
output = None
verbose = False
for o, a in opts:
if o == "-v":
verbose = True
elif o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("-w", "--warning"):
notification = a
elif o in ("-c", "--critical"):
notification = a
else :
assert False , "unknown options"
parser = argparse.ArgumentParser()
parser.add_argument('-w', '--warning', default = '80')
parser.add_argument('-c', '--critical', default = '90' )
args = parser.parse_args()
critical = args.critical
warning = args.warning
cmem = str(critical)
wmem = str(warning)
pmemFree = percentMem()
pmemUsage = 100 - pmemFree
pmemUsage = str(pmemUsage)
if pmemUsage >= cmem :
print 'CRITICAL - Memory usage : '+pmemUsage+'% |mem='+pmemUsage
sys.exit(2)
elif pmemUsage >= wmem :
print 'WARNING - Memory usage : '+pmemUsage+'% |mem='+pmemUsage
sys.exit(1)
else :
print 'OK - Memory usage : '+pmemUsage+'% |mem='+pmemUsage
sys.exit(0)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add shinken plugin to check memory usage<commit_after>
|
#!/usr/bin/env python
# Author : David Hannequin <david.hannequin@gmail.com>
# Date : 29 Nov 2011
#
# Script init
#
import sys
import os
import argparse
import getopt
#
# Usage
#
def usage():
print 'Usage :'
print sys.argv[0] + ' -w <80> -c <90>'
print '-p --port : snmp port by default 161'
print ' -c (--critical) Critical thresholds (defaults : 90%)\n';
print ' -w (--warning) Warning thresholds (defaults : 80%)\n';
print ' -h (--help) Usage help\n';
#
# Main
#
def readLines(filename):
f = open(filename, "r")
lines = f.readlines()
return lines
def MemValues():
global memTotal, memCached, memFree
for line in readLines('/proc/meminfo'):
if line.split()[0] == 'MemTotal:':
memTotal = line.split()[1]
if line.split()[0] == 'MemFree:':
memFree = line.split()[1]
if line.split()[0] == 'Cached:':
memCached = line.split()[1]
def percentMem():
MemValues()
return (((int(memFree) + int(memCached)) * 100) / int(memTotal))
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "hwc:v", ["help", "warning", "critical"])
except getopt.GetoptError, err:
# print help information and exit:
print str(err)
usage()
sys.exit(2)
output = None
verbose = False
for o, a in opts:
if o == "-v":
verbose = True
elif o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("-w", "--warning"):
notification = a
elif o in ("-c", "--critical"):
notification = a
else :
assert False , "unknown options"
parser = argparse.ArgumentParser()
parser.add_argument('-w', '--warning', default = '80')
parser.add_argument('-c', '--critical', default = '90' )
args = parser.parse_args()
critical = args.critical
warning = args.warning
cmem = str(critical)
wmem = str(warning)
pmemFree = percentMem()
pmemUsage = 100 - pmemFree
pmemUsage = str(pmemUsage)
if pmemUsage >= cmem :
print 'CRITICAL - Memory usage : '+pmemUsage+'% |mem='+pmemUsage
sys.exit(2)
elif pmemUsage >= wmem :
print 'WARNING - Memory usage : '+pmemUsage+'% |mem='+pmemUsage
sys.exit(1)
else :
print 'OK - Memory usage : '+pmemUsage+'% |mem='+pmemUsage
sys.exit(0)
if __name__ == "__main__":
main()
|
Add shinken plugin to check memory usage#!/usr/bin/env python
# Author : David Hannequin <david.hannequin@gmail.com>
# Date : 29 Nov 2011
#
# Script init
#
import sys
import os
import argparse
import getopt
#
# Usage
#
def usage():
print 'Usage :'
print sys.argv[0] + ' -w <80> -c <90>'
print '-p --port : snmp port by default 161'
print ' -c (--critical) Critical thresholds (defaults : 90%)\n';
print ' -w (--warning) Warning thresholds (defaults : 80%)\n';
print ' -h (--help) Usage help\n';
#
# Main
#
def readLines(filename):
f = open(filename, "r")
lines = f.readlines()
return lines
def MemValues():
global memTotal, memCached, memFree
for line in readLines('/proc/meminfo'):
if line.split()[0] == 'MemTotal:':
memTotal = line.split()[1]
if line.split()[0] == 'MemFree:':
memFree = line.split()[1]
if line.split()[0] == 'Cached:':
memCached = line.split()[1]
def percentMem():
MemValues()
return (((int(memFree) + int(memCached)) * 100) / int(memTotal))
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "hwc:v", ["help", "warning", "critical"])
except getopt.GetoptError, err:
# print help information and exit:
print str(err)
usage()
sys.exit(2)
output = None
verbose = False
for o, a in opts:
if o == "-v":
verbose = True
elif o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("-w", "--warning"):
notification = a
elif o in ("-c", "--critical"):
notification = a
else :
assert False , "unknown options"
parser = argparse.ArgumentParser()
parser.add_argument('-w', '--warning', default = '80')
parser.add_argument('-c', '--critical', default = '90' )
args = parser.parse_args()
critical = args.critical
warning = args.warning
cmem = str(critical)
wmem = str(warning)
pmemFree = percentMem()
pmemUsage = 100 - pmemFree
pmemUsage = str(pmemUsage)
if pmemUsage >= cmem :
print 'CRITICAL - Memory usage : '+pmemUsage+'% |mem='+pmemUsage
sys.exit(2)
elif pmemUsage >= wmem :
print 'WARNING - Memory usage : '+pmemUsage+'% |mem='+pmemUsage
sys.exit(1)
else :
print 'OK - Memory usage : '+pmemUsage+'% |mem='+pmemUsage
sys.exit(0)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add shinken plugin to check memory usage<commit_after>#!/usr/bin/env python
# Author : David Hannequin <david.hannequin@gmail.com>
# Date : 29 Nov 2011
#
# Script init
#
import sys
import os
import argparse
import getopt
#
# Usage
#
def usage():
print 'Usage :'
print sys.argv[0] + ' -w <80> -c <90>'
print '-p --port : snmp port by default 161'
print ' -c (--critical) Critical thresholds (defaults : 90%)\n';
print ' -w (--warning) Warning thresholds (defaults : 80%)\n';
print ' -h (--help) Usage help\n';
#
# Main
#
def readLines(filename):
f = open(filename, "r")
lines = f.readlines()
return lines
def MemValues():
global memTotal, memCached, memFree
for line in readLines('/proc/meminfo'):
if line.split()[0] == 'MemTotal:':
memTotal = line.split()[1]
if line.split()[0] == 'MemFree:':
memFree = line.split()[1]
if line.split()[0] == 'Cached:':
memCached = line.split()[1]
def percentMem():
MemValues()
return (((int(memFree) + int(memCached)) * 100) / int(memTotal))
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "hwc:v", ["help", "warning", "critical"])
except getopt.GetoptError, err:
# print help information and exit:
print str(err)
usage()
sys.exit(2)
output = None
verbose = False
for o, a in opts:
if o == "-v":
verbose = True
elif o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("-w", "--warning"):
notification = a
elif o in ("-c", "--critical"):
notification = a
else :
assert False , "unknown options"
parser = argparse.ArgumentParser()
parser.add_argument('-w', '--warning', default = '80')
parser.add_argument('-c', '--critical', default = '90' )
args = parser.parse_args()
critical = args.critical
warning = args.warning
cmem = str(critical)
wmem = str(warning)
pmemFree = percentMem()
pmemUsage = 100 - pmemFree
pmemUsage = str(pmemUsage)
if pmemUsage >= cmem :
print 'CRITICAL - Memory usage : '+pmemUsage+'% |mem='+pmemUsage
sys.exit(2)
elif pmemUsage >= wmem :
print 'WARNING - Memory usage : '+pmemUsage+'% |mem='+pmemUsage
sys.exit(1)
else :
print 'OK - Memory usage : '+pmemUsage+'% |mem='+pmemUsage
sys.exit(0)
if __name__ == "__main__":
main()
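For a quick interactive sanity check of the /proc/meminfo helpers above (Linux only; illustrative, not part of the plugin itself):
MemValues()
print 'total=%s kB, free=%s kB, cached=%s kB' % (memTotal, memFree, memCached)
print 'free+cached = %d%%' % percentMem()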
|
|
08ae11ddea167e8f42f3567cb3655d8c0571459b
|
custom/enikshay/integrations/tests/test_case_properties_changed.py
|
custom/enikshay/integrations/tests/test_case_properties_changed.py
|
from __future__ import absolute_import
import uuid
from django.test import TestCase, override_settings
from casexml.apps.case.mock import CaseBlock
from corehq.apps.domain.shortcuts import create_domain
from corehq.apps.hqcase.utils import submit_case_blocks
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from custom.enikshay.integrations.utils import case_properties_changed
@override_settings(TESTS_SHOULD_USE_SQL_BACKEND=True)
class TestCasePropertiesChanged(TestCase):
def setUp(self):
self.domain = 'domain'
create_domain(self.domain)
case_type = "case"
self.case_id = uuid.uuid4().hex
submit_case_blocks(CaseBlock(
self.case_id, case_type=case_type, create=True).as_string(), self.domain
)
caseblock1 = CaseBlock(
self.case_id,
case_type=case_type,
update={'property_1': 'updated'},
)
caseblock2 = CaseBlock(
self.case_id,
case_type=case_type,
update={'property_2': 'updated'},
)
blocks = [caseblock1.as_string(), caseblock2.as_string()]
submit_case_blocks(blocks, self.domain)[1][0]
def test_case_properties_changed(self):
case = CaseAccessors(self.domain).get_case(self.case_id)
self.assertTrue(case_properties_changed(case, ['property_1', 'property_2']))
|
Add test for case properties changed function
|
Add test for case properties changed function
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
|
Add test for case properties changed function
|
from __future__ import absolute_import
import uuid
from django.test import TestCase, override_settings
from casexml.apps.case.mock import CaseBlock
from corehq.apps.domain.shortcuts import create_domain
from corehq.apps.hqcase.utils import submit_case_blocks
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from custom.enikshay.integrations.utils import case_properties_changed
@override_settings(TESTS_SHOULD_USE_SQL_BACKEND=True)
class TestCasePropertiesChanged(TestCase):
def setUp(self):
self.domain = 'domain'
create_domain(self.domain)
case_type = "case"
self.case_id = uuid.uuid4().hex
submit_case_blocks(CaseBlock(
self.case_id, case_type=case_type, create=True).as_string(), self.domain
)
caseblock1 = CaseBlock(
self.case_id,
case_type=case_type,
update={'property_1': 'updated'},
)
caseblock2 = CaseBlock(
self.case_id,
case_type=case_type,
update={'property_2': 'updated'},
)
blocks = [caseblock1.as_string(), caseblock2.as_string()]
submit_case_blocks(blocks, self.domain)[1][0]
def test_case_properties_changed(self):
case = CaseAccessors(self.domain).get_case(self.case_id)
self.assertTrue(case_properties_changed(case, ['property_1', 'property_2']))
|
<commit_before><commit_msg>Add test for case properties changed function<commit_after>
|
from __future__ import absolute_import
import uuid
from django.test import TestCase, override_settings
from casexml.apps.case.mock import CaseBlock
from corehq.apps.domain.shortcuts import create_domain
from corehq.apps.hqcase.utils import submit_case_blocks
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from custom.enikshay.integrations.utils import case_properties_changed
@override_settings(TESTS_SHOULD_USE_SQL_BACKEND=True)
class TestCasePropertiesChanged(TestCase):
def setUp(self):
self.domain = 'domain'
create_domain(self.domain)
case_type = "case"
self.case_id = uuid.uuid4().hex
submit_case_blocks(CaseBlock(
self.case_id, case_type=case_type, create=True).as_string(), self.domain
)
caseblock1 = CaseBlock(
self.case_id,
case_type=case_type,
update={'property_1': 'updated'},
)
caseblock2 = CaseBlock(
self.case_id,
case_type=case_type,
update={'property_2': 'updated'},
)
blocks = [caseblock1.as_string(), caseblock2.as_string()]
submit_case_blocks(blocks, self.domain)[1][0]
def test_case_properties_changed(self):
case = CaseAccessors(self.domain).get_case(self.case_id)
self.assertTrue(case_properties_changed(case, ['property_1', 'property_2']))
|
Add test for case properties changed functionfrom __future__ import absolute_import
import uuid
from django.test import TestCase, override_settings
from casexml.apps.case.mock import CaseBlock
from corehq.apps.domain.shortcuts import create_domain
from corehq.apps.hqcase.utils import submit_case_blocks
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from custom.enikshay.integrations.utils import case_properties_changed
@override_settings(TESTS_SHOULD_USE_SQL_BACKEND=True)
class TestCasePropertiesChanged(TestCase):
def setUp(self):
self.domain = 'domain'
create_domain(self.domain)
case_type = "case"
self.case_id = uuid.uuid4().hex
submit_case_blocks(CaseBlock(
self.case_id, case_type=case_type, create=True).as_string(), self.domain
)
caseblock1 = CaseBlock(
self.case_id,
case_type=case_type,
update={'property_1': 'updated'},
)
caseblock2 = CaseBlock(
self.case_id,
case_type=case_type,
update={'property_2': 'updated'},
)
blocks = [caseblock1.as_string(), caseblock2.as_string()]
submit_case_blocks(blocks, self.domain)[1][0]
def test_case_properties_changed(self):
case = CaseAccessors(self.domain).get_case(self.case_id)
self.assertTrue(case_properties_changed(case, ['property_1', 'property_2']))
|
<commit_before><commit_msg>Add test for case properties changed function<commit_after>from __future__ import absolute_import
import uuid
from django.test import TestCase, override_settings
from casexml.apps.case.mock import CaseBlock
from corehq.apps.domain.shortcuts import create_domain
from corehq.apps.hqcase.utils import submit_case_blocks
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from custom.enikshay.integrations.utils import case_properties_changed
@override_settings(TESTS_SHOULD_USE_SQL_BACKEND=True)
class TestCasePropertiesChanged(TestCase):
def setUp(self):
self.domain = 'domain'
create_domain(self.domain)
case_type = "case"
self.case_id = uuid.uuid4().hex
submit_case_blocks(CaseBlock(
self.case_id, case_type=case_type, create=True).as_string(), self.domain
)
caseblock1 = CaseBlock(
self.case_id,
case_type=case_type,
update={'property_1': 'updated'},
)
caseblock2 = CaseBlock(
self.case_id,
case_type=case_type,
update={'property_2': 'updated'},
)
blocks = [caseblock1.as_string(), caseblock2.as_string()]
submit_case_blocks(blocks, self.domain)[1][0]
def test_case_properties_changed(self):
case = CaseAccessors(self.domain).get_case(self.case_id)
self.assertTrue(case_properties_changed(case, ['property_1', 'property_2']))
|
|
176889933feca3c1231634062b0507afde6ddac3
|
skimage/io/tests/test_mpl_imshow.py
|
skimage/io/tests/test_mpl_imshow.py
|
from __future__ import division
import numpy as np
from skimage import io
io.use_plugin('matplotlib', 'imshow')
# test images. Note that they don't have their full range for their dtype,
# but we still expect the display range to equal the full dtype range.
im8 = np.array([[0, 64], [128, 240]], np.uint8)
im16 = im8.astype(np.uint16) * 256
imf = im8 / 255
im_lo = imf / 1000
im_hi = imf + 10
def n_subplots(ax_im):
"""Return the number of subplots in the figure containing an ``AxesImage``.
Parameters
----------
ax_im : matplotlib.pyplot.AxesImage object
The input ``AxesImage``.
Returns
-------
n : int
The number of subplots in the corresponding figure.
Notes
-----
This function is intended to check whether a colorbar was drawn, in
which case two subplots are expected. For standard imshows, one
subplot is expected.
"""
return len(ax_im.get_figure().get_axes())
def test_uint8():
ax_im = io.imshow(im8)
assert ax_im.cmap.name == 'gray'
assert ax_im.get_clim() == (0, 255)
assert n_subplots(ax_im) == 1
def test_uint16():
ax_im = io.imshow(im16)
assert ax_im.cmap.name == 'gray'
assert ax_im.get_clim() == (0, 65535)
assert n_subplots(ax_im) == 1
def test_float():
ax_im = io.imshow(imf)
assert ax_im.cmap.name == 'gray'
assert ax_im.get_clim() == (0, 1)
assert n_subplots(ax_im) == 1
if __name__ == '__main__':
np.testing.run_module_suite()
|
Add tests for io.imshow mpl plugin
|
Add tests for io.imshow mpl plugin
|
Python
|
bsd-3-clause
|
WarrenWeckesser/scikits-image,keflavich/scikit-image,bennlich/scikit-image,rjeli/scikit-image,robintw/scikit-image,rjeli/scikit-image,vighneshbirodkar/scikit-image,vighneshbirodkar/scikit-image,youprofit/scikit-image,michaelpacer/scikit-image,GaZ3ll3/scikit-image,pratapvardhan/scikit-image,Britefury/scikit-image,vighneshbirodkar/scikit-image,juliusbierk/scikit-image,newville/scikit-image,bennlich/scikit-image,rjeli/scikit-image,robintw/scikit-image,warmspringwinds/scikit-image,ClinicalGraphics/scikit-image,keflavich/scikit-image,paalge/scikit-image,paalge/scikit-image,ClinicalGraphics/scikit-image,ofgulban/scikit-image,GaZ3ll3/scikit-image,emon10005/scikit-image,michaelpacer/scikit-image,oew1v07/scikit-image,WarrenWeckesser/scikits-image,Hiyorimi/scikit-image,Midafi/scikit-image,bsipocz/scikit-image,dpshelio/scikit-image,michaelaye/scikit-image,chriscrosscutler/scikit-image,ajaybhat/scikit-image,Britefury/scikit-image,ofgulban/scikit-image,ajaybhat/scikit-image,ofgulban/scikit-image,dpshelio/scikit-image,michaelaye/scikit-image,newville/scikit-image,warmspringwinds/scikit-image,chriscrosscutler/scikit-image,oew1v07/scikit-image,youprofit/scikit-image,paalge/scikit-image,blink1073/scikit-image,pratapvardhan/scikit-image,jwiggins/scikit-image,emon10005/scikit-image,Hiyorimi/scikit-image,bsipocz/scikit-image,jwiggins/scikit-image,juliusbierk/scikit-image,Midafi/scikit-image,blink1073/scikit-image
|
Add tests for io.imshow mpl plugin
|
from __future__ import division
import numpy as np
from skimage import io
io.use_plugin('matplotlib', 'imshow')
# test images. Note that they don't have their full range for their dtype,
# but we still expect the display range to equal the full dtype range.
im8 = np.array([[0, 64], [128, 240]], np.uint8)
im16 = im8.astype(np.uint16) * 256
imf = im8 / 255
im_lo = imf / 1000
im_hi = imf + 10
def n_subplots(ax_im):
"""Return the number of subplots in the figure containing an ``AxesImage``.
Parameters
----------
ax_im : matplotlib.pyplot.AxesImage object
The input ``AxesImage``.
Returns
-------
n : int
The number of subplots in the corresponding figure.
Notes
-----
This function is intended to check whether a colorbar was drawn, in
which case two subplots are expected. For standard imshows, one
subplot is expected.
"""
return len(ax_im.get_figure().get_axes())
def test_uint8():
ax_im = io.imshow(im8)
assert ax_im.cmap.name == 'gray'
assert ax_im.get_clim() == (0, 255)
assert n_subplots(ax_im) == 1
def test_uint16():
ax_im = io.imshow(im16)
assert ax_im.cmap.name == 'gray'
assert ax_im.get_clim() == (0, 65535)
assert n_subplots(ax_im) == 1
def test_float():
ax_im = io.imshow(imf)
assert ax_im.cmap.name == 'gray'
assert ax_im.get_clim() == (0, 1)
assert n_subplots(ax_im) == 1
if __name__ == '__main__':
np.testing.run_module_suite()
|
<commit_before><commit_msg>Add tests for io.imshow mpl plugin<commit_after>
|
from __future__ import division
import numpy as np
from skimage import io
io.use_plugin('matplotlib', 'imshow')
# test images. Note that they don't have their full range for their dtype,
# but we still expect the display range to equal the full dtype range.
im8 = np.array([[0, 64], [128, 240]], np.uint8)
im16 = im8.astype(np.uint16) * 256
imf = im8 / 255
im_lo = imf / 1000
im_hi = imf + 10
def n_subplots(ax_im):
"""Return the number of subplots in the figure containing an ``AxesImage``.
Parameters
----------
ax_im : matplotlib.pyplot.AxesImage object
The input ``AxesImage``.
Returns
-------
n : int
The number of subplots in the corresponding figure.
Notes
-----
This function is intended to check whether a colorbar was drawn, in
which case two subplots are expected. For standard imshows, one
subplot is expected.
"""
return len(ax_im.get_figure().get_axes())
def test_uint8():
ax_im = io.imshow(im8)
assert ax_im.cmap.name == 'gray'
assert ax_im.get_clim() == (0, 255)
assert n_subplots(ax_im) == 1
def test_uint16():
ax_im = io.imshow(im16)
assert ax_im.cmap.name == 'gray'
assert ax_im.get_clim() == (0, 65535)
assert n_subplots(ax_im) == 1
def test_float():
ax_im = io.imshow(imf)
assert ax_im.cmap.name == 'gray'
assert ax_im.get_clim() == (0, 1)
assert n_subplots(ax_im) == 1
if __name__ == '__main__':
np.testing.run_module_suite()
|
Add tests for io.imshow mpl pluginfrom __future__ import division
import numpy as np
from skimage import io
io.use_plugin('matplotlib', 'imshow')
# test images. Note that they don't have their full range for their dtype,
# but we still expect the display range to equal the full dtype range.
im8 = np.array([[0, 64], [128, 240]], np.uint8)
im16 = im8.astype(np.uint16) * 256
imf = im8 / 255
im_lo = imf / 1000
im_hi = imf + 10
def n_subplots(ax_im):
"""Return the number of subplots in the figure containing an ``AxesImage``.
Parameters
----------
ax_im : matplotlib.pyplot.AxesImage object
The input ``AxesImage``.
Returns
-------
n : int
The number of subplots in the corresponding figure.
Notes
-----
This function is intended to check whether a colorbar was drawn, in
which case two subplots are expected. For standard imshows, one
subplot is expected.
"""
return len(ax_im.get_figure().get_axes())
def test_uint8():
ax_im = io.imshow(im8)
assert ax_im.cmap.name == 'gray'
assert ax_im.get_clim() == (0, 255)
assert n_subplots(ax_im) == 1
def test_uint16():
ax_im = io.imshow(im16)
assert ax_im.cmap.name == 'gray'
assert ax_im.get_clim() == (0, 65535)
assert n_subplots(ax_im) == 1
def test_float():
ax_im = io.imshow(imf)
assert ax_im.cmap.name == 'gray'
assert ax_im.get_clim() == (0, 1)
assert n_subplots(ax_im) == 1
if __name__ == '__main__':
np.testing.run_module_suite()
|
<commit_before><commit_msg>Add tests for io.imshow mpl plugin<commit_after>from __future__ import division
import numpy as np
from skimage import io
io.use_plugin('matplotlib', 'imshow')
# test images. Note that they don't have their full range for their dtype,
# but we still expect the display range to equal the full dtype range.
im8 = np.array([[0, 64], [128, 240]], np.uint8)
im16 = im8.astype(np.uint16) * 256
imf = im8 / 255
im_lo = imf / 1000
im_hi = imf + 10
def n_subplots(ax_im):
"""Return the number of subplots in the figure containing an ``AxesImage``.
Parameters
----------
ax_im : matplotlib.pyplot.AxesImage object
The input ``AxesImage``.
Returns
-------
n : int
The number of subplots in the corresponding figure.
Notes
-----
This function is intended to check whether a colorbar was drawn, in
which case two subplots are expected. For standard imshows, one
subplot is expected.
"""
return len(ax_im.get_figure().get_axes())
def test_uint8():
ax_im = io.imshow(im8)
assert ax_im.cmap.name == 'gray'
assert ax_im.get_clim() == (0, 255)
assert n_subplots(ax_im) == 1
def test_uint16():
ax_im = io.imshow(im16)
assert ax_im.cmap.name == 'gray'
assert ax_im.get_clim() == (0, 65535)
assert n_subplots(ax_im) == 1
def test_float():
ax_im = io.imshow(imf)
assert ax_im.cmap.name == 'gray'
assert ax_im.get_clim() == (0, 1)
assert n_subplots(ax_im) == 1
if __name__ == '__main__':
np.testing.run_module_suite()
|
|
bbbc5bf17ae50829bcee5d65156e0e03b7df3150
|
gradefiles-assemble.py
|
gradefiles-assemble.py
|
#####################################################################
##
## gradefiles-assemble.py
##
## Script template for assembling a collection of gradefiles from
## a collection of graded submissions (processed using the grading
## script).
##
##
import os # File/folder work (walk, path, system).
from shutil import rmtree # Deleting a folder.
#####################################################################
## Script to extract grade information from a completed grade sheet.
##
def summary(txt):
summary = []
summary += [txt.split('Total:')[1].strip().split('/')[0]]
if (txt.find('Extra credit:') != -1):
summary += [txt.split('Extra credit:')[1].split('\n')[0].replace('+','').strip()]
else:
summary += [' ']
if (txt.find('Late penalty:') != -1):
summary += ['-'+txt.split('Late penalty:')[1].split('\n')[0].replace('-','').strip()]
else:
summary += [' ']
return "\t".join(summary)
#####################################################################
## Convert every file into a gradefile ready for the gradefiles-push
## script; simultaneously display the columns for the grade sheet.
##
# Check if source directory exists.
if os.path.exists('./processed'):
# Create and clear destination folder.
if os.path.exists('./grades'):
rmtree('grades')
os.makedirs('grades')
count = 0
for curdir, dirs, files in os.walk('./processed/'):
for file in files:
txt = open('./processed/'+file, 'r').read().replace('"""', "'''").split("'''")
if len(txt) >= 2:
txt = txt[1]
name = file.split('.')[0]
target = './grades/'+name+'.py'
open(target, 'w').write(txt[1:])
print('Wrote file: ' + target + '\t' + summary(txt))
count += 1
# Display count for double-checking purposes.
print('Wrote ' + str(count) + ' files.')
#eof
|
Add script for assembling grade files and spreadsheet summary from graded submissions.
|
Add script for assembling grade files and spreadsheet summary from graded submissions.
|
Python
|
mit
|
lapets/bu-gsubmit-grading
|
Add script for assembling grade files and spreadsheet summary from graded submissions.
|
#####################################################################
##
## gradefiles-assemble.py
##
## Script template for assembling a collection of gradefiles from
## a collection of graded submissions (processed using the grading
## script).
##
##
import os # File/folder work (walk, path, system).
from shutil import rmtree # Deleting a folder.
#####################################################################
## Script to extract grade information from a completed grade sheet.
##
def summary(txt):
summary = []
summary += [txt.split('Total:')[1].strip().split('/')[0]]
if (txt.find('Extra credit:') != -1):
summary += [txt.split('Extra credit:')[1].split('\n')[0].replace('+','').strip()]
else:
summary += [' ']
if (txt.find('Late penalty:') != -1):
summary += ['-'+txt.split('Late penalty:')[1].split('\n')[0].replace('-','').strip()]
else:
summary += [' ']
return "\t".join(summary)
#####################################################################
## Convert every file into a gradefile ready for the gradefiles-push
## script; simultaneously display the columns for the grade sheet.
##
# Check if source directory exists.
if os.path.exists('./processed'):
# Create and clear destination folder.
if os.path.exists('./grades'):
rmtree('grades')
os.makedirs('grades')
count = 0
for curdir, dirs, files in os.walk('./processed/'):
for file in files:
txt = open('./processed/'+file, 'r').read().replace('"""', "'''").split("'''")
if len(txt) >= 2:
txt = txt[1]
name = file.split('.')[0]
target = './grades/'+name+'.py'
open(target, 'w').write(txt[1:])
print('Wrote file: ' + target + '\t' + summary(txt))
count += 1
# Display count for double-checking purposes.
print('Wrote ' + str(count) + ' files.')
#eof
|
<commit_before><commit_msg>Add script for assembling grade files and spreadsheet summary from graded submissions.<commit_after>
|
#####################################################################
##
## gradefiles-assemble.py
##
## Script template for assembling a collection of gradefiles from
## a collection of graded submissions (processed using the grading
## script).
##
##
import os # File/folder work (walk, path, system).
from shutil import rmtree # Deleting a folder.
#####################################################################
## Script to extract grade information from a completed grade sheet.
##
def summary(txt):
summary = []
summary += [txt.split('Total:')[1].strip().split('/')[0]]
if (txt.find('Extra credit:') != -1):
summary += [txt.split('Extra credit:')[1].split('\n')[0].replace('+','').strip()]
else:
summary += [' ']
if (txt.find('Late penalty:') != -1):
summary += ['-'+txt.split('Late penalty:')[1].split('\n')[0].replace('-','').strip()]
else:
summary += [' ']
return "\t".join(summary)
#####################################################################
## Convert every file into a gradefile ready for the gradefiles-push
## script; simultaneously display the columns for the grade sheet.
##
# Check if source directory exists.
if os.path.exists('./processed'):
# Create and clear destination folder.
if os.path.exists('./grades'):
rmtree('grades')
os.makedirs('grades')
count = 0
for curdir, dirs, files in os.walk('./processed/'):
for file in files:
txt = open('./processed/'+file, 'r').read().replace('"""', "'''").split("'''")
if len(txt) >= 2:
txt = txt[1]
name = file.split('.')[0]
target = './grades/'+name+'.py'
open(target, 'w').write(txt[1:])
print('Wrote file: ' + target + '\t' + summary(txt))
count += 1
# Display count for double-checking purposes.
print('Wrote ' + str(count) + ' files.')
#eof
|
Add script for assembling grade files and spreadsheet summary from graded submissions.#####################################################################
##
## gradefiles-assemble.py
##
## Script template for assembling a collection of gradefiles from
## a collection of graded submissions (processed using the grading
## script).
##
##
import os # File/folder work (walk, path, system).
from shutil import rmtree # Deleting a folder.
#####################################################################
## Script to extract grade information from a completed grade sheet.
##
def summary(txt):
summary = []
summary += [txt.split('Total:')[1].strip().split('/')[0]]
if (txt.find('Extra credit:') != -1):
summary += [txt.split('Extra credit:')[1].split('\n')[0].replace('+','').strip()]
else:
summary += [' ']
if (txt.find('Late penalty:') != -1):
summary += ['-'+txt.split('Late penalty:')[1].split('\n')[0].replace('-','').strip()]
else:
summary += [' ']
return "\t".join(summary)
#####################################################################
## Convert every file into a gradefile ready for the gradefiles-push
## script; simultaneously display the columns for the grade sheet.
##
# Check if source directory exists.
if os.path.exists('./processed'):
# Create and clear destination folder.
if os.path.exists('./grades'):
rmtree('grades')
os.makedirs('grades')
count = 0
for curdir, dirs, files in os.walk('./processed/'):
for file in files:
txt = open('./processed/'+file, 'r').read().replace('"""', "'''").split("'''")
if len(txt) >= 2:
txt = txt[1]
name = file.split('.')[0]
target = './grades/'+name+'.py'
open(target, 'w').write(txt[1:])
print('Wrote file: ' + target + '\t' + summary(txt))
count += 1
# Display count for double-checking purposes.
print('Wrote ' + str(count) + ' files.')
#eof
|
<commit_before><commit_msg>Add script for assembling grade files and spreadsheet summary from graded submissions.<commit_after>#####################################################################
##
## gradefiles-assemble.py
##
## Script template for assembling a collection of gradefiles from
## a collection of graded submissions (processed using the grading
## script).
##
##
import os # File/folder work (walk, path, system).
from shutil import rmtree # Deleting a folder.
#####################################################################
## Script to extract grade information from a completed grade sheet.
##
def summary(txt):
summary = []
summary += [txt.split('Total:')[1].strip().split('/')[0]]
if (txt.find('Extra credit:') != -1):
summary += [txt.split('Extra credit:')[1].split('\n')[0].replace('+','').strip()]
else:
summary += [' ']
if (txt.find('Late penalty:') != -1):
summary += ['-'+txt.split('Late penalty:')[1].split('\n')[0].replace('-','').strip()]
else:
summary += [' ']
return "\t".join(summary)
#####################################################################
## Convert every file into a gradefile ready for the gradefiles-push
## script; simultaneously display the columns for the grade sheet.
##
# Check if source directory exists.
if os.path.exists('./processed'):
# Create and clear destination folder.
if os.path.exists('./grades'):
rmtree('grades')
os.makedirs('grades')
count = 0
for curdir, dirs, files in os.walk('./processed/'):
for file in files:
txt = open('./processed/'+file, 'r').read().replace('"""', "'''").split("'''")
if len(txt) >= 2:
txt = txt[1]
name = file.split('.')[0]
target = './grades/'+name+'.py'
open(target, 'w').write(txt[1:])
print('Wrote file: ' + target + '\t' + summary(txt))
count += 1
# Display count for double-checking purposes.
print('Wrote ' + str(count) + ' files.')
#eof
|
|
a7af552f0959908e82c4bcd1736697337c35a6d3
|
test/selenium/src/tests/test_people_groups_page.py
|
test/selenium/src/tests/test_people_groups_page.py
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: jernej@reciprocitylabs.com
# Maintained By: jernej@reciprocitylabs.com
"""All smoke tests relevant to program page"""
import pytest # pylint: disable=import-error
from lib import base
from lib.constants import url
class TestOrgGroupPage(base.Test):
"""Tests the org group page a part of smoke tests, section 7."""
@pytest.mark.smoke_tests
def test_app_redirects_to_new_org_group_page(self, new_org_group):
"""Tests if after saving and closing the lhn_modal the app redirects to
the object page.
Generally we start at a random url. Here we verify that after saving
and closing the lhn_modal we're redirected to an url that contains an
object id.
"""
# pylint: disable=no-self-use
# pylint: disable=invalid-name
assert url.ORG_GROUPS + "/" + new_org_group.object_id in \
new_org_group.url
|
Add people groups page tests
|
Add people groups page tests
|
Python
|
apache-2.0
|
edofic/ggrc-core,plamut/ggrc-core,j0gurt/ggrc-core,prasannav7/ggrc-core,edofic/ggrc-core,selahssea/ggrc-core,plamut/ggrc-core,kr41/ggrc-core,andrei-karalionak/ggrc-core,VinnieJohns/ggrc-core,j0gurt/ggrc-core,prasannav7/ggrc-core,plamut/ggrc-core,VinnieJohns/ggrc-core,kr41/ggrc-core,plamut/ggrc-core,josthkko/ggrc-core,AleksNeStu/ggrc-core,VinnieJohns/ggrc-core,kr41/ggrc-core,andrei-karalionak/ggrc-core,NejcZupec/ggrc-core,kr41/ggrc-core,prasannav7/ggrc-core,NejcZupec/ggrc-core,AleksNeStu/ggrc-core,prasannav7/ggrc-core,selahssea/ggrc-core,selahssea/ggrc-core,selahssea/ggrc-core,edofic/ggrc-core,andrei-karalionak/ggrc-core,AleksNeStu/ggrc-core,edofic/ggrc-core,VinnieJohns/ggrc-core,j0gurt/ggrc-core,josthkko/ggrc-core,j0gurt/ggrc-core,andrei-karalionak/ggrc-core,NejcZupec/ggrc-core,AleksNeStu/ggrc-core,josthkko/ggrc-core,NejcZupec/ggrc-core,josthkko/ggrc-core
|
Add people groups page tests
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: jernej@reciprocitylabs.com
# Maintained By: jernej@reciprocitylabs.com
"""All smoke tests relevant to program page"""
import pytest # pylint: disable=import-error
from lib import base
from lib.constants import url
class TestOrgGroupPage(base.Test):
"""Tests the org group page a part of smoke tests, section 7."""
@pytest.mark.smoke_tests
def test_app_redirects_to_new_org_group_page(self, new_org_group):
"""Tests if after saving and closing the lhn_modal the app redirects to
the object page.
Generally we start at a random url. Here we verify that after saving
and closing the lhn_modal we're redirected to an url that contains an
object id.
"""
# pylint: disable=no-self-use
# pylint: disable=invalid-name
assert url.ORG_GROUPS + "/" + new_org_group.object_id in \
new_org_group.url
|
<commit_before><commit_msg>Add people groups page tests<commit_after>
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: jernej@reciprocitylabs.com
# Maintained By: jernej@reciprocitylabs.com
"""All smoke tests relevant to program page"""
import pytest # pylint: disable=import-error
from lib import base
from lib.constants import url
class TestOrgGroupPage(base.Test):
"""Tests the org group page a part of smoke tests, section 7."""
@pytest.mark.smoke_tests
def test_app_redirects_to_new_org_group_page(self, new_org_group):
"""Tests if after saving and closing the lhn_modal the app redirects to
the object page.
Generally we start at a random url. Here we verify that after saving
and closing the lhn_modal we're redirected to an url that contains an
object id.
"""
# pylint: disable=no-self-use
# pylint: disable=invalid-name
assert url.ORG_GROUPS + "/" + new_org_group.object_id in \
new_org_group.url
|
Add people groups page tests# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: jernej@reciprocitylabs.com
# Maintained By: jernej@reciprocitylabs.com
"""All smoke tests relevant to program page"""
import pytest # pylint: disable=import-error
from lib import base
from lib.constants import url
class TestOrgGroupPage(base.Test):
"""Tests the org group page a part of smoke tests, section 7."""
@pytest.mark.smoke_tests
def test_app_redirects_to_new_org_group_page(self, new_org_group):
"""Tests if after saving and closing the lhn_modal the app redirects to
the object page.
Generally we start at a random url. Here we verify that after saving
and closing the lhn_modal we're redirected to an url that contains an
object id.
"""
# pylint: disable=no-self-use
# pylint: disable=invalid-name
assert url.ORG_GROUPS + "/" + new_org_group.object_id in \
new_org_group.url
|
<commit_before><commit_msg>Add people groups page tests<commit_after># Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: jernej@reciprocitylabs.com
# Maintained By: jernej@reciprocitylabs.com
"""All smoke tests relevant to program page"""
import pytest # pylint: disable=import-error
from lib import base
from lib.constants import url
class TestOrgGroupPage(base.Test):
"""Tests the org group page a part of smoke tests, section 7."""
@pytest.mark.smoke_tests
def test_app_redirects_to_new_org_group_page(self, new_org_group):
"""Tests if after saving and closing the lhn_modal the app redirects to
the object page.
Generally we start at a random url. Here we verify that after saving
and closing the lhn_modal we're redirected to an url that contains an
object id.
"""
# pylint: disable=no-self-use
# pylint: disable=invalid-name
assert url.ORG_GROUPS + "/" + new_org_group.object_id in \
new_org_group.url
|
|
04f3cdc3ca261c747aab72e2866fe0718eb410b1
|
tests/test_interoperables.py
|
tests/test_interoperables.py
|
import pytest
def test_default_app():
from repocribro.app import app
import flask
assert isinstance(app, flask.Flask)
def test_main_manager(capsys):
with pytest.raises(SystemExit):
import repocribro.__main__
out, err = capsys.readouterr()
assert 'usage' in out
assert 'arguments' in out
assert 'config ' in out
assert 'version ' in out
assert 'help ' in out
def test_manager(capsys):
with pytest.raises(SystemExit):
from repocribro.manage import run
run()
out, err = capsys.readouterr()
assert 'usage' in out
assert 'arguments' in out
assert 'config ' in out
assert 'version ' in out
assert 'help ' in out
|
Test interfaces for std manipulation with app
|
Test interfaces for std manipulation with app
|
Python
|
mit
|
MarekSuchanek/repocribro,MarekSuchanek/repocribro,MarekSuchanek/repocribro
|
Test interfaces for std manipulation with app
|
import pytest
def test_default_app():
from repocribro.app import app
import flask
assert isinstance(app, flask.Flask)
def test_main_manager(capsys):
with pytest.raises(SystemExit):
import repocribro.__main__
out, err = capsys.readouterr()
assert 'usage' in out
assert 'arguments' in out
assert 'config ' in out
assert 'version ' in out
assert 'help ' in out
def test_manager(capsys):
with pytest.raises(SystemExit):
from repocribro.manage import run
run()
out, err = capsys.readouterr()
assert 'usage' in out
assert 'arguments' in out
assert 'config ' in out
assert 'version ' in out
assert 'help ' in out
|
<commit_before><commit_msg>Test interfaces for std manipulation with app<commit_after>
|
import pytest
def test_default_app():
from repocribro.app import app
import flask
assert isinstance(app, flask.Flask)
def test_main_manager(capsys):
with pytest.raises(SystemExit):
import repocribro.__main__
out, err = capsys.readouterr()
assert 'usage' in out
assert 'arguments' in out
assert 'config ' in out
assert 'version ' in out
assert 'help ' in out
def test_manager(capsys):
with pytest.raises(SystemExit):
from repocribro.manage import run
run()
out, err = capsys.readouterr()
assert 'usage' in out
assert 'arguments' in out
assert 'config ' in out
assert 'version ' in out
assert 'help ' in out
|
Test interfaces for std manipulation with appimport pytest
def test_default_app():
from repocribro.app import app
import flask
assert isinstance(app, flask.Flask)
def test_main_manager(capsys):
with pytest.raises(SystemExit):
import repocribro.__main__
out, err = capsys.readouterr()
assert 'usage' in out
assert 'arguments' in out
assert 'config ' in out
assert 'version ' in out
assert 'help ' in out
def test_manager(capsys):
with pytest.raises(SystemExit):
from repocribro.manage import run
run()
out, err = capsys.readouterr()
assert 'usage' in out
assert 'arguments' in out
assert 'config ' in out
assert 'version ' in out
assert 'help ' in out
|
<commit_before><commit_msg>Test interfaces for std manipulation with app<commit_after>import pytest
def test_default_app():
from repocribro.app import app
import flask
assert isinstance(app, flask.Flask)
def test_main_manager(capsys):
with pytest.raises(SystemExit):
import repocribro.__main__
out, err = capsys.readouterr()
assert 'usage' in out
assert 'arguments' in out
assert 'config ' in out
assert 'version ' in out
assert 'help ' in out
def test_manager(capsys):
with pytest.raises(SystemExit):
from repocribro.manage import run
run()
out, err = capsys.readouterr()
assert 'usage' in out
assert 'arguments' in out
assert 'config ' in out
assert 'version ' in out
assert 'help ' in out
|
|
b3aeb1b1270e86d8c85a286de3a5f8443cfee2e5
|
planetstack/model_policies/model_policy_Slice.py
|
planetstack/model_policies/model_policy_Slice.py
|
from core.models import *
def handle(slice):
site_deployments = SiteDeployments.objects.all()
site_deploy_lookup = defaultdict(list)
for site_deployment in site_deployments:
site_deploy_lookup[site_deployment.site].append(site_deployment.deployment)
slice_deployments = SliceDeployments.objects.all()
slice_deploy_lookup = defaultdict(list)
for slice_deployment in slice_deployments:
slice_deploy_lookup[slice_deployment.slice].append(slice_deployment.deployment)
all_deployments = Deployment.objects.all()
# slices are added to all deployments for now
expected_deployments = all_deployments
#expected_deployments = site_deploy_lookup[slice.site]
for expected_deployment in expected_deployments:
if slice not in slice_deploy_lookup or \
expected_deployment not in slice_deploy_lookup[slice]:
sd = SliceDeployments(slice=slice, deployment=expected_deployment)
sd.save()
|
Add new slices to all deployments
|
Policy: Add new slices to all deployments
|
Python
|
apache-2.0
|
wathsalav/xos,zdw/xos,xmaruto/mcord,cboling/xos,cboling/xos,wathsalav/xos,zdw/xos,xmaruto/mcord,zdw/xos,cboling/xos,opencord/xos,jermowery/xos,open-cloud/xos,xmaruto/mcord,opencord/xos,wathsalav/xos,opencord/xos,jermowery/xos,cboling/xos,cboling/xos,jermowery/xos,zdw/xos,open-cloud/xos,jermowery/xos,wathsalav/xos,xmaruto/mcord,open-cloud/xos
|
Policy: Add new slices to all deployments
|
from core.models import *
def handle(slice):
site_deployments = SiteDeployments.objects.all()
site_deploy_lookup = defaultdict(list)
for site_deployment in site_deployments:
site_deploy_lookup[site_deployment.site].append(site_deployment.deployment)
slice_deployments = SliceDeployments.objects.all()
slice_deploy_lookup = defaultdict(list)
for slice_deployment in slice_deployments:
slice_deploy_lookup[slice_deployment.slice].append(slice_deployment.deployment)
all_deployments = Deployment.objects.all()
# slices are added to all deployments for now
expected_deployments = all_deployments
#expected_deployments = site_deploy_lookup[slice.site]
for expected_deployment in expected_deployments:
if slice not in slice_deploy_lookup or \
expected_deployment not in slice_deploy_lookup[slice]:
sd = SliceDeployments(slice=slice, deployment=expected_deployment)
sd.save()
|
<commit_before><commit_msg>Policy: Add new slices to all deployments<commit_after>
|
from core.models import *
def handle(slice):
site_deployments = SiteDeployments.objects.all()
site_deploy_lookup = defaultdict(list)
for site_deployment in site_deployments:
site_deploy_lookup[site_deployment.site].append(site_deployment.deployment)
slice_deployments = SliceDeployments.objects.all()
slice_deploy_lookup = defaultdict(list)
for slice_deployment in slice_deployments:
slice_deploy_lookup[slice_deployment.slice].append(slice_deployment.deployment)
all_deployments = Deployment.objects.all()
# slices are added to all deployments for now
expected_deployments = all_deployments
#expected_deployments = site_deploy_lookup[slice.site]
for expected_deployment in expected_deployments:
if slice not in slice_deploy_lookup or \
expected_deployment not in slice_deploy_lookup[slice]:
sd = SliceDeployments(slice=slice, deployment=expected_deployment)
sd.save()
|
Policy: Add new slices to all deploymentsfrom core.models import *
def handle(slice):
site_deployments = SiteDeployments.objects.all()
site_deploy_lookup = defaultdict(list)
for site_deployment in site_deployments:
site_deploy_lookup[site_deployment.site].append(site_deployment.deployment)
slice_deployments = SliceDeployments.objects.all()
slice_deploy_lookup = defaultdict(list)
for slice_deployment in slice_deployments:
slice_deploy_lookup[slice_deployment.slice].append(slice_deployment.deployment)
all_deployments = Deployment.objects.all()
# slices are added to all deployments for now
expected_deployments = all_deployments
#expected_deployments = site_deploy_lookup[slice.site]
for expected_deployment in expected_deployments:
if slice not in slice_deploy_lookup or \
expected_deployment not in slice_deploy_lookup[slice]:
sd = SliceDeployments(slice=slice, deployment=expected_deployment)
sd.save()
|
<commit_before><commit_msg>Policy: Add new slices to all deployments<commit_after>from core.models import *
def handle(slice):
site_deployments = SiteDeployments.objects.all()
site_deploy_lookup = defaultdict(list)
for site_deployment in site_deployments:
site_deploy_lookup[site_deployment.site].append(site_deployment.deployment)
slice_deployments = SliceDeployments.objects.all()
slice_deploy_lookup = defaultdict(list)
for slice_deployment in slice_deployments:
slice_deploy_lookup[slice_deployment.slice].append(slice_deployment.deployment)
all_deployments = Deployment.objects.all()
# slices are added to all deployments for now
expected_deployments = all_deployments
#expected_deployments = site_deploy_lookup[slice.site]
for expected_deployment in expected_deployments:
if slice not in slice_deploy_lookup or \
expected_deployment not in slice_deploy_lookup[slice]:
sd = SliceDeployments(slice=slice, deployment=expected_deployment)
sd.save()
|
|
7b44e91065a5dd8428a06949f797cbfab81d3cd9
|
lily/users/migrations/0024_auto_20171221_1325.py
|
lily/users/migrations/0024_auto_20171221_1325.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-12-21 13:25
from __future__ import unicode_literals
import django.contrib.auth.models
from django.db import migrations
import lily.users.models
class Migration(migrations.Migration):
dependencies = [
('users', '0023_userinvite'),
]
operations = [
migrations.AlterModelManagers(
name='lilyuser',
managers=[
('objects', lily.users.models.LilyUserManager()),
('all_objects', django.contrib.auth.models.UserManager()),
],
),
]
|
Add migration for new manager on user
|
Add migration for new manager on user
|
Python
|
agpl-3.0
|
HelloLily/hellolily,HelloLily/hellolily,HelloLily/hellolily,HelloLily/hellolily
|
Add migration for new manager on user
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-12-21 13:25
from __future__ import unicode_literals
import django.contrib.auth.models
from django.db import migrations
import lily.users.models
class Migration(migrations.Migration):
dependencies = [
('users', '0023_userinvite'),
]
operations = [
migrations.AlterModelManagers(
name='lilyuser',
managers=[
('objects', lily.users.models.LilyUserManager()),
('all_objects', django.contrib.auth.models.UserManager()),
],
),
]
|
<commit_before><commit_msg>Add migration for new manager on user<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-12-21 13:25
from __future__ import unicode_literals
import django.contrib.auth.models
from django.db import migrations
import lily.users.models
class Migration(migrations.Migration):
dependencies = [
('users', '0023_userinvite'),
]
operations = [
migrations.AlterModelManagers(
name='lilyuser',
managers=[
('objects', lily.users.models.LilyUserManager()),
('all_objects', django.contrib.auth.models.UserManager()),
],
),
]
|
Add migration for new manager on user# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-12-21 13:25
from __future__ import unicode_literals
import django.contrib.auth.models
from django.db import migrations
import lily.users.models
class Migration(migrations.Migration):
dependencies = [
('users', '0023_userinvite'),
]
operations = [
migrations.AlterModelManagers(
name='lilyuser',
managers=[
('objects', lily.users.models.LilyUserManager()),
('all_objects', django.contrib.auth.models.UserManager()),
],
),
]
|
<commit_before><commit_msg>Add migration for new manager on user<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-12-21 13:25
from __future__ import unicode_literals
import django.contrib.auth.models
from django.db import migrations
import lily.users.models
class Migration(migrations.Migration):
dependencies = [
('users', '0023_userinvite'),
]
operations = [
migrations.AlterModelManagers(
name='lilyuser',
managers=[
('objects', lily.users.models.LilyUserManager()),
('all_objects', django.contrib.auth.models.UserManager()),
],
),
]
|
|
1851dff98bea8b48266a76688d5370ea8a4b30b1
|
tools/tools.py
|
tools/tools.py
|
import numpy as np
import cPickle as pickle
import os
def collage(data):
if type(data) is not list:
if data.shape[3] != 3:
data = data.transpose(0, 2, 3, 1)
images = [img for img in data]
else:
images = list(data)
side = int(np.ceil(len(images)**0.5))
for i in range(side**2 - len(images)):
images.append(images[-1])
collage = [np.concatenate(images[i::side], axis=0)
for i in range(side)]
collage = np.concatenate(collage, axis=1)
#collage -= collage.min()
#collage = collage / np.absolute(collage).max() * 256
return collage
def mapLabelsOneHot(data):
data = np.asarray(data)
class_no = int(data.max()+1)
out = np.zeros((data.shape[0], class_no)).astype(np.float32)
out[range(out.shape[0]), data.astype(int)] = 1
return out
def readCIFAR(path):
trnData = []
trnLabels = []
for i in range(1,6):
with open(os.path.join(path,'data_batch_{}'.format(i))) as f:
data = pickle.load(f)
trnData.append(data['data'])
trnLabels.append(data['labels'])
trnData = np.concatenate(trnData).reshape(-1, 3, 32, 32)
trnData = np.concatenate([trnData[:,:,:,::-1], trnData[:,:,:,:]])
trnLabels = np.concatenate(trnLabels)
trnLabels = np.concatenate([trnLabels, trnLabels])
with open(os.path.join(path,'test_batch'.format(i))) as f:
data = pickle.load(f)
tstData = data['data']
tstLabels = data['labels']
tstData = tstData.reshape(-1, 3, 32, 32)
tstData = np.concatenate([tstData[:,:,:,::-1], tstData[:,:,:,:]])
tstLabels = np.concatenate([tstLabels, tstLabels])
trnData = trnData.transpose(0, 2, 3, 1)
tstData = tstData.transpose(0, 2, 3, 1)
print('Trn data shape:', trnData.shape)
print('Tst data shape:', tstData.shape)
print('Trn labels shape: ', trnLabels.shape)
print('Tst labels shape: ', tstLabels.shape)
return trnData, tstData, trnLabels, tstLabels
|
Add CIFAR reading and image collage.
|
Add CIFAR reading and image collage.
|
Python
|
bsd-3-clause
|
michal-hradis/CNN_seminar
|
Add CIFAR reading and image collage.
|
import numpy as np
import cPickle as pickle
import os
def collage(data):
if type(data) is not list:
if data.shape[3] != 3:
data = data.transpose(0, 2, 3, 1)
images = [img for img in data]
else:
images = list(data)
side = int(np.ceil(len(images)**0.5))
for i in range(side**2 - len(images)):
images.append(images[-1])
collage = [np.concatenate(images[i::side], axis=0)
for i in range(side)]
collage = np.concatenate(collage, axis=1)
#collage -= collage.min()
#collage = collage / np.absolute(collage).max() * 256
return collage
def mapLabelsOneHot(data):
data = np.asarray(data)
class_no = int(data.max()+1)
out = np.zeros((data.shape[0], class_no)).astype(np.float32)
out[range(out.shape[0]), data.astype(int)] = 1
return out
def readCIFAR(path):
trnData = []
trnLabels = []
for i in range(1,6):
with open(os.path.join(path,'data_batch_{}'.format(i))) as f:
data = pickle.load(f)
trnData.append(data['data'])
trnLabels.append(data['labels'])
trnData = np.concatenate(trnData).reshape(-1, 3, 32, 32)
trnData = np.concatenate([trnData[:,:,:,::-1], trnData[:,:,:,:]])
trnLabels = np.concatenate(trnLabels)
trnLabels = np.concatenate([trnLabels, trnLabels])
with open(os.path.join(path,'test_batch'.format(i))) as f:
data = pickle.load(f)
tstData = data['data']
tstLabels = data['labels']
tstData = tstData.reshape(-1, 3, 32, 32)
tstData = np.concatenate([tstData[:,:,:,::-1], tstData[:,:,:,:]])
tstLabels = np.concatenate([tstLabels, tstLabels])
trnData = trnData.transpose(0, 2, 3, 1)
tstData = tstData.transpose(0, 2, 3, 1)
print('Trn data shape:', trnData.shape)
print('Tst data shape:', tstData.shape)
print('Trn labels shape: ', trnLabels.shape)
print('Tst labels shape: ', tstLabels.shape)
return trnData, tstData, trnLabels, tstLabels
|
<commit_before><commit_msg>Add CIFAR reading and image collage.<commit_after>
|
import numpy as np
import cPickle as pickle
import os
def collage(data):
if type(data) is not list:
if data.shape[3] != 3:
data = data.transpose(0, 2, 3, 1)
images = [img for img in data]
else:
images = list(data)
side = int(np.ceil(len(images)**0.5))
for i in range(side**2 - len(images)):
images.append(images[-1])
collage = [np.concatenate(images[i::side], axis=0)
for i in range(side)]
collage = np.concatenate(collage, axis=1)
#collage -= collage.min()
#collage = collage / np.absolute(collage).max() * 256
return collage
def mapLabelsOneHot(data):
data = np.asarray(data)
class_no = int(data.max()+1)
out = np.zeros((data.shape[0], class_no)).astype(np.float32)
out[range(out.shape[0]), data.astype(int)] = 1
return out
def readCIFAR(path):
trnData = []
trnLabels = []
for i in range(1,6):
with open(os.path.join(path,'data_batch_{}'.format(i))) as f:
data = pickle.load(f)
trnData.append(data['data'])
trnLabels.append(data['labels'])
trnData = np.concatenate(trnData).reshape(-1, 3, 32, 32)
trnData = np.concatenate([trnData[:,:,:,::-1], trnData[:,:,:,:]])
trnLabels = np.concatenate(trnLabels)
trnLabels = np.concatenate([trnLabels, trnLabels])
with open(os.path.join(path,'test_batch'.format(i))) as f:
data = pickle.load(f)
tstData = data['data']
tstLabels = data['labels']
tstData = tstData.reshape(-1, 3, 32, 32)
tstData = np.concatenate([tstData[:,:,:,::-1], tstData[:,:,:,:]])
tstLabels = np.concatenate([tstLabels, tstLabels])
trnData = trnData.transpose(0, 2, 3, 1)
tstData = tstData.transpose(0, 2, 3, 1)
print('Trn data shape:', trnData.shape)
print('Tst data shape:', tstData.shape)
print('Trn labels shape: ', trnLabels.shape)
print('Tst labels shape: ', tstLabels.shape)
return trnData, tstData, trnLabels, tstLabels
|
Add CIFAR reading and image collage.import numpy as np
import cPickle as pickle
import os
def collage(data):
if type(data) is not list:
if data.shape[3] != 3:
data = data.transpose(0, 2, 3, 1)
images = [img for img in data]
else:
images = list(data)
side = int(np.ceil(len(images)**0.5))
for i in range(side**2 - len(images)):
images.append(images[-1])
collage = [np.concatenate(images[i::side], axis=0)
for i in range(side)]
collage = np.concatenate(collage, axis=1)
#collage -= collage.min()
#collage = collage / np.absolute(collage).max() * 256
return collage
def mapLabelsOneHot(data):
data = np.asarray(data)
class_no = int(data.max()+1)
out = np.zeros((data.shape[0], class_no)).astype(np.float32)
out[range(out.shape[0]), data.astype(int)] = 1
return out
def readCIFAR(path):
trnData = []
trnLabels = []
for i in range(1,6):
with open(os.path.join(path,'data_batch_{}'.format(i))) as f:
data = pickle.load(f)
trnData.append(data['data'])
trnLabels.append(data['labels'])
trnData = np.concatenate(trnData).reshape(-1, 3, 32, 32)
trnData = np.concatenate([trnData[:,:,:,::-1], trnData[:,:,:,:]])
trnLabels = np.concatenate(trnLabels)
trnLabels = np.concatenate([trnLabels, trnLabels])
with open(os.path.join(path,'test_batch'.format(i))) as f:
data = pickle.load(f)
tstData = data['data']
tstLabels = data['labels']
tstData = tstData.reshape(-1, 3, 32, 32)
tstData = np.concatenate([tstData[:,:,:,::-1], tstData[:,:,:,:]])
tstLabels = np.concatenate([tstLabels, tstLabels])
trnData = trnData.transpose(0, 2, 3, 1)
tstData = tstData.transpose(0, 2, 3, 1)
print('Trn data shape:', trnData.shape)
print('Tst data shape:', tstData.shape)
print('Trn labels shape: ', trnLabels.shape)
print('Tst labels shape: ', tstLabels.shape)
return trnData, tstData, trnLabels, tstLabels
|
<commit_before><commit_msg>Add CIFAR reading and image collage.<commit_after>import numpy as np
import cPickle as pickle
import os
def collage(data):
if type(data) is not list:
if data.shape[3] != 3:
data = data.transpose(0, 2, 3, 1)
images = [img for img in data]
else:
images = list(data)
side = int(np.ceil(len(images)**0.5))
for i in range(side**2 - len(images)):
images.append(images[-1])
collage = [np.concatenate(images[i::side], axis=0)
for i in range(side)]
collage = np.concatenate(collage, axis=1)
#collage -= collage.min()
#collage = collage / np.absolute(collage).max() * 256
return collage
def mapLabelsOneHot(data):
data = np.asarray(data)
class_no = int(data.max()+1)
out = np.zeros((data.shape[0], class_no)).astype(np.float32)
out[range(out.shape[0]), data.astype(int)] = 1
return out
def readCIFAR(path):
trnData = []
trnLabels = []
for i in range(1,6):
with open(os.path.join(path,'data_batch_{}'.format(i))) as f:
data = pickle.load(f)
trnData.append(data['data'])
trnLabels.append(data['labels'])
trnData = np.concatenate(trnData).reshape(-1, 3, 32, 32)
trnData = np.concatenate([trnData[:,:,:,::-1], trnData[:,:,:,:]])
trnLabels = np.concatenate(trnLabels)
trnLabels = np.concatenate([trnLabels, trnLabels])
with open(os.path.join(path,'test_batch'.format(i))) as f:
data = pickle.load(f)
tstData = data['data']
tstLabels = data['labels']
tstData = tstData.reshape(-1, 3, 32, 32)
tstData = np.concatenate([tstData[:,:,:,::-1], tstData[:,:,:,:]])
tstLabels = np.concatenate([tstLabels, tstLabels])
trnData = trnData.transpose(0, 2, 3, 1)
tstData = tstData.transpose(0, 2, 3, 1)
print('Trn data shape:', trnData.shape)
print('Tst data shape:', tstData.shape)
print('Trn labels shape: ', trnLabels.shape)
print('Tst labels shape: ', tstLabels.shape)
return trnData, tstData, trnLabels, tstLabels
|
|
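A minimal usage sketch of the mapLabelsOneHot helper above. It is self-contained (NumPy only) and the label values are illustrative; readCIFAR itself additionally needs the pickled CIFAR batch files on disk.

import numpy as np

def mapLabelsOneHot(data):
    # Same logic as the helper in tools/tools.py above.
    data = np.asarray(data)
    class_no = int(data.max() + 1)
    out = np.zeros((data.shape[0], class_no)).astype(np.float32)
    out[range(out.shape[0]), data.astype(int)] = 1
    return out

labels = [0, 2, 1]                 # illustrative class labels
print(mapLabelsOneHot(labels))
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]]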
c4d05d03629a056ef79f4dcd2cd2f2da24c35500
|
third_party/tvcm/tvcm/generate_unittest.py
|
third_party/tvcm/tvcm/generate_unittest.py
|
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from tvcm import generate
from tvcm import fake_fs
from tvcm import project as project_module
from tvcm import resource_loader
from tvcm import parse_deps
class GenerateTests(unittest.TestCase):
def testHTMLGeneration(self):
fs = fake_fs.FakeFS()
fs.AddFile('/x/foo/my_module.js', """
'use strict';
tvcm.require('foo.other_module');
tvcm.exportTo('foo', function() {
});
""")
fs.AddFile('/x/foo/other_module.js', """
'use strict';
tvcm.exportTo('foo', function() {
HelloWorld();
});
""")
project = project_module.Project(['/x'],
include_tvcm_paths=True)
with fs:
load_sequence = parse_deps.CalcLoadSequence(['foo/my_module.js'], project)
res = generate.GenerateStandaloneHTMLFile(load_sequence, 'Title')
assert 'HelloWorld();' in res
|
Add smoke test for HTML generation
|
Add smoke test for HTML generation
git-svn-id: 3a56fcae908c7e16d23cb53443ea4795ac387cf2@1150 0e6d7f2b-9903-5b78-7403-59d27f066143
|
Python
|
bsd-3-clause
|
bpsinc-native/src_third_party_trace-viewer,bpsinc-native/src_third_party_trace-viewer,bpsinc-native/src_third_party_trace-viewer,bpsinc-native/src_third_party_trace-viewer
|
Add smoke test for HTML generation
git-svn-id: 3a56fcae908c7e16d23cb53443ea4795ac387cf2@1150 0e6d7f2b-9903-5b78-7403-59d27f066143
|
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from tvcm import generate
from tvcm import fake_fs
from tvcm import project as project_module
from tvcm import resource_loader
from tvcm import parse_deps
class GenerateTests(unittest.TestCase):
def testHTMLGeneration(self):
fs = fake_fs.FakeFS()
fs.AddFile('/x/foo/my_module.js', """
'use strict';
tvcm.require('foo.other_module');
tvcm.exportTo('foo', function() {
});
""")
fs.AddFile('/x/foo/other_module.js', """
'use strict';
tvcm.exportTo('foo', function() {
HelloWorld();
});
""")
project = project_module.Project(['/x'],
include_tvcm_paths=True)
with fs:
load_sequence = parse_deps.CalcLoadSequence(['foo/my_module.js'], project)
res = generate.GenerateStandaloneHTMLFile(load_sequence, 'Title')
assert 'HelloWorld();' in res
|
<commit_before><commit_msg>Add smoke test for HTML generation
git-svn-id: 3a56fcae908c7e16d23cb53443ea4795ac387cf2@1150 0e6d7f2b-9903-5b78-7403-59d27f066143<commit_after>
|
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from tvcm import generate
from tvcm import fake_fs
from tvcm import project as project_module
from tvcm import resource_loader
from tvcm import parse_deps
class GenerateTests(unittest.TestCase):
def testHTMLGeneration(self):
fs = fake_fs.FakeFS()
fs.AddFile('/x/foo/my_module.js', """
'use strict';
tvcm.require('foo.other_module');
tvcm.exportTo('foo', function() {
});
""")
fs.AddFile('/x/foo/other_module.js', """
'use strict';
tvcm.exportTo('foo', function() {
HelloWorld();
});
""")
project = project_module.Project(['/x'],
include_tvcm_paths=True)
with fs:
load_sequence = parse_deps.CalcLoadSequence(['foo/my_module.js'], project)
res = generate.GenerateStandaloneHTMLFile(load_sequence, 'Title')
assert 'HelloWorld();' in res
|
Add smoke test for HTML generation
git-svn-id: 3a56fcae908c7e16d23cb53443ea4795ac387cf2@1150 0e6d7f2b-9903-5b78-7403-59d27f066143# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from tvcm import generate
from tvcm import fake_fs
from tvcm import project as project_module
from tvcm import resource_loader
from tvcm import parse_deps
class GenerateTests(unittest.TestCase):
def testHTMLGeneration(self):
fs = fake_fs.FakeFS()
fs.AddFile('/x/foo/my_module.js', """
'use strict';
tvcm.require('foo.other_module');
tvcm.exportTo('foo', function() {
});
""")
fs.AddFile('/x/foo/other_module.js', """
'use strict';
tvcm.exportTo('foo', function() {
HelloWorld();
});
""")
project = project_module.Project(['/x'],
include_tvcm_paths=True)
with fs:
load_sequence = parse_deps.CalcLoadSequence(['foo/my_module.js'], project)
res = generate.GenerateStandaloneHTMLFile(load_sequence, 'Title')
assert 'HelloWorld();' in res
|
<commit_before><commit_msg>Add smoke test for HTML generation
git-svn-id: 3a56fcae908c7e16d23cb53443ea4795ac387cf2@1150 0e6d7f2b-9903-5b78-7403-59d27f066143<commit_after># Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from tvcm import generate
from tvcm import fake_fs
from tvcm import project as project_module
from tvcm import resource_loader
from tvcm import parse_deps
class GenerateTests(unittest.TestCase):
def testHTMLGeneration(self):
fs = fake_fs.FakeFS()
fs.AddFile('/x/foo/my_module.js', """
'use strict';
tvcm.require('foo.other_module');
tvcm.exportTo('foo', function() {
});
""")
fs.AddFile('/x/foo/other_module.js', """
'use strict';
tvcm.exportTo('foo', function() {
HelloWorld();
});
""")
project = project_module.Project(['/x'],
include_tvcm_paths=True)
with fs:
load_sequence = parse_deps.CalcLoadSequence(['foo/my_module.js'], project)
res = generate.GenerateStandaloneHTMLFile(load_sequence, 'Title')
assert 'HelloWorld();' in res
|
|
3671e814810aa2c36e2c377ca16fcd0eae5766c5
|
openid/test/test_codecutil.py
|
openid/test/test_codecutil.py
|
import unittest
from openid import codecutil # registers encoder
class EncoderTest(unittest.TestCase):
def test_handler_registered(self):
self.assertEqual(
"foo".encode('ascii', errors='oid_percent_escape'),
b"foo")
def test_encoding(self):
s = 'l\xa1m\U00101010n'
expected = b'l%C2%A1m%F4%81%80%90n'
self.assertEqual(
s.encode('ascii', errors='oid_percent_escape'),
expected)
if __name__ == '__main__':
unittest.main()
|
Add tests for new codecutil module -- very basic for now
|
Add tests for new codecutil module -- very basic for now
|
Python
|
apache-2.0
|
isagalaev/sm-openid,moreati/python3-openid,misli/python3-openid,misli/python3-openid,necaris/python3-openid,moreati/python3-openid,necaris/python3-openid,misli/python3-openid,moreati/python3-openid
|
Add tests for new codecutil module -- very basic for now
|
import unittest
from openid import codecutil # registers encoder
class EncoderTest(unittest.TestCase):
def test_handler_registered(self):
self.assertEqual(
"foo".encode('ascii', errors='oid_percent_escape'),
b"foo")
def test_encoding(self):
s = 'l\xa1m\U00101010n'
expected = b'l%C2%A1m%F4%81%80%90n'
self.assertEqual(
s.encode('ascii', errors='oid_percent_escape'),
expected)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add tests for new codecutil module -- very basic for now<commit_after>
|
import unittest
from openid import codecutil # registers encoder
class EncoderTest(unittest.TestCase):
def test_handler_registered(self):
self.assertEqual(
"foo".encode('ascii', errors='oid_percent_escape'),
b"foo")
def test_encoding(self):
s = 'l\xa1m\U00101010n'
expected = b'l%C2%A1m%F4%81%80%90n'
self.assertEqual(
s.encode('ascii', errors='oid_percent_escape'),
expected)
if __name__ == '__main__':
unittest.main()
|
Add tests for new codecutil module -- very basic for nowimport unittest
from openid import codecutil # registers encoder
class EncoderTest(unittest.TestCase):
def test_handler_registered(self):
self.assertEqual(
"foo".encode('ascii', errors='oid_percent_escape'),
b"foo")
def test_encoding(self):
s = 'l\xa1m\U00101010n'
expected = b'l%C2%A1m%F4%81%80%90n'
self.assertEqual(
s.encode('ascii', errors='oid_percent_escape'),
expected)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add tests for new codecutil module -- very basic for now<commit_after>import unittest
from openid import codecutil # registers encoder
class EncoderTest(unittest.TestCase):
def test_handler_registered(self):
self.assertEqual(
"foo".encode('ascii', errors='oid_percent_escape'),
b"foo")
def test_encoding(self):
s = 'l\xa1m\U00101010n'
expected = b'l%C2%A1m%F4%81%80%90n'
self.assertEqual(
s.encode('ascii', errors='oid_percent_escape'),
expected)
if __name__ == '__main__':
unittest.main()
|
|
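The tests above pin down the expected behaviour of the 'oid_percent_escape' error handler that openid.codecutil registers. As a rough, stdlib-only sketch (modelled on the expected outputs, not the library's actual implementation), such a handler could be registered like this:

import codecs
import urllib.parse

def _percent_escape(exc):
    # Percent-encode the UTF-8 bytes of the characters that failed to encode.
    raw = exc.object[exc.start:exc.end].encode('utf-8')
    return urllib.parse.quote(raw, safe=''), exc.end

codecs.register_error('oid_percent_escape', _percent_escape)

assert 'foo'.encode('ascii', errors='oid_percent_escape') == b'foo'
assert 'l\xa1m\U00101010n'.encode('ascii', errors='oid_percent_escape') == b'l%C2%A1m%F4%81%80%90n'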
65d5f4f3947b115421f273b7edb22420035c3ca3
|
obfsproxy/common/modexp.py
|
obfsproxy/common/modexp.py
|
import gmpy
def powMod( x, y, mod ):
"""
Efficiently calculate and return `x' to the power of `y' mod `mod'.
Before the modular exponentiation, the three numbers are converted to
GMPY's bignum representation which speeds up exponentiation.
"""
x = gmpy.mpz(x)
y = gmpy.mpz(y)
mod = gmpy.mpz(mod)
return pow(x, y, mod)
|
Add function for fast modular exponentiation.
|
Add function for fast modular exponentiation.
The function uses GMPY's bignum arithmetic which speeds up the calculation.
|
Python
|
bsd-3-clause
|
qdzheng/obfsproxy,infinity0/obfsproxy,catinred2/obfsproxy,NullHypothesis/obfsproxy,isislovecruft/obfsproxy,Yawning/obfsproxy,masterkorp/obfsproxy,Yawning/obfsproxy-wfpadtools,sunsong/obfsproxy,david415/obfsproxy
|
Add function for fast modular exponentiation.
The function uses GMPY's bignum arithmetic which speeds up the calculation.
|
import gmpy
def powMod( x, y, mod ):
"""
Efficiently calculate and return `x' to the power of `y' mod `mod'.
Before the modular exponentiation, the three numbers are converted to
GMPY's bignum representation which speeds up exponentiation.
"""
x = gmpy.mpz(x)
y = gmpy.mpz(y)
mod = gmpy.mpz(mod)
return pow(x, y, mod)
|
<commit_before><commit_msg>Add function for fast modular exponentiation.
The function uses GMPY's bignum arithmetic which speeds up the calculation.<commit_after>
|
import gmpy
def powMod( x, y, mod ):
"""
Efficiently calculate and return `x' to the power of `y' mod `mod'.
Before the modular exponentiation, the three numbers are converted to
GMPY's bignum representation which speeds up exponentiation.
"""
x = gmpy.mpz(x)
y = gmpy.mpz(y)
mod = gmpy.mpz(mod)
return pow(x, y, mod)
|
Add function for fast modular exponentiation.
The function uses GMPY's bignum arithmetic which speeds up the calculation.import gmpy
def powMod( x, y, mod ):
"""
Efficiently calculate and return `x' to the power of `y' mod `mod'.
Before the modular exponentiation, the three numbers are converted to
GMPY's bignum representation which speeds up exponentiation.
"""
x = gmpy.mpz(x)
y = gmpy.mpz(y)
mod = gmpy.mpz(mod)
return pow(x, y, mod)
|
<commit_before><commit_msg>Add function for fast modular exponentiation.
The function uses GMPY's bignum arithmetic which speeds up the calculation.<commit_after>import gmpy
def powMod( x, y, mod ):
"""
Efficiently calculate and return `x' to the power of `y' mod `mod'.
Before the modular exponentiation, the three numbers are converted to
GMPY's bignum representation which speeds up exponentiation.
"""
x = gmpy.mpz(x)
y = gmpy.mpz(y)
mod = gmpy.mpz(mod)
return pow(x, y, mod)
|
|
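A minimal usage sketch for the helper above, assuming the gmpy package is installed and the module is importable under the path shown in the record; the result should agree with Python's built-in three-argument pow:
from obfsproxy.common.modexp import powMod  # assumed importable; requires gmpy

# 2^1000 mod 2047: since 2^11 = 2048 is congruent to 1 mod 2047, this reduces to 2^10.
assert powMod(2, 1000, 2047) == pow(2, 1000, 2047) == 1024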
28ee0bca9ea36b61a48345271f0eef650896e17f
|
crawler/cocktails/spiders/kindredcocktails.py
|
crawler/cocktails/spiders/kindredcocktails.py
|
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import HtmlXPathSelector
from lxml.cssselect import css_to_xpath
from cocktails.items import CocktailItem
from cocktails.utils import html_to_text
xp_ingredients = css_to_xpath('.cocktail-ingredients tr')
class KindredCocktails(CrawlSpider):
name = 'kindredcocktails'
allowed_domains = ['www.kindredcocktails.com']
start_urls = ['http://www.kindredcocktails.com']
rules = (
Rule(SgmlLinkExtractor(allow=r'/cocktail/[^/?]+$'), callback='parse_recipe'),
Rule(SgmlLinkExtractor(allow=r'.*')),
)
def parse_recipe(self, response):
hxs = HtmlXPathSelector(response)
for title in hxs.select('//h1').extract():
break
else:
return []
ingredients = hxs.select(xp_ingredients).extract()
return [CocktailItem(
title=html_to_text(title),
picture=None,
url=response.url,
source='Kindred Cocktails',
ingredients=map(html_to_text, ingredients),
)]
|
Add spider for Kindred Cocktails
|
Add spider for Kindred Cocktails
|
Python
|
agpl-3.0
|
snoack/cocktail-search,snoack/cocktail-search,snoack/cocktail-search,snoack/cocktail-search
|
Add spider for Kindred Cocktails
|
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import HtmlXPathSelector
from lxml.cssselect import css_to_xpath
from cocktails.items import CocktailItem
from cocktails.utils import html_to_text
xp_ingredients = css_to_xpath('.cocktail-ingredients tr')
class KindredCocktails(CrawlSpider):
name = 'kindredcocktails'
allowed_domains = ['www.kindredcocktails.com']
start_urls = ['http://www.kindredcocktails.com']
rules = (
Rule(SgmlLinkExtractor(allow=r'/cocktail/[^/?]+$'), callback='parse_recipe'),
Rule(SgmlLinkExtractor(allow=r'.*')),
)
def parse_recipe(self, response):
hxs = HtmlXPathSelector(response)
for title in hxs.select('//h1').extract():
break
else:
return []
ingredients = hxs.select(xp_ingredients).extract()
return [CocktailItem(
title=html_to_text(title),
picture=None,
url=response.url,
source='Kindred Cocktails',
ingredients=map(html_to_text, ingredients),
)]
|
<commit_before><commit_msg>Add spider for Kindred Cocktails<commit_after>
|
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import HtmlXPathSelector
from lxml.cssselect import css_to_xpath
from cocktails.items import CocktailItem
from cocktails.utils import html_to_text
xp_ingredients = css_to_xpath('.cocktail-ingredients tr')
class KindredCocktails(CrawlSpider):
name = 'kindredcocktails'
allowed_domains = ['www.kindredcocktails.com']
start_urls = ['http://www.kindredcocktails.com']
rules = (
Rule(SgmlLinkExtractor(allow=r'/cocktail/[^/?]+$'), callback='parse_recipe'),
Rule(SgmlLinkExtractor(allow=r'.*')),
)
def parse_recipe(self, response):
hxs = HtmlXPathSelector(response)
for title in hxs.select('//h1').extract():
break
else:
return []
ingredients = hxs.select(xp_ingredients).extract()
return [CocktailItem(
title=html_to_text(title),
picture=None,
url=response.url,
source='Kindred Cocktails',
ingredients=map(html_to_text, ingredients),
)]
|
Add spider for Kindred Cocktailsfrom scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import HtmlXPathSelector
from lxml.cssselect import css_to_xpath
from cocktails.items import CocktailItem
from cocktails.utils import html_to_text
xp_ingredients = css_to_xpath('.cocktail-ingredients tr')
class KindredCocktails(CrawlSpider):
name = 'kindredcocktails'
allowed_domains = ['www.kindredcocktails.com']
start_urls = ['http://www.kindredcocktails.com']
rules = (
Rule(SgmlLinkExtractor(allow=r'/cocktail/[^/?]+$'), callback='parse_recipe'),
Rule(SgmlLinkExtractor(allow=r'.*')),
)
def parse_recipe(self, response):
hxs = HtmlXPathSelector(response)
for title in hxs.select('//h1').extract():
break
else:
return []
ingredients = hxs.select(xp_ingredients).extract()
return [CocktailItem(
title=html_to_text(title),
picture=None,
url=response.url,
source='Kindred Cocktails',
ingredients=map(html_to_text, ingredients),
)]
|
<commit_before><commit_msg>Add spider for Kindred Cocktails<commit_after>from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import HtmlXPathSelector
from lxml.cssselect import css_to_xpath
from cocktails.items import CocktailItem
from cocktails.utils import html_to_text
xp_ingredients = css_to_xpath('.cocktail-ingredients tr')
class KindredCocktails(CrawlSpider):
name = 'kindredcocktails'
allowed_domains = ['www.kindredcocktails.com']
start_urls = ['http://www.kindredcocktails.com']
rules = (
Rule(SgmlLinkExtractor(allow=r'/cocktail/[^/?]+$'), callback='parse_recipe'),
Rule(SgmlLinkExtractor(allow=r'.*')),
)
def parse_recipe(self, response):
hxs = HtmlXPathSelector(response)
for title in hxs.select('//h1').extract():
break
else:
return []
ingredients = hxs.select(xp_ingredients).extract()
return [CocktailItem(
title=html_to_text(title),
picture=None,
url=response.url,
source='Kindred Cocktails',
ingredients=map(html_to_text, ingredients),
)]
|
|
82bf5a22e132827b5466ce541be40876e766fb88
|
encoding/scrub_page.py
|
encoding/scrub_page.py
|
import re
import sys
def remove_lang_templates(string):
# Preserves text inside template, as it is displayed in the page.
re_str_lang = ur'{{lang\|.*?\|(.*?)}}'
return re.sub(re_str_lang, r'\1', string, re.U | re.S)
def remove_all_templates(string):
re_str_templates = ur'(\{\{.*?\}\})'
while re.search(re_str_templates, string, re.U | re.S):
string = re.sub(re_str_templates, ur'', string, flags=re.U | re.S)
return string
def remove_refs(string):
re_str_refs = ur'(<ref.*?>.*?</ref>)'
while re.search(re_str_refs, string, re.U | re.S | re.M):
string = re.sub(re_str_refs, '', string, flags=re.U | re.S | re.M)
return string
def remove_html_tags(string):
re_str_html_tags = ur'<.*?>'
return re.sub(re_str_html_tags, '', string, flags=re.U | re.S)
def remove_nonimage_wikilinks(string):
# Removes in a manner that preserves link as word in text.
re_str_nonimage_wikilinks = ur'\[\[(?![Ii]mage:).*?\|?([^|]*?)\]\]'
while re.search(re_str_nonimage_wikilinks, string, re.U | re.S):
string = re.sub(re_str_nonimage_wikilinks, ur'\1', string,
flags=re.U | re.S)
return string
def remove_image_wikilinks(string):
# Removes all content
re_str_image_wikilinks = ur'\[\[[Ii]mage:.*?\]\]'
while re.search(re_str_image_wikilinks, string, re.U | re.S):
string = re.sub(re_str_image_wikilinks, '', string,
flags=re.U | re.S)
return string
def compress_newlines(string):
re_str_dbl_newline = ur'\n\n'
while re.search(re_str_dbl_newline, string, re.U | re.S):
string = re.sub(re_str_dbl_newline, '\n', string, flags=re.U | re.S)
return string
def scrub_page(page):
page = remove_refs(page)
page = remove_html_tags(page)
page = remove_lang_templates(page)
page = remove_all_templates(page)
page = remove_nonimage_wikilinks(page)
page = remove_image_wikilinks(page)
page = compress_newlines(page)
return page
if __name__ == "__main__":
with open(sys.argv[1]) as f:
data = f.read().decode('utf8')
scrubbed = scrub_page(data)
print scrubbed.encode('utf-8').strip()
|
Add a script for cleaning a page
|
Add a script for cleaning a page
|
Python
|
mit
|
madprime/wikipedia_squish
|
Add a script for cleaning a page
|
import re
import sys
def remove_lang_templates(string):
# Preserves text inside template, as it is displayed in the page.
re_str_lang = ur'{{lang\|.*?\|(.*?)}}'
return re.sub(re_str_lang, r'\1', string, re.U | re.S)
def remove_all_templates(string):
re_str_templates = ur'(\{\{.*?\}\})'
while re.search(re_str_templates, string, re.U | re.S):
string = re.sub(re_str_templates, ur'', string, flags=re.U | re.S)
return string
def remove_refs(string):
re_str_refs = ur'(<ref.*?>.*?</ref>)'
while re.search(re_str_refs, string, re.U | re.S | re.M):
string = re.sub(re_str_refs, '', string, flags=re.U | re.S | re.M)
return string
def remove_html_tags(string):
re_str_html_tags = ur'<.*?>'
return re.sub(re_str_html_tags, '', string, flags=re.U | re.S)
def remove_nonimage_wikilinks(string):
# Removes in a manner that preserves link as word in text.
re_str_nonimage_wikilinks = ur'\[\[(?![Ii]mage:).*?\|?([^|]*?)\]\]'
while re.search(re_str_nonimage_wikilinks, string, re.U | re.S):
string = re.sub(re_str_nonimage_wikilinks, ur'\1', string,
flags=re.U | re.S)
return string
def remove_image_wikilinks(string):
# Removes all content
re_str_image_wikilinks = ur'\[\[[Ii]mage:.*?\]\]'
while re.search(re_str_image_wikilinks, string, re.U | re.S):
string = re.sub(re_str_image_wikilinks, '', string,
flags=re.U | re.S)
return string
def compress_newlines(string):
re_str_dbl_newline = ur'\n\n'
while re.search(re_str_dbl_newline, string, re.U | re.S):
string = re.sub(re_str_dbl_newline, '\n', string, flags=re.U | re.S)
return string
def scrub_page(page):
page = remove_refs(page)
page = remove_html_tags(page)
page = remove_lang_templates(page)
page = remove_all_templates(page)
page = remove_nonimage_wikilinks(page)
page = remove_image_wikilinks(page)
page = compress_newlines(page)
return page
if __name__ == "__main__":
with open(sys.argv[1]) as f:
data = f.read().decode('utf8')
scrubbed = scrub_page(data)
print scrubbed.encode('utf-8').strip()
|
<commit_before><commit_msg>Add a script for cleaning a page<commit_after>
|
import re
import sys
def remove_lang_templates(string):
# Preserves text inside template, as it is displayed in the page.
re_str_lang = ur'{{lang\|.*?\|(.*?)}}'
return re.sub(re_str_lang, r'\1', string, re.U | re.S)
def remove_all_templates(string):
re_str_templates = ur'(\{\{.*?\}\})'
while re.search(re_str_templates, string, re.U | re.S):
string = re.sub(re_str_templates, ur'', string, flags=re.U | re.S)
return string
def remove_refs(string):
re_str_refs = ur'(<ref.*?>.*?</ref>)'
while re.search(re_str_refs, string, re.U | re.S | re.M):
string = re.sub(re_str_refs, '', string, flags=re.U | re.S | re.M)
return string
def remove_html_tags(string):
re_str_html_tags = ur'<.*?>'
return re.sub(re_str_html_tags, '', string, flags=re.U | re.S)
def remove_nonimage_wikilinks(string):
# Removes in a manner that preserves link as word in text.
re_str_nonimage_wikilinks = ur'\[\[(?![Ii]mage:).*?\|?([^|]*?)\]\]'
while re.search(re_str_nonimage_wikilinks, string, re.U | re.S):
string = re.sub(re_str_nonimage_wikilinks, ur'\1', string,
flags=re.U | re.S)
return string
def remove_image_wikilinks(string):
# Removes all content
re_str_image_wikilinks = ur'\[\[[Ii]mage:.*?\]\]'
while re.search(re_str_image_wikilinks, string, re.U | re.S):
string = re.sub(re_str_image_wikilinks, '', string,
flags=re.U | re.S)
return string
def compress_newlines(string):
re_str_dbl_newline = ur'\n\n'
while re.search(re_str_dbl_newline, string, re.U | re.S):
string = re.sub(re_str_dbl_newline, '\n', string, flags=re.U | re.S)
return string
def scrub_page(page):
page = remove_refs(page)
page = remove_html_tags(page)
page = remove_lang_templates(page)
page = remove_all_templates(page)
page = remove_nonimage_wikilinks(page)
page = remove_image_wikilinks(page)
page = compress_newlines(page)
return page
if __name__ == "__main__":
with open(sys.argv[1]) as f:
data = f.read().decode('utf8')
scrubbed = scrub_page(data)
print scrubbed.encode('utf-8').strip()
|
Add a script for cleaning a pageimport re
import sys
def remove_lang_templates(string):
# Preserves text inside template, as it is displayed in the page.
re_str_lang = ur'{{lang\|.*?\|(.*?)}}'
return re.sub(re_str_lang, r'\1', string, re.U | re.S)
def remove_all_templates(string):
re_str_templates = ur'(\{\{.*?\}\})'
while re.search(re_str_templates, string, re.U | re.S):
string = re.sub(re_str_templates, ur'', string, flags=re.U | re.S)
return string
def remove_refs(string):
re_str_refs = ur'(<ref.*?>.*?</ref>)'
while re.search(re_str_refs, string, re.U | re.S | re.M):
string = re.sub(re_str_refs, '', string, flags=re.U | re.S | re.M)
return string
def remove_html_tags(string):
re_str_html_tags = ur'<.*?>'
return re.sub(re_str_html_tags, '', string, flags=re.U | re.S)
def remove_nonimage_wikilinks(string):
# Removes in a manner that preserves link as word in text.
re_str_nonimage_wikilinks = ur'\[\[(?![Ii]mage:).*?\|?([^|]*?)\]\]'
while re.search(re_str_nonimage_wikilinks, string, re.U | re.S):
string = re.sub(re_str_nonimage_wikilinks, ur'\1', string,
flags=re.U | re.S)
return string
def remove_image_wikilinks(string):
# Removes all content
re_str_image_wikilinks = ur'\[\[[Ii]mage:.*?\]\]'
while re.search(re_str_image_wikilinks, string, re.U | re.S):
string = re.sub(re_str_image_wikilinks, '', string,
flags=re.U | re.S)
return string
def compress_newlines(string):
re_str_dbl_newline = ur'\n\n'
while re.search(re_str_dbl_newline, string, re.U | re.S):
string = re.sub(re_str_dbl_newline, '\n', string, flags=re.U | re.S)
return string
def scrub_page(page):
page = remove_refs(page)
page = remove_html_tags(page)
page = remove_lang_templates(page)
page = remove_all_templates(page)
page = remove_nonimage_wikilinks(page)
page = remove_image_wikilinks(page)
page = compress_newlines(page)
return page
if __name__ == "__main__":
with open(sys.argv[1]) as f:
data = f.read().decode('utf8')
scrubbed = scrub_page(data)
print scrubbed.encode('utf-8').strip()
|
<commit_before><commit_msg>Add a script for cleaning a page<commit_after>import re
import sys
def remove_lang_templates(string):
# Preserves text inside template, as it is displayed in the page.
re_str_lang = ur'{{lang\|.*?\|(.*?)}}'
return re.sub(re_str_lang, r'\1', string, re.U | re.S)
def remove_all_templates(string):
re_str_templates = ur'(\{\{.*?\}\})'
while re.search(re_str_templates, string, re.U | re.S):
string = re.sub(re_str_templates, ur'', string, flags=re.U | re.S)
return string
def remove_refs(string):
re_str_refs = ur'(<ref.*?>.*?</ref>)'
while re.search(re_str_refs, string, re.U | re.S | re.M):
string = re.sub(re_str_refs, '', string, flags=re.U | re.S | re.M)
return string
def remove_html_tags(string):
re_str_html_tags = ur'<.*?>'
return re.sub(re_str_html_tags, '', string, flags=re.U | re.S)
def remove_nonimage_wikilinks(string):
# Removes in a manner that preserves link as word in text.
re_str_nonimage_wikilinks = ur'\[\[(?![Ii]mage:).*?\|?([^|]*?)\]\]'
while re.search(re_str_nonimage_wikilinks, string, re.U | re.S):
string = re.sub(re_str_nonimage_wikilinks, ur'\1', string,
flags=re.U | re.S)
return string
def remove_image_wikilinks(string):
# Removes all content
re_str_image_wikilinks = ur'\[\[[Ii]mage:.*?\]\]'
while re.search(re_str_image_wikilinks, string, re.U | re.S):
string = re.sub(re_str_image_wikilinks, '', string,
flags=re.U | re.S)
return string
def compress_newlines(string):
re_str_dbl_newline = ur'\n\n'
while re.search(re_str_dbl_newline, string, re.U | re.S):
string = re.sub(re_str_dbl_newline, '\n', string, flags=re.U | re.S)
return string
def scrub_page(page):
page = remove_refs(page)
page = remove_html_tags(page)
page = remove_lang_templates(page)
page = remove_all_templates(page)
page = remove_nonimage_wikilinks(page)
page = remove_image_wikilinks(page)
page = compress_newlines(page)
return page
if __name__ == "__main__":
with open(sys.argv[1]) as f:
data = f.read().decode('utf8')
scrubbed = scrub_page(data)
print scrubbed.encode('utf-8').strip()
|
|
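A minimal usage sketch for the scrubber above, written for Python 2 to match the ur'' literals and print statement in the script; it assumes scrub_page is importable or defined in the same session, and the sample markup is made up for illustration:
sample = u"He said {{lang|fr|bonjour}} to [[Paris|the city]].<ref>a citation</ref>"
# Refs and template/wikilink markup are stripped while the displayed text is kept.
print scrub_page(sample)  # -> He said bonjour to the city.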
e6303e7c43c0ac9727e7c110cac4d1b6b9b9784f
|
atlas/prodtask/management/commands/pthealthcheck.py
|
atlas/prodtask/management/commands/pthealthcheck.py
|
from django.core.management.base import BaseCommand, CommandError
import time
from django_celery_beat.models import PeriodicTask
from django.utils import timezone
from datetime import timedelta
from atlas.prodtask.views import send_alarm_message
class Command(BaseCommand):
args = 'None'
help = 'Check celery beat health'
def handle(self, *args, **options):
if not args:
try:
try:
last_executed_task = PeriodicTask.objects.all().order_by('-last_run_at')[0]
except Exception as e:
send_alarm_message('Alarm: the celery beat health check problem',
f'Celery beat health check problem {e}')
raise e
if (timezone.now() - last_executed_task.last_run_at) < timedelta(hours=2):
send_alarm_message('Alarm: the celery beat is stuck',
f'Celery beat last updated {last_executed_task.last_run_at}')
except Exception as e:
raise CommandError('Some problem during alarm mail sending check: %s'%e)
|
Add celery beat health check
|
Add celery beat health check
|
Python
|
apache-2.0
|
PanDAWMS/panda-bigmon-atlas,PanDAWMS/panda-bigmon-atlas,PanDAWMS/panda-bigmon-atlas,PanDAWMS/panda-bigmon-atlas
|
Add celery beat health check
|
from django.core.management.base import BaseCommand, CommandError
import time
from django_celery_beat.models import PeriodicTask
from django.utils import timezone
from datetime import timedelta
from atlas.prodtask.views import send_alarm_message
class Command(BaseCommand):
args = 'None'
help = 'Check celery beat health'
def handle(self, *args, **options):
if not args:
try:
try:
last_executed_task = PeriodicTask.objects.all().order_by('-last_run_at')[0]
except Exception as e:
send_alarm_message('Alarm: the celery beat health check problem',
f'Celery beat health check problem {e}')
raise e
if (timezone.now() - last_executed_task.last_run_at) < timedelta(hours=2):
send_alarm_message('Alarm: the celery beat is stuck',
f'Celery beat last updated {last_executed_task.last_run_at}')
except Exception as e:
raise CommandError('Some problem during alarm mail sending check: %s'%e)
|
<commit_before><commit_msg>Add celery beat health check<commit_after>
|
from django.core.management.base import BaseCommand, CommandError
import time
from django_celery_beat.models import PeriodicTask
from django.utils import timezone
from datetime import timedelta
from atlas.prodtask.views import send_alarm_message
class Command(BaseCommand):
args = 'None'
help = 'Check celery beat health'
def handle(self, *args, **options):
if not args:
try:
try:
last_executed_task = PeriodicTask.objects.all().order_by('-last_run_at')[0]
except Exception as e:
send_alarm_message('Alarm: the celery beat health check problem',
f'Celery beat health check problem {e}')
raise e
if (timezone.now() - last_executed_task.last_run_at) < timedelta(hours=2):
send_alarm_message('Alarm: the celery beat is stuck',
f'Celery beat last updated {last_executed_task.last_run_at}')
except Exception as e:
raise CommandError('Some problem during alarm mail sending check: %s'%e)
|
Add celery beat health checkfrom django.core.management.base import BaseCommand, CommandError
import time
from django_celery_beat.models import PeriodicTask
from django.utils import timezone
from datetime import timedelta
from atlas.prodtask.views import send_alarm_message
class Command(BaseCommand):
args = 'None'
help = 'Check celery beat health'
def handle(self, *args, **options):
if not args:
try:
try:
last_executed_task = PeriodicTask.objects.all().order_by('-last_run_at')[0]
except Exception as e:
send_alarm_message('Alarm: the celery beat health check problem',
f'Celery beat health check problem {e}')
raise e
if (timezone.now() - last_executed_task.last_run_at) < timedelta(hours=2):
send_alarm_message('Alarm: the celery beat is stuck',
f'Celery beat last updated {last_executed_task.last_run_at}')
except Exception as e:
raise CommandError('Some problem during alarm mail sending check: %s'%e)
|
<commit_before><commit_msg>Add celery beat health check<commit_after>from django.core.management.base import BaseCommand, CommandError
import time
from django_celery_beat.models import PeriodicTask
from django.utils import timezone
from datetime import timedelta
from atlas.prodtask.views import send_alarm_message
class Command(BaseCommand):
args = 'None'
help = 'Check celery beat health'
def handle(self, *args, **options):
if not args:
try:
try:
last_executed_task = PeriodicTask.objects.all().order_by('-last_run_at')[0]
except Exception as e:
send_alarm_message('Alarm: the celery beat health check problem',
f'Celery beat health check problem {e}')
raise e
if (timezone.now() - last_executed_task.last_run_at) < timedelta(hours=2):
send_alarm_message('Alarm: the celery beat is stuck',
f'Celery beat last updated {last_executed_task.last_run_at}')
except Exception as e:
raise CommandError('Some problem during alarm mail sending check: %s'%e)
|
|
c08df80a02791978b6677d2e2fbd8ebb885f024d
|
cfp/admin.py
|
cfp/admin.py
|
from django.contrib import admin
from cfp.models import CallForPaper, PaperApplication, Applicant
from django.core import urlresolvers
class ApplicantAdmin(admin.ModelAdmin):
readonly_fields = ('user',)
list_display = ('full_name', 'about', 'biography', 'speaker_experience', 'github', 'twitter')
fields = ('user', 'about', 'biography', 'speaker_experience', 'image')
class PaperApplicationAdmin(admin.ModelAdmin):
list_display = ('title', 'link_to_applicant', 'about', 'abstract', 'skill_level', 'duration')
readonly_fields = ('cfp', 'applicant')
fields = ('cfp', 'applicant', 'title', 'about', 'abstract', 'skill_level', 'duration')
def link_to_applicant(self, obj):
link = urlresolvers.reverse("admin:cfp_applicant_change", args=[obj.applicant.id])
return u'<a href="%s">%s</a>' % (link, obj.applicant)
link_to_applicant.allow_tags = True
admin.site.register(CallForPaper)
admin.site.register(Applicant, ApplicantAdmin)
admin.site.register(PaperApplication, PaperApplicationAdmin)
|
from django.contrib import admin
from cfp.models import CallForPaper, PaperApplication, Applicant
from django.core import urlresolvers
class ApplicantAdmin(admin.ModelAdmin):
list_display = ('user', 'full_name', 'about', 'biography', 'speaker_experience', 'github', 'twitter')
fields = ('user', 'about', 'biography', 'speaker_experience', 'image')
class PaperApplicationAdmin(admin.ModelAdmin):
list_display = ('title', 'link_to_applicant', 'about', 'abstract', 'skill_level', 'duration')
readonly_fields = ('cfp', 'applicant')
fields = ('cfp', 'applicant', 'title', 'about', 'abstract', 'skill_level', 'duration')
def link_to_applicant(self, obj):
link = urlresolvers.reverse("admin:cfp_applicant_change", args=[obj.applicant.id])
return u'<a href="%s">%s</a>' % (link, obj.applicant)
link_to_applicant.allow_tags = True
admin.site.register(CallForPaper)
admin.site.register(Applicant, ApplicantAdmin)
admin.site.register(PaperApplication, PaperApplicationAdmin)
|
Remove user from readonly fields
|
Remove user from readonly fields
So that we can add Applicants from the admin interface.
|
Python
|
bsd-3-clause
|
denibertovic/conference-web,denibertovic/conference-web,WebCampZg/conference-web,WebCampZg/conference-web,denibertovic/conference-web,denibertovic/conference-web,denibertovic/conference-web,WebCampZg/conference-web
|
from django.contrib import admin
from cfp.models import CallForPaper, PaperApplication, Applicant
from django.core import urlresolvers
class ApplicantAdmin(admin.ModelAdmin):
readonly_fields = ('user',)
list_display = ('full_name', 'about', 'biography', 'speaker_experience', 'github', 'twitter')
fields = ('user', 'about', 'biography', 'speaker_experience', 'image')
class PaperApplicationAdmin(admin.ModelAdmin):
list_display = ('title', 'link_to_applicant', 'about', 'abstract', 'skill_level', 'duration')
readonly_fields = ('cfp', 'applicant')
fields = ('cfp', 'applicant', 'title', 'about', 'abstract', 'skill_level', 'duration')
def link_to_applicant(self, obj):
link = urlresolvers.reverse("admin:cfp_applicant_change", args=[obj.applicant.id])
return u'<a href="%s">%s</a>' % (link, obj.applicant)
link_to_applicant.allow_tags = True
admin.site.register(CallForPaper)
admin.site.register(Applicant, ApplicantAdmin)
admin.site.register(PaperApplication, PaperApplicationAdmin)
Remove user from readonly fields
So that we can add Applicants from the admin interface.
|
from django.contrib import admin
from cfp.models import CallForPaper, PaperApplication, Applicant
from django.core import urlresolvers
class ApplicantAdmin(admin.ModelAdmin):
list_display = ('user', 'full_name', 'about', 'biography', 'speaker_experience', 'github', 'twitter')
fields = ('user', 'about', 'biography', 'speaker_experience', 'image')
class PaperApplicationAdmin(admin.ModelAdmin):
list_display = ('title', 'link_to_applicant', 'about', 'abstract', 'skill_level', 'duration')
readonly_fields = ('cfp', 'applicant')
fields = ('cfp', 'applicant', 'title', 'about', 'abstract', 'skill_level', 'duration')
def link_to_applicant(self, obj):
link = urlresolvers.reverse("admin:cfp_applicant_change", args=[obj.applicant.id])
return u'<a href="%s">%s</a>' % (link, obj.applicant)
link_to_applicant.allow_tags = True
admin.site.register(CallForPaper)
admin.site.register(Applicant, ApplicantAdmin)
admin.site.register(PaperApplication, PaperApplicationAdmin)
|
<commit_before>from django.contrib import admin
from cfp.models import CallForPaper, PaperApplication, Applicant
from django.core import urlresolvers
class ApplicantAdmin(admin.ModelAdmin):
readonly_fields = ('user',)
list_display = ('full_name', 'about', 'biography', 'speaker_experience', 'github', 'twitter')
fields = ('user', 'about', 'biography', 'speaker_experience', 'image')
class PaperApplicationAdmin(admin.ModelAdmin):
list_display = ('title', 'link_to_applicant', 'about', 'abstract', 'skill_level', 'duration')
readonly_fields = ('cfp', 'applicant')
fields = ('cfp', 'applicant', 'title', 'about', 'abstract', 'skill_level', 'duration')
def link_to_applicant(self, obj):
link = urlresolvers.reverse("admin:cfp_applicant_change", args=[obj.applicant.id])
return u'<a href="%s">%s</a>' % (link, obj.applicant)
link_to_applicant.allow_tags = True
admin.site.register(CallForPaper)
admin.site.register(Applicant, ApplicantAdmin)
admin.site.register(PaperApplication, PaperApplicationAdmin)
<commit_msg>Remove user from readonly fields
So that we can add Applicants from the admin interface.<commit_after>
|
from django.contrib import admin
from cfp.models import CallForPaper, PaperApplication, Applicant
from django.core import urlresolvers
class ApplicantAdmin(admin.ModelAdmin):
list_display = ('user', 'full_name', 'about', 'biography', 'speaker_experience', 'github', 'twitter')
fields = ('user', 'about', 'biography', 'speaker_experience', 'image')
class PaperApplicationAdmin(admin.ModelAdmin):
list_display = ('title', 'link_to_applicant', 'about', 'abstract', 'skill_level', 'duration')
readonly_fields = ('cfp', 'applicant')
fields = ('cfp', 'applicant', 'title', 'about', 'abstract', 'skill_level', 'duration')
def link_to_applicant(self, obj):
link = urlresolvers.reverse("admin:cfp_applicant_change", args=[obj.applicant.id])
return u'<a href="%s">%s</a>' % (link, obj.applicant)
link_to_applicant.allow_tags = True
admin.site.register(CallForPaper)
admin.site.register(Applicant, ApplicantAdmin)
admin.site.register(PaperApplication, PaperApplicationAdmin)
|
from django.contrib import admin
from cfp.models import CallForPaper, PaperApplication, Applicant
from django.core import urlresolvers
class ApplicantAdmin(admin.ModelAdmin):
readonly_fields = ('user',)
list_display = ('full_name', 'about', 'biography', 'speaker_experience', 'github', 'twitter')
fields = ('user', 'about', 'biography', 'speaker_experience', 'image')
class PaperApplicationAdmin(admin.ModelAdmin):
list_display = ('title', 'link_to_applicant', 'about', 'abstract', 'skill_level', 'duration')
readonly_fields = ('cfp', 'applicant')
fields = ('cfp', 'applicant', 'title', 'about', 'abstract', 'skill_level', 'duration')
def link_to_applicant(self, obj):
link = urlresolvers.reverse("admin:cfp_applicant_change", args=[obj.applicant.id])
return u'<a href="%s">%s</a>' % (link, obj.applicant)
link_to_applicant.allow_tags = True
admin.site.register(CallForPaper)
admin.site.register(Applicant, ApplicantAdmin)
admin.site.register(PaperApplication, PaperApplicationAdmin)
Remove user from readonly fields
So that we can add Applicants from the admin interface.from django.contrib import admin
from cfp.models import CallForPaper, PaperApplication, Applicant
from django.core import urlresolvers
class ApplicantAdmin(admin.ModelAdmin):
list_display = ('user', 'full_name', 'about', 'biography', 'speaker_experience', 'github', 'twitter')
fields = ('user', 'about', 'biography', 'speaker_experience', 'image')
class PaperApplicationAdmin(admin.ModelAdmin):
list_display = ('title', 'link_to_applicant', 'about', 'abstract', 'skill_level', 'duration')
readonly_fields = ('cfp', 'applicant')
fields = ('cfp', 'applicant', 'title', 'about', 'abstract', 'skill_level', 'duration')
def link_to_applicant(self, obj):
link = urlresolvers.reverse("admin:cfp_applicant_change", args=[obj.applicant.id])
return u'<a href="%s">%s</a>' % (link, obj.applicant)
link_to_applicant.allow_tags = True
admin.site.register(CallForPaper)
admin.site.register(Applicant, ApplicantAdmin)
admin.site.register(PaperApplication, PaperApplicationAdmin)
|
<commit_before>from django.contrib import admin
from cfp.models import CallForPaper, PaperApplication, Applicant
from django.core import urlresolvers
class ApplicantAdmin(admin.ModelAdmin):
readonly_fields = ('user',)
list_display = ('full_name', 'about', 'biography', 'speaker_experience', 'github', 'twitter')
fields = ('user', 'about', 'biography', 'speaker_experience', 'image')
class PaperApplicationAdmin(admin.ModelAdmin):
list_display = ('title', 'link_to_applicant', 'about', 'abstract', 'skill_level', 'duration')
readonly_fields = ('cfp', 'applicant')
fields = ('cfp', 'applicant', 'title', 'about', 'abstract', 'skill_level', 'duration')
def link_to_applicant(self, obj):
link = urlresolvers.reverse("admin:cfp_applicant_change", args=[obj.applicant.id])
return u'<a href="%s">%s</a>' % (link, obj.applicant)
link_to_applicant.allow_tags = True
admin.site.register(CallForPaper)
admin.site.register(Applicant, ApplicantAdmin)
admin.site.register(PaperApplication, PaperApplicationAdmin)
<commit_msg>Remove user from readonly fields
So that we can add Applicants from the admin interface.<commit_after>from django.contrib import admin
from cfp.models import CallForPaper, PaperApplication, Applicant
from django.core import urlresolvers
class ApplicantAdmin(admin.ModelAdmin):
list_display = ('user', 'full_name', 'about', 'biography', 'speaker_experience', 'github', 'twitter')
fields = ('user', 'about', 'biography', 'speaker_experience', 'image')
class PaperApplicationAdmin(admin.ModelAdmin):
list_display = ('title', 'link_to_applicant', 'about', 'abstract', 'skill_level', 'duration')
readonly_fields = ('cfp', 'applicant')
fields = ('cfp', 'applicant', 'title', 'about', 'abstract', 'skill_level', 'duration')
def link_to_applicant(self, obj):
link = urlresolvers.reverse("admin:cfp_applicant_change", args=[obj.applicant.id])
return u'<a href="%s">%s</a>' % (link, obj.applicant)
link_to_applicant.allow_tags = True
admin.site.register(CallForPaper)
admin.site.register(Applicant, ApplicantAdmin)
admin.site.register(PaperApplication, PaperApplicationAdmin)
|
11112ce48c8b1042fe5bb75f37db84b7d9162a6f
|
python/test/annotator/embeddings/bert_embeddings_test.py
|
python/test/annotator/embeddings/bert_embeddings_test.py
|
# Copyright 2017-2022 John Snow Labs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import pytest
from sparknlp.annotator import *
from sparknlp.base import *
from test.util import SparkContextForTest
@pytest.mark.slow
class BertEmbeddingsTestSpec(unittest.TestCase):
def setUp(self):
self.data = SparkContextForTest.spark.read.option("header", "true") \
.csv(path="file:///" + os.getcwd() + "/../src/test/resources/embeddings/sentence_embeddings.csv")
def runTest(self):
document_assembler = DocumentAssembler() \
.setInputCol("text") \
.setOutputCol("document")
sentence_detector = SentenceDetector() \
.setInputCols(["document"]) \
.setOutputCol("sentence")
tokenizer = Tokenizer() \
.setInputCols(["sentence"]) \
.setOutputCol("token")
albert = BertEmbeddings.pretrained() \
.setInputCols(["sentence", "token"]) \
.setOutputCol("embeddings")
pipeline = Pipeline(stages=[
document_assembler,
sentence_detector,
tokenizer,
albert
])
model = pipeline.fit(self.data)
model.transform(self.data).show()
@pytest.mark.slow
class BertEmbeddingsLoadSavedModelTestSpec(unittest.TestCase):
def setUp(self):
self.data = SparkContextForTest.spark.read.option("header", "true") \
.csv(path="file:///" + os.getcwd() + "/../src/test/resources/embeddings/sentence_embeddings.csv")
def runTest(self):
document_assembler = DocumentAssembler() \
.setInputCol("text") \
.setOutputCol("document")
sentence_detector = SentenceDetector() \
.setInputCols(["document"]) \
.setOutputCol("sentence")
tokenizer = Tokenizer() \
.setInputCols(["sentence"]) \
.setOutputCol("token")
albert = BertEmbeddings.loadSavedModel(os.getcwd() + "/../src/test/resources/tf-hub-bert/model",
SparkContextForTest.spark) \
.setInputCols(["sentence", "token"]) \
.setOutputCol("embeddings")
pipeline = Pipeline(stages=[
document_assembler,
sentence_detector,
tokenizer,
albert
])
model = pipeline.fit(self.data)
model.write().overwrite().save("./tmp_bert_pipeline_model")
model.transform(self.data).show()
|
Add a new unit test for BERT in Python [skip test]
|
Add a new unit test for BERT in Python [skip test]
|
Python
|
apache-2.0
|
JohnSnowLabs/spark-nlp,JohnSnowLabs/spark-nlp,JohnSnowLabs/spark-nlp,JohnSnowLabs/spark-nlp
|
Add a new unit test for BERT in Python [skip test]
|
# Copyright 2017-2022 John Snow Labs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import pytest
from sparknlp.annotator import *
from sparknlp.base import *
from test.util import SparkContextForTest
@pytest.mark.slow
class BertEmbeddingsTestSpec(unittest.TestCase):
def setUp(self):
self.data = SparkContextForTest.spark.read.option("header", "true") \
.csv(path="file:///" + os.getcwd() + "/../src/test/resources/embeddings/sentence_embeddings.csv")
def runTest(self):
document_assembler = DocumentAssembler() \
.setInputCol("text") \
.setOutputCol("document")
sentence_detector = SentenceDetector() \
.setInputCols(["document"]) \
.setOutputCol("sentence")
tokenizer = Tokenizer() \
.setInputCols(["sentence"]) \
.setOutputCol("token")
albert = BertEmbeddings.pretrained() \
.setInputCols(["sentence", "token"]) \
.setOutputCol("embeddings")
pipeline = Pipeline(stages=[
document_assembler,
sentence_detector,
tokenizer,
albert
])
model = pipeline.fit(self.data)
model.transform(self.data).show()
@pytest.mark.slow
class BertEmbeddingsLoadSavedModelTestSpec(unittest.TestCase):
def setUp(self):
self.data = SparkContextForTest.spark.read.option("header", "true") \
.csv(path="file:///" + os.getcwd() + "/../src/test/resources/embeddings/sentence_embeddings.csv")
def runTest(self):
document_assembler = DocumentAssembler() \
.setInputCol("text") \
.setOutputCol("document")
sentence_detector = SentenceDetector() \
.setInputCols(["document"]) \
.setOutputCol("sentence")
tokenizer = Tokenizer() \
.setInputCols(["sentence"]) \
.setOutputCol("token")
albert = BertEmbeddings.loadSavedModel(os.getcwd() + "/../src/test/resources/tf-hub-bert/model",
SparkContextForTest.spark) \
.setInputCols(["sentence", "token"]) \
.setOutputCol("embeddings")
pipeline = Pipeline(stages=[
document_assembler,
sentence_detector,
tokenizer,
albert
])
model = pipeline.fit(self.data)
model.write().overwrite().save("./tmp_bert_pipeline_model")
model.transform(self.data).show()
|
<commit_before><commit_msg>Add a new unit test for BERT in Python [skip test]<commit_after>
|
# Copyright 2017-2022 John Snow Labs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import pytest
from sparknlp.annotator import *
from sparknlp.base import *
from test.util import SparkContextForTest
@pytest.mark.slow
class BertEmbeddingsTestSpec(unittest.TestCase):
def setUp(self):
self.data = SparkContextForTest.spark.read.option("header", "true") \
.csv(path="file:///" + os.getcwd() + "/../src/test/resources/embeddings/sentence_embeddings.csv")
def runTest(self):
document_assembler = DocumentAssembler() \
.setInputCol("text") \
.setOutputCol("document")
sentence_detector = SentenceDetector() \
.setInputCols(["document"]) \
.setOutputCol("sentence")
tokenizer = Tokenizer() \
.setInputCols(["sentence"]) \
.setOutputCol("token")
albert = BertEmbeddings.pretrained() \
.setInputCols(["sentence", "token"]) \
.setOutputCol("embeddings")
pipeline = Pipeline(stages=[
document_assembler,
sentence_detector,
tokenizer,
albert
])
model = pipeline.fit(self.data)
model.transform(self.data).show()
@pytest.mark.slow
class BertEmbeddingsLoadSavedModelTestSpec(unittest.TestCase):
def setUp(self):
self.data = SparkContextForTest.spark.read.option("header", "true") \
.csv(path="file:///" + os.getcwd() + "/../src/test/resources/embeddings/sentence_embeddings.csv")
def runTest(self):
document_assembler = DocumentAssembler() \
.setInputCol("text") \
.setOutputCol("document")
sentence_detector = SentenceDetector() \
.setInputCols(["document"]) \
.setOutputCol("sentence")
tokenizer = Tokenizer() \
.setInputCols(["sentence"]) \
.setOutputCol("token")
albert = BertEmbeddings.loadSavedModel(os.getcwd() + "/../src/test/resources/tf-hub-bert/model",
SparkContextForTest.spark) \
.setInputCols(["sentence", "token"]) \
.setOutputCol("embeddings")
pipeline = Pipeline(stages=[
document_assembler,
sentence_detector,
tokenizer,
albert
])
model = pipeline.fit(self.data)
model.write().overwrite().save("./tmp_bert_pipeline_model")
model.transform(self.data).show()
|
Add a new unit test for BERT in Python [skip test]# Copyright 2017-2022 John Snow Labs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import pytest
from sparknlp.annotator import *
from sparknlp.base import *
from test.util import SparkContextForTest
@pytest.mark.slow
class BertEmbeddingsTestSpec(unittest.TestCase):
def setUp(self):
self.data = SparkContextForTest.spark.read.option("header", "true") \
.csv(path="file:///" + os.getcwd() + "/../src/test/resources/embeddings/sentence_embeddings.csv")
def runTest(self):
document_assembler = DocumentAssembler() \
.setInputCol("text") \
.setOutputCol("document")
sentence_detector = SentenceDetector() \
.setInputCols(["document"]) \
.setOutputCol("sentence")
tokenizer = Tokenizer() \
.setInputCols(["sentence"]) \
.setOutputCol("token")
albert = BertEmbeddings.pretrained() \
.setInputCols(["sentence", "token"]) \
.setOutputCol("embeddings")
pipeline = Pipeline(stages=[
document_assembler,
sentence_detector,
tokenizer,
albert
])
model = pipeline.fit(self.data)
model.transform(self.data).show()
@pytest.mark.slow
class BertEmbeddingsLoadSavedModelTestSpec(unittest.TestCase):
def setUp(self):
self.data = SparkContextForTest.spark.read.option("header", "true") \
.csv(path="file:///" + os.getcwd() + "/../src/test/resources/embeddings/sentence_embeddings.csv")
def runTest(self):
document_assembler = DocumentAssembler() \
.setInputCol("text") \
.setOutputCol("document")
sentence_detector = SentenceDetector() \
.setInputCols(["document"]) \
.setOutputCol("sentence")
tokenizer = Tokenizer() \
.setInputCols(["sentence"]) \
.setOutputCol("token")
albert = BertEmbeddings.loadSavedModel(os.getcwd() + "/../src/test/resources/tf-hub-bert/model",
SparkContextForTest.spark) \
.setInputCols(["sentence", "token"]) \
.setOutputCol("embeddings")
pipeline = Pipeline(stages=[
document_assembler,
sentence_detector,
tokenizer,
albert
])
model = pipeline.fit(self.data)
model.write().overwrite().save("./tmp_bert_pipeline_model")
model.transform(self.data).show()
|
<commit_before><commit_msg>Add a new unit test for BERT in Python [skip test]<commit_after># Copyright 2017-2022 John Snow Labs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import pytest
from sparknlp.annotator import *
from sparknlp.base import *
from test.util import SparkContextForTest
@pytest.mark.slow
class BertEmbeddingsTestSpec(unittest.TestCase):
def setUp(self):
self.data = SparkContextForTest.spark.read.option("header", "true") \
.csv(path="file:///" + os.getcwd() + "/../src/test/resources/embeddings/sentence_embeddings.csv")
def runTest(self):
document_assembler = DocumentAssembler() \
.setInputCol("text") \
.setOutputCol("document")
sentence_detector = SentenceDetector() \
.setInputCols(["document"]) \
.setOutputCol("sentence")
tokenizer = Tokenizer() \
.setInputCols(["sentence"]) \
.setOutputCol("token")
albert = BertEmbeddings.pretrained() \
.setInputCols(["sentence", "token"]) \
.setOutputCol("embeddings")
pipeline = Pipeline(stages=[
document_assembler,
sentence_detector,
tokenizer,
albert
])
model = pipeline.fit(self.data)
model.transform(self.data).show()
@pytest.mark.slow
class BertEmbeddingsLoadSavedModelTestSpec(unittest.TestCase):
def setUp(self):
self.data = SparkContextForTest.spark.read.option("header", "true") \
.csv(path="file:///" + os.getcwd() + "/../src/test/resources/embeddings/sentence_embeddings.csv")
def runTest(self):
document_assembler = DocumentAssembler() \
.setInputCol("text") \
.setOutputCol("document")
sentence_detector = SentenceDetector() \
.setInputCols(["document"]) \
.setOutputCol("sentence")
tokenizer = Tokenizer() \
.setInputCols(["sentence"]) \
.setOutputCol("token")
albert = BertEmbeddings.loadSavedModel(os.getcwd() + "/../src/test/resources/tf-hub-bert/model",
SparkContextForTest.spark) \
.setInputCols(["sentence", "token"]) \
.setOutputCol("embeddings")
pipeline = Pipeline(stages=[
document_assembler,
sentence_detector,
tokenizer,
albert
])
model = pipeline.fit(self.data)
model.write().overwrite().save("./tmp_bert_pipeline_model")
model.transform(self.data).show()
|
|
212316c45a43d1d6a973ff81304837365139ab0e
|
python/ecep/portal/widgets.py
|
python/ecep/portal/widgets.py
|
from django.forms import widgets
from django.utils.safestring import mark_safe
class MapWidget(widgets.HiddenInput):
def render(self, name, value, attrs=None):
#import ipdb; ipdb.set_trace()
widget = super(MapWidget, self).render(name, value, attrs)
return mark_safe("""<input name="geom" readonly="readonly" value="%s" type="text" id="id_geom" size="60">
<br>
<input type="button" value="Geocode Address" onclick=ecepAdmin.geocodeAddress()>
(<a onclick=ecepAdmin.mapHelp() href="#">?</a>)
<div id='map-help'></div><div id="map">%s</div>""" % (value, widget))
|
Add custom widget for input of Points and google map
|
Add custom widget for input of Points and google map
Custom widget includes input area for geometry which is automatically
filled upon geocoding or manual selection of a location.
Also includes a placeholder div for the google map on the page.
|
Python
|
mit
|
smartchicago/chicago-early-learning,smartchicago/chicago-early-learning,smartchicago/chicago-early-learning,smartchicago/chicago-early-learning
|
Add custom widget for input of Points and google map
Custom widget includes input area for geometry which is automatically
filled upon geocoding or manual selection of a location.
Also includes a placeholder div for the google map on the page.
|
from django.forms import widgets
from django.utils.safestring import mark_safe
class MapWidget(widgets.HiddenInput):
def render(self, name, value, attrs=None):
#import ipdb; ipdb.set_trace()
widget = super(MapWidget, self).render(name, value, attrs)
return mark_safe("""<input name="geom" readonly="readonly" value="%s" type="text" id="id_geom" size="60">
<br>
<input type="button" value="Geocode Address" onclick=ecepAdmin.geocodeAddress()>
(<a onclick=ecepAdmin.mapHelp() href="#">?</a>)
<div id='map-help'></div><div id="map">%s</div>""" % (value, widget))
|
<commit_before><commit_msg>Add custom widget for input of Points and google map
Custom widget includes input area for geometry which is automatically
filled upon geocoding or manual selection of a location.
Also includes a placeholder div for the google map on the page.<commit_after>
|
from django.forms import widgets
from django.utils.safestring import mark_safe
class MapWidget(widgets.HiddenInput):
def render(self, name, value, attrs=None):
#import ipdb; ipdb.set_trace()
widget = super(MapWidget, self).render(name, value, attrs)
return mark_safe("""<input name="geom" readonly="readonly" value="%s" type="text" id="id_geom" size="60">
<br>
<input type="button" value="Geocode Address" onclick=ecepAdmin.geocodeAddress()>
(<a onclick=ecepAdmin.mapHelp() href="#">?</a>)
<div id='map-help'></div><div id="map">%s</div>""" % (value, widget))
|
Add custom widget for input of Points and google map
Custom widget includes input area for geometry which is automatically
filled upon geocoding or manual selection of a location.
Also includes a placeholder div for the google map on the page.from django.forms import widgets
from django.utils.safestring import mark_safe
class MapWidget(widgets.HiddenInput):
def render(self, name, value, attrs=None):
#import ipdb; ipdb.set_trace()
widget = super(MapWidget, self).render(name, value, attrs)
return mark_safe("""<input name="geom" readonly="readonly" value="%s" type="text" id="id_geom" size="60">
<br>
<input type="button" value="Geocode Address" onclick=ecepAdmin.geocodeAddress()>
(<a onclick=ecepAdmin.mapHelp() href="#">?</a>)
<div id='map-help'></div><div id="map">%s</div>""" % (value, widget))
|
<commit_before><commit_msg>Add custom widget for input of Points and google map
Custom widget includes input area for geometry which is automatically
filled upon geocoding or manual selection of a location.
Also includes a placeholder div for the google map on the page.<commit_after>from django.forms import widgets
from django.utils.safestring import mark_safe
class MapWidget(widgets.HiddenInput):
def render(self, name, value, attrs=None):
#import ipdb; ipdb.set_trace()
widget = super(MapWidget, self).render(name, value, attrs)
return mark_safe("""<input name="geom" readonly="readonly" value="%s" type="text" id="id_geom" size="60">
<br>
<input type="button" value="Geocode Address" onclick=ecepAdmin.geocodeAddress()>
(<a onclick=ecepAdmin.mapHelp() href="#">?</a>)
<div id='map-help'></div><div id="map">%s</div>""" % (value, widget))
|
|
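A hedged sketch of how a widget like the one above might be attached to a Django form field; the form and field names are made up for illustration, and only the 'geom' name comes from the hard-coded input rendered by render():
from django import forms
from portal.widgets import MapWidget  # module path as in the record; assumed importable

class FacilityLocationForm(forms.Form):
    # The widget renders a read-only id_geom text input plus the geocode button,
    # help link, and the map placeholder div shown in the record.
    geom = forms.CharField(widget=MapWidget())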
06bbb4f99e35deddb7dc26d01af4aaa431526afd
|
accelerator/migrations/0024_program_show_all_masschallenge_events.py
|
accelerator/migrations/0024_program_show_all_masschallenge_events.py
|
# Generated by Django 2.2.10 on 2020-06-30 17:47
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accelerator', '023_alter_topics_field_office_hours'),
]
operations = [
migrations.AddField(
model_name='program',
name='show_all_masschallenge_events',
field=models.BooleanField(default=False),
),
]
|
Fix migrations causing test failures
|
[AC-7750-fix] Fix migrations causing test failures
|
Python
|
mit
|
masschallenge/django-accelerator,masschallenge/django-accelerator
|
[AC-7750-fix] Fix migrations causing test failures
|
# Generated by Django 2.2.10 on 2020-06-30 17:47
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accelerator', '023_alter_topics_field_office_hours'),
]
operations = [
migrations.AddField(
model_name='program',
name='show_all_masschallenge_events',
field=models.BooleanField(default=False),
),
]
|
<commit_before><commit_msg>[AC-7750-fix] Fix migrations causing test failures<commit_after>
|
# Generated by Django 2.2.10 on 2020-06-30 17:47
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accelerator', '023_alter_topics_field_office_hours'),
]
operations = [
migrations.AddField(
model_name='program',
name='show_all_masschallenge_events',
field=models.BooleanField(default=False),
),
]
|
[AC-7750-fix] Fix migrations causing test failures# Generated by Django 2.2.10 on 2020-06-30 17:47
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accelerator', '023_alter_topics_field_office_hours'),
]
operations = [
migrations.AddField(
model_name='program',
name='show_all_masschallenge_events',
field=models.BooleanField(default=False),
),
]
|
<commit_before><commit_msg>[AC-7750-fix] Fix migrations causing test failures<commit_after># Generated by Django 2.2.10 on 2020-06-30 17:47
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accelerator', '023_alter_topics_field_office_hours'),
]
operations = [
migrations.AddField(
model_name='program',
name='show_all_masschallenge_events',
field=models.BooleanField(default=False),
),
]
|
|
0a83c4c0b2728c04ec1b5b4eabdb612170e450bd
|
webkit/tools/layout_tests/test_types/fuzzy_image_diff.py
|
webkit/tools/layout_tests/test_types/fuzzy_image_diff.py
|
# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Compares the image output of a test to the expected image output using
fuzzy matching.
"""
import errno
import logging
import os
import shutil
import subprocess
from layout_package import path_utils
from layout_package import test_failures
from test_types import test_type_base
class FuzzyImageDiff(test_type_base.TestTypeBase):
def CompareOutput(self, filename, proc, output, test_args):
"""Implementation of CompareOutput that checks the output image and
checksum against the expected files from the LayoutTest directory.
"""
failures = []
# If we didn't produce a hash file, this test must be text-only.
if test_args.hash is None:
return failures
expected_png_file = path_utils.ExpectedFilename(filename,
'.png',
self._platform)
if test_args.show_sources:
logging.debug('Using %s' % expected_png_file)
# Also report a missing expected PNG file.
if not os.path.isfile(expected_png_file):
failures.append(test_failures.FailureMissingImage(self))
# Run the fuzzymatcher
r = subprocess.call(['fuzzymatch', test_args.png_path, expected_png_file])
print ' fuzzymatch returned', r
if r != 0:
failures.append(test_failures.FailureFuzzyFailure(self))
return failures
|
Fix build: missed a file
|
Fix build: missed a file
TBR=tony
Review URL: http://codereview.chromium.org/13209
git-svn-id: http://src.chromium.org/svn/trunk/src@6449 4ff67af0-8c30-449e-8e8b-ad334ec8d88c
Former-commit-id: b919e44b1d44926606de27cb569e7ad1b4724989
|
Python
|
bsd-3-clause
|
meego-tablet-ux/meego-app-browser,meego-tablet-ux/meego-app-browser,meego-tablet-ux/meego-app-browser,meego-tablet-ux/meego-app-browser,meego-tablet-ux/meego-app-browser,meego-tablet-ux/meego-app-browser,meego-tablet-ux/meego-app-browser,meego-tablet-ux/meego-app-browser,meego-tablet-ux/meego-app-browser,meego-tablet-ux/meego-app-browser
|
Fix build: missed a file
TBR=tony
Review URL: http://codereview.chromium.org/13209
git-svn-id: http://src.chromium.org/svn/trunk/src@6449 4ff67af0-8c30-449e-8e8b-ad334ec8d88c
Former-commit-id: b919e44b1d44926606de27cb569e7ad1b4724989
|
# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Compares the image output of a test to the expected image output using
fuzzy matching.
"""
import errno
import logging
import os
import shutil
import subprocess
from layout_package import path_utils
from layout_package import test_failures
from test_types import test_type_base
class FuzzyImageDiff(test_type_base.TestTypeBase):
def CompareOutput(self, filename, proc, output, test_args):
"""Implementation of CompareOutput that checks the output image and
checksum against the expected files from the LayoutTest directory.
"""
failures = []
# If we didn't produce a hash file, this test must be text-only.
if test_args.hash is None:
return failures
expected_png_file = path_utils.ExpectedFilename(filename,
'.png',
self._platform)
if test_args.show_sources:
logging.debug('Using %s' % expected_png_file)
# Also report a missing expected PNG file.
if not os.path.isfile(expected_png_file):
failures.append(test_failures.FailureMissingImage(self))
# Run the fuzzymatcher
r = subprocess.call(['fuzzymatch', test_args.png_path, expected_png_file])
print ' fuzzymatch returned', r
if r != 0:
failures.append(test_failures.FailureFuzzyFailure(self))
return failures
|
<commit_before><commit_msg>Fix build: missed a file
TBR=tony
Review URL: http://codereview.chromium.org/13209
git-svn-id: http://src.chromium.org/svn/trunk/src@6449 4ff67af0-8c30-449e-8e8b-ad334ec8d88c
Former-commit-id: b919e44b1d44926606de27cb569e7ad1b4724989<commit_after>
|
# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Compares the image output of a test to the expected image output using
fuzzy matching.
"""
import errno
import logging
import os
import shutil
import subprocess
from layout_package import path_utils
from layout_package import test_failures
from test_types import test_type_base
class FuzzyImageDiff(test_type_base.TestTypeBase):
def CompareOutput(self, filename, proc, output, test_args):
"""Implementation of CompareOutput that checks the output image and
checksum against the expected files from the LayoutTest directory.
"""
failures = []
# If we didn't produce a hash file, this test must be text-only.
if test_args.hash is None:
return failures
expected_png_file = path_utils.ExpectedFilename(filename,
'.png',
self._platform)
if test_args.show_sources:
logging.debug('Using %s' % expected_png_file)
# Also report a missing expected PNG file.
if not os.path.isfile(expected_png_file):
failures.append(test_failures.FailureMissingImage(self))
# Run the fuzzymatcher
r = subprocess.call(['fuzzymatch', test_args.png_path, expected_png_file])
print ' fuzzymatch returned', r
if r != 0:
failures.append(test_failures.FailureFuzzyFailure(self))
return failures
|
Fix build: missed a file
TBR=tony
Review URL: http://codereview.chromium.org/13209
git-svn-id: http://src.chromium.org/svn/trunk/src@6449 4ff67af0-8c30-449e-8e8b-ad334ec8d88c
Former-commit-id: b919e44b1d44926606de27cb569e7ad1b4724989# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Compares the image output of a test to the expected image output using
fuzzy matching.
"""
import errno
import logging
import os
import shutil
import subprocess
from layout_package import path_utils
from layout_package import test_failures
from test_types import test_type_base
class FuzzyImageDiff(test_type_base.TestTypeBase):
def CompareOutput(self, filename, proc, output, test_args):
"""Implementation of CompareOutput that checks the output image and
checksum against the expected files from the LayoutTest directory.
"""
failures = []
# If we didn't produce a hash file, this test must be text-only.
if test_args.hash is None:
return failures
expected_png_file = path_utils.ExpectedFilename(filename,
'.png',
self._platform)
if test_args.show_sources:
logging.debug('Using %s' % expected_png_file)
# Also report a missing expected PNG file.
if not os.path.isfile(expected_png_file):
failures.append(test_failures.FailureMissingImage(self))
# Run the fuzzymatcher
r = subprocess.call(['fuzzymatch', test_args.png_path, expected_png_file])
print ' fuzzymatch returned', r
if r != 0:
failures.append(test_failures.FailureFuzzyFailure(self))
return failures
|
<commit_before><commit_msg>Fix build: missed a file
TBR=tony
Review URL: http://codereview.chromium.org/13209
git-svn-id: http://src.chromium.org/svn/trunk/src@6449 4ff67af0-8c30-449e-8e8b-ad334ec8d88c
Former-commit-id: b919e44b1d44926606de27cb569e7ad1b4724989<commit_after># Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Compares the image output of a test to the expected image output using
fuzzy matching.
"""
import errno
import logging
import os
import shutil
import subprocess
from layout_package import path_utils
from layout_package import test_failures
from test_types import test_type_base
class FuzzyImageDiff(test_type_base.TestTypeBase):
def CompareOutput(self, filename, proc, output, test_args):
"""Implementation of CompareOutput that checks the output image and
checksum against the expected files from the LayoutTest directory.
"""
failures = []
# If we didn't produce a hash file, this test must be text-only.
if test_args.hash is None:
return failures
expected_png_file = path_utils.ExpectedFilename(filename,
'.png',
self._platform)
if test_args.show_sources:
logging.debug('Using %s' % expected_png_file)
# Also report a missing expected PNG file.
if not os.path.isfile(expected_png_file):
failures.append(test_failures.FailureMissingImage(self))
# Run the fuzzymatcher
r = subprocess.call(['fuzzymatch', test_args.png_path, expected_png_file])
print ' fuzzymatch returned', r
if r != 0:
failures.append(test_failures.FailureFuzzyFailure(self))
return failures
|
|
2f30d3d07d2c2a15c0eaad026d745682a9b94eec
|
scripts/export_temporal.py
|
scripts/export_temporal.py
|
"""
Exports the data in db.grids.temporal.counties to CSV.
"""
import twitterproj
db = twitterproj.connect()
header = \
"""# COUNTS are tweet counts in 5 minute intervals: 24 * 60 / 5 columns
# weekday is the day of week, Mon=0, ..., Sun=6
# state_fips, county_fips, offset_to_eastern, year, month, day, weekday, COUNTS
"""
offset_to_eastern = {'Eastern': 0, 'Central': -1, 'Mountain': -2, 'Pacific': -3}
rows = []
cols = ['state_fips', 'county_fips', 'year', 'month', 'day', 'weekday']
for doc in db.grids.temporal.counties.find():
row = []
for col in cols:
if col == 'date':
row.append(doc[col].isoformat())
else:
row.append(doc[col])
date = doc['date']
tz = twitterproj.get_ustz(date, doc['timezone'])
offset = offset_to_eastern[tz]
row.insert(2, offset)
row.extend(doc['counts'])
row = map(str, row)
rows.append(','.join(row))
out = header
out += '\n'.join(rows)
with open('temporal.csv', 'w') as fobj:
fobj.write(out)
|
Add script export build temporal collection.
|
Add script export build temporal collection.
|
Python
|
unlicense
|
chebee7i/twitter,chebee7i/twitter,chebee7i/twitter
|
Add script export build temporal collection.
|
"""
Exports the data in db.grids.temporal.counties to CSV.
"""
import twitterproj
db = twitterproj.connect()
header = \
"""# COUNTS are tweet counts in 5 minute intervals: 24 * 60 / 5 columns
# weekday is the day of week, Mon=0, ..., Sun=6
# state_fips, county_fips, offset_to_eastern, year, month, day, weekday, COUNTS
"""
offset_to_eastern = {'Eastern': 0, 'Central': -1, 'Mountain': -2, 'Pacific': -3}
rows = []
cols = ['state_fips', 'county_fips', 'year', 'month', 'day', 'weekday']
for doc in db.grids.temporal.counties.find():
row = []
for col in cols:
if col == 'date':
row.append(doc[col].isoformat())
else:
row.append(doc[col])
date = doc['date']
tz = twitterproj.get_ustz(date, doc['timezone'])
offset = offset_to_eastern[tz]
row.insert(2, offset)
row.extend(doc['counts'])
row = map(str, row)
rows.append(','.join(row))
out = header
out += '\n'.join(rows)
with open('temporal.csv', 'w') as fobj:
fobj.write(out)
|
<commit_before><commit_msg>Add script export build temporal collection.<commit_after>
|
"""
Exports the data in db.grids.temporal.counties to CSV.
"""
import twitterproj
db = twitterproj.connect()
header = \
"""# COUNTS are tweet counts in 5 minute intervals: 24 * 60 / 5 columns
# weekday is the day of week, Mon=0, ..., Sun=6
# state_fips, county_fips, offset_to_eastern, year, month, day, weekday, COUNTS
"""
offset_to_eastern = {'Eastern': 0, 'Central': -1, 'Mountain': -2, 'Pacific': -3}
rows = []
cols = ['state_fips', 'county_fips', 'year', 'month', 'day', 'weekday']
for doc in db.grids.temporal.counties.find():
row = []
for col in cols:
if col == 'date':
row.append(doc[col].isoformat())
else:
row.append(doc[col])
date = doc['date']
tz = twitterproj.get_ustz(date, doc['timezone'])
offset = offset_to_eastern[tz]
row.insert(2, offset)
row.extend(doc['counts'])
row = map(str, row)
rows.append(','.join(row))
out = header
out += '\n'.join(rows)
with open('temporal.csv', 'w') as fobj:
fobj.write(out)
|
Add script export build temporal collection."""
Exports the data in db.grids.temporal.counties to CSV.
"""
import twitterproj
db = twitterproj.connect()
header = \
"""# COUNTS are tweet counts in 5 minute intervals: 24 * 60 / 5 columns
# weekday is the day of week, Mon=0, ..., Sun=6
# state_fips, county_fips, offset_to_eastern, year, month, day, weekday, COUNTS
"""
offset_to_eastern = {'Eastern': 0, 'Central': -1, 'Mountain': -2, 'Pacific': -3}
rows = []
cols = ['state_fips', 'county_fips', 'year', 'month', 'day', 'weekday']
for doc in db.grids.temporal.counties.find():
row = []
for col in cols:
if col == 'date':
row.append(doc[col].isoformat())
else:
row.append(doc[col])
date = doc['date']
tz = twitterproj.get_ustz(date, doc['timezone'])
offset = offset_to_eastern[tz]
row.insert(2, offset)
row.extend(doc['counts'])
row = map(str, row)
rows.append(','.join(row))
out = header
out += '\n'.join(rows)
with open('temporal.csv', 'w') as fobj:
fobj.write(out)
|
<commit_before><commit_msg>Add script export build temporal collection.<commit_after>"""
Exports the data in db.grids.temporal.counties to CSV.
"""
import twitterproj
db = twitterproj.connect()
header = \
"""# COUNTS are tweet counts in 5 minute intervals: 24 * 60 / 5 columns
# weekday is the day of week, Mon=0, ..., Sun=6
# state_fips, county_fips, offset_to_eastern, year, month, day, weekday, COUNTS
"""
offset_to_eastern = {'Eastern': 0, 'Central': -1, 'Mountain': -2, 'Pacific': -3}
rows = []
cols = ['state_fips', 'county_fips', 'year', 'month', 'day', 'weekday']
for doc in db.grids.temporal.counties.find():
row = []
for col in cols:
if col == 'date':
row.append(doc[col].isoformat())
else:
row.append(doc[col])
date = doc['date']
tz = twitterproj.get_ustz(date, doc['timezone'])
offset = offset_to_eastern[tz]
row.insert(2, offset)
row.extend(doc['counts'])
row = map(str, row)
rows.append(','.join(row))
out = header
out += '\n'.join(rows)
with open('temporal.csv', 'w') as fobj:
fobj.write(out)
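
For reference, a minimal sketch (not part of the commit above) of reading one data row back out of the exported temporal.csv, assuming the column layout stated in the script's header comment: state_fips, county_fips, offset_to_eastern, year, month, day, weekday, followed by 24 * 60 / 5 = 288 five-minute tweet counts.

import csv

with open('temporal.csv') as fobj:
    # Skip the "#" header lines the script writes before the data rows.
    reader = csv.reader(line for line in fobj if not line.startswith('#'))
    for row in reader:
        state_fips, county_fips = row[0], row[1]
        offset_to_eastern = int(row[2])
        year, month, day, weekday = (int(v) for v in row[3:7])
        counts = [int(v) for v in row[7:]]  # 288 five-minute bins per day
        break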
|
|
ce8ece0f2d7e27cc325e24a569c0b2a9761eac6f
|
f5_openstack_agent/utils/clean_partition.py
|
f5_openstack_agent/utils/clean_partition.py
|
# coding=utf-8
# Copyright 2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import ConfigParser
from f5.bigip import ManagementRoot
from f5_openstack_agent.lbaasv2.drivers.bigip import system_helper
import requests
import sys
requests.packages.urllib3.disable_warnings()
def clean_partition(bigip, partition):
sh = system_helper.SystemHelper()
return sh.purge_folder_contents(bigip, folder=partition)
def parse_args():
parser = argparse.ArgumentParser(
description='Utility to clear out the contents of a corrupted tenant',
)
parser.add_argument(
'--config-file', help="Path to f5-openstack-agent.ini",
metavar='config_file',
required=True
)
parser.add_argument(
'--partition', help="Partion on the device to clean",
required=True
)
return parser.parse_args()
def parse_config(config_file):
config = ConfigParser.ConfigParser()
config.readfp(open(config_file))
bigips = []
try:
config_addrs = config.get("DEFAULT", 'icontrol_hostname')
config_user = config.get("DEFAULT", 'icontrol_username')
config_pass = config.get("DEFAULT", 'icontrol_password')
except ConfigParser.NoOptionError as err:
print(err.message)
return bigips
for config_addr in config_addrs.split(','):
bigips.append(
ManagementRoot(hostname=config_addr,
username=config_user,
password=config_pass)
)
return bigips
def main(args):
# Parse the config file
bigips = parse_config(args.config_file)
for bigip in bigips:
try:
clean_partition(bigip, args.partition)
except Exception as err:
print(err.message)
if __name__ == "__main__":
sys.exit(main(parse_args()))
|
Create partition cleaning utility script
|
Create partition cleaning utility script
Issues:
Fixes #464
Problem:
In the event the agent gets out of sync with the neutron database,
it is helpful to clear all objects from the tenant partition without
having to navigate to the BIG-IP and delete manually or having to
revert to a saved config. This script will take a folder and BIG-IP
credentials and purge a folder of all its contents.
Analysis:
Adding a script that will parse the configuration file for credentials
and use those to connect to the BIG-IP and remove all objects from
the tenant partition.
Tests:
Populate the tenant folder by creating an LTM service. Run
the script to remove all objects.
|
Python
|
apache-2.0
|
F5Networks/f5-openstack-agent,richbrowne/f5-openstack-agent,F5Networks/f5-openstack-agent,richbrowne/f5-openstack-agent,richbrowne/f5-openstack-agent,F5Networks/f5-openstack-agent
|
Create partition cleaning utility script
Issues:
Fixes #464
Problem:
In the event the agent gets out of sync with the neutron database,
it is helpful to clear all objects from the tenant partition without
having to navigate to the BIG-IP and delete manually or having to
revert to a saved config. This script will take a folder and BIG-IP
credentials and purge a folder of all its contents.
Analysis:
Adding a script that will parse the configuration file for credentials
and use those to connect to the BIG-IP and remove all objects from
the tenant partition.
Tests:
Populate the tenant folder by creating an LTM service. Run
the script to remove all objects.
|
# coding=utf-8
# Copyright 2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import ConfigParser
from f5.bigip import ManagementRoot
from f5_openstack_agent.lbaasv2.drivers.bigip import system_helper
import requests
import sys
requests.packages.urllib3.disable_warnings()
def clean_partition(bigip, partition):
sh = system_helper.SystemHelper()
return sh.purge_folder_contents(bigip, folder=partition)
def parse_args():
parser = argparse.ArgumentParser(
description='Utility to clear out the contents of a corrupted tenant',
)
parser.add_argument(
'--config-file', help="Path to f5-openstack-agent.ini",
metavar='config_file',
required=True
)
parser.add_argument(
'--partition', help="Partion on the device to clean",
required=True
)
return parser.parse_args()
def parse_config(config_file):
config = ConfigParser.ConfigParser()
config.readfp(open(config_file))
bigips = []
try:
config_addrs = config.get("DEFAULT", 'icontrol_hostname')
config_user = config.get("DEFAULT", 'icontrol_username')
config_pass = config.get("DEFAULT", 'icontrol_password')
except ConfigParser.NoOptionError as err:
print(err.message)
return bigips
for config_addr in config_addrs.split(','):
bigips.append(
ManagementRoot(hostname=config_addr,
username=config_user,
password=config_pass)
)
return bigips
def main(args):
# Parse the config file
bigips = parse_config(args.config_file)
for bigip in bigips:
try:
clean_partition(bigip, args.partition)
except Exception as err:
print(err.message)
if __name__ == "__main__":
sys.exit(main(parse_args()))
|
<commit_before><commit_msg>Create partition cleaning utility script
Issues:
Fixes #464
Problem:
In the event the agent gets out of sync with the neutron database,
it is helpful to clear all objects from the tenant partition without
having to navigate to the BIG-IP and delete manually or having to
revert to a saved config. This script will take a folder and BIG-IP
credentials and purge a folder of all its contents.
Analysis:
Adding a script that will parse the configuration file for credentials
and use those to connect to the BIG-IP and remove all objects from
the tenant partition.
Tests:
Populate the tenant folder by creating an LTM service. Run
the script to remove all objects.<commit_after>
|
# coding=utf-8
# Copyright 2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import ConfigParser
from f5.bigip import ManagementRoot
from f5_openstack_agent.lbaasv2.drivers.bigip import system_helper
import requests
import sys
requests.packages.urllib3.disable_warnings()
def clean_partition(bigip, partition):
sh = system_helper.SystemHelper()
return sh.purge_folder_contents(bigip, folder=partition)
def parse_args():
parser = argparse.ArgumentParser(
description='Utility to clear out the contents of a corrupted tenant',
)
parser.add_argument(
'--config-file', help="Path to f5-openstack-agent.ini",
metavar='config_file',
required=True
)
parser.add_argument(
'--partition', help="Partion on the device to clean",
required=True
)
return parser.parse_args()
def parse_config(config_file):
config = ConfigParser.ConfigParser()
config.readfp(open(config_file))
bigips = []
try:
config_addrs = config.get("DEFAULT", 'icontrol_hostname')
config_user = config.get("DEFAULT", 'icontrol_username')
config_pass = config.get("DEFAULT", 'icontrol_password')
except ConfigParser.NoOptionError as err:
print(err.message)
return bigips
for config_addr in config_addrs.split(','):
bigips.append(
ManagementRoot(hostname=config_addr,
username=config_user,
password=config_pass)
)
return bigips
def main(args):
# Parse the config file
bigips = parse_config(args.config_file)
for bigip in bigips:
try:
clean_partition(bigip, args.partition)
except Exception as err:
print(err.message)
if __name__ == "__main__":
sys.exit(main(parse_args()))
|
Create partition cleaning utility script
Issues:
Fixes #464
Problem:
In the event the agent gets out of sync with the neutron database,
it is helpful to clear all objects from the tenant partition without
having to navigate to the BIG-IP and delete manually or having to
revert to a saved config. This script will take a folder and BIG-IP
credentials and purge a folder of all its contents.
Analysis:
Adding a script that will parse the configuration file for credentials
and use those to connect to the BIG-IP and remove all objects from
the tenant partition.
Tests:
Populate the tenant folder by creating an LTM service. Run
the script to remove all objects.# coding=utf-8
# Copyright 2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import ConfigParser
from f5.bigip import ManagementRoot
from f5_openstack_agent.lbaasv2.drivers.bigip import system_helper
import requests
import sys
requests.packages.urllib3.disable_warnings()
def clean_partition(bigip, partition):
sh = system_helper.SystemHelper()
return sh.purge_folder_contents(bigip, folder=partition)
def parse_args():
parser = argparse.ArgumentParser(
description='Utility to clear out the contents of a corrupted tenant',
)
parser.add_argument(
'--config-file', help="Path to f5-openstack-agent.ini",
metavar='config_file',
required=True
)
parser.add_argument(
'--partition', help="Partion on the device to clean",
required=True
)
return parser.parse_args()
def parse_config(config_file):
config = ConfigParser.ConfigParser()
config.readfp(open(config_file))
bigips = []
try:
config_addrs = config.get("DEFAULT", 'icontrol_hostname')
config_user = config.get("DEFAULT", 'icontrol_username')
config_pass = config.get("DEFAULT", 'icontrol_password')
except ConfigParser.NoOptionError as err:
print(err.message)
return bigips
for config_addr in config_addrs.split(','):
bigips.append(
ManagementRoot(hostname=config_addr,
username=config_user,
password=config_pass)
)
return bigips
def main(args):
# Parse the config file
bigips = parse_config(args.config_file)
for bigip in bigips:
try:
clean_partition(bigip, args.partition)
except Exception as err:
print(err.message)
if __name__ == "__main__":
sys.exit(main(parse_args()))
|
<commit_before><commit_msg>Create partition cleaning utility script
Issues:
Fixes #464
Problem:
In the event the agent gets out of sync with the neutron database,
it is helpful to clear all objects from the tenant partition without
having to navigate to the BIG-IP and delete manually or having to
revert to a saved config. This script will take a folder and BIG-IP
credentials and purge a folder of all its contents.
Analysis:
Adding a script that will parse the configuration file for credentials
and use those to connect to the BIG-IP and remove all objects from
the tenant partition.
Tests:
Populate the tenant folder by creating an LTM service. Run
the script to remove all objects.<commit_after># coding=utf-8
# Copyright 2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import ConfigParser
from f5.bigip import ManagementRoot
from f5_openstack_agent.lbaasv2.drivers.bigip import system_helper
import requests
import sys
requests.packages.urllib3.disable_warnings()
def clean_partition(bigip, partition):
sh = system_helper.SystemHelper()
return sh.purge_folder_contents(bigip, folder=partition)
def parse_args():
parser = argparse.ArgumentParser(
description='Utility to clear out the contents of a corrupted tenant',
)
parser.add_argument(
'--config-file', help="Path to f5-openstack-agent.ini",
metavar='config_file',
required=True
)
parser.add_argument(
'--partition', help="Partion on the device to clean",
required=True
)
return parser.parse_args()
def parse_config(config_file):
config = ConfigParser.ConfigParser()
config.readfp(open(config_file))
bigips = []
try:
config_addrs = config.get("DEFAULT", 'icontrol_hostname')
config_user = config.get("DEFAULT", 'icontrol_username')
config_pass = config.get("DEFAULT", 'icontrol_password')
except ConfigParser.NoOptionError as err:
print(err.message)
return bigips
for config_addr in config_addrs.split(','):
bigips.append(
ManagementRoot(hostname=config_addr,
username=config_user,
password=config_pass)
)
return bigips
def main(args):
# Parse the config file
bigips = parse_config(args.config_file)
for bigip in bigips:
try:
clean_partition(bigip, args.partition)
except Exception as err:
print(err.message)
if __name__ == "__main__":
sys.exit(main(parse_args()))
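
A hedged usage sketch, not part of the commit: the utility reads icontrol_hostname, icontrol_username and icontrol_password from the agent config and purges the named partition on every listed device. The config path and partition name below are placeholders; parse_config and clean_partition are the functions defined in the script above.

from f5_openstack_agent.utils.clean_partition import clean_partition, parse_config

# Placeholder path and partition name for illustration only.
config_file = '/etc/neutron/services/f5/f5-openstack-agent.ini'
partition = 'Project_1234567890abcdef'

for bigip in parse_config(config_file):   # one ManagementRoot per icontrol_hostname
    clean_partition(bigip, partition)     # purge the folder's contents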
|
|
cdf16c7f2dd9bbcfe79339cf9fb145e15f1ebfd6
|
apps/mutations/management/commands/update_locus.py
|
apps/mutations/management/commands/update_locus.py
|
"""
Take the csv/tsv data from coding and non-coding files and insert into
Gene Locus database.
"""
import logging
import os
from collections import OrderedDict
from django.core.management.base import BaseCommand, CommandError
from apps.mutations.models import GeneLocus
LOGGER = logging.getLogger('apps.mutations')
class Command(BaseCommand):
def add_arguments(self, parser):
        parser.add_argument('--files', nargs='+', type=str,
help='Specify the code files')
    def handle(self, dataset_id=None, retry=False, **options):
        files = options['files'] or []
        for f in files:
            if not os.path.isfile(f):
                raise IOError("File doesn't exist: %s" % f)
        for f in files:
            self.process_file(f)
def process_file(self, filename):
with open(filename, 'r') as fhl:
self.process_data(fhl.read().split('\n'))
def process_data(self, data):
drugs = OrderedDict()
drug = None
for item in data:
item = item.strip()
if not item:
continue
if item and not ' ' in item:
drug = item
                drugs[drug] = []
continue
if not drug:
logging.warning("Drug missing in mutation list (%s)." % item)
continue
(x, result) = item.split(' ', 1)
drugs[drug].append(result)
|
Add gene locus generator taking in tabular inputs and using fav lib
|
Add gene locus generator taking in tabular inputs and using fav lib
|
Python
|
agpl-3.0
|
IQSS/gentb-site,IQSS/gentb-site,IQSS/gentb-site,IQSS/gentb-site,IQSS/gentb-site,IQSS/gentb-site,IQSS/gentb-site,IQSS/gentb-site
|
Add gene locus generator taking in tabular inputs and using fav lib
|
"""
Take the csv/tsv data from coding and non-coding files and insert into
Gene Locus database.
"""
import logging
import os
from collections import OrderedDict
from django.core.management.base import BaseCommand, CommandError
from apps.mutations.models import GeneLocus
LOGGER = logging.getLogger('apps.mutations')
class Command(BaseCommand):
def add_arguments(self, parser):
        parser.add_argument('--files', nargs='+', type=str,
help='Specify the code files')
    def handle(self, dataset_id=None, retry=False, **options):
        files = options['files'] or []
        for f in files:
            if not os.path.isfile(f):
                raise IOError("File doesn't exist: %s" % f)
        for f in files:
            self.process_file(f)
def process_file(self, filename):
with open(filename, 'r') as fhl:
self.process_data(fhl.read().split('\n'))
def process_data(self, data):
drugs = OrderedDict()
drug = None
for item in data:
item = item.strip()
if not item:
continue
if item and not ' ' in item:
drug = item
                drugs[drug] = []
continue
if not drug:
logging.warning("Drug missing in mutation list (%s)." % item)
continue
(x, result) = item.split(' ', 1)
drugs[drug].append(result)
|
<commit_before><commit_msg>Add gene locus generator taking in tabular inputs and using fav lib<commit_after>
|
"""
Take the csv/tsv data from coding and non-coding files and insert into
Gene Locus database.
"""
import logging
import os
from collections import OrderedDict
from django.core.management.base import BaseCommand, CommandError
from apps.mutations.models import GeneLocus
LOGGER = logging.getLogger('apps.mutations')
class Command(BaseCommand):
def add_arguments(self, parser):
        parser.add_argument('--files', nargs='+', type=str,
help='Specify the code files')
    def handle(self, dataset_id=None, retry=False, **options):
        files = options['files'] or []
        for f in files:
            if not os.path.isfile(f):
                raise IOError("File doesn't exist: %s" % f)
        for f in files:
            self.process_file(f)
def process_file(self, filename):
with open(filename, 'r') as fhl:
self.process_data(fhl.read().split('\n'))
def process_data(self, data):
drugs = OrderedDict()
drug = None
for item in data:
item = item.strip()
if not item:
continue
if item and not ' ' in item:
drug = item
                drugs[drug] = []
continue
if not drug:
logging.warning("Drug missing in mutation list (%s)." % item)
continue
(x, result) = item.split(' ', 1)
drugs[drug].append(result)
|
Add gene locus generator taking in tabular inputs and using fav lib"""
Take the csv/tsv data from coding and non-coding files and insert into
Gene Locus database.
"""
import logging
import os
from collections import OrderedDict
from django.core.management.base import BaseCommand, CommandError
from apps.mutations.models import GeneLocus
LOGGER = logging.getLogger('apps.mutations')
class Command(BaseCommand):
def add_arguments(self, parser):
        parser.add_argument('--files', nargs='+', type=str,
help='Specify the code files')
    def handle(self, dataset_id=None, retry=False, **options):
        files = options['files'] or []
        for f in files:
            if not os.path.isfile(f):
                raise IOError("File doesn't exist: %s" % f)
        for f in files:
            self.process_file(f)
def process_file(self, filename):
with open(filename, 'r') as fhl:
self.process_data(fhl.read().split('\n'))
def process_data(self, data):
drugs = OrderedDict()
drug = None
for item in data:
item = item.strip()
if not item:
continue
if item and not ' ' in item:
drug = item
                drugs[drug] = []
continue
if not drug:
logging.warning("Drug missing in mutation list (%s)." % item)
continue
(x, result) = item.split(' ', 1)
drugs[drug].append(result)
|
<commit_before><commit_msg>Add gene locus generator taking in tabular inputs and using fav lib<commit_after>"""
Take the csv/tsv data from coding and non-coding files and insert into
Gene Locus database.
"""
import logging
import os
from collections import OrderedDict
from django.core.management.base import BaseCommand, CommandError
from apps.mutations.models import GeneLocus
LOGGER = logging.getLogger('apps.mutations')
class Command(BaseCommand):
def add_arguments(self, parser):
        parser.add_argument('--files', nargs='+', type=str,
help='Specify the code files')
    def handle(self, dataset_id=None, retry=False, **options):
        files = options['files'] or []
        for f in files:
            if not os.path.isfile(f):
                raise IOError("File doesn't exist: %s" % f)
        for f in files:
            self.process_file(f)
def process_file(self, filename):
with open(filename, 'r') as fhl:
self.process_data(fhl.read().split('\n'))
def process_data(self, data):
drugs = OrderedDict()
drug = None
for item in data:
item = item.strip()
if not item:
continue
if item and not ' ' in item:
drug = item
                drugs[drug] = []
continue
if not drug:
logging.warning("Drug missing in mutation list (%s)." % item)
continue
(x, result) = item.split(' ', 1)
drugs[drug].append(result)
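
A hedged sketch, not part of the commit, of the input shape process_data() appears to expect: a line containing no space starts a new drug section, and every following line is split once on its first space, with the remainder recorded against that drug. The drug and mutation names below are placeholders for illustration only.

from apps.mutations.management.commands.update_locus import Command

sample = """INH
1 katG_S315T
2 inhA_C-15T
RIF
1 rpoB_S450L
"""
# Feed the lines through the same parser the command runs on its input files.
Command().process_data(sample.split('\n'))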
|
|
2d6d087b7729357cac77eb6a4bfb129c50c217e9
|
assess_isoform_quantification/test/test_options.py
|
assess_isoform_quantification/test/test_options.py
|
from assess_isoform_quantification.options import validate_file_option
from tempfile import NamedTemporaryFile
from schema import SchemaError
import pytest
def test_validate_file_option_returns_handle_for_file_that_exists():
file = NamedTemporaryFile()
file_name = file.name
assert validate_file_option(file_name, "dummy") is not None
def test_validate_file_option_raises_exception_for_non_existing_file():
file = NamedTemporaryFile()
file_name = file.name
file.close()
with pytest.raises(SchemaError):
validate_file_option(file_name, "dummy")
def test_validate_file_option_exception_message_contains_correct_info():
file = NamedTemporaryFile()
file_name = file.name
file.close()
msg = "dummy"
with pytest.raises(SchemaError) as exc_info:
validate_file_option(file_name, msg)
exc_msg = exc_info.value.message
assert msg in exc_msg
assert file_name in exc_msg
|
Add a few test cases for validate_file_option function.
|
Add a few test cases for validate_file_option function.
|
Python
|
mit
|
COMBINE-lab/piquant,lweasel/piquant,lweasel/piquant
|
Add a few test cases for validate_file_option function.
|
from assess_isoform_quantification.options import validate_file_option
from tempfile import NamedTemporaryFile
from schema import SchemaError
import pytest
def test_validate_file_option_returns_handle_for_file_that_exists():
file = NamedTemporaryFile()
file_name = file.name
assert validate_file_option(file_name, "dummy") is not None
def test_validate_file_option_raises_exception_for_non_existing_file():
file = NamedTemporaryFile()
file_name = file.name
file.close()
with pytest.raises(SchemaError):
validate_file_option(file_name, "dummy")
def test_validate_file_option_exception_message_contains_correct_info():
file = NamedTemporaryFile()
file_name = file.name
file.close()
msg = "dummy"
with pytest.raises(SchemaError) as exc_info:
validate_file_option(file_name, msg)
exc_msg = exc_info.value.message
assert msg in exc_msg
assert file_name in exc_msg
|
<commit_before><commit_msg>Add a few test cases for validate_file_option function.<commit_after>
|
from assess_isoform_quantification.options import validate_file_option
from tempfile import NamedTemporaryFile
from schema import SchemaError
import pytest
def test_validate_file_option_returns_handle_for_file_that_exists():
file = NamedTemporaryFile()
file_name = file.name
assert validate_file_option(file_name, "dummy") is not None
def test_validate_file_option_raises_exception_for_non_existing_file():
file = NamedTemporaryFile()
file_name = file.name
file.close()
with pytest.raises(SchemaError):
validate_file_option(file_name, "dummy")
def test_validate_file_option_exception_message_contains_correct_info():
file = NamedTemporaryFile()
file_name = file.name
file.close()
msg = "dummy"
with pytest.raises(SchemaError) as exc_info:
validate_file_option(file_name, msg)
exc_msg = exc_info.value.message
assert msg in exc_msg
assert file_name in exc_msg
|
Add a few test cases for validate_file_option function.from assess_isoform_quantification.options import validate_file_option
from tempfile import NamedTemporaryFile
from schema import SchemaError
import pytest
def test_validate_file_option_returns_handle_for_file_that_exists():
file = NamedTemporaryFile()
file_name = file.name
assert validate_file_option(file_name, "dummy") is not None
def test_validate_file_option_raises_exception_for_non_existing_file():
file = NamedTemporaryFile()
file_name = file.name
file.close()
with pytest.raises(SchemaError):
validate_file_option(file_name, "dummy")
def test_validate_file_option_exception_message_contains_correct_info():
file = NamedTemporaryFile()
file_name = file.name
file.close()
msg = "dummy"
with pytest.raises(SchemaError) as exc_info:
validate_file_option(file_name, msg)
exc_msg = exc_info.value.message
assert msg in exc_msg
assert file_name in exc_msg
|
<commit_before><commit_msg>Add a few test cases for validate_file_option function.<commit_after>from assess_isoform_quantification.options import validate_file_option
from tempfile import NamedTemporaryFile
from schema import SchemaError
import pytest
def test_validate_file_option_returns_handle_for_file_that_exists():
file = NamedTemporaryFile()
file_name = file.name
assert validate_file_option(file_name, "dummy") is not None
def test_validate_file_option_raises_exception_for_non_existing_file():
file = NamedTemporaryFile()
file_name = file.name
file.close()
with pytest.raises(SchemaError):
validate_file_option(file_name, "dummy")
def test_validate_file_option_exception_message_contains_correct_info():
file = NamedTemporaryFile()
file_name = file.name
file.close()
msg = "dummy"
with pytest.raises(SchemaError) as exc_info:
validate_file_option(file_name, msg)
exc_msg = exc_info.value.message
assert msg in exc_msg
assert file_name in exc_msg
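
The validate_file_option implementation under test is not included in this record; the following is only a plausible sketch consistent with these tests (the schema usage and message format are assumptions), shown to make the expected behaviour concrete: return an open handle for an existing path, otherwise raise a SchemaError naming both the description and the path.

import os.path
from schema import Schema

def validate_file_option(file_option, msg):
    # Reject missing files with an error message that carries both the
    # caller's description and the offending path.
    Schema(os.path.isfile,
           error="{0}: '{1}' is not an existing file.".format(
               msg, file_option)).validate(file_option)
    return open(file_option)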
|
|
72fd9ecb9eb068d86762d31429f70061aa907a47
|
server/misc/twitter/tweets.py
|
server/misc/twitter/tweets.py
|
import json
import os
import pymongo
import tweepy
from libs.carmen import get_resolver
def search(query, pages):
_mongo = pymongo.MongoClient().minerva[query]
minerva_twitter_config = json.load(open(
os.path.join(os.path.dirname(__file__), "twitter.json")
))
auth = tweepy.OAuthHandler(minerva_twitter_config["twitter"]["CONSUMER_KEY"],
minerva_twitter_config["twitter"]["CONSUMER_SECRET"])
auth.set_access_token(minerva_twitter_config["twitter"]["ACCESS_KEY"],
minerva_twitter_config["twitter"]["ACCESS_SECRET"])
api = tweepy.API(auth)
resolver = get_resolver()
resolver.load_locations()
for pageInd, page in enumerate(tweepy.Cursor(api.search, q=query, count=100).pages(pages)):
for resultInd, result in enumerate(page):
location = resolver.resolve_tweet(result._json)
# only store those with geolocation
if location:
rec = {
"id": result.id_str,
"location": location[1].__dict__,
"text": result.text,
"created_at": result.created_at,
}
print(rec)
_mongo.insert(rec)
if __name__ == '__main__':
search('influenza', 10)
|
Add script for twitter search api
|
Add script for twitter search api
|
Python
|
apache-2.0
|
Kitware/minerva,Kitware/minerva,Kitware/minerva
|
Add script for twitter search api
|
import json
import os
import pymongo
import tweepy
from libs.carmen import get_resolver
def search(query, pages):
_mongo = pymongo.MongoClient().minerva[query]
minerva_twitter_config = json.load(open(
os.path.join(os.path.dirname(__file__), "twitter.json")
))
auth = tweepy.OAuthHandler(minerva_twitter_config["twitter"]["CONSUMER_KEY"],
minerva_twitter_config["twitter"]["CONSUMER_SECRET"])
auth.set_access_token(minerva_twitter_config["twitter"]["ACCESS_KEY"],
minerva_twitter_config["twitter"]["ACCESS_SECRET"])
api = tweepy.API(auth)
resolver = get_resolver()
resolver.load_locations()
for pageInd, page in enumerate(tweepy.Cursor(api.search, q=query, count=100).pages(pages)):
for resultInd, result in enumerate(page):
location = resolver.resolve_tweet(result._json)
# only store those with geolocation
if location:
rec = {
"id": result.id_str,
"location": location[1].__dict__,
"text": result.text,
"created_at": result.created_at,
}
print(rec)
_mongo.insert(rec)
if __name__ == '__main__':
search('influenza', 10)
|
<commit_before><commit_msg>Add script for twitter search api<commit_after>
|
import json
import os
import pymongo
import tweepy
from libs.carmen import get_resolver
def search(query, pages):
_mongo = pymongo.MongoClient().minerva[query]
minerva_twitter_config = json.load(open(
os.path.join(os.path.dirname(__file__), "twitter.json")
))
auth = tweepy.OAuthHandler(minerva_twitter_config["twitter"]["CONSUMER_KEY"],
minerva_twitter_config["twitter"]["CONSUMER_SECRET"])
auth.set_access_token(minerva_twitter_config["twitter"]["ACCESS_KEY"],
minerva_twitter_config["twitter"]["ACCESS_SECRET"])
api = tweepy.API(auth)
resolver = get_resolver()
resolver.load_locations()
for pageInd, page in enumerate(tweepy.Cursor(api.search, q=query, count=100).pages(pages)):
for resultInd, result in enumerate(page):
location = resolver.resolve_tweet(result._json)
# only store those with geolocation
if location:
rec = {
"id": result.id_str,
"location": location[1].__dict__,
"text": result.text,
"created_at": result.created_at,
}
print(rec)
_mongo.insert(rec)
if __name__ == '__main__':
search('influenza', 10)
|
Add script for twitter search apiimport json
import os
import pymongo
import tweepy
from libs.carmen import get_resolver
def search(query, pages):
_mongo = pymongo.MongoClient().minerva[query]
minerva_twitter_config = json.load(open(
os.path.join(os.path.dirname(__file__), "twitter.json")
))
auth = tweepy.OAuthHandler(minerva_twitter_config["twitter"]["CONSUMER_KEY"],
minerva_twitter_config["twitter"]["CONSUMER_SECRET"])
auth.set_access_token(minerva_twitter_config["twitter"]["ACCESS_KEY"],
minerva_twitter_config["twitter"]["ACCESS_SECRET"])
api = tweepy.API(auth)
resolver = get_resolver()
resolver.load_locations()
for pageInd, page in enumerate(tweepy.Cursor(api.search, q=query, count=100).pages(pages)):
for resultInd, result in enumerate(page):
location = resolver.resolve_tweet(result._json)
# only store those with geolocation
if location:
rec = {
"id": result.id_str,
"location": location[1].__dict__,
"text": result.text,
"created_at": result.created_at,
}
print(rec)
_mongo.insert(rec)
if __name__ == '__main__':
search('influenza', 10)
|
<commit_before><commit_msg>Add script for twitter search api<commit_after>import json
import os
import pymongo
import tweepy
from libs.carmen import get_resolver
def search(query, pages):
_mongo = pymongo.MongoClient().minerva[query]
minerva_twitter_config = json.load(open(
os.path.join(os.path.dirname(__file__), "twitter.json")
))
auth = tweepy.OAuthHandler(minerva_twitter_config["twitter"]["CONSUMER_KEY"],
minerva_twitter_config["twitter"]["CONSUMER_SECRET"])
auth.set_access_token(minerva_twitter_config["twitter"]["ACCESS_KEY"],
minerva_twitter_config["twitter"]["ACCESS_SECRET"])
api = tweepy.API(auth)
resolver = get_resolver()
resolver.load_locations()
for pageInd, page in enumerate(tweepy.Cursor(api.search, q=query, count=100).pages(pages)):
for resultInd, result in enumerate(page):
location = resolver.resolve_tweet(result._json)
# only store those with geolocation
if location:
rec = {
"id": result.id_str,
"location": location[1].__dict__,
"text": result.text,
"created_at": result.created_at,
}
print(rec)
_mongo.insert(rec)
if __name__ == '__main__':
search('influenza', 10)
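
The script expects a twitter.json file alongside it; a minimal sketch of that file's shape, matching the keys search() reads (all values here are placeholders, not real credentials).

import json

placeholder_config = {
    "twitter": {
        "CONSUMER_KEY": "<consumer-key>",
        "CONSUMER_SECRET": "<consumer-secret>",
        "ACCESS_KEY": "<access-token>",
        "ACCESS_SECRET": "<access-token-secret>",
    }
}
# Write the placeholder credentials next to the script.
with open("twitter.json", "w") as fobj:
    json.dump(placeholder_config, fobj, indent=2)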
|
|
a64b25499a9421a5e8dc371ab4cd540adcaaf38c
|
test/test_argument_parsing.py
|
test/test_argument_parsing.py
|
import pytest
from ros_get.__main__ import parse_args
def test_parse_no_command(capsys):
"""It should print an error message when no command is provided"""
with pytest.raises(SystemExit) as pytest_wrapped_e:
parse_args([])
assert pytest_wrapped_e.type == SystemExit
assert pytest_wrapped_e.value.code != 0
captured = capsys.readouterr()
assert captured.out == ''
assert 'the following arguments are required' in captured.err or 'too few arguments' in captured.err
def test_parse_status():
func, args = parse_args(['status'])
assert func == 'status'
assert not args.verbose
def test_parse_status_verbose():
func, args = parse_args(['--verbose', 'status'])
assert func == 'status'
assert args.verbose
|
Add test for running ros-get without arguments
|
Add test for running ros-get without arguments
|
Python
|
mit
|
Rayman/ros-get,Rayman/ros-get
|
Add test for running ros-get without arguments
|
import pytest
from ros_get.__main__ import parse_args
def test_parse_no_command(capsys):
"""It should print an error message when no command is provided"""
with pytest.raises(SystemExit) as pytest_wrapped_e:
parse_args([])
assert pytest_wrapped_e.type == SystemExit
assert pytest_wrapped_e.value.code != 0
captured = capsys.readouterr()
assert captured.out == ''
assert 'the following arguments are required' in captured.err or 'too few arguments' in captured.err
def test_parse_status():
func, args = parse_args(['status'])
assert func == 'status'
assert not args.verbose
def test_parse_status_verbose():
func, args = parse_args(['--verbose', 'status'])
assert func == 'status'
assert args.verbose
|
<commit_before><commit_msg>Add test for running ros-get without arguments<commit_after>
|
import pytest
from ros_get.__main__ import parse_args
def test_parse_no_command(capsys):
"""It should print an error message when no command is provided"""
with pytest.raises(SystemExit) as pytest_wrapped_e:
parse_args([])
assert pytest_wrapped_e.type == SystemExit
assert pytest_wrapped_e.value.code != 0
captured = capsys.readouterr()
assert captured.out == ''
assert 'the following arguments are required' in captured.err or 'too few arguments' in captured.err
def test_parse_status():
func, args = parse_args(['status'])
assert func == 'status'
assert not args.verbose
def test_parse_status_verbose():
func, args = parse_args(['--verbose', 'status'])
assert func == 'status'
assert args.verbose
|
Add test for running ros-get without argumentsimport pytest
from ros_get.__main__ import parse_args
def test_parse_no_command(capsys):
"""It should print an error message when no command is provided"""
with pytest.raises(SystemExit) as pytest_wrapped_e:
parse_args([])
assert pytest_wrapped_e.type == SystemExit
assert pytest_wrapped_e.value.code != 0
captured = capsys.readouterr()
assert captured.out == ''
assert 'the following arguments are required' in captured.err or 'too few arguments' in captured.err
def test_parse_status():
func, args = parse_args(['status'])
assert func == 'status'
assert not args.verbose
def test_parse_status_verbose():
func, args = parse_args(['--verbose', 'status'])
assert func == 'status'
assert args.verbose
|
<commit_before><commit_msg>Add test for running ros-get without arguments<commit_after>import pytest
from ros_get.__main__ import parse_args
def test_parse_no_command(capsys):
"""It should print an error message when no command is provided"""
with pytest.raises(SystemExit) as pytest_wrapped_e:
parse_args([])
assert pytest_wrapped_e.type == SystemExit
assert pytest_wrapped_e.value.code != 0
captured = capsys.readouterr()
assert captured.out == ''
assert 'the following arguments are required' in captured.err or 'too few arguments' in captured.err
def test_parse_status():
func, args = parse_args(['status'])
assert func == 'status'
assert not args.verbose
def test_parse_status_verbose():
func, args = parse_args(['--verbose', 'status'])
assert func == 'status'
assert args.verbose
|
|
014788b5b78132f942b2411f33518ff56a1f0132
|
tests/test_simple_features.py
|
tests/test_simple_features.py
|
import wordgraph
EPOCH_START = 1407109280
def test_monotonic_up_per_second():
datapoints = []
for i in range(10):
datapoints.append((float(i), EPOCH_START + i))
features = wordgraph.name_things(datapoints)
assert "" in features
|
Test case for mononotic increasing graph
|
Test case for mononotic increasing graph
Verify feature description for simple linear graph increasing
monotonically with time.
|
Python
|
apache-2.0
|
tleeuwenburg/wordgraph,tleeuwenburg/wordgraph
|
Test case for mononotic increasing graph
Verify feature description for simple linear graph increasing
monotonically with time.
|
import wordgraph
EPOCH_START = 1407109280
def test_monotonic_up_per_second():
datapoints = []
for i in range(10):
datapoints.append((float(i), EPOCH_START + i))
features = wordgraph.name_things(datapoints)
assert "" in features
|
<commit_before><commit_msg>Test case for mononotic increasing graph
Verify feature description for simple linear graph increasing
monotonically with time.<commit_after>
|
import wordgraph
EPOCH_START = 1407109280
def test_monotonic_up_per_second():
datapoints = []
for i in range(10):
datapoints.append((float(i), EPOCH_START + i))
features = wordgraph.name_things(datapoints)
assert "" in features
|
Test case for mononotic increasing graph
Verify feature description for simple linear graph increasing
monotonically with time.import wordgraph
EPOCH_START = 1407109280
def test_monotonic_up_per_second():
datapoints = []
for i in range(10):
datapoints.append((float(i), EPOCH_START + i))
features = wordgraph.name_things(datapoints)
assert "" in features
|
<commit_before><commit_msg>Test case for mononotic increasing graph
Verify feature description for simple linear graph increasing
monotonically with time.<commit_after>import wordgraph
EPOCH_START = 1407109280
def test_monotonic_up_per_second():
datapoints = []
for i in range(10):
datapoints.append((float(i), EPOCH_START + i))
features = wordgraph.name_things(datapoints)
assert "" in features
|
|
bd7ee6e783f1e6bad6a277f4f63438b995d4a3b1
|
tests/unit/test_expression.py
|
tests/unit/test_expression.py
|
import unittest
from stencil import Template
class ExpressionTests(unittest.TestCase):
def test_function(self):
t = Template('Test {{f("abc")}}').render({"f": lambda x: x.title() })
self.assertEqual(t, 'Test Abc')
def test_function_argument(self):
def double(a):
return a * 2
t = Template('Test {{double(double(1))}}').render({"double": double})
self.assertEqual(t, 'Test 4')
|
Add tests for latest fix
|
Add tests for latest fix
|
Python
|
mit
|
funkybob/stencil,funkybob/stencil
|
Add tests for latest fix
|
import unittest
from stencil import Template
class ExpressionTests(unittest.TestCase):
def test_function(self):
t = Template('Test {{f("abc")}}').render({"f": lambda x: x.title() })
self.assertEqual(t, 'Test Abc')
def test_function_argument(self):
def double(a):
return a * 2
t = Template('Test {{double(double(1))}}').render({"double": double})
self.assertEqual(t, 'Test 4')
|
<commit_before><commit_msg>Add tests for latest fix<commit_after>
|
import unittest
from stencil import Template
class ExpressionTests(unittest.TestCase):
def test_function(self):
t = Template('Test {{f("abc")}}').render({"f": lambda x: x.title() })
self.assertEqual(t, 'Test Abc')
def test_function_argument(self):
def double(a):
return a * 2
t = Template('Test {{double(double(1))}}').render({"double": double})
self.assertEqual(t, 'Test 4')
|
Add tests for latest fiximport unittest
from stencil import Template
class ExpressionTests(unittest.TestCase):
def test_function(self):
t = Template('Test {{f("abc")}}').render({"f": lambda x: x.title() })
self.assertEqual(t, 'Test Abc')
def test_function_argument(self):
def double(a):
return a * 2
t = Template('Test {{double(double(1))}}').render({"double": double})
self.assertEqual(t, 'Test 4')
|
<commit_before><commit_msg>Add tests for latest fix<commit_after>import unittest
from stencil import Template
class ExpressionTests(unittest.TestCase):
def test_function(self):
t = Template('Test {{f("abc")}}').render({"f": lambda x: x.title() })
self.assertEqual(t, 'Test Abc')
def test_function_argument(self):
def double(a):
return a * 2
t = Template('Test {{double(double(1))}}').render({"double": double})
self.assertEqual(t, 'Test 4')
|
|
e74177a0c668e2b4016b5b58780f92d63f29a222
|
django_afip/migrations/0034_vat_condition_choices.py
|
django_afip/migrations/0034_vat_condition_choices.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-30 01:44
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('afip', '0033_receiptpdf__pdf_file__help_text'),
]
operations = [
migrations.AlterField(
model_name='receiptpdf',
name='client_vat_condition',
field=models.CharField(choices=[('IVA Responsable Inscripto', 'IVA Responsable Inscripto'), ('IVA Responsable No Inscripto', 'IVA Responsable No Inscripto'), ('IVA Sujeto Exento', 'IVA Sujeto Exento'), ('Consumidor Final', 'Consumidor Final'), ('Responsable Monotributo', 'Responsable Monotributo'), ('Proveedor del Exterior', 'Proveedor del Exterior'), ('Cliente del Exterior', 'Cliente del Exterior'), ('IVA Liberado - Ley Nº 19.640', 'IVA Liberado - Ley Nº 19.640'), ('IVA Responsable Inscripto - Agente de Percepción', 'IVA Responsable Inscripto - Agente de Percepción'), ('Monotributista Social', 'Monotributista Social'), ('IVA no alcanzado', 'IVA no alcanzado')], max_length=48, verbose_name='client vat condition'),
),
migrations.AlterField(
model_name='receiptpdf',
name='vat_condition',
field=models.CharField(choices=[('IVA Responsable Inscripto', 'IVA Responsable Inscripto'), ('IVA Responsable No Inscripto', 'IVA Responsable No Inscripto'), ('IVA Exento', 'IVA Exento'), ('No Responsable IVA', 'No Responsable IVA'), ('Responsable Monotributo', 'Responsable Monotributo')], max_length=48, verbose_name='vat condition'),
),
migrations.AlterField(
model_name='taxpayerprofile',
name='vat_condition',
field=models.CharField(choices=[('IVA Responsable Inscripto', 'IVA Responsable Inscripto'), ('IVA Responsable No Inscripto', 'IVA Responsable No Inscripto'), ('IVA Exento', 'IVA Exento'), ('No Responsable IVA', 'No Responsable IVA'), ('Responsable Monotributo', 'Responsable Monotributo')], max_length=48, verbose_name='vat condition'),
),
]
|
Add updated migrations for vat_conditions
|
Add updated migrations for vat_conditions
|
Python
|
isc
|
hobarrera/django-afip,hobarrera/django-afip
|
Add updated migrations for vat_conditions
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-30 01:44
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('afip', '0033_receiptpdf__pdf_file__help_text'),
]
operations = [
migrations.AlterField(
model_name='receiptpdf',
name='client_vat_condition',
field=models.CharField(choices=[('IVA Responsable Inscripto', 'IVA Responsable Inscripto'), ('IVA Responsable No Inscripto', 'IVA Responsable No Inscripto'), ('IVA Sujeto Exento', 'IVA Sujeto Exento'), ('Consumidor Final', 'Consumidor Final'), ('Responsable Monotributo', 'Responsable Monotributo'), ('Proveedor del Exterior', 'Proveedor del Exterior'), ('Cliente del Exterior', 'Cliente del Exterior'), ('IVA Liberado - Ley Nº 19.640', 'IVA Liberado - Ley Nº 19.640'), ('IVA Responsable Inscripto - Agente de Percepción', 'IVA Responsable Inscripto - Agente de Percepción'), ('Monotributista Social', 'Monotributista Social'), ('IVA no alcanzado', 'IVA no alcanzado')], max_length=48, verbose_name='client vat condition'),
),
migrations.AlterField(
model_name='receiptpdf',
name='vat_condition',
field=models.CharField(choices=[('IVA Responsable Inscripto', 'IVA Responsable Inscripto'), ('IVA Responsable No Inscripto', 'IVA Responsable No Inscripto'), ('IVA Exento', 'IVA Exento'), ('No Responsable IVA', 'No Responsable IVA'), ('Responsable Monotributo', 'Responsable Monotributo')], max_length=48, verbose_name='vat condition'),
),
migrations.AlterField(
model_name='taxpayerprofile',
name='vat_condition',
field=models.CharField(choices=[('IVA Responsable Inscripto', 'IVA Responsable Inscripto'), ('IVA Responsable No Inscripto', 'IVA Responsable No Inscripto'), ('IVA Exento', 'IVA Exento'), ('No Responsable IVA', 'No Responsable IVA'), ('Responsable Monotributo', 'Responsable Monotributo')], max_length=48, verbose_name='vat condition'),
),
]
|
<commit_before><commit_msg>Add updated migrations for vat_conditions<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-30 01:44
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('afip', '0033_receiptpdf__pdf_file__help_text'),
]
operations = [
migrations.AlterField(
model_name='receiptpdf',
name='client_vat_condition',
field=models.CharField(choices=[('IVA Responsable Inscripto', 'IVA Responsable Inscripto'), ('IVA Responsable No Inscripto', 'IVA Responsable No Inscripto'), ('IVA Sujeto Exento', 'IVA Sujeto Exento'), ('Consumidor Final', 'Consumidor Final'), ('Responsable Monotributo', 'Responsable Monotributo'), ('Proveedor del Exterior', 'Proveedor del Exterior'), ('Cliente del Exterior', 'Cliente del Exterior'), ('IVA Liberado - Ley Nº 19.640', 'IVA Liberado - Ley Nº 19.640'), ('IVA Responsable Inscripto - Agente de Percepción', 'IVA Responsable Inscripto - Agente de Percepción'), ('Monotributista Social', 'Monotributista Social'), ('IVA no alcanzado', 'IVA no alcanzado')], max_length=48, verbose_name='client vat condition'),
),
migrations.AlterField(
model_name='receiptpdf',
name='vat_condition',
field=models.CharField(choices=[('IVA Responsable Inscripto', 'IVA Responsable Inscripto'), ('IVA Responsable No Inscripto', 'IVA Responsable No Inscripto'), ('IVA Exento', 'IVA Exento'), ('No Responsable IVA', 'No Responsable IVA'), ('Responsable Monotributo', 'Responsable Monotributo')], max_length=48, verbose_name='vat condition'),
),
migrations.AlterField(
model_name='taxpayerprofile',
name='vat_condition',
field=models.CharField(choices=[('IVA Responsable Inscripto', 'IVA Responsable Inscripto'), ('IVA Responsable No Inscripto', 'IVA Responsable No Inscripto'), ('IVA Exento', 'IVA Exento'), ('No Responsable IVA', 'No Responsable IVA'), ('Responsable Monotributo', 'Responsable Monotributo')], max_length=48, verbose_name='vat condition'),
),
]
|
Add updated migrations for vat_conditions# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-30 01:44
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('afip', '0033_receiptpdf__pdf_file__help_text'),
]
operations = [
migrations.AlterField(
model_name='receiptpdf',
name='client_vat_condition',
field=models.CharField(choices=[('IVA Responsable Inscripto', 'IVA Responsable Inscripto'), ('IVA Responsable No Inscripto', 'IVA Responsable No Inscripto'), ('IVA Sujeto Exento', 'IVA Sujeto Exento'), ('Consumidor Final', 'Consumidor Final'), ('Responsable Monotributo', 'Responsable Monotributo'), ('Proveedor del Exterior', 'Proveedor del Exterior'), ('Cliente del Exterior', 'Cliente del Exterior'), ('IVA Liberado - Ley Nº 19.640', 'IVA Liberado - Ley Nº 19.640'), ('IVA Responsable Inscripto - Agente de Percepción', 'IVA Responsable Inscripto - Agente de Percepción'), ('Monotributista Social', 'Monotributista Social'), ('IVA no alcanzado', 'IVA no alcanzado')], max_length=48, verbose_name='client vat condition'),
),
migrations.AlterField(
model_name='receiptpdf',
name='vat_condition',
field=models.CharField(choices=[('IVA Responsable Inscripto', 'IVA Responsable Inscripto'), ('IVA Responsable No Inscripto', 'IVA Responsable No Inscripto'), ('IVA Exento', 'IVA Exento'), ('No Responsable IVA', 'No Responsable IVA'), ('Responsable Monotributo', 'Responsable Monotributo')], max_length=48, verbose_name='vat condition'),
),
migrations.AlterField(
model_name='taxpayerprofile',
name='vat_condition',
field=models.CharField(choices=[('IVA Responsable Inscripto', 'IVA Responsable Inscripto'), ('IVA Responsable No Inscripto', 'IVA Responsable No Inscripto'), ('IVA Exento', 'IVA Exento'), ('No Responsable IVA', 'No Responsable IVA'), ('Responsable Monotributo', 'Responsable Monotributo')], max_length=48, verbose_name='vat condition'),
),
]
|
<commit_before><commit_msg>Add updated migrations for vat_conditions<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-30 01:44
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('afip', '0033_receiptpdf__pdf_file__help_text'),
]
operations = [
migrations.AlterField(
model_name='receiptpdf',
name='client_vat_condition',
field=models.CharField(choices=[('IVA Responsable Inscripto', 'IVA Responsable Inscripto'), ('IVA Responsable No Inscripto', 'IVA Responsable No Inscripto'), ('IVA Sujeto Exento', 'IVA Sujeto Exento'), ('Consumidor Final', 'Consumidor Final'), ('Responsable Monotributo', 'Responsable Monotributo'), ('Proveedor del Exterior', 'Proveedor del Exterior'), ('Cliente del Exterior', 'Cliente del Exterior'), ('IVA Liberado - Ley Nº 19.640', 'IVA Liberado - Ley Nº 19.640'), ('IVA Responsable Inscripto - Agente de Percepción', 'IVA Responsable Inscripto - Agente de Percepción'), ('Monotributista Social', 'Monotributista Social'), ('IVA no alcanzado', 'IVA no alcanzado')], max_length=48, verbose_name='client vat condition'),
),
migrations.AlterField(
model_name='receiptpdf',
name='vat_condition',
field=models.CharField(choices=[('IVA Responsable Inscripto', 'IVA Responsable Inscripto'), ('IVA Responsable No Inscripto', 'IVA Responsable No Inscripto'), ('IVA Exento', 'IVA Exento'), ('No Responsable IVA', 'No Responsable IVA'), ('Responsable Monotributo', 'Responsable Monotributo')], max_length=48, verbose_name='vat condition'),
),
migrations.AlterField(
model_name='taxpayerprofile',
name='vat_condition',
field=models.CharField(choices=[('IVA Responsable Inscripto', 'IVA Responsable Inscripto'), ('IVA Responsable No Inscripto', 'IVA Responsable No Inscripto'), ('IVA Exento', 'IVA Exento'), ('No Responsable IVA', 'No Responsable IVA'), ('Responsable Monotributo', 'Responsable Monotributo')], max_length=48, verbose_name='vat condition'),
),
]
|
|
8032fd5bf99b7c235e75617b45c77e38dcba4ec7
|
core/migrations/0023_alter_homepage_featured_section_integer_block.py
|
core/migrations/0023_alter_homepage_featured_section_integer_block.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import wagtail.wagtailcore.fields
import wagtail.wagtailcore.blocks
import wagtail.wagtailimages.blocks
import wagtail.wagtailimages.models
class Migration(migrations.Migration):
dependencies = [
('core', '0022_remove_filter_field_for_wagtail_1_9_upgrade'),
]
operations = [
migrations.AlterField(
model_name='homepage',
name='featured_content',
field=wagtail.wagtailcore.fields.StreamField([('featured_section', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'link_text', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'url', wagtail.wagtailcore.blocks.CharBlock()), (b'position_from_left', wagtail.wagtailcore.blocks.IntegerBlock(help_text='Value in percentage (Max: 75)', max_value=75, default=9, min_value=0, required=True)), (b'position_from_top', wagtail.wagtailcore.blocks.IntegerBlock(help_text='Value in percentage (Max: 40)', max_value=40, default=30, min_value=0, required=True)), (b'featured_image', wagtail.wagtailimages.blocks.ImageChooserBlock()), (b'featured_image_label', wagtail.wagtailcore.blocks.CharBlock(required=False))]))], null=True, blank=True),
),
]
|
Add migration for homepage featured-section integer_block
|
Add migration for homepage featured-section integer_block
|
Python
|
bsd-3-clause
|
PARINetwork/pari,PARINetwork/pari,PARINetwork/pari,PARINetwork/pari
|
Add migration for homepage featured-section integer_block
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import wagtail.wagtailcore.fields
import wagtail.wagtailcore.blocks
import wagtail.wagtailimages.blocks
import wagtail.wagtailimages.models
class Migration(migrations.Migration):
dependencies = [
('core', '0022_remove_filter_field_for_wagtail_1_9_upgrade'),
]
operations = [
migrations.AlterField(
model_name='homepage',
name='featured_content',
field=wagtail.wagtailcore.fields.StreamField([('featured_section', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'link_text', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'url', wagtail.wagtailcore.blocks.CharBlock()), (b'position_from_left', wagtail.wagtailcore.blocks.IntegerBlock(help_text='Value in percentage (Max: 75)', max_value=75, default=9, min_value=0, required=True)), (b'position_from_top', wagtail.wagtailcore.blocks.IntegerBlock(help_text='Value in percentage (Max: 40)', max_value=40, default=30, min_value=0, required=True)), (b'featured_image', wagtail.wagtailimages.blocks.ImageChooserBlock()), (b'featured_image_label', wagtail.wagtailcore.blocks.CharBlock(required=False))]))], null=True, blank=True),
),
]
|
<commit_before><commit_msg>Add migration for homepage featured-section integer_block<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import wagtail.wagtailcore.fields
import wagtail.wagtailcore.blocks
import wagtail.wagtailimages.blocks
import wagtail.wagtailimages.models
class Migration(migrations.Migration):
dependencies = [
('core', '0022_remove_filter_field_for_wagtail_1_9_upgrade'),
]
operations = [
migrations.AlterField(
model_name='homepage',
name='featured_content',
field=wagtail.wagtailcore.fields.StreamField([('featured_section', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'link_text', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'url', wagtail.wagtailcore.blocks.CharBlock()), (b'position_from_left', wagtail.wagtailcore.blocks.IntegerBlock(help_text='Value in percentage (Max: 75)', max_value=75, default=9, min_value=0, required=True)), (b'position_from_top', wagtail.wagtailcore.blocks.IntegerBlock(help_text='Value in percentage (Max: 40)', max_value=40, default=30, min_value=0, required=True)), (b'featured_image', wagtail.wagtailimages.blocks.ImageChooserBlock()), (b'featured_image_label', wagtail.wagtailcore.blocks.CharBlock(required=False))]))], null=True, blank=True),
),
]
|
Add migration for homepage featured-section integer_block# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import wagtail.wagtailcore.fields
import wagtail.wagtailcore.blocks
import wagtail.wagtailimages.blocks
import wagtail.wagtailimages.models
class Migration(migrations.Migration):
dependencies = [
('core', '0022_remove_filter_field_for_wagtail_1_9_upgrade'),
]
operations = [
migrations.AlterField(
model_name='homepage',
name='featured_content',
field=wagtail.wagtailcore.fields.StreamField([('featured_section', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'link_text', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'url', wagtail.wagtailcore.blocks.CharBlock()), (b'position_from_left', wagtail.wagtailcore.blocks.IntegerBlock(help_text='Value in percentage (Max: 75)', max_value=75, default=9, min_value=0, required=True)), (b'position_from_top', wagtail.wagtailcore.blocks.IntegerBlock(help_text='Value in percentage (Max: 40)', max_value=40, default=30, min_value=0, required=True)), (b'featured_image', wagtail.wagtailimages.blocks.ImageChooserBlock()), (b'featured_image_label', wagtail.wagtailcore.blocks.CharBlock(required=False))]))], null=True, blank=True),
),
]
|
<commit_before><commit_msg>Add migration for homepage featured-section integer_block<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import wagtail.wagtailcore.fields
import wagtail.wagtailcore.blocks
import wagtail.wagtailimages.blocks
import wagtail.wagtailimages.models
class Migration(migrations.Migration):
dependencies = [
('core', '0022_remove_filter_field_for_wagtail_1_9_upgrade'),
]
operations = [
migrations.AlterField(
model_name='homepage',
name='featured_content',
field=wagtail.wagtailcore.fields.StreamField([('featured_section', wagtail.wagtailcore.blocks.StructBlock([(b'title', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'link_text', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'url', wagtail.wagtailcore.blocks.CharBlock()), (b'position_from_left', wagtail.wagtailcore.blocks.IntegerBlock(help_text='Value in percentage (Max: 75)', max_value=75, default=9, min_value=0, required=True)), (b'position_from_top', wagtail.wagtailcore.blocks.IntegerBlock(help_text='Value in percentage (Max: 40)', max_value=40, default=30, min_value=0, required=True)), (b'featured_image', wagtail.wagtailimages.blocks.ImageChooserBlock()), (b'featured_image_label', wagtail.wagtailcore.blocks.CharBlock(required=False))]))], null=True, blank=True),
),
]
|
|
e80a92ad6e77d1fcf38c8830c29399099001c541
|
sdks/python/apache_beam/ml/gcp/visionml_test_it.py
|
sdks/python/apache_beam/ml/gcp/visionml_test_it.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
from __future__ import absolute_import
import unittest
from nose.plugins.attrib import attr
import apache_beam as beam
from apache_beam.ml.gcp.visionml import AnnotateImage
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
try:
from google.cloud.vision import types
except ImportError:
types = None
def extract(response):
for r in response.responses:
for text_annotation in r.text_annotations:
yield text_annotation.description
@attr('IT')
class VisionMlTestIT(unittest.TestCase):
IMAGES_TO_ANNOTATE = ['gs://cloud-samples-data/vision/ocr/sign.jpg']
IMAGE_CONTEXT = [types.ImageContext(language_hints=['en'])]
def test_text_detection_with_language_hint(self):
with TestPipeline(is_integration_test=True) as p:
contexts = p | 'Create context' >> beam.Create(
dict(zip(self.IMAGES_TO_ANNOTATE, self.IMAGE_CONTEXT)))
output = (
p
| beam.Create(self.IMAGES_TO_ANNOTATE)
| AnnotateImage(
features=[types.Feature(type='TEXT_DETECTION')],
context_side_input=beam.pvalue.AsDict(contexts))
| beam.ParDo(extract))
assert_that(
output,
equal_to([
'WAITING?\nPLEASE\nTURN OFF\nYOUR\nENGINE',
'WAITING?',
'PLEASE',
'TURN',
'OFF',
'YOUR',
'ENGINE'
]))
if __name__ == '__main__':
unittest.main()
|
Add integration test for AnnotateImage transform
|
Add integration test for AnnotateImage transform
|
Python
|
apache-2.0
|
robertwb/incubator-beam,apache/beam,apache/beam,robertwb/incubator-beam,chamikaramj/beam,iemejia/incubator-beam,robertwb/incubator-beam,apache/beam,chamikaramj/beam,chamikaramj/beam,lukecwik/incubator-beam,chamikaramj/beam,robertwb/incubator-beam,chamikaramj/beam,robertwb/incubator-beam,chamikaramj/beam,apache/beam,lukecwik/incubator-beam,lukecwik/incubator-beam,chamikaramj/beam,robertwb/incubator-beam,chamikaramj/beam,apache/beam,robertwb/incubator-beam,lukecwik/incubator-beam,apache/beam,lukecwik/incubator-beam,lukecwik/incubator-beam,robertwb/incubator-beam,robertwb/incubator-beam,iemejia/incubator-beam,apache/beam,lukecwik/incubator-beam,lukecwik/incubator-beam,chamikaramj/beam,apache/beam,apache/beam,lukecwik/incubator-beam,apache/beam,robertwb/incubator-beam,chamikaramj/beam,apache/beam,lukecwik/incubator-beam
|
Add integration test for AnnotateImage transform
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
from __future__ import absolute_import
import unittest
from nose.plugins.attrib import attr
import apache_beam as beam
from apache_beam.ml.gcp.visionml import AnnotateImage
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
try:
from google.cloud.vision import types
except ImportError:
types = None
def extract(response):
for r in response.responses:
for text_annotation in r.text_annotations:
yield text_annotation.description
@attr('IT')
class VisionMlTestIT(unittest.TestCase):
IMAGES_TO_ANNOTATE = ['gs://cloud-samples-data/vision/ocr/sign.jpg']
IMAGE_CONTEXT = [types.ImageContext(language_hints=['en'])]
def test_text_detection_with_language_hint(self):
with TestPipeline(is_integration_test=True) as p:
contexts = p | 'Create context' >> beam.Create(
dict(zip(self.IMAGES_TO_ANNOTATE, self.IMAGE_CONTEXT)))
output = (
p
| beam.Create(self.IMAGES_TO_ANNOTATE)
| AnnotateImage(
features=[types.Feature(type='TEXT_DETECTION')],
context_side_input=beam.pvalue.AsDict(contexts))
| beam.ParDo(extract))
assert_that(
output,
equal_to([
'WAITING?\nPLEASE\nTURN OFF\nYOUR\nENGINE',
'WAITING?',
'PLEASE',
'TURN',
'OFF',
'YOUR',
'ENGINE'
]))
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add integration test for AnnotateImage transform<commit_after>
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
from __future__ import absolute_import
import unittest
from nose.plugins.attrib import attr
import apache_beam as beam
from apache_beam.ml.gcp.visionml import AnnotateImage
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
try:
from google.cloud.vision import types
except ImportError:
types = None
def extract(response):
for r in response.responses:
for text_annotation in r.text_annotations:
yield text_annotation.description
@attr('IT')
class VisionMlTestIT(unittest.TestCase):
IMAGES_TO_ANNOTATE = ['gs://cloud-samples-data/vision/ocr/sign.jpg']
IMAGE_CONTEXT = [types.ImageContext(language_hints=['en'])]
def test_text_detection_with_language_hint(self):
with TestPipeline(is_integration_test=True) as p:
contexts = p | 'Create context' >> beam.Create(
dict(zip(self.IMAGES_TO_ANNOTATE, self.IMAGE_CONTEXT)))
output = (
p
| beam.Create(self.IMAGES_TO_ANNOTATE)
| AnnotateImage(
features=[types.Feature(type='TEXT_DETECTION')],
context_side_input=beam.pvalue.AsDict(contexts))
| beam.ParDo(extract))
assert_that(
output,
equal_to([
'WAITING?\nPLEASE\nTURN OFF\nYOUR\nENGINE',
'WAITING?',
'PLEASE',
'TURN',
'OFF',
'YOUR',
'ENGINE'
]))
if __name__ == '__main__':
unittest.main()
|
Add integration test for AnnotateImage transform#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
from __future__ import absolute_import
import unittest
from nose.plugins.attrib import attr
import apache_beam as beam
from apache_beam.ml.gcp.visionml import AnnotateImage
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
try:
from google.cloud.vision import types
except ImportError:
types = None
def extract(response):
for r in response.responses:
for text_annotation in r.text_annotations:
yield text_annotation.description
@attr('IT')
class VisionMlTestIT(unittest.TestCase):
IMAGES_TO_ANNOTATE = ['gs://cloud-samples-data/vision/ocr/sign.jpg']
IMAGE_CONTEXT = [types.ImageContext(language_hints=['en'])]
def test_text_detection_with_language_hint(self):
with TestPipeline(is_integration_test=True) as p:
contexts = p | 'Create context' >> beam.Create(
dict(zip(self.IMAGES_TO_ANNOTATE, self.IMAGE_CONTEXT)))
output = (
p
| beam.Create(self.IMAGES_TO_ANNOTATE)
| AnnotateImage(
features=[types.Feature(type='TEXT_DETECTION')],
context_side_input=beam.pvalue.AsDict(contexts))
| beam.ParDo(extract))
assert_that(
output,
equal_to([
'WAITING?\nPLEASE\nTURN OFF\nYOUR\nENGINE',
'WAITING?',
'PLEASE',
'TURN',
'OFF',
'YOUR',
'ENGINE'
]))
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add integration test for AnnotateImage transform<commit_after>#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
from __future__ import absolute_import
import unittest
from nose.plugins.attrib import attr
import apache_beam as beam
from apache_beam.ml.gcp.visionml import AnnotateImage
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
try:
from google.cloud.vision import types
except ImportError:
types = None
def extract(response):
for r in response.responses:
for text_annotation in r.text_annotations:
yield text_annotation.description
@attr('IT')
class VisionMlTestIT(unittest.TestCase):
IMAGES_TO_ANNOTATE = ['gs://cloud-samples-data/vision/ocr/sign.jpg']
IMAGE_CONTEXT = [types.ImageContext(language_hints=['en'])]
def test_text_detection_with_language_hint(self):
with TestPipeline(is_integration_test=True) as p:
contexts = p | 'Create context' >> beam.Create(
dict(zip(self.IMAGES_TO_ANNOTATE, self.IMAGE_CONTEXT)))
output = (
p
| beam.Create(self.IMAGES_TO_ANNOTATE)
| AnnotateImage(
features=[types.Feature(type='TEXT_DETECTION')],
context_side_input=beam.pvalue.AsDict(contexts))
| beam.ParDo(extract))
assert_that(
output,
equal_to([
'WAITING?\nPLEASE\nTURN OFF\nYOUR\nENGINE',
'WAITING?',
'PLEASE',
'TURN',
'OFF',
'YOUR',
'ENGINE'
]))
if __name__ == '__main__':
unittest.main()
|
|
7130a440b422ac3edeaafac5885be3d2a38c4dfc
|
sgt/accounts/migrations/0003_auto_20160507_1902.py
|
sgt/accounts/migrations/0003_auto_20160507_1902.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-05-07 19:02
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0002_auto_20150629_1908'),
]
operations = [
migrations.AlterField(
model_name='userdbv',
name='position',
field=models.CharField(choices=[('CSO', 'Conselheiro(a)'), ('CPT', 'Capit(ã)o'), ('SCT', 'Secretário(a)'), ('TSR', 'Tesoureiro(a)'), ('INS', 'Instrutor(a)'), ('DRT', 'Diretor(a)'), ('DRT_AS', 'Diretor(a) Associado(a)'), ('CPL', 'Capel(ã)o'), ('DBV', 'Desbravador(a)')], max_length=100),
),
]
|
Create migration file for accounts app
|
Create migration file for accounts app
|
Python
|
mit
|
mazulo/SGT,mazulo/SGT
|
Create migration file for accounts app
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-05-07 19:02
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0002_auto_20150629_1908'),
]
operations = [
migrations.AlterField(
model_name='userdbv',
name='position',
field=models.CharField(choices=[('CSO', 'Conselheiro(a)'), ('CPT', 'Capit(ã)o'), ('SCT', 'Secretário(a)'), ('TSR', 'Tesoureiro(a)'), ('INS', 'Instrutor(a)'), ('DRT', 'Diretor(a)'), ('DRT_AS', 'Diretor(a) Associado(a)'), ('CPL', 'Capel(ã)o'), ('DBV', 'Desbravador(a)')], max_length=100),
),
]
|
<commit_before><commit_msg>Create migration file for accounts app<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-05-07 19:02
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0002_auto_20150629_1908'),
]
operations = [
migrations.AlterField(
model_name='userdbv',
name='position',
field=models.CharField(choices=[('CSO', 'Conselheiro(a)'), ('CPT', 'Capit(ã)o'), ('SCT', 'Secretário(a)'), ('TSR', 'Tesoureiro(a)'), ('INS', 'Instrutor(a)'), ('DRT', 'Diretor(a)'), ('DRT_AS', 'Diretor(a) Associado(a)'), ('CPL', 'Capel(ã)o'), ('DBV', 'Desbravador(a)')], max_length=100),
),
]
|
Create migration file for accounts app# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-05-07 19:02
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0002_auto_20150629_1908'),
]
operations = [
migrations.AlterField(
model_name='userdbv',
name='position',
field=models.CharField(choices=[('CSO', 'Conselheiro(a)'), ('CPT', 'Capit(ã)o'), ('SCT', 'Secretário(a)'), ('TSR', 'Tesoureiro(a)'), ('INS', 'Instrutor(a)'), ('DRT', 'Diretor(a)'), ('DRT_AS', 'Diretor(a) Associado(a)'), ('CPL', 'Capel(ã)o'), ('DBV', 'Desbravador(a)')], max_length=100),
),
]
|
<commit_before><commit_msg>Create migration file for accounts app<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-05-07 19:02
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0002_auto_20150629_1908'),
]
operations = [
migrations.AlterField(
model_name='userdbv',
name='position',
field=models.CharField(choices=[('CSO', 'Conselheiro(a)'), ('CPT', 'Capit(ã)o'), ('SCT', 'Secretário(a)'), ('TSR', 'Tesoureiro(a)'), ('INS', 'Instrutor(a)'), ('DRT', 'Diretor(a)'), ('DRT_AS', 'Diretor(a) Associado(a)'), ('CPL', 'Capel(ã)o'), ('DBV', 'Desbravador(a)')], max_length=100),
),
]
|