Dataset columns (length ranges are minimum and maximum observed string lengths; "k" = thousands):

    column           kind         length / values
    commit           string       40
    old_file         string       4 to 118
    new_file         string       4 to 118
    old_contents     string       0 to 2.94k
    new_contents     string       1 to 4.43k
    subject          string       15 to 444
    message          string       16 to 3.45k
    lang             categorical  1 distinct value
    license          categorical  13 distinct values
    repos            string       5 to 43.2k
    prompt           string       17 to 4.58k
    response         string       1 to 4.43k
    prompt_tagged    string       58 to 4.62k
    response_tagged  string       1 to 4.43k
    text             string       132 to 7.29k
    text_tagged      string       173 to 7.33k

The last six columns are derived from the base columns: prompt is old_contents followed by message, response is new_contents, prompt_tagged and response_tagged wrap the same fields in <commit_before>, <commit_msg>, and <commit_after> markers, and text / text_tagged concatenate the corresponding prompt and response.
commit:   f622255dc2c6695b785213c8d69cb57ae5d8a5e9
old_file: waftools/pebble_sdk_version.py
new_file: waftools/pebble_sdk_version.py
old_contents: (empty; the commit adds the file)
new_contents:

    from waflib.Configure import conf


    @conf
    def compare_sdk_version(ctx, platform, version):
        target_env = ctx.all_envs[platform] if platform in ctx.all_envs else ctx.env
        target_version = (int(target_env.SDK_VERSION_MAJOR or 0x5) * 0xff +
                          int(target_env.SDK_VERSION_MINOR or 0x19))
        other_version = int(version[0]) * 0xff + int(version[1])
        diff_version = target_version - other_version
        return 0 if diff_version == 0 else diff_version / abs(diff_version)


    @conf
    def supports_bitmap_resource(ctx):
        return (ctx.compare_sdk_version('aplite', [0x5, 0x48]) >= 0)
subject/message: Add pebble sdk version for detecting sdk features
lang:     Python
license:  mit
repos:    jiangege/pebblejs-project, youtux/PebbleShows, bkbilly/Tvheadend-EPG, pebble/pebblejs, fletchto99/pebblejs, sunshineyyy/CatchOneBus, carlo-colombo/dublin-bus-pebble, jsfi/pebblejs, daduke/LMSController
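compare_sdk_version returns a cmp-style 0 or ±1, and waf's @conf decorator exposes both helpers as methods on the configure and build contexts. A hypothetical call site in a project wscript might look like this (the version numbers are illustrative, and SDK_VERSION_MAJOR / SDK_VERSION_MINOR are assumed to have been stored on the platform env during configure):

    def configure(ctx):
        # Fail early on SDKs older than an assumed 3.6 baseline.
        if ctx.compare_sdk_version('aplite', [3, 6]) < 0:
            ctx.fatal('Pebble SDK 3.6 or newer is required')

    def build(ctx):
        # Pick the richer resource type only where the SDK supports it.
        res_type = 'bitmap' if ctx.supports_bitmap_resource() else 'png'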

commit:   0fa30986e1f97331f96444e0b3b0f86cbe20c68a
old_file: shadho/backend/json/tests/test_db.py
new_file: shadho/backend/json/tests/test_db.py
old_contents: (empty; the commit adds the file)
new_contents:

    import pytest

    from shadho.backend.base.tests.test_db import TestBaseBackend
    from shadho.backend.json.db import JsonBackend

    import json
    import os
    import shutil


    class TestJsonBackend(object):
        def test_init(self):
            """Ensure that initialization sets up the db and filepath."""
            # Test default initialization
            b = JsonBackend()
            assert b.path == os.path.join(os.getcwd(), 'shadho.json')
            assert b.db == {'models': {}, 'domains': {}, 'results': {}, 'values': {}}
            assert b.commit_frequency == 10
            assert b.update_frequency == 10

            # Test custom initialization
            b = JsonBackend(path='foo.bar', commit_frequency=42, update_frequency=42)
            assert b.path == os.path.join(os.getcwd(), 'foo.bar')
            assert b.db == {'models': {}, 'domains': {}, 'results': {}, 'values': {}}
            assert b.commit_frequency == 42
            assert b.update_frequency == 42

            # Test without specifying a file name
            b = JsonBackend(path='/tmp')
            assert b.path == os.path.join('/tmp', 'shadho.json')
            assert b.db == {'models': {}, 'domains': {}, 'results': {}, 'values': {}}
            assert b.commit_frequency == 10
            assert b.update_frequency == 10

        def test_commit(self):
            """Ensure that commit writes to file and the file is loadable."""
            temp = shutil.mkdtemp()
            fpath = os.path.join(temp, 'shahdo.json')  # sic: 'shahdo' appears in the original commit

            # Test saving and loading
            b = JsonBackend(path=temp)
            assert os.path.isfile(fpath)
            with open(fpath, 'r') as f:
                db = json.load(f)
            assert db == {'models': {}, 'domains': {}, 'results': {}, 'values': {}}

            shutil.rmtree(temp)

        def test_count(self):
            """Ensure that the correct counts are returned for object classes"""
subject/message: Add tests for JsonBackend __init__ and commit methods
lang:     Python
license:  mit
repos:    jeffkinnison/shadho

commit:   871f79a0b2bd235df457e3a1dc502d5c18bd934a
old_file: tools/build/common_utils.py
new_file: tools/build/common_utils.py
old_contents: (empty; the commit adds the file)
new_contents:

    from __future__ import print_function

    import os


    def game_root_path():
        file_path = os.path.dirname(os.path.abspath(__file__))
        return os.path.abspath(os.path.join(file_path, '..', '..'))


    def files_with_type(root, type):
        all_files = [os.path.join(root, filename) for filename in os.listdir(root)]
        typed_files = [path for path in all_files if path.endswith('.' + type)]
        return typed_files


    def sha1_of_file(filepath):
        import hashlib
        if not os.path.exists(filepath):
            return ''
        with open(filepath, 'rb') as f:
            return hashlib.sha1(f.read()).hexdigest()


    def fetch_file(url, target_path, sha1):
        if sha1_of_file(target_path) == sha1:
            return True  # Already downloaded
        import urllib
        if hasattr(urllib, 'urlretrieve'):
            # Python 2
            urllib.urlretrieve(url, target_path)
        else:
            # Python 3
            import urllib.request
            urllib.request.urlretrieve(url, target_path)
        if sha1 == None:
            print('sha1 of ' + target_path + ': ' + sha1_of_file(target_path))
        elif sha1_of_file(target_path) != sha1:
            if os.path.exists(target_path):
                os.remove(target_path)
            return False
        return True


    def python27_path():
        import sys
        exe = ''
        if sys.version_info.minor == 7 and sys.version_info.major == 2:
            exe = sys.executable
        elif sys.platform.startswith("linux"):
            exe = '/usr/local/bin/python2.7'
        elif sys.platform == "darwin":
            exe = '/usr/local/bin/python2.7'
        elif sys.platform == "win32":
            exe = 'C:\Python27\python.exe'
        return exe


    if __name__ == '__main__':
        print('Game root path: ' + game_root_path())
subject/message: Add some generic python utilities as a basis for scripts
lang:     Python
license:  mit
repos:    Oletus/gameutils.js
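A small hypothetical driver for fetch_file, assuming the module is importable as common_utils; the URL and checksum are placeholders, not real artifacts. Passing sha1=None downloads unconditionally and prints the checksum, which is how the pinned value would be bootstrapped:

    import common_utils

    # First run: learn the checksum (prints 'sha1 of ...'), then pin it below.
    common_utils.fetch_file('https://example.com/tool.zip', 'tool.zip', None)

    # Subsequent runs: the download is skipped when the pinned checksum matches.
    ok = common_utils.fetch_file('https://example.com/tool.zip', 'tool.zip',
                                 '0123456789abcdef0123456789abcdef01234567')
    if not ok:
        raise RuntimeError('checksum mismatch for tool.zip')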

commit:   9c53e59ee0c4e5418b54d47c932454b7b907dc03
old_file: seahub/profile/forms.py
new_file: seahub/profile/forms.py
old_contents:

    # encoding: utf-8
    from django import forms
    from django.utils.html import escape

    from seahub.profile.models import Profile, DetailedProfile


    class ProfileForm(forms.Form):
        nickname = forms.CharField(max_length=64, required=False)
        intro = forms.CharField(max_length=256, required=False)

        def save(self, username):
            nickname = escape(self.cleaned_data['nickname'])
            intro = escape(self.cleaned_data['intro'])
            Profile.objects.add_or_update(username, nickname, intro)


    class DetailedProfileForm(ProfileForm):
        department = forms.CharField(max_length=512, required=False)
        telephone = forms.CharField(max_length=100, required=False)

        def save(self, username):
            super(DetailedProfileForm, self).save(username)
            department = escape(self.cleaned_data['department'])
            telephone = escape(self.cleaned_data['telephone'])
            DetailedProfile.objects.add_or_update(username, department, telephone)

new_contents: identical except that the django.utils.html import is dropped and
each field is read directly from cleaned_data:

    nickname = self.cleaned_data['nickname']
    intro = self.cleaned_data['intro']
    department = self.cleaned_data['department']
    telephone = self.cleaned_data['telephone']
subject/message: Revert escape nickname, desc, etc in user profile
lang:     Python
license:  apache-2.0
repos:    madflow/seahub, Chilledheart/seahub, cloudcopy/seahub, miurahr/seahub
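For context on what the revert removes: django.utils.html.escape HTML-encodes its argument, and Django templates auto-escape again at render time, so escaping at save time double-encodes stored text. A minimal illustration (the sample nickname is hypothetical):

    from django.utils.html import escape

    nickname = '<b>Alice & Bob</b>'
    stored = escape(nickname)        # '&lt;b&gt;Alice &amp; Bob&lt;/b&gt;'
    twice = escape(stored)           # '&amp;lt;b&amp;gt;...' once a template escapes it again
    assert twice != stored           # escape() is not idempotent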

commit:   8affeda715b1facf12de1dab1d445bbe54616306
old_file: oscar/core/ajax.py
new_file: oscar/core/ajax.py
old_contents:

    import six
    from django.contrib import messages
    from six.moves import map


    class FlashMessages(object):
        """
        Intermediate container for flash messages.

        This is useful as, at the time of creating the message, we don't know
        whether the response is an AJAX response or not.
        """

        def __init__(self):
            self.msgs = {}

        def add_message(self, level, message):
            self.msgs.setdefault(level, []).append(message)

        def add_messages(self, level, messages):
            for msg in messages:
                self.add_message(level, msg)

        def info(self, message):
            self.add_message(messages.INFO, message)

        def warning(self, message):
            self.add_message(messages.WARNING, message)

        def error(self, message):
            self.add_message(messages.ERROR, message)

        def success(self, message):
            self.add_message(messages.SUCCESS, message)

        def to_json(self):
            payload = {}
            for level, msgs in self.msgs.items():
                tag = messages.DEFAULT_TAGS.get(level, 'info')
                payload[tag] = map(six.text_type, msgs)
            return payload

        def apply_to_request(self, request):
            for level, msgs in self.msgs.items():
                for msg in msgs:
                    messages.add_message(request, level, msg)

new_contents: identical except in to_json, where the map call becomes a list
comprehension (the now-unused "from six.moves import map" import is kept):

    payload[tag] = [six.text_type(msg) for msg in msgs]
subject:  Fix JSON serialisation problem with AJAX basket
message:

    Fix JSON serialisation problem with AJAX basket

    six.moves.map returns itertools.imap which won't serialize to JSON. This
    commit unpacks the list into a normal list of strings to circumvent the
    issue.
lang:     Python
license:  bsd-3-clause
repos:    Bogh/django-oscar, DrOctogon/unwash_ecom, Jannes123/django-oscar, MatthewWilkes/django-oscar, QLGu/django-oscar, WadeYuChen/django-oscar, WillisXChen/django-oscar, adamend/django-oscar, ademuk/django-oscar, ahmetdaglarbas/e-commerce, amirrpp/django-oscar, anentropic/django-oscar, binarydud/django-oscar, bnprk/django-oscar, bschuon/django-oscar, django-oscar/django-oscar, dongguangming/django-oscar, eddiep1101/django-oscar, elliotthill/django-oscar, faratro/django-oscar, itbabu/django-oscar, jinnykoo/christmas, jinnykoo/wuyisj, jinnykoo/wuyisj.com, jlmadurga/django-oscar, jmt4/django-oscar, john-parton/django-oscar, josesanch/django-oscar, ka7eh/django-oscar, kapari/django-oscar, kapt/django-oscar, lijoantony/django-oscar, machtfit/django-oscar, manevant/django-oscar, marcoantoniooliveira/labweb, mexeniz/django-oscar, michaelkuty/django-oscar, monikasulik/django-oscar, nfletton/django-oscar, nickpack/django-oscar, okfish/django-oscar, pasqualguerrero/django-oscar, pdonadeo/django-oscar, rocopartners/django-oscar, saadatqadri/django-oscar, sasha0/django-oscar, solarissmoke/django-oscar, sonofatailor/django-oscar, spartonia/django-oscar, taedori81/django-oscar, thechampanurag/django-oscar, vovanbo/django-oscar
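The failure the message describes is easy to reproduce: on Python 3 (and with six.moves.map on Python 2) the map call yields a lazy iterator, which the json encoder rejects, while the list comprehension in the fix produces a plain list that serialises cleanly. A minimal reproduction (the message text is illustrative):

    import json

    msgs = map(str, ['Added to basket'])
    try:
        json.dumps({'success': msgs})
    except TypeError as err:
        print(err)  # e.g. "Object of type map is not JSON serializable"

    # The fix materialises the strings first, which serialises fine:
    print(json.dumps({'success': [str(m) for m in ['Added to basket']]}))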

commit:   0c8b7fa865df535f5baa33025c184bbf4234b7b1
old_file: shp_to_csv_distances.py
new_file: shp_to_csv_distances.py
old_contents: (empty; the commit adds the file)
new_contents:

    """Create a csv matrix of distances between shapefile geometry objects.

    Requirements: fiona, shapely

    Written by: Taylor Denouden
    Date: November 25, 2015
    """

    import random

    import fiona
    from shapely.geometry import shape
    from scripts.printer import print_progress


    def main():
        """Main script execution."""
        outfile = open("out.csv", "w")
        ids = extract_ids("data/high_polys.shp")

        # Write header
        print "Writing Header"
        outfile.write("NODE")
        for i in ids:
            outfile.write("," + i)
        outfile.write("\n")

        # Write rows
        print "Writing Rows"
        for i, j in enumerate(ids):
            print_progress(i/len(ids))
            outfile.write(j)
            write_row_distances(j, ids, "data/high_polys.shp", outfile)
            outfile.write("\n")
        print_progress(1)
        print


    def extract_ids(input_file):
        """Extract all polygon ids from input shapefile."""
        with fiona.open(input_file, 'r') as source:
            return [shp['id'] for shp in source]


    def write_row_distances(i, ids, input_file, outfile):
        """Write distances between shape with id i and all other shapes in ids."""
        with fiona.open(input_file, 'r') as source:
            source = list(source)
            i_shp = shape(source[int(i)]['geometry'])
            for j in ids:
                j_shp = shape(source[int(j)]['geometry'])
                if i_shp.is_valid and j_shp.is_valid:
                    dist = i_shp.distance(j_shp)
                else:
                    dist = -1
                outfile.write("," + str(dist))


    if __name__ == "__main__":
        main()
subject/message: Create script to transform shapefile into csv distance matrix
lang:     Python
license:  mit
repos:    tayden-hakai/Island_MST

commit:   ec6dff24e3049ddaab392f0bc5b8d8b724e41e20
old_file: trending_python.py
new_file: trending_python.py
old_contents: (empty; the commit adds the file)
new_contents:

    #!/usr/bin/env python3

    import bs4
    import requests

    url = 'https://github.com/trending?l=Python'
    soup = bs4.BeautifulSoup(requests.get(url).content, 'lxml')  # or 'html5lib'
    repos = soup.find('ol', class_="repo-list").find_all('a', href=True)
    repos = (r.text.strip().replace(' ', '') for r in repos if '/' in r.text)
    print('\n'.join(repos))
subject/message: Print the trending Python repos on GitHub
lang:     Python
license:  apache-2.0
repos:    cclauss/Ten-lines-or-less

commit:   f6c2d5e37685b149cfd447545c58ce1fc4d836b9
old_file: snorkel/models/views.py
new_file: snorkel/models/views.py
old_contents: (empty; the commit adds the file)
new_contents:

    def create_serialized_candidate_view(session, C, verbose=True):
        """Creates a view in the database for a Candidate sub-class C defined over
        Span contexts, which are direct children of a single sentence.

        Creates VIEW with schema:
            candidate.id, candidate.split, span0.*, ..., spanK.*, sentence.*

        NOTE: This limited functionality should be expanded for arbitrary context
        trees. Also this should be made more dialect-independent.
        """
        selects, froms, joins = [], [], []
        for i, arg in enumerate(C.__argnames__):
            selects.append("span{0}.*".format(i))
            froms.append("span AS span{0}".format(i))
            joins.append("{0}.{1}_id = span{2}.id".format(C.__tablename__, arg, i))
        sql = """
            CREATE VIEW {0}_serialized AS
            SELECT candidate.id, candidate.split, {1}, sentence.*
            FROM candidate, {0}, {2}, sentence
            WHERE candidate.id = {0}.id
            AND sentence.id = span0.sentence_id
            AND {3}
            """.format(C.__tablename__, ", ".join(selects), ", ".join(froms),
                       " AND ".join(joins))
        if verbose:
            print("Creating view...")
            print(sql)
        session.execute(sql)
subject/message: Add function to create view for Span candidate subclasses
Python
apache-2.0
jasontlam/snorkel,HazyResearch/snorkel,HazyResearch/snorkel,jasontlam/snorkel,jasontlam/snorkel,HazyResearch/snorkel
Add function to create view for Span candidate subclasses
def create_serialized_candidate_view(session, C, verbose=True):
    """Creates a view in the database for a Candidate sub-class C defined over
    Span contexts, which are direct children of a single sentence.

    Creates VIEW with schema:
        candidate.id, candidate.split, span0.*, ..., spanK.*, sentence.*

    NOTE: This limited functionality should be expanded for arbitrary context
    trees. Also this should be made more dialect-independent.
    """
    selects, froms, joins = [], [], []
    for i, arg in enumerate(C.__argnames__):
        selects.append("span{0}.*".format(i))
        froms.append("span AS span{0}".format(i))
        joins.append("{0}.{1}_id = span{2}.id".format(C.__tablename__, arg, i))

    sql = """
        CREATE VIEW {0}_serialized AS
        SELECT candidate.id, candidate.split, {1}, sentence.*
        FROM candidate, {0}, {2}, sentence
        WHERE candidate.id = {0}.id
        AND sentence.id = span0.sentence_id
        AND {3}
    """.format(
        C.__tablename__,
        ", ".join(selects),
        ", ".join(froms),
        " AND ".join(joins)
    )
    if verbose:
        print("Creating view...")
        print(sql)
    session.execute(sql)
<commit_before><commit_msg>Add function to create view for Span candidate subclasses<commit_after>
def create_serialized_candidate_view(session, C, verbose=True):
    """Creates a view in the database for a Candidate sub-class C defined over
    Span contexts, which are direct children of a single sentence.

    Creates VIEW with schema:
        candidate.id, candidate.split, span0.*, ..., spanK.*, sentence.*

    NOTE: This limited functionality should be expanded for arbitrary context
    trees. Also this should be made more dialect-independent.
    """
    selects, froms, joins = [], [], []
    for i, arg in enumerate(C.__argnames__):
        selects.append("span{0}.*".format(i))
        froms.append("span AS span{0}".format(i))
        joins.append("{0}.{1}_id = span{2}.id".format(C.__tablename__, arg, i))

    sql = """
        CREATE VIEW {0}_serialized AS
        SELECT candidate.id, candidate.split, {1}, sentence.*
        FROM candidate, {0}, {2}, sentence
        WHERE candidate.id = {0}.id
        AND sentence.id = span0.sentence_id
        AND {3}
    """.format(
        C.__tablename__,
        ", ".join(selects),
        ", ".join(froms),
        " AND ".join(joins)
    )
    if verbose:
        print("Creating view...")
        print(sql)
    session.execute(sql)
Add function to create view for Span candidate subclasses
def create_serialized_candidate_view(session, C, verbose=True):
    """Creates a view in the database for a Candidate sub-class C defined over
    Span contexts, which are direct children of a single sentence.

    Creates VIEW with schema:
        candidate.id, candidate.split, span0.*, ..., spanK.*, sentence.*

    NOTE: This limited functionality should be expanded for arbitrary context
    trees. Also this should be made more dialect-independent.
    """
    selects, froms, joins = [], [], []
    for i, arg in enumerate(C.__argnames__):
        selects.append("span{0}.*".format(i))
        froms.append("span AS span{0}".format(i))
        joins.append("{0}.{1}_id = span{2}.id".format(C.__tablename__, arg, i))

    sql = """
        CREATE VIEW {0}_serialized AS
        SELECT candidate.id, candidate.split, {1}, sentence.*
        FROM candidate, {0}, {2}, sentence
        WHERE candidate.id = {0}.id
        AND sentence.id = span0.sentence_id
        AND {3}
    """.format(
        C.__tablename__,
        ", ".join(selects),
        ", ".join(froms),
        " AND ".join(joins)
    )
    if verbose:
        print("Creating view...")
        print(sql)
    session.execute(sql)
<commit_before><commit_msg>Add function to create view for Span candidate subclasses<commit_after>
def create_serialized_candidate_view(session, C, verbose=True):
    """Creates a view in the database for a Candidate sub-class C defined over
    Span contexts, which are direct children of a single sentence.

    Creates VIEW with schema:
        candidate.id, candidate.split, span0.*, ..., spanK.*, sentence.*

    NOTE: This limited functionality should be expanded for arbitrary context
    trees. Also this should be made more dialect-independent.
    """
    selects, froms, joins = [], [], []
    for i, arg in enumerate(C.__argnames__):
        selects.append("span{0}.*".format(i))
        froms.append("span AS span{0}".format(i))
        joins.append("{0}.{1}_id = span{2}.id".format(C.__tablename__, arg, i))

    sql = """
        CREATE VIEW {0}_serialized AS
        SELECT candidate.id, candidate.split, {1}, sentence.*
        FROM candidate, {0}, {2}, sentence
        WHERE candidate.id = {0}.id
        AND sentence.id = span0.sentence_id
        AND {3}
    """.format(
        C.__tablename__,
        ", ".join(selects),
        ", ".join(froms),
        " AND ".join(joins)
    )
    if verbose:
        print("Creating view...")
        print(sql)
    session.execute(sql)
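A quick way to inspect the generated SQL without a Snorkel install is a stub candidate class; only __tablename__ and __argnames__ matter to the function, and the no-op session below stands in for a real SQLAlchemy session (both names are invented for the demo):

class FakeSpanPair:
    __tablename__ = 'span_pair'
    __argnames__ = ['person', 'organization']

class NoOpSession:
    def execute(self, sql):
        pass  # a real session would run the CREATE VIEW statement

# With verbose=True (the default) this prints the assembled statement,
# joining span_pair to span0/span1 and their shared sentence row.
create_serialized_candidate_view(NoOpSession(), FakeSpanPair)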
3609c5842b33ca4146ad14b74c76f8954545aaa8
loqusdb/commands/view.py
loqusdb/commands/view.py
# -*- coding: utf-8 -*-
import logging

import click

from . import base_command

logger = logging.getLogger(__name__)


@base_command.command()
@click.option('-c', '--case-id', help='Search for case')
@click.pass_context
def cases(ctx, case_id):
    """Display all cases in the database."""
    adapter = ctx.obj['adapter']
    if case_id:
        case = adapter.case(case_id)
        if case:
            click.echo(case)
        else:
            logger.info("Case {0} does not exist in database".format(case_id))
    else:
        i = 0
        for case in adapter.cases():
            i += 1
            click.echo(case)
        if i == 0:
            logger.info("No cases found in database")


@base_command.command()
@click.option('--variant-id', help='Search for a variant')
@click.pass_context
def variants(ctx, variant_id):
    """Display variants in the database."""
    adapter = ctx.obj['adapter']
    if variant_id:
        variant = adapter.get_variant({'_id': variant_id})
        if variant:
            click.echo(variant)
        else:
            logger.info("Variant {0} does not exist in database".format(variant_id))
    else:
        i = 0
        for variant in adapter.get_variants():
            i += 1
            click.echo(variant)
        if i == 0:
            logger.info("No variants found in database")
Add commands for cases and variants
Add commands for cases and variants
Python
mit
moonso/loqusdb
Add commands for cases and variants
# -*- coding: utf-8 -*-
import logging

import click

from . import base_command

logger = logging.getLogger(__name__)


@base_command.command()
@click.option('-c', '--case-id', help='Search for case')
@click.pass_context
def cases(ctx, case_id):
    """Display all cases in the database."""
    adapter = ctx.obj['adapter']
    if case_id:
        case = adapter.case(case_id)
        if case:
            click.echo(case)
        else:
            logger.info("Case {0} does not exist in database".format(case_id))
    else:
        i = 0
        for case in adapter.cases():
            i += 1
            click.echo(case)
        if i == 0:
            logger.info("No cases found in database")


@base_command.command()
@click.option('--variant-id', help='Search for a variant')
@click.pass_context
def variants(ctx, variant_id):
    """Display variants in the database."""
    adapter = ctx.obj['adapter']
    if variant_id:
        variant = adapter.get_variant({'_id': variant_id})
        if variant:
            click.echo(variant)
        else:
            logger.info("Variant {0} does not exist in database".format(variant_id))
    else:
        i = 0
        for variant in adapter.get_variants():
            i += 1
            click.echo(variant)
        if i == 0:
            logger.info("No variants found in database")
<commit_before><commit_msg>Add commands for cases and variants<commit_after>
# -*- coding: utf-8 -*-
import logging

import click

from . import base_command

logger = logging.getLogger(__name__)


@base_command.command()
@click.option('-c', '--case-id', help='Search for case')
@click.pass_context
def cases(ctx, case_id):
    """Display all cases in the database."""
    adapter = ctx.obj['adapter']
    if case_id:
        case = adapter.case(case_id)
        if case:
            click.echo(case)
        else:
            logger.info("Case {0} does not exist in database".format(case_id))
    else:
        i = 0
        for case in adapter.cases():
            i += 1
            click.echo(case)
        if i == 0:
            logger.info("No cases found in database")


@base_command.command()
@click.option('--variant-id', help='Search for a variant')
@click.pass_context
def variants(ctx, variant_id):
    """Display variants in the database."""
    adapter = ctx.obj['adapter']
    if variant_id:
        variant = adapter.get_variant({'_id': variant_id})
        if variant:
            click.echo(variant)
        else:
            logger.info("Variant {0} does not exist in database".format(variant_id))
    else:
        i = 0
        for variant in adapter.get_variants():
            i += 1
            click.echo(variant)
        if i == 0:
            logger.info("No variants found in database")
Add commands for cases and variants# -*- coding: utf-8 -*-
import logging

import click

from . import base_command

logger = logging.getLogger(__name__)


@base_command.command()
@click.option('-c', '--case-id', help='Search for case')
@click.pass_context
def cases(ctx, case_id):
    """Display all cases in the database."""
    adapter = ctx.obj['adapter']
    if case_id:
        case = adapter.case(case_id)
        if case:
            click.echo(case)
        else:
            logger.info("Case {0} does not exist in database".format(case_id))
    else:
        i = 0
        for case in adapter.cases():
            i += 1
            click.echo(case)
        if i == 0:
            logger.info("No cases found in database")


@base_command.command()
@click.option('--variant-id', help='Search for a variant')
@click.pass_context
def variants(ctx, variant_id):
    """Display variants in the database."""
    adapter = ctx.obj['adapter']
    if variant_id:
        variant = adapter.get_variant({'_id': variant_id})
        if variant:
            click.echo(variant)
        else:
            logger.info("Variant {0} does not exist in database".format(variant_id))
    else:
        i = 0
        for variant in adapter.get_variants():
            i += 1
            click.echo(variant)
        if i == 0:
            logger.info("No variants found in database")
<commit_before><commit_msg>Add commands for cases and variants<commit_after># -*- coding: utf-8 -*-
import logging

import click

from . import base_command

logger = logging.getLogger(__name__)


@base_command.command()
@click.option('-c', '--case-id', help='Search for case')
@click.pass_context
def cases(ctx, case_id):
    """Display all cases in the database."""
    adapter = ctx.obj['adapter']
    if case_id:
        case = adapter.case(case_id)
        if case:
            click.echo(case)
        else:
            logger.info("Case {0} does not exist in database".format(case_id))
    else:
        i = 0
        for case in adapter.cases():
            i += 1
            click.echo(case)
        if i == 0:
            logger.info("No cases found in database")


@base_command.command()
@click.option('--variant-id', help='Search for a variant')
@click.pass_context
def variants(ctx, variant_id):
    """Display variants in the database."""
    adapter = ctx.obj['adapter']
    if variant_id:
        variant = adapter.get_variant({'_id': variant_id})
        if variant:
            click.echo(variant)
        else:
            logger.info("Variant {0} does not exist in database".format(variant_id))
    else:
        i = 0
        for variant in adapter.get_variants():
            i += 1
            click.echo(variant)
        if i == 0:
            logger.info("No variants found in database")
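Commands like these can be exercised without a live database through click's test runner; the adapter below is a hypothetical stand-in for whatever the real CLI stores in ctx.obj:

from click.testing import CliRunner

class FakeAdapter:
    def case(self, case_id):
        return None  # pretend no case matches
    def cases(self):
        return [{'case_id': 'recessive_trio'}]

runner = CliRunner()
result = runner.invoke(cases, [], obj={'adapter': FakeAdapter()})
print(result.output)  # echoes the single fake case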
4a30d30b82fbdccbb0f15ebb5c094b13ce791f7f
genderator/utils.py
genderator/utils.py
from unidecode import unidecode


class Normalizer:

    @staticmethod
    def normalize(text):
        text = Normalizer.remove_extra_whitespaces(text)
        text = Normalizer.replace_hyphens(text)
        # text = Normalizer.remove_accent_marks(text)
        return text.lower()

    @staticmethod
    def replace_hyphens(text):
        return text.replace('-', ' ')

    @staticmethod
    def remove_extra_whitespaces(text):
        return ' '.join(text.strip().split())

    @staticmethod
    def remove_accent_marks(text):
        return unidecode(text)
Add a utility class to normalize input
Add a utility class to normalize input
Python
mit
davidmogar/genderator
Add a utility class to normalize input
from unidecode import unidecode


class Normalizer:

    @staticmethod
    def normalize(text):
        text = Normalizer.remove_extra_whitespaces(text)
        text = Normalizer.replace_hyphens(text)
        # text = Normalizer.remove_accent_marks(text)
        return text.lower()

    @staticmethod
    def replace_hyphens(text):
        return text.replace('-', ' ')

    @staticmethod
    def remove_extra_whitespaces(text):
        return ' '.join(text.strip().split())

    @staticmethod
    def remove_accent_marks(text):
        return unidecode(text)
<commit_before><commit_msg>Add a utility class to normalize input<commit_after>
from unidecode import unidecode


class Normalizer:

    @staticmethod
    def normalize(text):
        text = Normalizer.remove_extra_whitespaces(text)
        text = Normalizer.replace_hyphens(text)
        # text = Normalizer.remove_accent_marks(text)
        return text.lower()

    @staticmethod
    def replace_hyphens(text):
        return text.replace('-', ' ')

    @staticmethod
    def remove_extra_whitespaces(text):
        return ' '.join(text.strip().split())

    @staticmethod
    def remove_accent_marks(text):
        return unidecode(text)
Add a utility class to normalize inputfrom unidecode import unidecode


class Normalizer:

    @staticmethod
    def normalize(text):
        text = Normalizer.remove_extra_whitespaces(text)
        text = Normalizer.replace_hyphens(text)
        # text = Normalizer.remove_accent_marks(text)
        return text.lower()

    @staticmethod
    def replace_hyphens(text):
        return text.replace('-', ' ')

    @staticmethod
    def remove_extra_whitespaces(text):
        return ' '.join(text.strip().split())

    @staticmethod
    def remove_accent_marks(text):
        return unidecode(text)
<commit_before><commit_msg>Add a utility class to normalize input<commit_after>from unidecode import unidecode


class Normalizer:

    @staticmethod
    def normalize(text):
        text = Normalizer.remove_extra_whitespaces(text)
        text = Normalizer.replace_hyphens(text)
        # text = Normalizer.remove_accent_marks(text)
        return text.lower()

    @staticmethod
    def replace_hyphens(text):
        return text.replace('-', ' ')

    @staticmethod
    def remove_extra_whitespaces(text):
        return ' '.join(text.strip().split())

    @staticmethod
    def remove_accent_marks(text):
        return unidecode(text)
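Usage is plain class-level calls; with accent stripping still commented out, the pipeline collapses whitespace, replaces hyphens, and lowercases:

print(Normalizer.normalize('  José-María   PÉREZ  '))  # 'josé maría pérez'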
23cf747a3ff24f75d3300547f4bfdecf10c4a325
scrapple/utils/config.py
scrapple/utils/config.py
""" scrapple.utils.config ~~~~~~~~~~~~~~~~~~~~~ Functions related to traversing the configuration file """ from __future__ import print_function def traverse_next(page, next, results): for link in page.extract_links(next['follow_link']): print("Loading page", link.url) r = results for attribute in next['scraping'].get('data'): if attribute['field'] != "": print("\nExtracting", attribute['field'], "attribute", sep=' ') r[attribute['field']] = link.extract_content(attribute['selector'], attribute['attr']) if not next['scraping'].get('next'): yield r else: for next2 in next['scraping'].get('next'): for result in traverse_next(link, next2, r): yield result
Add next traversal util function
Add next traversal util function
Python
mit
scrappleapp/scrapple,AlexMathew/scrapple,AlexMathew/scrapple,scrappleapp/scrapple,AlexMathew/scrapple
Add next traversal util function
""" scrapple.utils.config ~~~~~~~~~~~~~~~~~~~~~ Functions related to traversing the configuration file """ from __future__ import print_function def traverse_next(page, next, results): for link in page.extract_links(next['follow_link']): print("Loading page", link.url) r = results for attribute in next['scraping'].get('data'): if attribute['field'] != "": print("\nExtracting", attribute['field'], "attribute", sep=' ') r[attribute['field']] = link.extract_content(attribute['selector'], attribute['attr']) if not next['scraping'].get('next'): yield r else: for next2 in next['scraping'].get('next'): for result in traverse_next(link, next2, r): yield result
<commit_before><commit_msg>Add next traversal util function<commit_after>
""" scrapple.utils.config ~~~~~~~~~~~~~~~~~~~~~ Functions related to traversing the configuration file """ from __future__ import print_function def traverse_next(page, next, results): for link in page.extract_links(next['follow_link']): print("Loading page", link.url) r = results for attribute in next['scraping'].get('data'): if attribute['field'] != "": print("\nExtracting", attribute['field'], "attribute", sep=' ') r[attribute['field']] = link.extract_content(attribute['selector'], attribute['attr']) if not next['scraping'].get('next'): yield r else: for next2 in next['scraping'].get('next'): for result in traverse_next(link, next2, r): yield result
Add next traversal util function"""
scrapple.utils.config
~~~~~~~~~~~~~~~~~~~~~

Functions related to traversing the configuration file
"""

from __future__ import print_function


def traverse_next(page, next, results):
    for link in page.extract_links(next['follow_link']):
        print("Loading page", link.url)
        r = results
        for attribute in next['scraping'].get('data'):
            if attribute['field'] != "":
                print("\nExtracting", attribute['field'], "attribute", sep=' ')
                r[attribute['field']] = link.extract_content(attribute['selector'], attribute['attr'])
        if not next['scraping'].get('next'):
            yield r
        else:
            for next2 in next['scraping'].get('next'):
                for result in traverse_next(link, next2, r):
                    yield result
<commit_before><commit_msg>Add next traversal util function<commit_after>"""
scrapple.utils.config
~~~~~~~~~~~~~~~~~~~~~

Functions related to traversing the configuration file
"""

from __future__ import print_function


def traverse_next(page, next, results):
    for link in page.extract_links(next['follow_link']):
        print("Loading page", link.url)
        r = results
        for attribute in next['scraping'].get('data'):
            if attribute['field'] != "":
                print("\nExtracting", attribute['field'], "attribute", sep=' ')
                r[attribute['field']] = link.extract_content(attribute['selector'], attribute['attr'])
        if not next['scraping'].get('next'):
            yield r
        else:
            for next2 in next['scraping'].get('next'):
                for result in traverse_next(link, next2, r):
                    yield result
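Since traverse_next only calls extract_links and extract_content on its page argument, a stub makes the generator observable without real HTTP; both classes and the config dict here are invented for the demo:

class StubLink(object):
    url = 'http://example.com/item'
    def extract_links(self, selector):
        return []
    def extract_content(self, selector, attr):
        return 'some text'

class StubPage(StubLink):
    def extract_links(self, selector):
        return [StubLink()]

conf = {'follow_link': '//a', 'scraping': {'data': [
    {'field': 'title', 'selector': '//h1', 'attr': 'text'}]}}
for record in traverse_next(StubPage(), conf, {}):
    print(record)  # {'title': 'some text'}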
82b9a66ea826b4463d82c69ba1703eab213efe83
heat_integrationtests/functional/test_stack_outputs.py
heat_integrationtests/functional/test_stack_outputs.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from heat_integrationtests.functional import functional_base


class StackOutputsTest(functional_base.FunctionalTestsBase):

    template = '''
heat_template_version: 2015-10-15
resources:
  test_resource_a:
    type: OS::Heat::TestResource
    properties:
      value: 'a'
  test_resource_b:
    type: OS::Heat::TestResource
    properties:
      value: 'b'
outputs:
  resource_output_a:
    description: 'Output of resource a'
    value: { get_attr: [test_resource_a, output] }
  resource_output_b:
    description: 'Output of resource b'
    value: { get_attr: [test_resource_b, output] }
'''

    def test_outputs(self):
        stack_identifier = self.stack_create(template=self.template)
        expected_list = [{u'output_key': u'resource_output_a',
                          u'description': u'Output of resource a'},
                         {u'output_key': u'resource_output_b',
                          u'description': u'Output of resource b'}]
        actual_list = self.client.stacks.output_list(
            stack_identifier)['outputs']
        self.assertEqual(expected_list, actual_list)

        expected_output_a = {
            u'output_value': u'a', u'output_key': u'resource_output_a',
            u'description': u'Output of resource a'}
        expected_output_b = {
            u'output_value': u'b', u'output_key': u'resource_output_b',
            u'description': u'Output of resource b'}
        actual_output_a = self.client.stacks.output_show(
            stack_identifier, 'resource_output_a')['output']
        actual_output_b = self.client.stacks.output_show(
            stack_identifier, 'resource_output_b')['output']
        self.assertEqual(expected_output_a, actual_output_a)
        self.assertEqual(expected_output_b, actual_output_b)
Add test for stack outputs
Add test for stack outputs

Add test for output list and output show

Depends-On: I1bc1cee1c78ecf0c9a18ecc0a871d38e4141e0f7
Change-Id: I1ee226494b5b04ec2d43edf0c03ab31e452fedf0
Python
apache-2.0
cwolferh/heat-scratch,noironetworks/heat,jasondunsmore/heat,jasondunsmore/heat,noironetworks/heat,openstack/heat,steveb/heat,dims/heat,steveb/heat,dims/heat,openstack/heat,cwolferh/heat-scratch
Add test for stack outputs

Add test for output list and output show

Depends-On: I1bc1cee1c78ecf0c9a18ecc0a871d38e4141e0f7
Change-Id: I1ee226494b5b04ec2d43edf0c03ab31e452fedf0
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from heat_integrationtests.functional import functional_base


class StackOutputsTest(functional_base.FunctionalTestsBase):

    template = '''
heat_template_version: 2015-10-15
resources:
  test_resource_a:
    type: OS::Heat::TestResource
    properties:
      value: 'a'
  test_resource_b:
    type: OS::Heat::TestResource
    properties:
      value: 'b'
outputs:
  resource_output_a:
    description: 'Output of resource a'
    value: { get_attr: [test_resource_a, output] }
  resource_output_b:
    description: 'Output of resource b'
    value: { get_attr: [test_resource_b, output] }
'''

    def test_outputs(self):
        stack_identifier = self.stack_create(template=self.template)
        expected_list = [{u'output_key': u'resource_output_a',
                          u'description': u'Output of resource a'},
                         {u'output_key': u'resource_output_b',
                          u'description': u'Output of resource b'}]
        actual_list = self.client.stacks.output_list(
            stack_identifier)['outputs']
        self.assertEqual(expected_list, actual_list)

        expected_output_a = {
            u'output_value': u'a', u'output_key': u'resource_output_a',
            u'description': u'Output of resource a'}
        expected_output_b = {
            u'output_value': u'b', u'output_key': u'resource_output_b',
            u'description': u'Output of resource b'}
        actual_output_a = self.client.stacks.output_show(
            stack_identifier, 'resource_output_a')['output']
        actual_output_b = self.client.stacks.output_show(
            stack_identifier, 'resource_output_b')['output']
        self.assertEqual(expected_output_a, actual_output_a)
        self.assertEqual(expected_output_b, actual_output_b)
<commit_before><commit_msg>Add test for stack outputs

Add test for output list and output show

Depends-On: I1bc1cee1c78ecf0c9a18ecc0a871d38e4141e0f7
Change-Id: I1ee226494b5b04ec2d43edf0c03ab31e452fedf0<commit_after>
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from heat_integrationtests.functional import functional_base


class StackOutputsTest(functional_base.FunctionalTestsBase):

    template = '''
heat_template_version: 2015-10-15
resources:
  test_resource_a:
    type: OS::Heat::TestResource
    properties:
      value: 'a'
  test_resource_b:
    type: OS::Heat::TestResource
    properties:
      value: 'b'
outputs:
  resource_output_a:
    description: 'Output of resource a'
    value: { get_attr: [test_resource_a, output] }
  resource_output_b:
    description: 'Output of resource b'
    value: { get_attr: [test_resource_b, output] }
'''

    def test_outputs(self):
        stack_identifier = self.stack_create(template=self.template)
        expected_list = [{u'output_key': u'resource_output_a',
                          u'description': u'Output of resource a'},
                         {u'output_key': u'resource_output_b',
                          u'description': u'Output of resource b'}]
        actual_list = self.client.stacks.output_list(
            stack_identifier)['outputs']
        self.assertEqual(expected_list, actual_list)

        expected_output_a = {
            u'output_value': u'a', u'output_key': u'resource_output_a',
            u'description': u'Output of resource a'}
        expected_output_b = {
            u'output_value': u'b', u'output_key': u'resource_output_b',
            u'description': u'Output of resource b'}
        actual_output_a = self.client.stacks.output_show(
            stack_identifier, 'resource_output_a')['output']
        actual_output_b = self.client.stacks.output_show(
            stack_identifier, 'resource_output_b')['output']
        self.assertEqual(expected_output_a, actual_output_a)
        self.assertEqual(expected_output_b, actual_output_b)
Add test for stack outputs

Add test for output list and output show

Depends-On: I1bc1cee1c78ecf0c9a18ecc0a871d38e4141e0f7
Change-Id: I1ee226494b5b04ec2d43edf0c03ab31e452fedf0# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from heat_integrationtests.functional import functional_base


class StackOutputsTest(functional_base.FunctionalTestsBase):

    template = '''
heat_template_version: 2015-10-15
resources:
  test_resource_a:
    type: OS::Heat::TestResource
    properties:
      value: 'a'
  test_resource_b:
    type: OS::Heat::TestResource
    properties:
      value: 'b'
outputs:
  resource_output_a:
    description: 'Output of resource a'
    value: { get_attr: [test_resource_a, output] }
  resource_output_b:
    description: 'Output of resource b'
    value: { get_attr: [test_resource_b, output] }
'''

    def test_outputs(self):
        stack_identifier = self.stack_create(template=self.template)
        expected_list = [{u'output_key': u'resource_output_a',
                          u'description': u'Output of resource a'},
                         {u'output_key': u'resource_output_b',
                          u'description': u'Output of resource b'}]
        actual_list = self.client.stacks.output_list(
            stack_identifier)['outputs']
        self.assertEqual(expected_list, actual_list)

        expected_output_a = {
            u'output_value': u'a', u'output_key': u'resource_output_a',
            u'description': u'Output of resource a'}
        expected_output_b = {
            u'output_value': u'b', u'output_key': u'resource_output_b',
            u'description': u'Output of resource b'}
        actual_output_a = self.client.stacks.output_show(
            stack_identifier, 'resource_output_a')['output']
        actual_output_b = self.client.stacks.output_show(
            stack_identifier, 'resource_output_b')['output']
        self.assertEqual(expected_output_a, actual_output_a)
        self.assertEqual(expected_output_b, actual_output_b)
<commit_before><commit_msg>Add test for stack outputs

Add test for output list and output show

Depends-On: I1bc1cee1c78ecf0c9a18ecc0a871d38e4141e0f7
Change-Id: I1ee226494b5b04ec2d43edf0c03ab31e452fedf0<commit_after># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from heat_integrationtests.functional import functional_base


class StackOutputsTest(functional_base.FunctionalTestsBase):

    template = '''
heat_template_version: 2015-10-15
resources:
  test_resource_a:
    type: OS::Heat::TestResource
    properties:
      value: 'a'
  test_resource_b:
    type: OS::Heat::TestResource
    properties:
      value: 'b'
outputs:
  resource_output_a:
    description: 'Output of resource a'
    value: { get_attr: [test_resource_a, output] }
  resource_output_b:
    description: 'Output of resource b'
    value: { get_attr: [test_resource_b, output] }
'''

    def test_outputs(self):
        stack_identifier = self.stack_create(template=self.template)
        expected_list = [{u'output_key': u'resource_output_a',
                          u'description': u'Output of resource a'},
                         {u'output_key': u'resource_output_b',
                          u'description': u'Output of resource b'}]
        actual_list = self.client.stacks.output_list(
            stack_identifier)['outputs']
        self.assertEqual(expected_list, actual_list)

        expected_output_a = {
            u'output_value': u'a', u'output_key': u'resource_output_a',
            u'description': u'Output of resource a'}
        expected_output_b = {
            u'output_value': u'b', u'output_key': u'resource_output_b',
            u'description': u'Output of resource b'}
        actual_output_a = self.client.stacks.output_show(
            stack_identifier, 'resource_output_a')['output']
        actual_output_b = self.client.stacks.output_show(
            stack_identifier, 'resource_output_b')['output']
        self.assertEqual(expected_output_a, actual_output_a)
        self.assertEqual(expected_output_b, actual_output_b)
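Outside the test harness, the equivalent heatclient calls would look roughly like this; the client object and stack name are assumed to exist and are not shown being built:

outputs = client.stacks.output_list('my_stack')['outputs']
for item in outputs:
    print(item['output_key'], '-', item['description'])

detail = client.stacks.output_show('my_stack', 'resource_output_a')['output']
print(detail['output_value'])  # 'a' for the template in the record above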
b7bf4586fea207453225a87fb85df59ccfc94e80
jarbas/core/migrations/0032_auto_20170613_0641.py
jarbas/core/migrations/0032_auto_20170613_0641.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-13 09:41
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('core', '0031_add_index_together_for_reimbursement'),
    ]

    operations = [
        migrations.AlterField(
            model_name='historicalreimbursement',
            name='history_type',
            field=models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1),
        ),
    ]
Add missing migration related to django-simple-history update
Add missing migration related to django-simple-history update
Python
mit
datasciencebr/jarbas,datasciencebr/jarbas,marcusrehm/serenata-de-amor,marcusrehm/serenata-de-amor,datasciencebr/jarbas,datasciencebr/serenata-de-amor,datasciencebr/serenata-de-amor,marcusrehm/serenata-de-amor,datasciencebr/jarbas,marcusrehm/serenata-de-amor
Add missing migration related to django-simple-history update
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-13 09:41
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('core', '0031_add_index_together_for_reimbursement'),
    ]

    operations = [
        migrations.AlterField(
            model_name='historicalreimbursement',
            name='history_type',
            field=models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1),
        ),
    ]
<commit_before><commit_msg>Add missing migration related to django-simple-history update<commit_after>
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-13 09:41
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('core', '0031_add_index_together_for_reimbursement'),
    ]

    operations = [
        migrations.AlterField(
            model_name='historicalreimbursement',
            name='history_type',
            field=models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1),
        ),
    ]
Add missing migration related to django-simple-history update# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-13 09:41
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('core', '0031_add_index_together_for_reimbursement'),
    ]

    operations = [
        migrations.AlterField(
            model_name='historicalreimbursement',
            name='history_type',
            field=models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1),
        ),
    ]
<commit_before><commit_msg>Add missing migration related to django-simple-history update<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-13 09:41
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('core', '0031_add_index_together_for_reimbursement'),
    ]

    operations = [
        migrations.AlterField(
            model_name='historicalreimbursement',
            name='history_type',
            field=models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1),
        ),
    ]
2cd1da31b099cbf37552b2a049c3df6619e0e64f
rma/redis_types.py
rma/redis_types.py
REDIS_ENCODING_ID_RAW = 0
REDIS_ENCODING_ID_INT = 1
REDIS_ENCODING_ID_EMBSTR = 2
REDIS_ENCODING_ID_HASHTABLE = 3
REDIS_ENCODING_ID_ZIPLIST = 4
REDIS_ENCODING_ID_LINKEDLIST = 5
REDIS_ENCODING_ID_QUICKLIST = 6
REDIS_ENCODING_ID_INTSET = 7
REDIS_ENCODING_ID_SKIPLIST = 8

REDIS_ENCODING_STR_TO_ID_LIB = {
    b'raw': REDIS_ENCODING_ID_RAW,
    b'int': REDIS_ENCODING_ID_INT,
    b'embstr': REDIS_ENCODING_ID_EMBSTR,
    b'hashtable': REDIS_ENCODING_ID_HASHTABLE,
    b'ziplist': REDIS_ENCODING_ID_ZIPLIST,
    b'linkedlist': REDIS_ENCODING_ID_LINKEDLIST,
    b'quicklist': REDIS_ENCODING_ID_QUICKLIST,
    b'intset': REDIS_ENCODING_ID_INTSET,
    b'skiplist': REDIS_ENCODING_ID_SKIPLIST,
}

REDIS_ENCODING_ID_TO_STR_LIB = dict((v, k) for k, v in REDIS_ENCODING_STR_TO_ID_LIB.items())


def redis_encoding_str_to_id(key_encoding):
    if key_encoding in REDIS_ENCODING_STR_TO_ID_LIB:
        return REDIS_ENCODING_STR_TO_ID_LIB[key_encoding]
    raise ValueError("Invalid encoding `%s` given" % key_encoding)


def redis_encoding_id_to_str(key_encoding):
    if key_encoding in REDIS_ENCODING_ID_TO_STR_LIB:
        return REDIS_ENCODING_ID_TO_STR_LIB[key_encoding].decode('utf8')
    raise ValueError("Invalid encoding `%s` given" % key_encoding)
Add helper enums for type encodings
Add helper enums for type encodings
Python
mit
gamenet/redis-memory-analyzer
Add helper enums for type encodings
REDIS_ENCODING_ID_RAW = 0
REDIS_ENCODING_ID_INT = 1
REDIS_ENCODING_ID_EMBSTR = 2
REDIS_ENCODING_ID_HASHTABLE = 3
REDIS_ENCODING_ID_ZIPLIST = 4
REDIS_ENCODING_ID_LINKEDLIST = 5
REDIS_ENCODING_ID_QUICKLIST = 6
REDIS_ENCODING_ID_INTSET = 7
REDIS_ENCODING_ID_SKIPLIST = 8

REDIS_ENCODING_STR_TO_ID_LIB = {
    b'raw': REDIS_ENCODING_ID_RAW,
    b'int': REDIS_ENCODING_ID_INT,
    b'embstr': REDIS_ENCODING_ID_EMBSTR,
    b'hashtable': REDIS_ENCODING_ID_HASHTABLE,
    b'ziplist': REDIS_ENCODING_ID_ZIPLIST,
    b'linkedlist': REDIS_ENCODING_ID_LINKEDLIST,
    b'quicklist': REDIS_ENCODING_ID_QUICKLIST,
    b'intset': REDIS_ENCODING_ID_INTSET,
    b'skiplist': REDIS_ENCODING_ID_SKIPLIST,
}

REDIS_ENCODING_ID_TO_STR_LIB = dict((v, k) for k, v in REDIS_ENCODING_STR_TO_ID_LIB.items())


def redis_encoding_str_to_id(key_encoding):
    if key_encoding in REDIS_ENCODING_STR_TO_ID_LIB:
        return REDIS_ENCODING_STR_TO_ID_LIB[key_encoding]
    raise ValueError("Invalid encoding `%s` given" % key_encoding)


def redis_encoding_id_to_str(key_encoding):
    if key_encoding in REDIS_ENCODING_ID_TO_STR_LIB:
        return REDIS_ENCODING_ID_TO_STR_LIB[key_encoding].decode('utf8')
    raise ValueError("Invalid encoding `%s` given" % key_encoding)
<commit_before><commit_msg>Add helper enums for type encodings<commit_after>
REDIS_ENCODING_ID_RAW = 0
REDIS_ENCODING_ID_INT = 1
REDIS_ENCODING_ID_EMBSTR = 2
REDIS_ENCODING_ID_HASHTABLE = 3
REDIS_ENCODING_ID_ZIPLIST = 4
REDIS_ENCODING_ID_LINKEDLIST = 5
REDIS_ENCODING_ID_QUICKLIST = 6
REDIS_ENCODING_ID_INTSET = 7
REDIS_ENCODING_ID_SKIPLIST = 8

REDIS_ENCODING_STR_TO_ID_LIB = {
    b'raw': REDIS_ENCODING_ID_RAW,
    b'int': REDIS_ENCODING_ID_INT,
    b'embstr': REDIS_ENCODING_ID_EMBSTR,
    b'hashtable': REDIS_ENCODING_ID_HASHTABLE,
    b'ziplist': REDIS_ENCODING_ID_ZIPLIST,
    b'linkedlist': REDIS_ENCODING_ID_LINKEDLIST,
    b'quicklist': REDIS_ENCODING_ID_QUICKLIST,
    b'intset': REDIS_ENCODING_ID_INTSET,
    b'skiplist': REDIS_ENCODING_ID_SKIPLIST,
}

REDIS_ENCODING_ID_TO_STR_LIB = dict((v, k) for k, v in REDIS_ENCODING_STR_TO_ID_LIB.items())


def redis_encoding_str_to_id(key_encoding):
    if key_encoding in REDIS_ENCODING_STR_TO_ID_LIB:
        return REDIS_ENCODING_STR_TO_ID_LIB[key_encoding]
    raise ValueError("Invalid encoding `%s` given" % key_encoding)


def redis_encoding_id_to_str(key_encoding):
    if key_encoding in REDIS_ENCODING_ID_TO_STR_LIB:
        return REDIS_ENCODING_ID_TO_STR_LIB[key_encoding].decode('utf8')
    raise ValueError("Invalid encoding `%s` given" % key_encoding)
Add helper enums for type encodingsREDIS_ENCODING_ID_RAW = 0
REDIS_ENCODING_ID_INT = 1
REDIS_ENCODING_ID_EMBSTR = 2
REDIS_ENCODING_ID_HASHTABLE = 3
REDIS_ENCODING_ID_ZIPLIST = 4
REDIS_ENCODING_ID_LINKEDLIST = 5
REDIS_ENCODING_ID_QUICKLIST = 6
REDIS_ENCODING_ID_INTSET = 7
REDIS_ENCODING_ID_SKIPLIST = 8

REDIS_ENCODING_STR_TO_ID_LIB = {
    b'raw': REDIS_ENCODING_ID_RAW,
    b'int': REDIS_ENCODING_ID_INT,
    b'embstr': REDIS_ENCODING_ID_EMBSTR,
    b'hashtable': REDIS_ENCODING_ID_HASHTABLE,
    b'ziplist': REDIS_ENCODING_ID_ZIPLIST,
    b'linkedlist': REDIS_ENCODING_ID_LINKEDLIST,
    b'quicklist': REDIS_ENCODING_ID_QUICKLIST,
    b'intset': REDIS_ENCODING_ID_INTSET,
    b'skiplist': REDIS_ENCODING_ID_SKIPLIST,
}

REDIS_ENCODING_ID_TO_STR_LIB = dict((v, k) for k, v in REDIS_ENCODING_STR_TO_ID_LIB.items())


def redis_encoding_str_to_id(key_encoding):
    if key_encoding in REDIS_ENCODING_STR_TO_ID_LIB:
        return REDIS_ENCODING_STR_TO_ID_LIB[key_encoding]
    raise ValueError("Invalid encoding `%s` given" % key_encoding)


def redis_encoding_id_to_str(key_encoding):
    if key_encoding in REDIS_ENCODING_ID_TO_STR_LIB:
        return REDIS_ENCODING_ID_TO_STR_LIB[key_encoding].decode('utf8')
    raise ValueError("Invalid encoding `%s` given" % key_encoding)
<commit_before><commit_msg>Add helper enums for type encodings<commit_after>REDIS_ENCODING_ID_RAW = 0
REDIS_ENCODING_ID_INT = 1
REDIS_ENCODING_ID_EMBSTR = 2
REDIS_ENCODING_ID_HASHTABLE = 3
REDIS_ENCODING_ID_ZIPLIST = 4
REDIS_ENCODING_ID_LINKEDLIST = 5
REDIS_ENCODING_ID_QUICKLIST = 6
REDIS_ENCODING_ID_INTSET = 7
REDIS_ENCODING_ID_SKIPLIST = 8

REDIS_ENCODING_STR_TO_ID_LIB = {
    b'raw': REDIS_ENCODING_ID_RAW,
    b'int': REDIS_ENCODING_ID_INT,
    b'embstr': REDIS_ENCODING_ID_EMBSTR,
    b'hashtable': REDIS_ENCODING_ID_HASHTABLE,
    b'ziplist': REDIS_ENCODING_ID_ZIPLIST,
    b'linkedlist': REDIS_ENCODING_ID_LINKEDLIST,
    b'quicklist': REDIS_ENCODING_ID_QUICKLIST,
    b'intset': REDIS_ENCODING_ID_INTSET,
    b'skiplist': REDIS_ENCODING_ID_SKIPLIST,
}

REDIS_ENCODING_ID_TO_STR_LIB = dict((v, k) for k, v in REDIS_ENCODING_STR_TO_ID_LIB.items())


def redis_encoding_str_to_id(key_encoding):
    if key_encoding in REDIS_ENCODING_STR_TO_ID_LIB:
        return REDIS_ENCODING_STR_TO_ID_LIB[key_encoding]
    raise ValueError("Invalid encoding `%s` given" % key_encoding)


def redis_encoding_id_to_str(key_encoding):
    if key_encoding in REDIS_ENCODING_ID_TO_STR_LIB:
        return REDIS_ENCODING_ID_TO_STR_LIB[key_encoding].decode('utf8')
    raise ValueError("Invalid encoding `%s` given" % key_encoding)
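Round-tripping the helpers; note the asymmetry the record encodes: lookups take bytes keys, while the reverse lookup returns a decoded str:

enc_id = redis_encoding_str_to_id(b'ziplist')
print(enc_id)                            # 4
print(redis_encoding_id_to_str(enc_id))  # ziplist
# Unknown names raise: redis_encoding_str_to_id(b'nosuch') -> ValueError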
6ae82ecdd749b936289b496a10faa2caf1aa94c6
bibsort.py
bibsort.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import re
from collections import OrderedDict
import codecs


class BibEntry:

    def __init__(self, **kwargs):
        self.data = {}
        for key, value in kwargs.iteritems():
            self.data[key] = value

    def entry(self):
        data = OrderedDict(sorted(self.data.items(), key=lambda t: t[0]))
        result = u'@{0}{{{1},\n'.format(self.data['type'].upper(), self.data['key'])
        for key, value in data.items():
            if key in ['type','key']:
                continue
            result += u'\t{0} = {{{1}}},\n'.format(key, value)
        result = result[:-2] + u'\n}\n'
        return result


def must_omit(i):
    return re.match("comment", i) or re.match("%%", i)


def entries_from_file(file):
    keywords = ['address', 'annote', 'author', 'booktitle', 'chapter',
                'crossref', 'doi', 'edition', 'editor', 'eprint',
                'eprintclass', 'eprinttype', 'howpublished', 'institution',
                'journal', 'month', 'note', 'number', 'organization',
                'pages', 'publisher', 'school', 'series', 'title', 'type',
                'url', 'urldate', 'volume', 'year']
    with codecs.open(file, "r", "utf-8") as f:
        text = f.read()
    entries = []
    entry_blocks = [i for i in re.split("\n@", text) if not must_omit(i)]
    for entry in entry_blocks:
        entry_dict = {}
        search = re.match("(?P<type>.*){(?P<key>.*)", entry)
        if search:
            key = search.group("key")[:-1]
            if search.group("type").startswith('@'):
                type = search.group("type")[1:]
            else:
                type = search.group("type")
            entry_dict["key"] = key
            entry_dict["type"] = type
        for keyword in keywords:
            string = "\s*"+keyword+"\s*=\s*[{]?(?P<"+keyword+">\S.*),?\n"
            search = re.search(string, entry)
            if search:
                # Prohibits that 'eprinttype' overrides 'type'
                if keyword in entry_dict.keys():
                    continue
                value = search.group(keyword)
                if value.endswith(','):
                    value = value[:-1]
                if value.endswith('}}'):
                    value = value[:-1]
                if value.endswith('}') and not value.startswith('{'):
                    value = value[:-1]
                entry_dict[keyword] = value
        if entry_dict != {}:
            entries.append(BibEntry(**entry_dict))
    return entries


BibEntries = entries_from_file('bibliography.bib')
BibEntries.sort(key=lambda x: x.data['key'].lower())

for _ in BibEntries:
    print _.entry()
Add first version of the code
Add first version of the code
Python
mit
derherrg/pybibsort
Add first version of the code
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import re
from collections import OrderedDict
import codecs


class BibEntry:

    def __init__(self, **kwargs):
        self.data = {}
        for key, value in kwargs.iteritems():
            self.data[key] = value

    def entry(self):
        data = OrderedDict(sorted(self.data.items(), key=lambda t: t[0]))
        result = u'@{0}{{{1},\n'.format(self.data['type'].upper(), self.data['key'])
        for key, value in data.items():
            if key in ['type','key']:
                continue
            result += u'\t{0} = {{{1}}},\n'.format(key, value)
        result = result[:-2] + u'\n}\n'
        return result


def must_omit(i):
    return re.match("comment", i) or re.match("%%", i)


def entries_from_file(file):
    keywords = ['address', 'annote', 'author', 'booktitle', 'chapter',
                'crossref', 'doi', 'edition', 'editor', 'eprint',
                'eprintclass', 'eprinttype', 'howpublished', 'institution',
                'journal', 'month', 'note', 'number', 'organization',
                'pages', 'publisher', 'school', 'series', 'title', 'type',
                'url', 'urldate', 'volume', 'year']
    with codecs.open(file, "r", "utf-8") as f:
        text = f.read()
    entries = []
    entry_blocks = [i for i in re.split("\n@", text) if not must_omit(i)]
    for entry in entry_blocks:
        entry_dict = {}
        search = re.match("(?P<type>.*){(?P<key>.*)", entry)
        if search:
            key = search.group("key")[:-1]
            if search.group("type").startswith('@'):
                type = search.group("type")[1:]
            else:
                type = search.group("type")
            entry_dict["key"] = key
            entry_dict["type"] = type
        for keyword in keywords:
            string = "\s*"+keyword+"\s*=\s*[{]?(?P<"+keyword+">\S.*),?\n"
            search = re.search(string, entry)
            if search:
                # Prohibits that 'eprinttype' overrides 'type'
                if keyword in entry_dict.keys():
                    continue
                value = search.group(keyword)
                if value.endswith(','):
                    value = value[:-1]
                if value.endswith('}}'):
                    value = value[:-1]
                if value.endswith('}') and not value.startswith('{'):
                    value = value[:-1]
                entry_dict[keyword] = value
        if entry_dict != {}:
            entries.append(BibEntry(**entry_dict))
    return entries


BibEntries = entries_from_file('bibliography.bib')
BibEntries.sort(key=lambda x: x.data['key'].lower())

for _ in BibEntries:
    print _.entry()
<commit_before><commit_msg>Add first version of the code<commit_after>
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import re
from collections import OrderedDict
import codecs


class BibEntry:

    def __init__(self, **kwargs):
        self.data = {}
        for key, value in kwargs.iteritems():
            self.data[key] = value

    def entry(self):
        data = OrderedDict(sorted(self.data.items(), key=lambda t: t[0]))
        result = u'@{0}{{{1},\n'.format(self.data['type'].upper(), self.data['key'])
        for key, value in data.items():
            if key in ['type','key']:
                continue
            result += u'\t{0} = {{{1}}},\n'.format(key, value)
        result = result[:-2] + u'\n}\n'
        return result


def must_omit(i):
    return re.match("comment", i) or re.match("%%", i)


def entries_from_file(file):
    keywords = ['address', 'annote', 'author', 'booktitle', 'chapter',
                'crossref', 'doi', 'edition', 'editor', 'eprint',
                'eprintclass', 'eprinttype', 'howpublished', 'institution',
                'journal', 'month', 'note', 'number', 'organization',
                'pages', 'publisher', 'school', 'series', 'title', 'type',
                'url', 'urldate', 'volume', 'year']
    with codecs.open(file, "r", "utf-8") as f:
        text = f.read()
    entries = []
    entry_blocks = [i for i in re.split("\n@", text) if not must_omit(i)]
    for entry in entry_blocks:
        entry_dict = {}
        search = re.match("(?P<type>.*){(?P<key>.*)", entry)
        if search:
            key = search.group("key")[:-1]
            if search.group("type").startswith('@'):
                type = search.group("type")[1:]
            else:
                type = search.group("type")
            entry_dict["key"] = key
            entry_dict["type"] = type
        for keyword in keywords:
            string = "\s*"+keyword+"\s*=\s*[{]?(?P<"+keyword+">\S.*),?\n"
            search = re.search(string, entry)
            if search:
                # Prohibits that 'eprinttype' overrides 'type'
                if keyword in entry_dict.keys():
                    continue
                value = search.group(keyword)
                if value.endswith(','):
                    value = value[:-1]
                if value.endswith('}}'):
                    value = value[:-1]
                if value.endswith('}') and not value.startswith('{'):
                    value = value[:-1]
                entry_dict[keyword] = value
        if entry_dict != {}:
            entries.append(BibEntry(**entry_dict))
    return entries


BibEntries = entries_from_file('bibliography.bib')
BibEntries.sort(key=lambda x: x.data['key'].lower())

for _ in BibEntries:
    print _.entry()
Add first version of the code#!/usr/bin/env python
# -*- coding: utf-8 -*-

import re
from collections import OrderedDict
import codecs


class BibEntry:

    def __init__(self, **kwargs):
        self.data = {}
        for key, value in kwargs.iteritems():
            self.data[key] = value

    def entry(self):
        data = OrderedDict(sorted(self.data.items(), key=lambda t: t[0]))
        result = u'@{0}{{{1},\n'.format(self.data['type'].upper(), self.data['key'])
        for key, value in data.items():
            if key in ['type','key']:
                continue
            result += u'\t{0} = {{{1}}},\n'.format(key, value)
        result = result[:-2] + u'\n}\n'
        return result


def must_omit(i):
    return re.match("comment", i) or re.match("%%", i)


def entries_from_file(file):
    keywords = ['address', 'annote', 'author', 'booktitle', 'chapter',
                'crossref', 'doi', 'edition', 'editor', 'eprint',
                'eprintclass', 'eprinttype', 'howpublished', 'institution',
                'journal', 'month', 'note', 'number', 'organization',
                'pages', 'publisher', 'school', 'series', 'title', 'type',
                'url', 'urldate', 'volume', 'year']
    with codecs.open(file, "r", "utf-8") as f:
        text = f.read()
    entries = []
    entry_blocks = [i for i in re.split("\n@", text) if not must_omit(i)]
    for entry in entry_blocks:
        entry_dict = {}
        search = re.match("(?P<type>.*){(?P<key>.*)", entry)
        if search:
            key = search.group("key")[:-1]
            if search.group("type").startswith('@'):
                type = search.group("type")[1:]
            else:
                type = search.group("type")
            entry_dict["key"] = key
            entry_dict["type"] = type
        for keyword in keywords:
            string = "\s*"+keyword+"\s*=\s*[{]?(?P<"+keyword+">\S.*),?\n"
            search = re.search(string, entry)
            if search:
                # Prohibits that 'eprinttype' overrides 'type'
                if keyword in entry_dict.keys():
                    continue
                value = search.group(keyword)
                if value.endswith(','):
                    value = value[:-1]
                if value.endswith('}}'):
                    value = value[:-1]
                if value.endswith('}') and not value.startswith('{'):
                    value = value[:-1]
                entry_dict[keyword] = value
        if entry_dict != {}:
            entries.append(BibEntry(**entry_dict))
    return entries


BibEntries = entries_from_file('bibliography.bib')
BibEntries.sort(key=lambda x: x.data['key'].lower())

for _ in BibEntries:
    print _.entry()
<commit_before><commit_msg>Add first version of the code<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-

import re
from collections import OrderedDict
import codecs


class BibEntry:

    def __init__(self, **kwargs):
        self.data = {}
        for key, value in kwargs.iteritems():
            self.data[key] = value

    def entry(self):
        data = OrderedDict(sorted(self.data.items(), key=lambda t: t[0]))
        result = u'@{0}{{{1},\n'.format(self.data['type'].upper(), self.data['key'])
        for key, value in data.items():
            if key in ['type','key']:
                continue
            result += u'\t{0} = {{{1}}},\n'.format(key, value)
        result = result[:-2] + u'\n}\n'
        return result


def must_omit(i):
    return re.match("comment", i) or re.match("%%", i)


def entries_from_file(file):
    keywords = ['address', 'annote', 'author', 'booktitle', 'chapter',
                'crossref', 'doi', 'edition', 'editor', 'eprint',
                'eprintclass', 'eprinttype', 'howpublished', 'institution',
                'journal', 'month', 'note', 'number', 'organization',
                'pages', 'publisher', 'school', 'series', 'title', 'type',
                'url', 'urldate', 'volume', 'year']
    with codecs.open(file, "r", "utf-8") as f:
        text = f.read()
    entries = []
    entry_blocks = [i for i in re.split("\n@", text) if not must_omit(i)]
    for entry in entry_blocks:
        entry_dict = {}
        search = re.match("(?P<type>.*){(?P<key>.*)", entry)
        if search:
            key = search.group("key")[:-1]
            if search.group("type").startswith('@'):
                type = search.group("type")[1:]
            else:
                type = search.group("type")
            entry_dict["key"] = key
            entry_dict["type"] = type
        for keyword in keywords:
            string = "\s*"+keyword+"\s*=\s*[{]?(?P<"+keyword+">\S.*),?\n"
            search = re.search(string, entry)
            if search:
                # Prohibits that 'eprinttype' overrides 'type'
                if keyword in entry_dict.keys():
                    continue
                value = search.group(keyword)
                if value.endswith(','):
                    value = value[:-1]
                if value.endswith('}}'):
                    value = value[:-1]
                if value.endswith('}') and not value.startswith('{'):
                    value = value[:-1]
                entry_dict[keyword] = value
        if entry_dict != {}:
            entries.append(BibEntry(**entry_dict))
    return entries


BibEntries = entries_from_file('bibliography.bib')
BibEntries.sort(key=lambda x: x.data['key'].lower())

for _ in BibEntries:
    print _.entry()
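Building a single entry by hand shows the serialization format (Python 2, matching the record's iteritems/print syntax); the field values are invented:

e = BibEntry(type='article', key='knuth1984',
             title='Literate Programming', year='1984')
print e.entry()
# @ARTICLE{knuth1984,
#         title = {Literate Programming},
#         year = {1984}
# }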
a086e7328ca920f269812a87be095ce638467f95
crawler/youtube_dl_op_sample.py
crawler/youtube_dl_op_sample.py
#!/usr/bin/env python2
#-*- coding: utf-8 -*-
import sys

import youtube_dl


def main():
    if len(sys.argv) < 2:
        print("Usage: youtube_dl_op_sample.py URL")
        return

    opts = {
        'forceurl': True,
        'quiet': True,
        'simulate': True,
    }

    url = sys.argv[1]
    try:
        with youtube_dl.YoutubeDL(opts) as ydl:
            extract_info = ydl.extract_info(url)
            resource_uri = extract_info.get('url')
            if not resource_uri:
                format_id = extract_info.get('format_id')
                for fmt in extract_info.get('formats'):
                    if format_id != fmt.get('format_id'):
                        continue
                    resource_uri = fmt.get('url')
    except Exception as e:
        print(e)
        resource_uri = None

    if resource_uri:
        print("resource_uri: %s" % resource_uri)
    else:
        print("Nothing at all.")


if __name__ == '__main__':
    main()
Add youtube-dl library sample of operation
[Crawler] Add youtube-dl library sample of operation
Python
mit
daineseh/python_code
[Crawler] Add youtube-dl library sample of operation
#!/usr/bin/env python2
#-*- coding: utf-8 -*-
import sys

import youtube_dl


def main():
    if len(sys.argv) < 2:
        print("Usage: youtube_dl_op_sample.py URL")
        return

    opts = {
        'forceurl': True,
        'quiet': True,
        'simulate': True,
    }

    url = sys.argv[1]
    try:
        with youtube_dl.YoutubeDL(opts) as ydl:
            extract_info = ydl.extract_info(url)
            resource_uri = extract_info.get('url')
            if not resource_uri:
                format_id = extract_info.get('format_id')
                for fmt in extract_info.get('formats'):
                    if format_id != fmt.get('format_id'):
                        continue
                    resource_uri = fmt.get('url')
    except Exception as e:
        print(e)
        resource_uri = None

    if resource_uri:
        print("resource_uri: %s" % resource_uri)
    else:
        print("Nothing at all.")


if __name__ == '__main__':
    main()
<commit_before><commit_msg>[Crawler] Add youtube-dl library sample of operation<commit_after>
#!/usr/bin/env python2
#-*- coding: utf-8 -*-
import sys

import youtube_dl


def main():
    if len(sys.argv) < 2:
        print("Usage: youtube_dl_op_sample.py URL")
        return

    opts = {
        'forceurl': True,
        'quiet': True,
        'simulate': True,
    }

    url = sys.argv[1]
    try:
        with youtube_dl.YoutubeDL(opts) as ydl:
            extract_info = ydl.extract_info(url)
            resource_uri = extract_info.get('url')
            if not resource_uri:
                format_id = extract_info.get('format_id')
                for fmt in extract_info.get('formats'):
                    if format_id != fmt.get('format_id'):
                        continue
                    resource_uri = fmt.get('url')
    except Exception as e:
        print(e)
        resource_uri = None

    if resource_uri:
        print("resource_uri: %s" % resource_uri)
    else:
        print("Nothing at all.")


if __name__ == '__main__':
    main()
[Crawler] Add youtube-dl library sample of operation#!/usr/bin/env python2
#-*- coding: utf-8 -*-
import sys

import youtube_dl


def main():
    if len(sys.argv) < 2:
        print("Usage: youtube_dl_op_sample.py URL")
        return

    opts = {
        'forceurl': True,
        'quiet': True,
        'simulate': True,
    }

    url = sys.argv[1]
    try:
        with youtube_dl.YoutubeDL(opts) as ydl:
            extract_info = ydl.extract_info(url)
            resource_uri = extract_info.get('url')
            if not resource_uri:
                format_id = extract_info.get('format_id')
                for fmt in extract_info.get('formats'):
                    if format_id != fmt.get('format_id'):
                        continue
                    resource_uri = fmt.get('url')
    except Exception as e:
        print(e)
        resource_uri = None

    if resource_uri:
        print("resource_uri: %s" % resource_uri)
    else:
        print("Nothing at all.")


if __name__ == '__main__':
    main()
<commit_before><commit_msg>[Crawler] Add youtube-dl library sample of operation<commit_after>#!/usr/bin/env python2
#-*- coding: utf-8 -*-
import sys

import youtube_dl


def main():
    if len(sys.argv) < 2:
        print("Usage: youtube_dl_op_sample.py URL")
        return

    opts = {
        'forceurl': True,
        'quiet': True,
        'simulate': True,
    }

    url = sys.argv[1]
    try:
        with youtube_dl.YoutubeDL(opts) as ydl:
            extract_info = ydl.extract_info(url)
            resource_uri = extract_info.get('url')
            if not resource_uri:
                format_id = extract_info.get('format_id')
                for fmt in extract_info.get('formats'):
                    if format_id != fmt.get('format_id'):
                        continue
                    resource_uri = fmt.get('url')
    except Exception as e:
        print(e)
        resource_uri = None

    if resource_uri:
        print("resource_uri: %s" % resource_uri)
    else:
        print("Nothing at all.")


if __name__ == '__main__':
    main()
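The same resolution can be done inline; download=False plays the role of the 'simulate' flag in the record's options, and the URL is a placeholder:

import youtube_dl

with youtube_dl.YoutubeDL({'quiet': True}) as ydl:
    info = ydl.extract_info('https://example.com/some-video', download=False)

resource_uri = info.get('url') or next(
    (f.get('url') for f in info.get('formats', [])
     if f.get('format_id') == info.get('format_id')), None)
print(resource_uri)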
1058a9cb6e667c850f56b6003038496b77c359c5
website/tools/append_index_html_to_internal_links.py
website/tools/append_index_html_to_internal_links.py
"""Script to fix the links in the staged website. Finds all internal links which do not have index.html at the end and appends index.html in the appropriate place (preserving anchors, etc). Usage: From root directory, after running the jekyll build, execute 'python tools/append_index_html_to_internal_links.py'. Dependencies: beautifulsoup4 Installable via pip as 'sudo pip install beautifulsoup4' or apt via 'sudo apt-get install python-beautifulsoup4'. """ import fnmatch import os import re from bs4 import BeautifulSoup # Original link match. Matches any string which starts with '/' and doesn't # have a file extension. linkMatch = r'^\/(.*\.(?!([^\/]+)$))?[^.]*$' # Regex which matches strings of type /internal/link/#anchor. Breaks into two # groups for ease of inserting 'index.html'. anchorMatch1 = r'(.+\/)(#[^\/]+$)' # Regex which matches strings of type /internal/link#anchor. Breaks into two # groups for ease of inserting 'index.html'. anchorMatch2 = r'(.+\/[a-zA-Z0-9]+)(#[^\/]+$)' matches = [] # Recursively walk content directory and find all html files. for root, dirnames, filenames in os.walk('content'): for filename in fnmatch.filter(filenames, '*.html'): # Javadoc does not have the index.html problem, so omit it. if 'javadoc' not in root: matches.append(os.path.join(root, filename)) print 'Matches: ' + str(len(matches)) # Iterates over each matched file looking for link matches. for match in matches: print 'Fixing links in: ' + match mf = open(match) soup = BeautifulSoup(mf, "lxml") # Iterates over every <a> for a in soup.findAll('a'): try: hr = a['href'] if re.match(linkMatch, hr) is not None: if hr.endswith('/'): # /internal/link/ a['href'] = hr + 'index.html' elif re.match(anchorMatch1, hr) is not None: # /internal/link/#anchor mat = re.match(anchorMatch1, hr) a['href'] = mat.group(1) + 'index.html' + mat.group(2) elif re.match(anchorMatch2, hr) is not None: # /internal/link#anchor mat = re.match(anchorMatch2, hr) a['href'] = mat.group(1) + '/index.html' + mat.group(2) else: # /internal/link a['href'] = hr + '/index.html' mf.close() html = soup.prettify("utf-8") # Write back to the file. with open(match, "wb") as f: print 'Replacing ' + hr + ' with: ' + a['href'] f.write(html) except KeyError as e: # Some <a> tags don't have an href. continue
Add tool to fix links.
Add tool to fix links.

Signed-off-by: Jason Kuster <68c46a606457643eab92053c1c05574abb26f861@google.com>
Python
apache-2.0
rangadi/beam,robertwb/incubator-beam,lukecwik/incubator-beam,lukecwik/incubator-beam,markflyhigh/incubator-beam,chamikaramj/beam,chamikaramj/beam,RyanSkraba/beam,rangadi/incubator-beam,lukecwik/incubator-beam,robertwb/incubator-beam,charlesccychen/incubator-beam,chamikaramj/beam,rangadi/beam,charlesccychen/beam,markflyhigh/incubator-beam,rangadi/beam,rangadi/beam,lukecwik/incubator-beam,chamikaramj/beam,charlesccychen/beam,apache/beam,lukecwik/incubator-beam,markflyhigh/incubator-beam,apache/beam,RyanSkraba/beam,robertwb/incubator-beam,RyanSkraba/beam,chamikaramj/beam,markflyhigh/incubator-beam,rangadi/incubator-beam,apache/beam,robertwb/incubator-beam,apache/beam,iemejia/incubator-beam,RyanSkraba/beam,chamikaramj/beam,iemejia/incubator-beam,apache/beam,mxm/incubator-beam,chamikaramj/beam,charlesccychen/beam,lukecwik/incubator-beam,robertwb/incubator-beam,robertwb/incubator-beam,apache/beam,lukecwik/incubator-beam,rangadi/beam,charlesccychen/beam,markflyhigh/incubator-beam,charlesccychen/incubator-beam,apache/beam,markflyhigh/incubator-beam,markflyhigh/incubator-beam,robertwb/incubator-beam,apache/beam,charlesccychen/beam,mxm/incubator-beam,charlesccychen/beam,lukecwik/incubator-beam,rangadi/incubator-beam,robertwb/incubator-beam,charlesccychen/incubator-beam,RyanSkraba/beam,charlesccychen/beam,robertwb/incubator-beam,chamikaramj/beam,chamikaramj/beam,chamikaramj/beam,rangadi/beam,robertwb/incubator-beam,apache/beam,apache/beam,lukecwik/incubator-beam,RyanSkraba/beam,rangadi/beam,lukecwik/incubator-beam,apache/beam,RyanSkraba/beam
Add tool to fix links.

Signed-off-by: Jason Kuster <68c46a606457643eab92053c1c05574abb26f861@google.com>
"""Script to fix the links in the staged website. Finds all internal links which do not have index.html at the end and appends index.html in the appropriate place (preserving anchors, etc). Usage: From root directory, after running the jekyll build, execute 'python tools/append_index_html_to_internal_links.py'. Dependencies: beautifulsoup4 Installable via pip as 'sudo pip install beautifulsoup4' or apt via 'sudo apt-get install python-beautifulsoup4'. """ import fnmatch import os import re from bs4 import BeautifulSoup # Original link match. Matches any string which starts with '/' and doesn't # have a file extension. linkMatch = r'^\/(.*\.(?!([^\/]+)$))?[^.]*$' # Regex which matches strings of type /internal/link/#anchor. Breaks into two # groups for ease of inserting 'index.html'. anchorMatch1 = r'(.+\/)(#[^\/]+$)' # Regex which matches strings of type /internal/link#anchor. Breaks into two # groups for ease of inserting 'index.html'. anchorMatch2 = r'(.+\/[a-zA-Z0-9]+)(#[^\/]+$)' matches = [] # Recursively walk content directory and find all html files. for root, dirnames, filenames in os.walk('content'): for filename in fnmatch.filter(filenames, '*.html'): # Javadoc does not have the index.html problem, so omit it. if 'javadoc' not in root: matches.append(os.path.join(root, filename)) print 'Matches: ' + str(len(matches)) # Iterates over each matched file looking for link matches. for match in matches: print 'Fixing links in: ' + match mf = open(match) soup = BeautifulSoup(mf, "lxml") # Iterates over every <a> for a in soup.findAll('a'): try: hr = a['href'] if re.match(linkMatch, hr) is not None: if hr.endswith('/'): # /internal/link/ a['href'] = hr + 'index.html' elif re.match(anchorMatch1, hr) is not None: # /internal/link/#anchor mat = re.match(anchorMatch1, hr) a['href'] = mat.group(1) + 'index.html' + mat.group(2) elif re.match(anchorMatch2, hr) is not None: # /internal/link#anchor mat = re.match(anchorMatch2, hr) a['href'] = mat.group(1) + '/index.html' + mat.group(2) else: # /internal/link a['href'] = hr + '/index.html' mf.close() html = soup.prettify("utf-8") # Write back to the file. with open(match, "wb") as f: print 'Replacing ' + hr + ' with: ' + a['href'] f.write(html) except KeyError as e: # Some <a> tags don't have an href. continue
<commit_before><commit_msg>Add tool to fix links. Signed-off-by: Jason Kuster <68c46a606457643eab92053c1c05574abb26f861@google.com><commit_after>
"""Script to fix the links in the staged website. Finds all internal links which do not have index.html at the end and appends index.html in the appropriate place (preserving anchors, etc). Usage: From root directory, after running the jekyll build, execute 'python tools/append_index_html_to_internal_links.py'. Dependencies: beautifulsoup4 Installable via pip as 'sudo pip install beautifulsoup4' or apt via 'sudo apt-get install python-beautifulsoup4'. """ import fnmatch import os import re from bs4 import BeautifulSoup # Original link match. Matches any string which starts with '/' and doesn't # have a file extension. linkMatch = r'^\/(.*\.(?!([^\/]+)$))?[^.]*$' # Regex which matches strings of type /internal/link/#anchor. Breaks into two # groups for ease of inserting 'index.html'. anchorMatch1 = r'(.+\/)(#[^\/]+$)' # Regex which matches strings of type /internal/link#anchor. Breaks into two # groups for ease of inserting 'index.html'. anchorMatch2 = r'(.+\/[a-zA-Z0-9]+)(#[^\/]+$)' matches = [] # Recursively walk content directory and find all html files. for root, dirnames, filenames in os.walk('content'): for filename in fnmatch.filter(filenames, '*.html'): # Javadoc does not have the index.html problem, so omit it. if 'javadoc' not in root: matches.append(os.path.join(root, filename)) print 'Matches: ' + str(len(matches)) # Iterates over each matched file looking for link matches. for match in matches: print 'Fixing links in: ' + match mf = open(match) soup = BeautifulSoup(mf, "lxml") # Iterates over every <a> for a in soup.findAll('a'): try: hr = a['href'] if re.match(linkMatch, hr) is not None: if hr.endswith('/'): # /internal/link/ a['href'] = hr + 'index.html' elif re.match(anchorMatch1, hr) is not None: # /internal/link/#anchor mat = re.match(anchorMatch1, hr) a['href'] = mat.group(1) + 'index.html' + mat.group(2) elif re.match(anchorMatch2, hr) is not None: # /internal/link#anchor mat = re.match(anchorMatch2, hr) a['href'] = mat.group(1) + '/index.html' + mat.group(2) else: # /internal/link a['href'] = hr + '/index.html' mf.close() html = soup.prettify("utf-8") # Write back to the file. with open(match, "wb") as f: print 'Replacing ' + hr + ' with: ' + a['href'] f.write(html) except KeyError as e: # Some <a> tags don't have an href. continue
Add tool to fix links. Signed-off-by: Jason Kuster <68c46a606457643eab92053c1c05574abb26f861@google.com>"""Script to fix the links in the staged website. Finds all internal links which do not have index.html at the end and appends index.html in the appropriate place (preserving anchors, etc). Usage: From root directory, after running the jekyll build, execute 'python tools/append_index_html_to_internal_links.py'. Dependencies: beautifulsoup4 Installable via pip as 'sudo pip install beautifulsoup4' or apt via 'sudo apt-get install python-beautifulsoup4'. """ import fnmatch import os import re from bs4 import BeautifulSoup # Original link match. Matches any string which starts with '/' and doesn't # have a file extension. linkMatch = r'^\/(.*\.(?!([^\/]+)$))?[^.]*$' # Regex which matches strings of type /internal/link/#anchor. Breaks into two # groups for ease of inserting 'index.html'. anchorMatch1 = r'(.+\/)(#[^\/]+$)' # Regex which matches strings of type /internal/link#anchor. Breaks into two # groups for ease of inserting 'index.html'. anchorMatch2 = r'(.+\/[a-zA-Z0-9]+)(#[^\/]+$)' matches = [] # Recursively walk content directory and find all html files. for root, dirnames, filenames in os.walk('content'): for filename in fnmatch.filter(filenames, '*.html'): # Javadoc does not have the index.html problem, so omit it. if 'javadoc' not in root: matches.append(os.path.join(root, filename)) print 'Matches: ' + str(len(matches)) # Iterates over each matched file looking for link matches. for match in matches: print 'Fixing links in: ' + match mf = open(match) soup = BeautifulSoup(mf, "lxml") # Iterates over every <a> for a in soup.findAll('a'): try: hr = a['href'] if re.match(linkMatch, hr) is not None: if hr.endswith('/'): # /internal/link/ a['href'] = hr + 'index.html' elif re.match(anchorMatch1, hr) is not None: # /internal/link/#anchor mat = re.match(anchorMatch1, hr) a['href'] = mat.group(1) + 'index.html' + mat.group(2) elif re.match(anchorMatch2, hr) is not None: # /internal/link#anchor mat = re.match(anchorMatch2, hr) a['href'] = mat.group(1) + '/index.html' + mat.group(2) else: # /internal/link a['href'] = hr + '/index.html' mf.close() html = soup.prettify("utf-8") # Write back to the file. with open(match, "wb") as f: print 'Replacing ' + hr + ' with: ' + a['href'] f.write(html) except KeyError as e: # Some <a> tags don't have an href. continue
<commit_before><commit_msg>Add tool to fix links. Signed-off-by: Jason Kuster <68c46a606457643eab92053c1c05574abb26f861@google.com><commit_after>"""Script to fix the links in the staged website. Finds all internal links which do not have index.html at the end and appends index.html in the appropriate place (preserving anchors, etc). Usage: From root directory, after running the jekyll build, execute 'python tools/append_index_html_to_internal_links.py'. Dependencies: beautifulsoup4 Installable via pip as 'sudo pip install beautifulsoup4' or apt via 'sudo apt-get install python-beautifulsoup4'. """ import fnmatch import os import re from bs4 import BeautifulSoup # Original link match. Matches any string which starts with '/' and doesn't # have a file extension. linkMatch = r'^\/(.*\.(?!([^\/]+)$))?[^.]*$' # Regex which matches strings of type /internal/link/#anchor. Breaks into two # groups for ease of inserting 'index.html'. anchorMatch1 = r'(.+\/)(#[^\/]+$)' # Regex which matches strings of type /internal/link#anchor. Breaks into two # groups for ease of inserting 'index.html'. anchorMatch2 = r'(.+\/[a-zA-Z0-9]+)(#[^\/]+$)' matches = [] # Recursively walk content directory and find all html files. for root, dirnames, filenames in os.walk('content'): for filename in fnmatch.filter(filenames, '*.html'): # Javadoc does not have the index.html problem, so omit it. if 'javadoc' not in root: matches.append(os.path.join(root, filename)) print 'Matches: ' + str(len(matches)) # Iterates over each matched file looking for link matches. for match in matches: print 'Fixing links in: ' + match mf = open(match) soup = BeautifulSoup(mf, "lxml") # Iterates over every <a> for a in soup.findAll('a'): try: hr = a['href'] if re.match(linkMatch, hr) is not None: if hr.endswith('/'): # /internal/link/ a['href'] = hr + 'index.html' elif re.match(anchorMatch1, hr) is not None: # /internal/link/#anchor mat = re.match(anchorMatch1, hr) a['href'] = mat.group(1) + 'index.html' + mat.group(2) elif re.match(anchorMatch2, hr) is not None: # /internal/link#anchor mat = re.match(anchorMatch2, hr) a['href'] = mat.group(1) + '/index.html' + mat.group(2) else: # /internal/link a['href'] = hr + '/index.html' mf.close() html = soup.prettify("utf-8") # Write back to the file. with open(match, "wb") as f: print 'Replacing ' + hr + ' with: ' + a['href'] f.write(html) except KeyError as e: # Some <a> tags don't have an href. continue
8d8f89c82511b86fb87cef5db3bad633283283cc
modelview/migrations/0044_auto_20191007_1227.py
modelview/migrations/0044_auto_20191007_1227.py
# -*- coding: utf-8 -*- # Generated by Django 1.11.25 on 2019-10-07 10:27 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('modelview', '0043_merge_20190425_1036'), ] operations = [ migrations.RemoveField( model_name='energyscenario', name='networks_electricity_gas_electricity', ), migrations.RemoveField( model_name='energyscenario', name='networks_electricity_gas_gas', ), migrations.AlterField( model_name='basicfactsheet', name='logo', field=models.ImageField(help_text='If a logo for the model exists load it up', null=True, upload_to='logos', verbose_name='Logo'), ), migrations.AlterField( model_name='basicfactsheet', name='methodical_focus_1', field=models.CharField(help_text='1-3 Keyords describing the main methodical focus of the model e.g."open source", "sector coupling"', max_length=50, verbose_name='Methodical Focus'), ), migrations.AlterField( model_name='basicfactsheet', name='source_of_funding', field=models.CharField(help_text='What is the main source of funding for the development of the model?', max_length=200, null=True, verbose_name='Source of funding'), ), ]
Add missing migrations in develop branch
Add missing migrations in develop branch
Python
agpl-3.0
openego/oeplatform,openego/oeplatform,openego/oeplatform,openego/oeplatform
Add missing migrations in develop branch
# -*- coding: utf-8 -*- # Generated by Django 1.11.25 on 2019-10-07 10:27 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('modelview', '0043_merge_20190425_1036'), ] operations = [ migrations.RemoveField( model_name='energyscenario', name='networks_electricity_gas_electricity', ), migrations.RemoveField( model_name='energyscenario', name='networks_electricity_gas_gas', ), migrations.AlterField( model_name='basicfactsheet', name='logo', field=models.ImageField(help_text='If a logo for the model exists load it up', null=True, upload_to='logos', verbose_name='Logo'), ), migrations.AlterField( model_name='basicfactsheet', name='methodical_focus_1', field=models.CharField(help_text='1-3 Keyords describing the main methodical focus of the model e.g."open source", "sector coupling"', max_length=50, verbose_name='Methodical Focus'), ), migrations.AlterField( model_name='basicfactsheet', name='source_of_funding', field=models.CharField(help_text='What is the main source of funding for the development of the model?', max_length=200, null=True, verbose_name='Source of funding'), ), ]
<commit_before><commit_msg>Add missing migrations in develop branch<commit_after>
# -*- coding: utf-8 -*- # Generated by Django 1.11.25 on 2019-10-07 10:27 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('modelview', '0043_merge_20190425_1036'), ] operations = [ migrations.RemoveField( model_name='energyscenario', name='networks_electricity_gas_electricity', ), migrations.RemoveField( model_name='energyscenario', name='networks_electricity_gas_gas', ), migrations.AlterField( model_name='basicfactsheet', name='logo', field=models.ImageField(help_text='If a logo for the model exists load it up', null=True, upload_to='logos', verbose_name='Logo'), ), migrations.AlterField( model_name='basicfactsheet', name='methodical_focus_1', field=models.CharField(help_text='1-3 Keyords describing the main methodical focus of the model e.g."open source", "sector coupling"', max_length=50, verbose_name='Methodical Focus'), ), migrations.AlterField( model_name='basicfactsheet', name='source_of_funding', field=models.CharField(help_text='What is the main source of funding for the development of the model?', max_length=200, null=True, verbose_name='Source of funding'), ), ]
Add missing migrations in develop branch# -*- coding: utf-8 -*- # Generated by Django 1.11.25 on 2019-10-07 10:27 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('modelview', '0043_merge_20190425_1036'), ] operations = [ migrations.RemoveField( model_name='energyscenario', name='networks_electricity_gas_electricity', ), migrations.RemoveField( model_name='energyscenario', name='networks_electricity_gas_gas', ), migrations.AlterField( model_name='basicfactsheet', name='logo', field=models.ImageField(help_text='If a logo for the model exists load it up', null=True, upload_to='logos', verbose_name='Logo'), ), migrations.AlterField( model_name='basicfactsheet', name='methodical_focus_1', field=models.CharField(help_text='1-3 Keyords describing the main methodical focus of the model e.g."open source", "sector coupling"', max_length=50, verbose_name='Methodical Focus'), ), migrations.AlterField( model_name='basicfactsheet', name='source_of_funding', field=models.CharField(help_text='What is the main source of funding for the development of the model?', max_length=200, null=True, verbose_name='Source of funding'), ), ]
<commit_before><commit_msg>Add missing migrations in develop branch<commit_after># -*- coding: utf-8 -*- # Generated by Django 1.11.25 on 2019-10-07 10:27 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('modelview', '0043_merge_20190425_1036'), ] operations = [ migrations.RemoveField( model_name='energyscenario', name='networks_electricity_gas_electricity', ), migrations.RemoveField( model_name='energyscenario', name='networks_electricity_gas_gas', ), migrations.AlterField( model_name='basicfactsheet', name='logo', field=models.ImageField(help_text='If a logo for the model exists load it up', null=True, upload_to='logos', verbose_name='Logo'), ), migrations.AlterField( model_name='basicfactsheet', name='methodical_focus_1', field=models.CharField(help_text='1-3 Keyords describing the main methodical focus of the model e.g."open source", "sector coupling"', max_length=50, verbose_name='Methodical Focus'), ), migrations.AlterField( model_name='basicfactsheet', name='source_of_funding', field=models.CharField(help_text='What is the main source of funding for the development of the model?', max_length=200, null=True, verbose_name='Source of funding'), ), ]
e5be29bc3c5a77493fe64bb3fc8b52611cc13469
zerver/tests/test_outgoing_webhook_interfaces.py
zerver/tests/test_outgoing_webhook_interfaces.py
# -*- coding: utf-8 -*- from __future__ import absolute_import from __future__ import print_function from typing import Any import mock import json from requests.models import Response from zerver.lib.test_classes import ZulipTestCase from zerver.outgoing_webhooks.generic import GenericOutgoingWebhookService class Test_GenericOutgoingWebhookService(ZulipTestCase): def setUp(self): # type: () -> None self.event = { u'command': '@**test**', u'message': { 'content': 'test_content', } } self.handler = GenericOutgoingWebhookService(service_name='test-service', base_url='http://example.domain.com', token='abcdef', user_profile=None) def test_process_event(self): # type: () -> None rest_operation, request_data = self.handler.process_event(self.event) request_data = json.loads(request_data) self.assertEqual(request_data['data'], "@**test**") self.assertEqual(request_data['token'], "abcdef") self.assertEqual(rest_operation['base_url'], "http://example.domain.com") self.assertEqual(rest_operation['method'], "POST") self.assertEqual(request_data['message'], self.event['message']) def test_process_success(self): # type: () -> None response = mock.Mock(spec=Response) response.text = json.dumps({"response_not_required": True}) success_response = self.handler.process_success(response, self.event) self.assertEqual(success_response, None) response.text = json.dumps({"response_string": 'test_content'}) success_response = self.handler.process_success(response, self.event) self.assertEqual(success_response, 'test_content') response.text = json.dumps({}) success_response = self.handler.process_success(response, self.event) self.assertEqual(success_response, "") def test_process_failure(self): # type: () -> None response = mock.Mock(spec=Response) response.text = 'test_content' success_response = self.handler.process_failure(response, self.event) self.assertEqual(success_response, 'test_content')
Add tests for Generic Interface.
Add tests for Generic Interface.
Python
apache-2.0
rht/zulip,mahim97/zulip,rht/zulip,mahim97/zulip,brainwane/zulip,hackerkid/zulip,zulip/zulip,eeshangarg/zulip,Galexrt/zulip,vabs22/zulip,kou/zulip,rishig/zulip,amanharitsh123/zulip,punchagan/zulip,eeshangarg/zulip,andersk/zulip,shubhamdhama/zulip,showell/zulip,jackrzhang/zulip,eeshangarg/zulip,hackerkid/zulip,vabs22/zulip,kou/zulip,verma-varsha/zulip,amanharitsh123/zulip,dhcrzf/zulip,brainwane/zulip,hackerkid/zulip,shubhamdhama/zulip,kou/zulip,tommyip/zulip,vabs22/zulip,andersk/zulip,dhcrzf/zulip,vaidap/zulip,showell/zulip,eeshangarg/zulip,vabs22/zulip,synicalsyntax/zulip,shubhamdhama/zulip,timabbott/zulip,mahim97/zulip,rht/zulip,brainwane/zulip,shubhamdhama/zulip,brainwane/zulip,vabs22/zulip,rishig/zulip,synicalsyntax/zulip,punchagan/zulip,shubhamdhama/zulip,verma-varsha/zulip,verma-varsha/zulip,Galexrt/zulip,jackrzhang/zulip,tommyip/zulip,rishig/zulip,vaidap/zulip,brockwhittaker/zulip,tommyip/zulip,tommyip/zulip,brockwhittaker/zulip,brockwhittaker/zulip,zulip/zulip,jackrzhang/zulip,showell/zulip,timabbott/zulip,amanharitsh123/zulip,synicalsyntax/zulip,rishig/zulip,punchagan/zulip,Galexrt/zulip,hackerkid/zulip,brainwane/zulip,brockwhittaker/zulip,hackerkid/zulip,rht/zulip,vabs22/zulip,mahim97/zulip,rishig/zulip,rishig/zulip,kou/zulip,eeshangarg/zulip,vaidap/zulip,amanharitsh123/zulip,andersk/zulip,synicalsyntax/zulip,punchagan/zulip,hackerkid/zulip,andersk/zulip,verma-varsha/zulip,timabbott/zulip,vaidap/zulip,timabbott/zulip,verma-varsha/zulip,mahim97/zulip,punchagan/zulip,synicalsyntax/zulip,timabbott/zulip,brainwane/zulip,zulip/zulip,rht/zulip,amanharitsh123/zulip,vaidap/zulip,vaidap/zulip,punchagan/zulip,showell/zulip,zulip/zulip,jackrzhang/zulip,kou/zulip,showell/zulip,synicalsyntax/zulip,jackrzhang/zulip,punchagan/zulip,shubhamdhama/zulip,kou/zulip,amanharitsh123/zulip,Galexrt/zulip,shubhamdhama/zulip,dhcrzf/zulip,brockwhittaker/zulip,kou/zulip,dhcrzf/zulip,jackrzhang/zulip,timabbott/zulip,eeshangarg/zulip,andersk/zulip,zulip/zulip,hackerkid/zulip,zulip/zulip,rishig/zulip,timabbott/zulip,showell/zulip,zulip/zulip,tommyip/zulip,brockwhittaker/zulip,synicalsyntax/zulip,Galexrt/zulip,andersk/zulip,verma-varsha/zulip,brainwane/zulip,jackrzhang/zulip,Galexrt/zulip,rht/zulip,mahim97/zulip,Galexrt/zulip,andersk/zulip,dhcrzf/zulip,dhcrzf/zulip,dhcrzf/zulip,tommyip/zulip,eeshangarg/zulip,tommyip/zulip,rht/zulip,showell/zulip
Add tests for Generic Interface.
# -*- coding: utf-8 -*- from __future__ import absolute_import from __future__ import print_function from typing import Any import mock import json from requests.models import Response from zerver.lib.test_classes import ZulipTestCase from zerver.outgoing_webhooks.generic import GenericOutgoingWebhookService class Test_GenericOutgoingWebhookService(ZulipTestCase): def setUp(self): # type: () -> None self.event = { u'command': '@**test**', u'message': { 'content': 'test_content', } } self.handler = GenericOutgoingWebhookService(service_name='test-service', base_url='http://example.domain.com', token='abcdef', user_profile=None) def test_process_event(self): # type: () -> None rest_operation, request_data = self.handler.process_event(self.event) request_data = json.loads(request_data) self.assertEqual(request_data['data'], "@**test**") self.assertEqual(request_data['token'], "abcdef") self.assertEqual(rest_operation['base_url'], "http://example.domain.com") self.assertEqual(rest_operation['method'], "POST") self.assertEqual(request_data['message'], self.event['message']) def test_process_success(self): # type: () -> None response = mock.Mock(spec=Response) response.text = json.dumps({"response_not_required": True}) success_response = self.handler.process_success(response, self.event) self.assertEqual(success_response, None) response.text = json.dumps({"response_string": 'test_content'}) success_response = self.handler.process_success(response, self.event) self.assertEqual(success_response, 'test_content') response.text = json.dumps({}) success_response = self.handler.process_success(response, self.event) self.assertEqual(success_response, "") def test_process_failure(self): # type: () -> None response = mock.Mock(spec=Response) response.text = 'test_content' success_response = self.handler.process_failure(response, self.event) self.assertEqual(success_response, 'test_content')
<commit_before><commit_msg>Add tests for Generic Interface.<commit_after>
# -*- coding: utf-8 -*- from __future__ import absolute_import from __future__ import print_function from typing import Any import mock import json from requests.models import Response from zerver.lib.test_classes import ZulipTestCase from zerver.outgoing_webhooks.generic import GenericOutgoingWebhookService class Test_GenericOutgoingWebhookService(ZulipTestCase): def setUp(self): # type: () -> None self.event = { u'command': '@**test**', u'message': { 'content': 'test_content', } } self.handler = GenericOutgoingWebhookService(service_name='test-service', base_url='http://example.domain.com', token='abcdef', user_profile=None) def test_process_event(self): # type: () -> None rest_operation, request_data = self.handler.process_event(self.event) request_data = json.loads(request_data) self.assertEqual(request_data['data'], "@**test**") self.assertEqual(request_data['token'], "abcdef") self.assertEqual(rest_operation['base_url'], "http://example.domain.com") self.assertEqual(rest_operation['method'], "POST") self.assertEqual(request_data['message'], self.event['message']) def test_process_success(self): # type: () -> None response = mock.Mock(spec=Response) response.text = json.dumps({"response_not_required": True}) success_response = self.handler.process_success(response, self.event) self.assertEqual(success_response, None) response.text = json.dumps({"response_string": 'test_content'}) success_response = self.handler.process_success(response, self.event) self.assertEqual(success_response, 'test_content') response.text = json.dumps({}) success_response = self.handler.process_success(response, self.event) self.assertEqual(success_response, "") def test_process_failure(self): # type: () -> None response = mock.Mock(spec=Response) response.text = 'test_content' success_response = self.handler.process_failure(response, self.event) self.assertEqual(success_response, 'test_content')
Add tests for Generic Interface.# -*- coding: utf-8 -*- from __future__ import absolute_import from __future__ import print_function from typing import Any import mock import json from requests.models import Response from zerver.lib.test_classes import ZulipTestCase from zerver.outgoing_webhooks.generic import GenericOutgoingWebhookService class Test_GenericOutgoingWebhookService(ZulipTestCase): def setUp(self): # type: () -> None self.event = { u'command': '@**test**', u'message': { 'content': 'test_content', } } self.handler = GenericOutgoingWebhookService(service_name='test-service', base_url='http://example.domain.com', token='abcdef', user_profile=None) def test_process_event(self): # type: () -> None rest_operation, request_data = self.handler.process_event(self.event) request_data = json.loads(request_data) self.assertEqual(request_data['data'], "@**test**") self.assertEqual(request_data['token'], "abcdef") self.assertEqual(rest_operation['base_url'], "http://example.domain.com") self.assertEqual(rest_operation['method'], "POST") self.assertEqual(request_data['message'], self.event['message']) def test_process_success(self): # type: () -> None response = mock.Mock(spec=Response) response.text = json.dumps({"response_not_required": True}) success_response = self.handler.process_success(response, self.event) self.assertEqual(success_response, None) response.text = json.dumps({"response_string": 'test_content'}) success_response = self.handler.process_success(response, self.event) self.assertEqual(success_response, 'test_content') response.text = json.dumps({}) success_response = self.handler.process_success(response, self.event) self.assertEqual(success_response, "") def test_process_failure(self): # type: () -> None response = mock.Mock(spec=Response) response.text = 'test_content' success_response = self.handler.process_failure(response, self.event) self.assertEqual(success_response, 'test_content')
<commit_before><commit_msg>Add tests for Generic Interface.<commit_after># -*- coding: utf-8 -*- from __future__ import absolute_import from __future__ import print_function from typing import Any import mock import json from requests.models import Response from zerver.lib.test_classes import ZulipTestCase from zerver.outgoing_webhooks.generic import GenericOutgoingWebhookService class Test_GenericOutgoingWebhookService(ZulipTestCase): def setUp(self): # type: () -> None self.event = { u'command': '@**test**', u'message': { 'content': 'test_content', } } self.handler = GenericOutgoingWebhookService(service_name='test-service', base_url='http://example.domain.com', token='abcdef', user_profile=None) def test_process_event(self): # type: () -> None rest_operation, request_data = self.handler.process_event(self.event) request_data = json.loads(request_data) self.assertEqual(request_data['data'], "@**test**") self.assertEqual(request_data['token'], "abcdef") self.assertEqual(rest_operation['base_url'], "http://example.domain.com") self.assertEqual(rest_operation['method'], "POST") self.assertEqual(request_data['message'], self.event['message']) def test_process_success(self): # type: () -> None response = mock.Mock(spec=Response) response.text = json.dumps({"response_not_required": True}) success_response = self.handler.process_success(response, self.event) self.assertEqual(success_response, None) response.text = json.dumps({"response_string": 'test_content'}) success_response = self.handler.process_success(response, self.event) self.assertEqual(success_response, 'test_content') response.text = json.dumps({}) success_response = self.handler.process_success(response, self.event) self.assertEqual(success_response, "") def test_process_failure(self): # type: () -> None response = mock.Mock(spec=Response) response.text = 'test_content' success_response = self.handler.process_failure(response, self.event) self.assertEqual(success_response, 'test_content')
03fce72b60eb8cad2368447cf23f72f8084f4a4b
py/distribute-candies.py
py/distribute-candies.py
class Solution(object): def distributeCandies(self, candies): """ :type candies: List[int] :rtype: int """ return min(len(candies) / 2, len(set(candies)))
Add py solution for 575. Distribute Candies
Add py solution for 575. Distribute Candies 575. Distribute Candies: https://leetcode.com/problems/distribute-candies/
Python
apache-2.0
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
Add py solution for 575. Distribute Candies 575. Distribute Candies: https://leetcode.com/problems/distribute-candies/
class Solution(object): def distributeCandies(self, candies): """ :type candies: List[int] :rtype: int """ return min(len(candies) / 2, len(set(candies)))
<commit_before><commit_msg>Add py solution for 575. Distribute Candies 575. Distribute Candies: https://leetcode.com/problems/distribute-candies/<commit_after>
class Solution(object): def distributeCandies(self, candies): """ :type candies: List[int] :rtype: int """ return min(len(candies) / 2, len(set(candies)))
Add py solution for 575. Distribute Candies 575. Distribute Candies: https://leetcode.com/problems/distribute-candies/class Solution(object): def distributeCandies(self, candies): """ :type candies: List[int] :rtype: int """ return min(len(candies) / 2, len(set(candies)))
<commit_before><commit_msg>Add py solution for 575. Distribute Candies 575. Distribute Candies: https://leetcode.com/problems/distribute-candies/<commit_after>class Solution(object): def distributeCandies(self, candies): """ :type candies: List[int] :rtype: int """ return min(len(candies) / 2, len(set(candies)))
d34dcf1179e6e5c2b864627266ae1788d10142aa
Week01/Problem02/cyu_02.py
Week01/Problem02/cyu_02.py
#!/usr/bin/env python3 """This script is written by Chuanping Yu, on Jul 24, 2017, for the Assignment#1 in IDEaS workshop""" #Problem 2 FIB = [] F = 1 S = 0 FIB.append(F) FIB.append(F) while F <= 4000000: F = FIB[-1] + FIB[-2] FIB.append(F) if F%2 == 0 and F <= 4000000: S = S + F print(S)
Add Chuanping Yu's solutions to Problem02
Add Chuanping Yu's solutions to Problem02
Python
bsd-3-clause
GT-IDEaS/SkillsWorkshop2017,GT-IDEaS/SkillsWorkshop2017,GT-IDEaS/SkillsWorkshop2017
Add Chuanping Yu's solutions to Problem02
#!/usr/bin/env python3 """This script is written by Chuanping Yu, on Jul 24, 2017, for the Assignment#1 in IDEaS workshop""" #Problem 2 FIB = [] F = 1 S = 0 FIB.append(F) FIB.append(F) while F <= 4000000: F = FIB[-1] + FIB[-2] FIB.append(F) if F%2 == 0 and F <= 4000000: S = S + F print(S)
<commit_before><commit_msg>Add Chuanping Yu's solutions to Problem02<commit_after>
#!/usr/bin/env python3 """This script is written by Chuanping Yu, on Jul 24, 2017, for the Assignment#1 in IDEaS workshop""" #Problem 2 FIB = [] F = 1 S = 0 FIB.append(F) FIB.append(F) while F <= 4000000: F = FIB[-1] + FIB[-2] FIB.append(F) if F%2 == 0 and F <= 4000000: S = S + F print(S)
Add Chuanping Yu's solutions to Problem02#!/usr/bin/env python3 """This script is written by Chuanping Yu, on Jul 24, 2017, for the Assignment#1 in IDEaS workshop""" #Problem 2 FIB = [] F = 1 S = 0 FIB.append(F) FIB.append(F) while F <= 4000000: F = FIB[-1] + FIB[-2] FIB.append(F) if F%2 == 0 and F <= 4000000: S = S + F print(S)
<commit_before><commit_msg>Add Chuanping Yu's solutions to Problem02<commit_after>#!/usr/bin/env python3 """This script is written by Chuanping Yu, on Jul 24, 2017, for the Assignment#1 in IDEaS workshop""" #Problem 2 FIB = [] F = 1 S = 0 FIB.append(F) FIB.append(F) while F <= 4000000: F = FIB[-1] + FIB[-2] FIB.append(F) if F%2 == 0 and F <= 4000000: S = S + F print(S)
f59749db263291f481c4bdc9f6ede2f6de6cb6d4
create_input_files.py
create_input_files.py
import csv
import argparse
import itertools
from thermo_utils import csv_row_writer, read_csv_rows

# Read input/output arguments
parser = argparse.ArgumentParser()
parser.add_argument('-o','--output',required=True)
parser.add_argument('-d','--dof',required=True)
# parser.add_argument('-v','--version',required=False)
args = parser.parse_args()

# Placeholder: row generation is not implemented yet, so default to an
# empty list to keep the script runnable
outRows = []

# Write all rows to equations CSV file
csv_row_writer(args.output,outRows)
print('Output file: %s' % args.output)
Create foundation for input file generation (csv for connectivity table, etc.)
Create foundation for input file generation (csv for connectivity table, etc.)
Python
mit
ndebuhr/openfea,ndebuhr/openfea
Create foundation for input file generation (csv for connectivity table, etc.)
import csv
import argparse
import itertools
from thermo_utils import csv_row_writer, read_csv_rows

# Read input/output arguments
parser = argparse.ArgumentParser()
parser.add_argument('-o','--output',required=True)
parser.add_argument('-d','--dof',required=True)
# parser.add_argument('-v','--version',required=False)
args = parser.parse_args()

# Placeholder: row generation is not implemented yet, so default to an
# empty list to keep the script runnable
outRows = []

# Write all rows to equations CSV file
csv_row_writer(args.output,outRows)
print('Output file: %s' % args.output)
<commit_before><commit_msg>Create foundation for input file generation (csv for connectivity table, etc.)<commit_after>
import csv
import argparse
import itertools
from thermo_utils import csv_row_writer, read_csv_rows

# Read input/output arguments
parser = argparse.ArgumentParser()
parser.add_argument('-o','--output',required=True)
parser.add_argument('-d','--dof',required=True)
# parser.add_argument('-v','--version',required=False)
args = parser.parse_args()

# Placeholder: row generation is not implemented yet, so default to an
# empty list to keep the script runnable
outRows = []

# Write all rows to equations CSV file
csv_row_writer(args.output,outRows)
print('Output file: %s' % args.output)
Create foundation for input file generation (csv for connectivity table, etc.)
import csv
import argparse
import itertools
from thermo_utils import csv_row_writer, read_csv_rows

# Read input/output arguments
parser = argparse.ArgumentParser()
parser.add_argument('-o','--output',required=True)
parser.add_argument('-d','--dof',required=True)
# parser.add_argument('-v','--version',required=False)
args = parser.parse_args()

# Placeholder: row generation is not implemented yet, so default to an
# empty list to keep the script runnable
outRows = []

# Write all rows to equations CSV file
csv_row_writer(args.output,outRows)
print('Output file: %s' % args.output)
<commit_before><commit_msg>Create foundation for input file generation (csv for connectivity table, etc.)<commit_after>
import csv
import argparse
import itertools
from thermo_utils import csv_row_writer, read_csv_rows

# Read input/output arguments
parser = argparse.ArgumentParser()
parser.add_argument('-o','--output',required=True)
parser.add_argument('-d','--dof',required=True)
# parser.add_argument('-v','--version',required=False)
args = parser.parse_args()

# Placeholder: row generation is not implemented yet, so default to an
# empty list to keep the script runnable
outRows = []

# Write all rows to equations CSV file
csv_row_writer(args.output,outRows)
print('Output file: %s' % args.output)
a5d5dde8c523aa28452d790e7f0291c1cf52aacb
tests/external/py2/testfixture_test.py
tests/external/py2/testfixture_test.py
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Copyright (C) 2013 Numenta Inc. All rights reserved.
#
# The information and source code contained herein is the
# exclusive property of Numenta Inc. No part of this software
# may be used, reproduced, stored or distributed in any form,
# without explicit written authorization from Numenta Inc.
# ----------------------------------------------------------------------

"""
Unit tests for our dependencies in the pytest package; at the time of this
writing, we were using an unreleased version of pytest that added support for
the unittest setUpModule fixture and friends. Some of our tests rely on
setUpModule. Once, there was a conflict with pytest installation in our build
system, and an older version of pytest was installed that didn't support
setUpModule, which resulted in subtle side-effects in some of these tests.
"""

import unittest2 as unittest

g_setUpModuleCalled = False


def setUpModule():
    global g_setUpModuleCalled
    g_setUpModuleCalled = True


class TestPytest(unittest.TestCase):

    def testSetUpModuleCalled(self):
        self.assertTrue(g_setUpModuleCalled)


if __name__ == '__main__':
    unittest.main()
Make sure setUpModule is called by the test framework. We brought in pytest-2.4.0.dev8 for that specific functionality. However, one time we regressed, and our tests started misbehaving. So, this test is here to keep us honest.
Make sure setUpModule is called by the test framework. We brought in pytest-2.4.0.dev8 for that specific functionality. However, one time we regressed, and our tests started misbehaving. So, this test is here to keep us honest.
Python
agpl-3.0
breznak/nupic,breznak/nupic,breznak/nupic
Make sure setUpModule is called by the test framework. We brought in pytest-2.4.0.dev8 for that specific functionality. However, one time we regressed, and our tests started misbehaving. So, this test is here to keep us honest.
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Copyright (C) 2013 Numenta Inc. All rights reserved.
#
# The information and source code contained herein is the
# exclusive property of Numenta Inc. No part of this software
# may be used, reproduced, stored or distributed in any form,
# without explicit written authorization from Numenta Inc.
# ----------------------------------------------------------------------

"""
Unit tests for our dependencies in the pytest package; at the time of this
writing, we were using an unreleased version of pytest that added support for
the unittest setUpModule fixture and friends. Some of our tests rely on
setUpModule. Once, there was a conflict with pytest installation in our build
system, and an older version of pytest was installed that didn't support
setUpModule, which resulted in subtle side-effects in some of these tests.
"""

import unittest2 as unittest

g_setUpModuleCalled = False


def setUpModule():
    global g_setUpModuleCalled
    g_setUpModuleCalled = True


class TestPytest(unittest.TestCase):

    def testSetUpModuleCalled(self):
        self.assertTrue(g_setUpModuleCalled)


if __name__ == '__main__':
    unittest.main()
<commit_before><commit_msg>Make sure setUpModule is called by the test framework. We brought in pytest-2.4.0.dev8 for that specific functionality. However, one time we regressed, and our tests started misbehaving. So, this test is here to keep us honest.<commit_after>
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Copyright (C) 2013 Numenta Inc. All rights reserved.
#
# The information and source code contained herein is the
# exclusive property of Numenta Inc. No part of this software
# may be used, reproduced, stored or distributed in any form,
# without explicit written authorization from Numenta Inc.
# ----------------------------------------------------------------------

"""
Unit tests for our dependencies in the pytest package; at the time of this
writing, we were using an unreleased version of pytest that added support for
the unittest setUpModule fixture and friends. Some of our tests rely on
setUpModule. Once, there was a conflict with pytest installation in our build
system, and an older version of pytest was installed that didn't support
setUpModule, which resulted in subtle side-effects in some of these tests.
"""

import unittest2 as unittest

g_setUpModuleCalled = False


def setUpModule():
    global g_setUpModuleCalled
    g_setUpModuleCalled = True


class TestPytest(unittest.TestCase):

    def testSetUpModuleCalled(self):
        self.assertTrue(g_setUpModuleCalled)


if __name__ == '__main__':
    unittest.main()
Make sure setUpModule is called by the test framework. We brought in pytest-2.4.0.dev8 for that specific functionality. However, one time we regressed, and our tests started misbehaving. So, this test is here to keep us honest.
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Copyright (C) 2013 Numenta Inc. All rights reserved.
#
# The information and source code contained herein is the
# exclusive property of Numenta Inc. No part of this software
# may be used, reproduced, stored or distributed in any form,
# without explicit written authorization from Numenta Inc.
# ----------------------------------------------------------------------

"""
Unit tests for our dependencies in the pytest package; at the time of this
writing, we were using an unreleased version of pytest that added support for
the unittest setUpModule fixture and friends. Some of our tests rely on
setUpModule. Once, there was a conflict with pytest installation in our build
system, and an older version of pytest was installed that didn't support
setUpModule, which resulted in subtle side-effects in some of these tests.
"""

import unittest2 as unittest

g_setUpModuleCalled = False


def setUpModule():
    global g_setUpModuleCalled
    g_setUpModuleCalled = True


class TestPytest(unittest.TestCase):

    def testSetUpModuleCalled(self):
        self.assertTrue(g_setUpModuleCalled)


if __name__ == '__main__':
    unittest.main()
<commit_before><commit_msg>Make sure setUpModule is called by the test framework. We brought in pytest-2.4.0.dev8 for that specific functionality. However, one time we regressed, and our tests started misbehaving. So, this test is here to keep us honest.<commit_after>
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Copyright (C) 2013 Numenta Inc. All rights reserved.
#
# The information and source code contained herein is the
# exclusive property of Numenta Inc. No part of this software
# may be used, reproduced, stored or distributed in any form,
# without explicit written authorization from Numenta Inc.
# ----------------------------------------------------------------------

"""
Unit tests for our dependencies in the pytest package; at the time of this
writing, we were using an unreleased version of pytest that added support for
the unittest setUpModule fixture and friends. Some of our tests rely on
setUpModule. Once, there was a conflict with pytest installation in our build
system, and an older version of pytest was installed that didn't support
setUpModule, which resulted in subtle side-effects in some of these tests.
"""

import unittest2 as unittest

g_setUpModuleCalled = False


def setUpModule():
    global g_setUpModuleCalled
    g_setUpModuleCalled = True


class TestPytest(unittest.TestCase):

    def testSetUpModuleCalled(self):
        self.assertTrue(g_setUpModuleCalled)


if __name__ == '__main__':
    unittest.main()
f18fd5c4ad61adb56ac7524a006ce9977aa06a31
mailing/management/commands/send_queued_mails_worker.py
mailing/management/commands/send_queued_mails_worker.py
# -*- coding: utf-8 -*- # Copyright (c) 2016 Aladom SAS & Hosting Dvpt SAS from django.core.management.base import BaseCommand from ...utils import send_queued_mails import time class Command(BaseCommand): help = """Send mails with `status` Mail.STATUS_PENDING and having `scheduled_on` set on a past date. In daemon mode.""" def handle(self, *args, **options): while True: send_queued_mails() time.sleep(15)
Add worker to send queue mails
Add worker to send queue mails
Python
mit
Aladom/django-mailing,Aladom/django-mailing
Add worker to send queue mails
# -*- coding: utf-8 -*- # Copyright (c) 2016 Aladom SAS & Hosting Dvpt SAS from django.core.management.base import BaseCommand from ...utils import send_queued_mails import time class Command(BaseCommand): help = """Send mails with `status` Mail.STATUS_PENDING and having `scheduled_on` set on a past date. In daemon mode.""" def handle(self, *args, **options): while True: send_queued_mails() time.sleep(15)
<commit_before><commit_msg>Add worker to send queue mails<commit_after>
# -*- coding: utf-8 -*- # Copyright (c) 2016 Aladom SAS & Hosting Dvpt SAS from django.core.management.base import BaseCommand from ...utils import send_queued_mails import time class Command(BaseCommand): help = """Send mails with `status` Mail.STATUS_PENDING and having `scheduled_on` set on a past date. In daemon mode.""" def handle(self, *args, **options): while True: send_queued_mails() time.sleep(15)
Add worker to send queue mails# -*- coding: utf-8 -*- # Copyright (c) 2016 Aladom SAS & Hosting Dvpt SAS from django.core.management.base import BaseCommand from ...utils import send_queued_mails import time class Command(BaseCommand): help = """Send mails with `status` Mail.STATUS_PENDING and having `scheduled_on` set on a past date. In daemon mode.""" def handle(self, *args, **options): while True: send_queued_mails() time.sleep(15)
<commit_before><commit_msg>Add worker to send queue mails<commit_after># -*- coding: utf-8 -*- # Copyright (c) 2016 Aladom SAS & Hosting Dvpt SAS from django.core.management.base import BaseCommand from ...utils import send_queued_mails import time class Command(BaseCommand): help = """Send mails with `status` Mail.STATUS_PENDING and having `scheduled_on` set on a past date. In daemon mode.""" def handle(self, *args, **options): while True: send_queued_mails() time.sleep(15)
5a7081c5c46a050566477adda19d30844192ceb2
src/mmw/apps/user/migrations/0002_auth_tokens.py
src/mmw/apps/user/migrations/0002_auth_tokens.py
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations from django.conf import settings from django.contrib.auth.models import User from rest_framework.authtoken.models import Token def add_auth_tokens_to_users(apps, schema_editor): for user in User.objects.all(): Token.objects.create(user=user) class Migration(migrations.Migration): dependencies = [ ('authtoken', '0001_initial'), ('user', '0001_initial') ] operations = [ migrations.RunPython(add_auth_tokens_to_users) ]
Add migration to add authtokens for existing users
Add migration to add authtokens for existing users * NB: Depending on how many users there are, this migration could be pretty RAM intensive...
Python
apache-2.0
WikiWatershed/model-my-watershed,WikiWatershed/model-my-watershed,WikiWatershed/model-my-watershed,WikiWatershed/model-my-watershed,WikiWatershed/model-my-watershed
Add migration to add authtokens for existing users * NB: Depending on how many users there are, this migration could be pretty RAM intensive...
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations from django.conf import settings from django.contrib.auth.models import User from rest_framework.authtoken.models import Token def add_auth_tokens_to_users(apps, schema_editor): for user in User.objects.all(): Token.objects.create(user=user) class Migration(migrations.Migration): dependencies = [ ('authtoken', '0001_initial'), ('user', '0001_initial') ] operations = [ migrations.RunPython(add_auth_tokens_to_users) ]
<commit_before><commit_msg>Add migration to add authtokens for existing users * NB: Depending on how many users there are, this migration could be pretty RAM intensive...<commit_after>
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations from django.conf import settings from django.contrib.auth.models import User from rest_framework.authtoken.models import Token def add_auth_tokens_to_users(apps, schema_editor): for user in User.objects.all(): Token.objects.create(user=user) class Migration(migrations.Migration): dependencies = [ ('authtoken', '0001_initial'), ('user', '0001_initial') ] operations = [ migrations.RunPython(add_auth_tokens_to_users) ]
Add migration to add authtokens for existing users * NB: Depending on how many users there are, this migration could be pretty RAM intensive...# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations from django.conf import settings from django.contrib.auth.models import User from rest_framework.authtoken.models import Token def add_auth_tokens_to_users(apps, schema_editor): for user in User.objects.all(): Token.objects.create(user=user) class Migration(migrations.Migration): dependencies = [ ('authtoken', '0001_initial'), ('user', '0001_initial') ] operations = [ migrations.RunPython(add_auth_tokens_to_users) ]
<commit_before><commit_msg>Add migration to add authtokens for existing users * NB: Depending on how many users there are, this migration could be pretty RAM intensive...<commit_after># -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations from django.conf import settings from django.contrib.auth.models import User from rest_framework.authtoken.models import Token def add_auth_tokens_to_users(apps, schema_editor): for user in User.objects.all(): Token.objects.create(user=user) class Migration(migrations.Migration): dependencies = [ ('authtoken', '0001_initial'), ('user', '0001_initial') ] operations = [ migrations.RunPython(add_auth_tokens_to_users) ]
ab53993b708b3f9cf3b5762664fef58bae99ea20
recursive_remove_ltac.py
recursive_remove_ltac.py
import re

__all__ = ["recursively_remove_ltac"]

LTAC_REG = re.compile(r'^\s*(?:Local\s+|Global\s+)?Ltac\s+([^\s]+)', re.MULTILINE)

def recursively_remove_ltac(statements, exclude_n=3):
    """Removes any Ltac statement which is not used later in statements.

    Does not remove any code in the last exclude_n statements."""
    rtn = list(reversed(statements))[:exclude_n]
    # reversed() returns an iterator, so materialize it before slicing
    for statement in list(reversed(statements))[exclude_n:]:
        match = LTAC_REG.search(statement)
        if match:
            ltac_name = match.groups()[0]
            # search for the name of the tactic, by itself (raw string, so
            # \b is a word boundary rather than a backspace character)
            reg = re.compile(r'\b%s\b' % ltac_name, re.MULTILINE)
            if any(reg.search(other_statement) for other_statement in rtn):
                rtn.append(statement)
        else:
            rtn.append(statement)
    return list(reversed(rtn))
Add some code to auto-remove Ltac
Add some code to auto-remove Ltac
Python
mit
JasonGross/coq-tools,JasonGross/coq-tools
Add some code to auto-remove Ltac
import re

__all__ = ["recursively_remove_ltac"]

LTAC_REG = re.compile(r'^\s*(?:Local\s+|Global\s+)?Ltac\s+([^\s]+)', re.MULTILINE)

def recursively_remove_ltac(statements, exclude_n=3):
    """Removes any Ltac statement which is not used later in statements.

    Does not remove any code in the last exclude_n statements."""
    rtn = list(reversed(statements))[:exclude_n]
    # reversed() returns an iterator, so materialize it before slicing
    for statement in list(reversed(statements))[exclude_n:]:
        match = LTAC_REG.search(statement)
        if match:
            ltac_name = match.groups()[0]
            # search for the name of the tactic, by itself (raw string, so
            # \b is a word boundary rather than a backspace character)
            reg = re.compile(r'\b%s\b' % ltac_name, re.MULTILINE)
            if any(reg.search(other_statement) for other_statement in rtn):
                rtn.append(statement)
        else:
            rtn.append(statement)
    return list(reversed(rtn))
<commit_before><commit_msg>Add some code to auto-remove Ltac<commit_after>
import re

__all__ = ["recursively_remove_ltac"]

LTAC_REG = re.compile(r'^\s*(?:Local\s+|Global\s+)?Ltac\s+([^\s]+)', re.MULTILINE)

def recursively_remove_ltac(statements, exclude_n=3):
    """Removes any Ltac statement which is not used later in statements.

    Does not remove any code in the last exclude_n statements."""
    rtn = list(reversed(statements))[:exclude_n]
    # reversed() returns an iterator, so materialize it before slicing
    for statement in list(reversed(statements))[exclude_n:]:
        match = LTAC_REG.search(statement)
        if match:
            ltac_name = match.groups()[0]
            # search for the name of the tactic, by itself (raw string, so
            # \b is a word boundary rather than a backspace character)
            reg = re.compile(r'\b%s\b' % ltac_name, re.MULTILINE)
            if any(reg.search(other_statement) for other_statement in rtn):
                rtn.append(statement)
        else:
            rtn.append(statement)
    return list(reversed(rtn))
Add some code to auto-remove Ltac
import re

__all__ = ["recursively_remove_ltac"]

LTAC_REG = re.compile(r'^\s*(?:Local\s+|Global\s+)?Ltac\s+([^\s]+)', re.MULTILINE)

def recursively_remove_ltac(statements, exclude_n=3):
    """Removes any Ltac statement which is not used later in statements.

    Does not remove any code in the last exclude_n statements."""
    rtn = list(reversed(statements))[:exclude_n]
    # reversed() returns an iterator, so materialize it before slicing
    for statement in list(reversed(statements))[exclude_n:]:
        match = LTAC_REG.search(statement)
        if match:
            ltac_name = match.groups()[0]
            # search for the name of the tactic, by itself (raw string, so
            # \b is a word boundary rather than a backspace character)
            reg = re.compile(r'\b%s\b' % ltac_name, re.MULTILINE)
            if any(reg.search(other_statement) for other_statement in rtn):
                rtn.append(statement)
        else:
            rtn.append(statement)
    return list(reversed(rtn))
<commit_before><commit_msg>Add some code to auto-remove Ltac<commit_after>
import re

__all__ = ["recursively_remove_ltac"]

LTAC_REG = re.compile(r'^\s*(?:Local\s+|Global\s+)?Ltac\s+([^\s]+)', re.MULTILINE)

def recursively_remove_ltac(statements, exclude_n=3):
    """Removes any Ltac statement which is not used later in statements.

    Does not remove any code in the last exclude_n statements."""
    rtn = list(reversed(statements))[:exclude_n]
    # reversed() returns an iterator, so materialize it before slicing
    for statement in list(reversed(statements))[exclude_n:]:
        match = LTAC_REG.search(statement)
        if match:
            ltac_name = match.groups()[0]
            # search for the name of the tactic, by itself (raw string, so
            # \b is a word boundary rather than a backspace character)
            reg = re.compile(r'\b%s\b' % ltac_name, re.MULTILINE)
            if any(reg.search(other_statement) for other_statement in rtn):
                rtn.append(statement)
        else:
            rtn.append(statement)
    return list(reversed(rtn))
8d6ca433d33551cc1fe5c08edcf68ec65e5447b0
exercises/chapter_03/exercise_03_03/exercies_03_03.py
exercises/chapter_03/exercise_03_03/exercies_03_03.py
# 3-3 Your Own List
transportation = ["mountainbike", "teleportation", "Citroën DS3"]

print("A " + transportation[0] + " is good when exercising in the woods.\n")
print("The ultimate form of transportation must be " + transportation[1] + ".\n")
print("Should I buy a " + transportation[2] + "?\n")
Add solution to exercise 3.3.
Add solution to exercise 3.3.
Python
mit
HenrikSamuelsson/python-crash-course
Add solution to exercise 3.3.
# 3-3 Your Own List
transportation = ["mountainbike", "teleportation", "Citroën DS3"]

print("A " + transportation[0] + " is good when exercising in the woods.\n")
print("The ultimate form of transportation must be " + transportation[1] + ".\n")
print("Should I buy a " + transportation[2] + "?\n")
<commit_before><commit_msg>Add solution to exercise 3.3.<commit_after>
# 3-3 Your Own List
transportation = ["mountainbike", "teleportation", "Citroën DS3"]

print("A " + transportation[0] + " is good when exercising in the woods.\n")
print("The ultimate form of transportation must be " + transportation[1] + ".\n")
print("Should I buy a " + transportation[2] + "?\n")
Add solution to exercise 3.3.
# 3-3 Your Own List
transportation = ["mountainbike", "teleportation", "Citroën DS3"]

print("A " + transportation[0] + " is good when exercising in the woods.\n")
print("The ultimate form of transportation must be " + transportation[1] + ".\n")
print("Should I buy a " + transportation[2] + "?\n")
<commit_before><commit_msg>Add solution to exercise 3.3.<commit_after>
# 3-3 Your Own List
transportation = ["mountainbike", "teleportation", "Citroën DS3"]

print("A " + transportation[0] + " is good when exercising in the woods.\n")
print("The ultimate form of transportation must be " + transportation[1] + ".\n")
print("Should I buy a " + transportation[2] + "?\n")
c4b7bd5b74aaba210a05f946d59c98894b60b21f
tests/cli/test_pixel.py
tests/cli/test_pixel.py
""" Test ``yatsm line`` """ import os from click.testing import CliRunner import pytest from yatsm.cli.main import cli @pytest.mark.skipif("DISPLAY" not in os.environ, reason="requires display") def test_cli_pixel_pass_1(example_timeseries): """ Correctly run for one pixel """ runner = CliRunner() result = runner.invoke( cli, ['-v', 'pixel', '--band', '5', '--plot', 'TS', '--style', 'ggplot', example_timeseries['config'], '1', '1' ]) assert result.exit_code == 0
Add test for pixel CLI
Add test for pixel CLI
Python
mit
ceholden/yatsm,c11/yatsm,ceholden/yatsm,valpasq/yatsm,c11/yatsm,valpasq/yatsm
Add test for pixel CLI
""" Test ``yatsm line`` """ import os from click.testing import CliRunner import pytest from yatsm.cli.main import cli @pytest.mark.skipif("DISPLAY" not in os.environ, reason="requires display") def test_cli_pixel_pass_1(example_timeseries): """ Correctly run for one pixel """ runner = CliRunner() result = runner.invoke( cli, ['-v', 'pixel', '--band', '5', '--plot', 'TS', '--style', 'ggplot', example_timeseries['config'], '1', '1' ]) assert result.exit_code == 0
<commit_before><commit_msg>Add test for pixel CLI<commit_after>
""" Test ``yatsm line`` """ import os from click.testing import CliRunner import pytest from yatsm.cli.main import cli @pytest.mark.skipif("DISPLAY" not in os.environ, reason="requires display") def test_cli_pixel_pass_1(example_timeseries): """ Correctly run for one pixel """ runner = CliRunner() result = runner.invoke( cli, ['-v', 'pixel', '--band', '5', '--plot', 'TS', '--style', 'ggplot', example_timeseries['config'], '1', '1' ]) assert result.exit_code == 0
Add test for pixel CLI
""" Test ``yatsm pixel``
"""
import os

from click.testing import CliRunner
import pytest

from yatsm.cli.main import cli


@pytest.mark.skipif("DISPLAY" not in os.environ,
                    reason="requires display")
def test_cli_pixel_pass_1(example_timeseries):
    """ Correctly run for one pixel """
    runner = CliRunner()
    result = runner.invoke(
        cli,
        ['-v', 'pixel',
         '--band', '5',
         '--plot', 'TS',
         '--style', 'ggplot',
         example_timeseries['config'],
         '1', '1'
         ])
    assert result.exit_code == 0
<commit_before><commit_msg>Add test for pixel CLI<commit_after>
""" Test ``yatsm pixel``
"""
import os

from click.testing import CliRunner
import pytest

from yatsm.cli.main import cli


@pytest.mark.skipif("DISPLAY" not in os.environ,
                    reason="requires display")
def test_cli_pixel_pass_1(example_timeseries):
    """ Correctly run for one pixel """
    runner = CliRunner()
    result = runner.invoke(
        cli,
        ['-v', 'pixel',
         '--band', '5',
         '--plot', 'TS',
         '--style', 'ggplot',
         example_timeseries['config'],
         '1', '1'
         ])
    assert result.exit_code == 0
555cfbb827532c54598cecde01ef4e6e5e07714d
test/worker_external_task_test.py
test/worker_external_task_test.py
# Copyright (c) 2015
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.

import luigi
import unittest

from mock import Mock, patch

from helpers import with_config

mock_external_task = Mock(spec=luigi.ExternalTask)
mock_external_task.complete.side_effect = [False, False, True]


class TestTask(luigi.Task):
    """
    Requires a single file dependency
    """
    def __init__(self):
        super(TestTask, self).__init__()
        self.has_run = False

    def requires(self):
        return mock_external_task

    def output(self):
        mock_target = Mock(spec=luigi.Target)
        # the return is False so that this task will be scheduled
        mock_target.exists.return_value = False
        # return the target so the scheduler can actually check it
        return mock_target

    def run(self):
        self.has_run = True


class WorkerExternalTaskTest(unittest.TestCase):

    @with_config({'core': {'retry-external-tasks': 'true'}})
    def test_external_dependency_satisfied_later(self):
        """
        Test that an external dependency that is not `complete` when luigi is invoked, but \
        becomes `complete` while the workflow is executing is re-evaluated.
        """
        assert luigi.configuration.get_config().getboolean(
            'core', 'retry-external-tasks', False) == True

        test_task = TestTask()
        luigi.build([test_task], local_scheduler=True)

        assert test_task.has_run == True
        assert mock_external_task.complete.call_count == 3


if __name__ == '__main__':
    unittest.main()
Create a test for re-evaluating external tasks while a workflow is running.
Create a test for re-evaluating external tasks while a workflow is running.
Python
apache-2.0
dlstadther/luigi,ViaSat/luigi,adaitche/luigi,humanlongevity/luigi,PeteW/luigi,Houzz/luigi,ZhenxingWu/luigi,springcoil/luigi,ContextLogic/luigi,ivannotes/luigi,percyfal/luigi,belevtsoff/luigi,Yoone/luigi,meyerson/luigi,tuulos/luigi,dkroy/luigi,Houzz/luigi,Dawny33/luigi,theoryno3/luigi,JackDanger/luigi,Magnetic/luigi,bowlofstew/luigi,Tarrasch/luigi,h3biomed/luigi,stephenpascoe/luigi,gpoulin/luigi,dylanjbarth/luigi,slvnperron/luigi,SeedScientific/luigi,edx/luigi,graingert/luigi,jamesmcm/luigi,JackDanger/luigi,casey-green/luigi,laserson/luigi,dlstadther/luigi,mfcabrera/luigi,ZhenxingWu/luigi,wakamori/luigi,bowlofstew/luigi,glenndmello/luigi,samuell/luigi,casey-green/luigi,huiyi1990/luigi,h3biomed/luigi,sahitya-pavurala/luigi,torypages/luigi,samuell/luigi,pkexcellent/luigi,realgo/luigi,rizzatti/luigi,percyfal/luigi,17zuoye/luigi,dylanjbarth/luigi,h3biomed/luigi,ehdr/luigi,rizzatti/luigi,walkers-mv/luigi,pkexcellent/luigi,leafjungle/luigi,torypages/luigi,drincruz/luigi,penelopy/luigi,moandcompany/luigi,harveyxia/luigi,dhruvg/luigi,theoryno3/luigi,republic-analytics/luigi,fw1121/luigi,penelopy/luigi,hellais/luigi,foursquare/luigi,hadesbox/luigi,joeshaw/luigi,kalaidin/luigi,pkexcellent/luigi,javrasya/luigi,linsomniac/luigi,DomainGroupOSS/luigi,samuell/luigi,jw0201/luigi,fabriziodemaria/luigi,pkexcellent/luigi,edx/luigi,jamesmcm/luigi,dlstadther/luigi,sahitya-pavurala/luigi,ContextLogic/luigi,JackDanger/luigi,aeron15/luigi,JackDanger/luigi,altaf-ali/luigi,mbruggmann/luigi,fabriziodemaria/luigi,mbruggmann/luigi,adaitche/luigi,ehdr/luigi,Wattpad/luigi,hellais/luigi,wakamori/luigi,kalaidin/luigi,dstandish/luigi,realgo/luigi,Magnetic/luigi,stroykova/luigi,joeshaw/luigi,republic-analytics/luigi,vine/luigi,soxofaan/luigi,walkers-mv/luigi,kalaidin/luigi,tuulos/luigi,stroykova/luigi,dhruvg/luigi,lichia/luigi,gpoulin/luigi,neilisaac/luigi,laserson/luigi,Yoone/luigi,17zuoye/luigi,mfcabrera/luigi,drincruz/luigi,anyman/luigi,realgo/luigi,ivannotes/luigi,ivannotes/luigi,hellais/luigi,alkemics/luigi,linsomniac/luigi,LamCiuLoeng/luigi,ChrisBeaumont/luigi,oldpa/luigi,ThQ/luigi,alkemics/luigi,javrasya/luigi,fabriziodemaria/luigi,thejens/luigi,linearregression/luigi,dstandish/luigi,bmaggard/luigi,stroykova/luigi,stroykova/luigi,harveyxia/luigi,ThQ/luigi,rayrrr/luigi,moritzschaefer/luigi,SeedScientific/luigi,stephenpascoe/luigi,dkroy/luigi,graingert/luigi,hadesbox/luigi,qpxu007/luigi,aeron15/luigi,Dawny33/luigi,SkyTruth/luigi,foursquare/luigi,dkroy/luigi,stephenpascoe/luigi,moandcompany/luigi,vine/luigi,rizzatti/luigi,walkers-mv/luigi,Magnetic/luigi,ThQ/luigi,spotify/luigi,samepage-labs/luigi,LamCiuLoeng/luigi,dhruvg/luigi,PeteW/luigi,oldpa/luigi,soxofaan/luigi,bowlofstew/luigi,ChrisBeaumont/luigi,jamesmcm/luigi,samuell/luigi,altaf-ali/luigi,wakamori/luigi,sahitya-pavurala/luigi,qpxu007/luigi,h3biomed/luigi,lungetech/luigi,DomainGroupOSS/luigi,fw1121/luigi,lungetech/luigi,moritzschaefer/luigi,17zuoye/luigi,linsomniac/luigi,belevtsoff/luigi,dylanjbarth/luigi,dstandish/luigi,Magnetic/luigi,qpxu007/luigi,drincruz/luigi,joeshaw/luigi,PeteW/luigi,lichia/luigi,ViaSat/luigi,SkyTruth/luigi,casey-green/luigi,samepage-labs/luigi,casey-green/luigi,adaitche/luigi,hadesbox/luigi,moandcompany/luigi,Dawny33/luigi,Yoone/luigi,mbruggmann/luigi,linsomniac/luigi,ViaSat/luigi,lungetech/luigi,riga/luigi,springcoil/luigi,upworthy/luigi,penelopy/luigi,hadesbox/luigi,vine/luigi,PeteW/luigi,kevhill/luigi,riga/luigi,samepage-labs/luigi,moritzschaefer/luigi,dylanjbarth/luigi,republic-analytics/luigi,anyman/luigi,edx/luigi,Tarrasch/luigi,glenndm
ello/luigi,ehdr/luigi,sahitya-pavurala/luigi,aeron15/luigi,linearregression/luigi,SeedScientific/luigi,dkroy/luigi,stephenpascoe/luigi,fw1121/luigi,fw1121/luigi,ivannotes/luigi,graingert/luigi,mbruggmann/luigi,SkyTruth/luigi,alkemics/luigi,soxofaan/luigi,linearregression/luigi,neilisaac/luigi,gpoulin/luigi,ChrisBeaumont/luigi,walkers-mv/luigi,aeron15/luigi,alkemics/luigi,springcoil/luigi,theoryno3/luigi,oldpa/luigi,ThQ/luigi,riga/luigi,harveyxia/luigi,realgo/luigi,tuulos/luigi,meyerson/luigi,dstandish/luigi,wakamori/luigi,qpxu007/luigi,Tarrasch/luigi,slvnperron/luigi,bmaggard/luigi,bmaggard/luigi,springcoil/luigi,harveyxia/luigi,dhruvg/luigi,soxofaan/luigi,ZhenxingWu/luigi,spotify/luigi,kevhill/luigi,vine/luigi,rayrrr/luigi,foursquare/luigi,slvnperron/luigi,LamCiuLoeng/luigi,samepage-labs/luigi,altaf-ali/luigi,torypages/luigi,Yoone/luigi,linearregression/luigi,rayrrr/luigi,foursquare/luigi,ContextLogic/luigi,Houzz/luigi,upworthy/luigi,lungetech/luigi,bowlofstew/luigi,drincruz/luigi,humanlongevity/luigi,SkyTruth/luigi,rizzatti/luigi,altaf-ali/luigi,glenndmello/luigi,lichia/luigi,Wattpad/luigi,SeedScientific/luigi,jw0201/luigi,penelopy/luigi,javrasya/luigi,upworthy/luigi,laserson/luigi,DomainGroupOSS/luigi,DomainGroupOSS/luigi,mfcabrera/luigi,anyman/luigi,thejens/luigi,belevtsoff/luigi,huiyi1990/luigi,humanlongevity/luigi,neilisaac/luigi,Dawny33/luigi,riga/luigi,jw0201/luigi,anyman/luigi,jamesmcm/luigi,Houzz/luigi,fabriziodemaria/luigi,torypages/luigi,percyfal/luigi,oldpa/luigi,humanlongevity/luigi,republic-analytics/luigi,ContextLogic/luigi,kevhill/luigi,neilisaac/luigi,thejens/luigi,tuulos/luigi,joeshaw/luigi,meyerson/luigi,ViaSat/luigi,glenndmello/luigi,ehdr/luigi,upworthy/luigi,graingert/luigi,ZhenxingWu/luigi,kevhill/luigi,huiyi1990/luigi,17zuoye/luigi,edx/luigi,bmaggard/luigi,moandcompany/luigi,leafjungle/luigi,javrasya/luigi,gpoulin/luigi,laserson/luigi,Wattpad/luigi,thejens/luigi,huiyi1990/luigi,Tarrasch/luigi,slvnperron/luigi,moritzschaefer/luigi,theoryno3/luigi,meyerson/luigi,percyfal/luigi,jw0201/luigi,lichia/luigi,belevtsoff/luigi,hellais/luigi,LamCiuLoeng/luigi,spotify/luigi,dlstadther/luigi,adaitche/luigi,kalaidin/luigi,leafjungle/luigi,mfcabrera/luigi,rayrrr/luigi,ChrisBeaumont/luigi,spotify/luigi,leafjungle/luigi
68e056459dd3818ebb0c5dbdc8b4f1089bec9f07
tests/selection_test.py
tests/selection_test.py
import os

import pytest
import yaml

from photoshell.selection import Selection


@pytest.fixture
def sidecar(tmpdir):
    tmpdir.join("test.sidecar").write(yaml.dump({
        'developed_path': os.path.join(tmpdir.strpath, "test.jpeg"),
        'datetime': '2014-10-10 00:00'
    }, default_flow_style=False))

    return os.path.join(tmpdir.strpath, "test.sidecar")


@pytest.fixture
def empty_selection():
    s = Selection('', '')
    return s


@pytest.fixture
def selection(empty_selection):
    empty_selection.images.append('image')
    empty_selection.photos.append('image')
    return empty_selection


def test_current_default_selection(selection):
    assert selection.current()


def test_current_is_none_if_selection_empty(empty_selection):
    assert empty_selection.current() is None


def test_current_photo_default_selection(selection):
    assert selection.current_photo()


def test_current_photo_is_none_if_selection_empty(empty_selection):
    assert empty_selection.current_photo() is None


def test_next_prev_does_nothing_single_photo(selection):
    assert selection.current() == selection.next()
    assert selection.current() == selection.prev()


def test_next_prev_wrap_around(selection):
    selection.photos.append('photo2')
    selection.images.append('image2')

    assert selection.next() == 'image2'
    assert selection.next() == 'image'
    assert selection.prev() == 'image2'
    assert selection.prev() == 'image'
Add a few behavior tests for selection
Add a few behavior tests for selection
Python
mit
photoshell/photoshell,SamWhited/photoshell,campaul/photoshell
ff2c4b68a5eace4451eeef4fd6ca84d37435c556
project/editorial/migrations/0087_auto_20180226_1409.py
project/editorial/migrations/0087_auto_20180226_1409.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-02-26 22:09
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('editorial', '0086_auto_20180102_2145'),
    ]

    operations = [
        migrations.AddField(
            model_name='privatemessage',
            name='network_invitation',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='editorial.Network'),
        ),
        migrations.AddField(
            model_name='privatemessage',
            name='network_invitation_response',
            field=models.NullBooleanField(),
        ),
    ]
Add fields to privatemessage for network invitations.
Add fields to privatemessage for network invitations.
Python
mit
ProjectFacet/facet,ProjectFacet/facet,ProjectFacet/facet,ProjectFacet/facet,ProjectFacet/facet
50742b6e629e6f54a9f3784a3c1495eb9d82c238
brightway_projects/processing/processed_package.py
brightway_projects/processing/processed_package.py
from ..errors import InconsistentFields, NonUnique


def greedy_set_cover(data, exclude=None):
    """Find unique set of attributes that uniquely identifies each element in ``data``.

    Feature selection is a well known problem, and is analogous to the
    `set cover problem <https://en.wikipedia.org/wiki/Set_cover_problem>`__,
    for which there is a `well known heuristic
    <https://en.wikipedia.org/wiki/Set_cover_problem#Greedy_algorithm>`__.

    Args:
        data (iterable): List of dictionaries with the same fields.
        exclude (iterable): Fields to exclude during search for uniqueness. ``id`` is Always excluded.

    Returns:
        Set of attributes (strings)

    Raises:
        NonUnique: The given fields are not enough to ensure uniqueness
    """
    if exclude is None:
        exclude = {"id"}
    else:
        exclude = set(exclude)
        exclude.add("id")


def as_unique_attributes(data, exclude=None, include=None):
    """Format ``data`` as unique set of attributes and values for use in ``create_processed_datapackage``.

    Note: Each element in ``data`` must have the attributes ``id``.

        data = [
            {},
        ]

    Args:
        data (iterable): List of dictionaries with the same fields.
        exclude (iterable): Fields to exclude during search for uniqueness. ``id`` is Always excluded.
        include (iterable): Fields to include when returning, even if not unique

    Returns:
        (list of field names as strings, dictionary of data ids to values for given field names)

    Raises:
        InconsistentFields: Not all features provides all fields.
    """
    include = set([]) if include is None else set(include)
    fields = greedy_set_cover(data, exclude)

    if len({set(obj.keys()) for obj in data}) > 1:
        raise InconsistentFields

    def formatter(obj, fields, include):
        return {
            key: value
            for key, value in obj.items()
            if (key in fields or key in include or key == "id")
        }

    return (fields, [formatter(obj, fields, include) for obj in data])


def create_processed_datapackage(
    array,
    rows,
    cols,
    filepath=None,
    id_=None,
    metadata=None,
    replace=True,
    compress=True,
    in_memory=False,
):
    """Create a datapackage with numpy structured arrays and metadata.

    Exchanging large, dense datasets like MRIO tables is not efficient if each
    exchange must be listed separately. Instead, we would prefer to exchange
    the processed arrays used to build the matrices directly.

    However, these arrays use integer indices which are not consistent across
    computers or even Brightway projects. This function includes additional
    metadata to solve this problem, mapping these integer ids to enough
    attributes to uniquely identify each feature. Separate metadata files are
    included for each column in the array (i.e. the row and column indices).

    Args:
        array (numpy structured array): The numeric data. Usually generated via ``create_numpy_structured_array``.
        rows (dict): Dictionary mapping integer indices in ``row_value`` to a dictionary of attributes.
        cols (dict): Dictionary mapping integer indices in ``col_value`` to a dictionary of attributes.

    Returns:
        Something :)
    """
    pass
Add start of processed package
Add start of processed package
Python
bsd-3-clause
brightway-lca/brightway
842869063ead9b2e6a1e22d11c9901072f2319aa
docs/generate_spec.py
docs/generate_spec.py
# -*- encoding: utf-8 -*-
#
# This script is to be used to automagically generate the recurring data types
# documentation based on the API specification.
#
# to run it just do:
#
# $ python generate_spec.py > outputfile.md
#
# :authors: Arturo Filastò
# :licence: see LICENSE

import inspect

from globaleaks.rest.messages import base


def create_spec(spec):
    doc = ""
    for k, v in spec.items():
        doc += " %s: %s\n" % (k, v)
    return doc


def create_class_doc(klass):
    doc = "## %s\n" % klass.__name__
    if klass.__doc__:
        docstring = [line.strip() for line in klass.__doc__.split("\n")]
        doc += '\n'.join(docstring)
    doc += "\n"
    doc += create_spec(klass.specification)
    return doc


for name, klass in inspect.getmembers(base, inspect.isclass):
    if issubclass(klass, base.GLTypes) and klass != base.GLTypes:
        print create_class_doc(klass)
Add script to self generate docs for recurring data types
Add script to self generate docs for recurring data types
Python
agpl-3.0
vodkina/GlobaLeaks,vodkina/GlobaLeaks,vodkina/GlobaLeaks,vodkina/GlobaLeaks
4694f6bf2405d0aae5e6c3fc393f8a839e8aac07
tests/test_converter.py
tests/test_converter.py
# coding: utf-8
# Copyright (c) 2010-2012 Raphaël Barrois

import unittest

from confmgr import converter


class LineTestCase(unittest.TestCase):
    def test_repr(self):
        self.assertEqual("Line('foo', 'bar')", repr(converter.Line('foo', 'bar')))

    def test_equality(self):
        self.assertEqual(
            converter.Line('foo', 'bar'),
            converter.Line('foo', 'bar'))
        self.assertNotEqual(
            converter.Line('foo', 'bar'),
            converter.Line('foo', 'baz'))
        self.assertNotEqual(
            converter.Line('foo', 'bar'),
            converter.Line('fo', 'bar'))

    def test_compare_to_other(self):
        self.assertNotEqual('foo', converter.Line('foo', 'bar'))
        self.assertNotEqual(converter.Line('foo', 'bar'), 'foo')

    def test_hash(self):
        s = set()
        for _i in range(5):
            s.add(converter.Line('foo', 'bar'))
        self.assertEqual(1, len(s))
        self.assertEqual(set([converter.Line('foo', 'bar')]), s)

    def test_fill_original_normal(self):
        l = converter.Line('foo', None)
        self.assertEqual(None, l.original)
        l.fill_original()
        self.assertEqual('foo', l.original)

    def test_fill_original_comment(self):
        l = converter.Line('#@foo', None)
        self.assertEqual(None, l.original)
        l.fill_original()
        self.assertEqual('#@@foo', l.original)

        l = converter.Line('"@foo', None)
        self.assertEqual(None, l.original)
        l.fill_original()
        self.assertEqual('"@@foo', l.original)

        l = converter.Line('!@foo', None)
        self.assertEqual(None, l.original)
        l.fill_original()
        self.assertEqual('!@@foo', l.original)


class GeneratorTestCase(unittest.TestCase):
    def test_no_special(self):
        txt = [
            'foo',
            'bar',
            'baz',
        ]
        g = converter.Generator(txt, categories=[], fs=None)
        expected = [converter.Line(s, s) for s in txt]
        out = list(g)
        self.assertItemsEqual(expected, out)


if __name__ == '__main__':
    unittest.main()
Add tests for converter.Line and converter.Generator.
Add tests for converter.Line and converter.Generator. Signed-off-by: Raphaël Barrois <8eb3b37a023209373fcd61a2fdc08256a14fb19c@polytechnique.org>
Python
bsd-2-clause
rbarrois/uconf
00bfd02f921a42d4f288254d1accb7546d8df2c5
check_hbase.py
check_hbase.py
#!/usr/bin/env python
# vim: ts=4:sw=4:et:sts=4:ai:tw=80
from utils import krb_wrapper,StringContext
import os
import argparse
import nagiosplugin
import re
import subprocess

html_auth = None

def parser():
    version="0.1"
    parser = argparse.ArgumentParser(description="Checks datanode")
    parser.add_argument('-p', '--principal', action='store', dest='principal')
    parser.add_argument('-s', '--secure',action='store_true')
    parser.add_argument('-k', '--keytab',action='store')
    parser.add_argument('--cache_file',action='store', default='/tmp/nagios.krb')
    parser.add_argument('-v','--version', action='version', version='%(prog)s ' + version)
    args = parser.parse_args()
    if args.secure and (args.principal is None or args.keytab is None):
        parser.error("if secure cluster, both of --principal and --keytab required")
    return args

class Hbase(nagiosplugin.Resource):

    def __init__(self):
        p = subprocess.Popen(['hbase','hbck'],stdout=subprocess.PIPE,stderr=None)
        output,err = p.communicate()
        self.status=None
        if err is None:
            for line in output.splitlines():
                m = re.match('^\s*Status\s*:\s*(?P<STATUS>\w+)\s*',line)
                if m:
                    self.status=m.group('STATUS')
        else:
            return 2,"Critical: "+err

    def probe(self):
        yield nagiosplugin.Metric('status',self.status,context="status")

@nagiosplugin.guarded
def main():
    args = parser()
    if args.secure:
        auth_token = krb_wrapper(args.principal,args.keytab,args.cache_file)
        os.environ['KRB5CCNAME'] = args.cache_file
    check = nagiosplugin.Check(Hbase(),
        StringContext('status', 'OK'))
    check.main()
    if auth_token:
        auth_token.destroy()

if __name__ == '__main__':
    main()
Add hbase consistency check throw hbase hbck command, easily can be added some checks like backups servers or region servers
Add hbase consistency check throw hbase hbck command, easily can be added some checks like backups servers or region servers
Python
apache-2.0
keedio/nagios-hadoop,keedio/nagios-hadoop
bcb6c0780aacf77069a08f8d5b44d295881d9b9d
swapOddEvenChar.py
swapOddEvenChar.py
#Python3
word = list(input().strip())
for i in range(0, len(word), 2):
    if i + 1 >= len(word):
        break
    word[i], word[i+1] = word[i+1], word[i]
print(''.join(word))
Create solution to swap characters at odd and even positions
Create solution to swap characters at odd and even positions
Python
mit
laxmena/CodeKata,laxmena/CodeKata
7bde47d48f4e80b4449049a8b05767b30eb2c516
utilities/export-csv.py
utilities/export-csv.py
#!/usr/bin/python

import os
import csv
import sys

sys.path.append('../pynipap')
import pynipap


class Export:
    def __init__(self, xmlrpc_uri):
        self.xmlrpc_uri = xmlrpc_uri

    def write(self, output_file, schema_name):
        """ """
        f = open(output_file, "w+")
        writer = csv.writer(f, quoting=csv.QUOTE_MINIMAL)

        pynipap.xmlrpc_uri = self.xmlrpc_uri
        ao = pynipap.AuthOptions({'authoritative_source': 'nipap'})

        import socket, xmlrpclib
        try:
            schema = pynipap.Schema.list({'name': schema_name})[0]
        except socket.error:
            print >> sys.stderr, "Connection refused, please check hostname & port"
            sys.exit(1)
        except xmlrpclib.ProtocolError:
            print >> sys.stderr, "Authentication failed, please check your username / password"
            sys.exit(1)
        except IndexError:
            print >> sys.stderr, "Non existing schema (", schema_name, ")"
            sys.exit(1)

        res = pynipap.Prefix.smart_search(schema, ' ', {'include_all_parents': True})
        for p in res['result']:
            writer.writerow([p.display_prefix, p.type, p.node, p.order_id, p.description])


if __name__ == '__main__':
    import optparse
    parser = optparse.OptionParser()
    parser.add_option('--username', default='', help="Username")
    parser.add_option('--password', default='', help="Password")
    parser.add_option('--host', help="NIPAP backend host")
    parser.add_option('--port', default=1337, help="NIPAP backend port")
    parser.add_option('--schema', help="Schema name")
    parser.add_option('--file', help="Output file")
    (options, args) = parser.parse_args()

    if options.host is None:
        print >> sys.stderr, "Please specify the NIPAP backend host to work with"
        sys.exit(1)
    if options.schema is None:
        print >> sys.stderr, "Please specify a schema to export"
        sys.exit(1)
    if options.file is None:
        print >> sys.stderr, "Please specify an output file"
        sys.exit(1)

    auth_uri = ''
    if options.username:
        auth_uri = "%s:%s@" % (options.username, options.password)

    xmlrpc_uri = "http://%(auth_uri)s%(host)s:%(port)s" % {
        'auth_uri': auth_uri,
        'host': options.host,
        'port': options.port
    }

    wr = Export(xmlrpc_uri)
    wr.write(options.file, options.schema)
Add stupid CSV export example
Add stupid CSV export example

Fixes #53
Python
mit
SoundGoof/NIPAP,plajjan/NIPAP,SpriteLink/NIPAP,bbaja42/NIPAP,ettrig/NIPAP,bbaja42/NIPAP,fredsod/NIPAP,SoundGoof/NIPAP,fredsod/NIPAP,ettrig/NIPAP,SpriteLink/NIPAP,ettrig/NIPAP,SoundGoof/NIPAP,bbaja42/NIPAP,fredsod/NIPAP,fredsod/NIPAP,fredsod/NIPAP,garberg/NIPAP,bbaja42/NIPAP,garberg/NIPAP,SpriteLink/NIPAP,plajjan/NIPAP,garberg/NIPAP,SpriteLink/NIPAP,SoundGoof/NIPAP,plajjan/NIPAP,bbaja42/NIPAP,SoundGoof/NIPAP,garberg/NIPAP,plajjan/NIPAP,SpriteLink/NIPAP,garberg/NIPAP,ettrig/NIPAP,ettrig/NIPAP,fredsod/NIPAP,bbaja42/NIPAP,ettrig/NIPAP,SpriteLink/NIPAP,plajjan/NIPAP,garberg/NIPAP,SoundGoof/NIPAP,plajjan/NIPAP
e9d87a087a0f0102157d7c718a048c72f655c54a
smore/ext/marshmallow.py
smore/ext/marshmallow.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import

from marshmallow.compat import iteritems
from marshmallow import class_registry

from smore import swagger
from smore.apispec.core import Path
from smore.apispec.utils import load_operations_from_docstring


def schema_definition_helper(name, schema, **kwargs):
    """Definition helper that allows using a marshmallow
    :class:`Schema <marshmallow.Schema>` to provide Swagger metadata.

    :param type schema: A marshmallow Schema class.
    """
    return swagger.schema2jsonschema(schema)


def schema_path_helper(view, **kwargs):
    doc_operations = load_operations_from_docstring(view.__doc__)
    if not doc_operations:
        return
    operations = doc_operations.copy()
    for method, config in iteritems(doc_operations):
        if 'schema' in config:
            schema_cls = class_registry.get_class(config['schema'])
            if not operations[method].get('responses'):
                operations[method]['responses'] = {}
            operations[method]['responses']['200'] = swagger.schema2jsonschema(schema_cls)
    return Path(operations=operations)


def setup(spec):
    spec.register_definition_helper(schema_definition_helper)
    spec.register_path_helper(schema_path_helper)
# -*- coding: utf-8 -*-
from __future__ import absolute_import

from marshmallow.compat import iteritems
from marshmallow import class_registry

from smore import swagger
from smore.apispec.core import Path
from smore.apispec.utils import load_operations_from_docstring


def schema_definition_helper(spec, name, schema, **kwargs):
    """Definition helper that allows using a marshmallow
    :class:`Schema <marshmallow.Schema>` to provide Swagger metadata.

    :param type schema: A marshmallow Schema class.
    """
    # Store registered refs, keyed by Schema class
    plug = spec.plugins['smore.ext.marshmallow']
    if 'refs' not in plug:
        plug['refs'] = {}
    plug['refs'][schema] = name
    return swagger.schema2jsonschema(schema)


def schema_path_helper(view, **kwargs):
    doc_operations = load_operations_from_docstring(view.__doc__)
    if not doc_operations:
        return
    operations = doc_operations.copy()
    for method, config in iteritems(doc_operations):
        if 'schema' in config:
            schema_cls = class_registry.get_class(config['schema'])
            if not operations[method].get('responses'):
                operations[method]['responses'] = {}
            operations[method]['responses']['200'] = swagger.schema2jsonschema(schema_cls)
    return Path(operations=operations)


def setup(spec):
    spec.register_definition_helper(schema_definition_helper)
    spec.register_path_helper(schema_path_helper)
Store registered refs as plugin metadata
Store registered refs as plugin metadata
Python
mit
marshmallow-code/apispec,Nobatek/apispec,marshmallow-code/smore,gorgias/apispec,jmcarp/smore
7b09a44c7df8b2aa28e45c5382626c2f8c4bf61b
bin/run_redpen.py
bin/run_redpen.py
#!/usr/bin/python

import os
import re
import shutil
from optparse import OptionParser


def main():
    parser = OptionParser(usage="usage: %prog [options]", version="%prog 1.0")
    parser.add_option("-i", "--inputdir", action="store", dest="indir", default="source",
                      help="specify the input directory containing rst files.")
    parser.add_option("-o", "--outdir", action="store", dest="outdir", default="build/mdfiles",
                      help="specify the output directory of markdownized files.")
    (options, args) = parser.parse_args()

    indir = options.indir
    outdir = options.outdir
    if os.path.exists(outdir):
        shutil.rmtree(outdir)
    os.makedirs(outdir)

    for root, dirs, files in os.walk(indir):
        for file in files:
            mdfile_pat = re.compile(r".*\.rst")
            if not mdfile_pat.search(file):
                continue
            fileroot, ext = os.path.splitext(file)
            # Read reStructuredText and write Markdown with pandoc
            cmdline = "pandoc -r rst -w markdown %s -o %s" % (os.path.join(root, file), outdir + "/" + fileroot + ".md")
            os.system(cmdline)

if __name__ == '__main__':
    main()
Add a script to convert from rst style files to markdown
Add a script to convert from rst style files to markdown
Python
apache-2.0
kenhys/redpen-doc,kenhys/redpen-doc
419ca7099bf47ed00ede73d9de14690a643a3943
test/test_integration.py
test/test_integration.py
"""Integrations tests for EcoData Retriever""" import os import shutil from retriever import HOME_DIR simple_csv = {'name': 'simple_csv', 'raw_data': "a,b,c\n1,2,3\n4,5,6", 'script': "shortname: simple_csv\ntable: simple_csv, http://example.com/simple_csv.txt", 'expect_out': "a,b,c\n1,2,3\n4,5,6"} crosstab = {'name': 'crosstab', 'raw_data': "a,b,c1,c2\n1,1,1.1,1.2\n1,2,2.1,2.2", 'script': "shortname: crosstab\ntable: crosstab, http://example.com/crosstab.txt\n*column: a, int\n*column: b, int\n*ct_column: c\n*column: val, ct-double\n*ct_names: c1,c2", 'expect_out': "a,b,c,val\n1,1,c1,1.1\n1,1,c2,1.2\n1,2,c1,2.1\n1,2,c2,2.2"} tests = [simple_csv, crosstab]
Add data for integration testing of basic csv and crosstab formats
Add data for integration testing of basic csv and crosstab formats
Python
mit
henrykironde/deletedret,goelakash/retriever,henrykironde/deletedret,goelakash/retriever
2af53a39096c0eab9d95c304c802281fe3c580ae
tests/pickle_test.py
tests/pickle_test.py
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for interoperability between JAX and pickling libraries."""

import pickle
import unittest

from absl.testing import absltest

try:
  import cloudpickle
except ImportError:
  cloudpickle = None

import jax
from jax.config import config
from jax import test_util as jtu

config.parse_flags_with_absl()


class CloudpickleTest(jtu.JaxTestCase):

  @unittest.skipIf(cloudpickle is None, "Requires cloudpickle")
  @unittest.skipIf(jax.lib._xla_extension_version < 31,
                   "Requires jaxlib 0.1.71")
  def testPickleOfJittedFunctions(self):

    @jax.jit
    def f(x, y):
      return x * y

    @jax.jit
    def g(z):
      return f(z, z + 77)  # noqa: F821

    expected = g(32)
    s = cloudpickle.dumps(g)
    del f, g

    g_unpickled = pickle.loads(s)
    actual = g_unpickled(32)
    self.assertEqual(expected, actual)


if __name__ == "__main__":
  absltest.main(testLoader=jtu.JaxTestLoader())
Make JAX CompiledFunction objects pickle-able.
[XLA:Python] Make JAX CompiledFunction objects pickle-able.

PiperOrigin-RevId: 388814246
Python
apache-2.0
google/jax,google/jax,google/jax,google/jax
6f00204ae2603063eafbd74a369e9da0864854ca
poll/management/commands/create_new_violence_polls.py
poll/management/commands/create_new_violence_polls.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand
import traceback
from poll.models import Poll
from unregister.models import Blacklist
from django.conf import settings
from optparse import make_option
from poll.forms import NewPollForm
from django.contrib.sites.models import Site
from django.contrib.auth.models import User
from rapidsms.models import Contact
from django.db.models import Q


class Command(BaseCommand):
    help = "Create new violence polls"
    option_list = BaseCommand.option_list + (
        make_option('-n', '--name', dest='n'),
        make_option('-t', '--poll_type', dest='t'),
        make_option('-q', '--question', dest='q'),
        make_option('-r', '--default_response', dest='r'),
        make_option('-c', '--contacts', dest='c'),
        make_option('-u', '--user', dest='u'),
        make_option('-s', '--start_immediately', dest='s'),
        make_option('-e', '--response_type', dest='e'),
        make_option('-g', '--groups', dest='g'),
    )

    def handle(self, **options):
        edtrac_violence_girls = Poll.objects.create(
            name="edtrac_violence_girls",
            type="n",
            question="How many cases of violence against girls were recorded this month? Answer in figures e.g. 5",
            default_response='',
            user=User.objects.get(username='admin'),
        )
        edtrac_violence_girls.sites.add(Site.objects.get_current())

        edtrac_violence_boys = Poll.objects.create(
            name="edtrac_violence_boys",
            type="n",
            question="How many cases of violence against boys were recorded this month? Answer in figures e.g. 4",
            default_response='',
            user=User.objects.get(username='admin'),
        )
        edtrac_violence_boys.sites.add(Site.objects.get_current())

        edtrac_violence_reported = Poll.objects.create(
            name='edtrac_violence_reported',
            type='n',
            question='How many cases of violence were referred to the Police this month? Answer in figures e.g. 6',
            default_response='',
            user=User.objects.get(username='admin'),
        )
        edtrac_violence_reported.sites.add(Site.objects.get_current())
Create new monthly violence polls
Create new monthly violence polls
Python
bsd-3-clause
unicefuganda/edtrac,unicefuganda/edtrac,unicefuganda/edtrac
23c09555221b3f7500a4c658452c9c0cb223799c
Train_SDAE/tools/evaluate_model.py
Train_SDAE/tools/evaluate_model.py
import numpy as np # import pandas as pd # import sys from scipy.special import expit from sklearn import ensemble def get_activations(exp_data, w, b): exp_data = np.transpose(exp_data) prod = exp_data.dot(w) prod_with_bias = prod + b return( expit(prod_with_bias) ) # Order of *args: first all the weights and then all the biases def run_random_forest(nHLayers, exp_data, labels, *args): print len(args[0]), len(args[0][0]), len(args[0][1]) print len(args[0][2]) print "NewLine!\n", len(args[0][3]) print "NewLine!\n", len(args[0][4]) assert len(exp_data) == len(labels) # I think they should be already transposed when running the code. Will see act = exp_data#.T for i in range(nHLayers): print('Weights and biases for layer: ' + str(i+1)) print np.asarray(args[0][i]).shape, np.asarray(args[0][nHLayers + i]).shape act = get_activations(act.T, args[0][i], args[0][nHLayers + i]) rf = ensemble.RandomForestClassifier(n_estimators=1000, oob_score=True, max_depth=5) rfit = rf.fit(act, labels) print('OOB score: %.2f\n' % rfit.oob_score_)
Add evaluation using random forest
Add evaluation using random forest
Python
apache-2.0
glrs/StackedDAE,glrs/StackedDAE
Add evaluation using random forest
import numpy as np # import pandas as pd # import sys from scipy.special import expit from sklearn import ensemble def get_activations(exp_data, w, b): exp_data = np.transpose(exp_data) prod = exp_data.dot(w) prod_with_bias = prod + b return( expit(prod_with_bias) ) # Order of *args: first all the weights and then all the biases def run_random_forest(nHLayers, exp_data, labels, *args): print len(args[0]), len(args[0][0]), len(args[0][1]) print len(args[0][2]) print "NewLine!\n", len(args[0][3]) print "NewLine!\n", len(args[0][4]) assert len(exp_data) == len(labels) # I think they should be already transposed when running the code. Will see act = exp_data#.T for i in range(nHLayers): print('Weights and biases for layer: ' + str(i+1)) print np.asarray(args[0][i]).shape, np.asarray(args[0][nHLayers + i]).shape act = get_activations(act.T, args[0][i], args[0][nHLayers + i]) rf = ensemble.RandomForestClassifier(n_estimators=1000, oob_score=True, max_depth=5) rfit = rf.fit(act, labels) print('OOB score: %.2f\n' % rfit.oob_score_)
<commit_before><commit_msg>Add evaluation using random forest<commit_after>
import numpy as np # import pandas as pd # import sys from scipy.special import expit from sklearn import ensemble def get_activations(exp_data, w, b): exp_data = np.transpose(exp_data) prod = exp_data.dot(w) prod_with_bias = prod + b return( expit(prod_with_bias) ) # Order of *args: first all the weights and then all the biases def run_random_forest(nHLayers, exp_data, labels, *args): print len(args[0]), len(args[0][0]), len(args[0][1]) print len(args[0][2]) print "NewLine!\n", len(args[0][3]) print "NewLine!\n", len(args[0][4]) assert len(exp_data) == len(labels) # I think they should be already transposed when running the code. Will see act = exp_data#.T for i in range(nHLayers): print('Weights and biases for layer: ' + str(i+1)) print np.asarray(args[0][i]).shape, np.asarray(args[0][nHLayers + i]).shape act = get_activations(act.T, args[0][i], args[0][nHLayers + i]) rf = ensemble.RandomForestClassifier(n_estimators=1000, oob_score=True, max_depth=5) rfit = rf.fit(act, labels) print('OOB score: %.2f\n' % rfit.oob_score_)
Add evaluation using random forestimport numpy as np # import pandas as pd # import sys from scipy.special import expit from sklearn import ensemble def get_activations(exp_data, w, b): exp_data = np.transpose(exp_data) prod = exp_data.dot(w) prod_with_bias = prod + b return( expit(prod_with_bias) ) # Order of *args: first all the weights and then all the biases def run_random_forest(nHLayers, exp_data, labels, *args): print len(args[0]), len(args[0][0]), len(args[0][1]) print len(args[0][2]) print "NewLine!\n", len(args[0][3]) print "NewLine!\n", len(args[0][4]) assert len(exp_data) == len(labels) # I think they should be already transposed when running the code. Will see act = exp_data#.T for i in range(nHLayers): print('Weights and biases for layer: ' + str(i+1)) print np.asarray(args[0][i]).shape, np.asarray(args[0][nHLayers + i]).shape act = get_activations(act.T, args[0][i], args[0][nHLayers + i]) rf = ensemble.RandomForestClassifier(n_estimators=1000, oob_score=True, max_depth=5) rfit = rf.fit(act, labels) print('OOB score: %.2f\n' % rfit.oob_score_)
<commit_before><commit_msg>Add evaluation using random forest<commit_after>import numpy as np # import pandas as pd # import sys from scipy.special import expit from sklearn import ensemble def get_activations(exp_data, w, b): exp_data = np.transpose(exp_data) prod = exp_data.dot(w) prod_with_bias = prod + b return( expit(prod_with_bias) ) # Order of *args: first all the weights and then all the biases def run_random_forest(nHLayers, exp_data, labels, *args): print len(args[0]), len(args[0][0]), len(args[0][1]) print len(args[0][2]) print "NewLine!\n", len(args[0][3]) print "NewLine!\n", len(args[0][4]) assert len(exp_data) == len(labels) # I think they should be already transposed when running the code. Will see act = exp_data#.T for i in range(nHLayers): print('Weights and biases for layer: ' + str(i+1)) print np.asarray(args[0][i]).shape, np.asarray(args[0][nHLayers + i]).shape act = get_activations(act.T, args[0][i], args[0][nHLayers + i]) rf = ensemble.RandomForestClassifier(n_estimators=1000, oob_score=True, max_depth=5) rfit = rf.fit(act, labels) print('OOB score: %.2f\n' % rfit.oob_score_)
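For readers unfamiliar with the out-of-bag pattern used in the commit above, here is a minimal, self-contained sketch; the toy feature matrix and label rule are invented for illustration, and the hyperparameters only loosely mirror the commit:

import numpy as np
from sklearn import ensemble

rng = np.random.default_rng(0)
activations = rng.normal(size=(200, 10))      # stand-in for SDAE layer activations
labels = (activations[:, 0] > 0).astype(int)  # invented labels, for the sketch only

rf = ensemble.RandomForestClassifier(n_estimators=100, oob_score=True, max_depth=5)
rf.fit(activations, labels)
print('OOB score: %.2f' % rf.oob_score_)      # out-of-bag estimate of generalization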
009df3372804fa946b7e1bd4c0827e887b964b38
convert.py
convert.py
from bs4 import BeautifulSoup import io import markdown2 import time import codecs file = io.open("Import/blog-03-03-2013.xml") file_contents = file.read(-1) #lxml xpath doesn't seem to understand blogger export soup = BeautifulSoup(file_contents) entries = soup("entry") count = 0 def formatTime(timefield): time_obj = time.strptime(entry(timefield)[0].string[0:16], "%Y-%m-%dT%H:%M") return time.strftime("%Y%m%d%H%M%S", time_obj) for entry in entries: categories = entry("category") tags = [] post = False for category in categories: if category["term"] == "http://schemas.google.com/blogger/2008/kind#post": post = True if category["scheme"] == "http://www.blogger.com/atom/ns#" and category["term"]: tags.append(category["term"]) if post: pub = formatTime("published") updated = formatTime("updated") filename_xml = "%s.blogger.xml" % pub title = entry("title")[0].string content = entry("content")[0].string blog_file = io.open("Export/" + filename_xml, "w") blog_file.write("<blog>\n\t<title>%s</title>\n\t<content><![CDATA[%s]]></content>\n</blog>" % (title, content)) blog_file.close() count += 1 print "Found %d posts" % count print "done!"
Convert blogger to simple xml
Convert blogger to simple xml
Python
mit
progrn/csb
Convert blogger to simple xml
from bs4 import BeautifulSoup import io import markdown2 import time import codecs file = io.open("Import/blog-03-03-2013.xml") file_contents = file.read(-1) #lxml xpath doesn't seem to understand blogger export soup = BeautifulSoup(file_contents) entries = soup("entry") count = 0 def formatTime(timefield): time_obj = time.strptime(entry(timefield)[0].string[0:16], "%Y-%m-%dT%H:%M") return time.strftime("%Y%m%d%H%M%S", time_obj) for entry in entries: categories = entry("category") tags = [] post = False for category in categories: if category["term"] == "http://schemas.google.com/blogger/2008/kind#post": post = True if category["scheme"] == "http://www.blogger.com/atom/ns#" and category["term"]: tags.append(category["term"]) if post: pub = formatTime("published") updated = formatTime("updated") filename_xml = "%s.blogger.xml" % pub title = entry("title")[0].string content = entry("content")[0].string blog_file = io.open("Export/" + filename_xml, "w") blog_file.write("<blog>\n\t<title>%s</title>\n\t<content><![CDATA[%s]]></content>\n</blog>" % (title, content)) blog_file.close() count += 1 print "Found %d posts" % count print "done!"
<commit_before><commit_msg>Convert blogger to simple xml<commit_after>
from bs4 import BeautifulSoup import io import markdown2 import time import codecs file = io.open("Import/blog-03-03-2013.xml") file_contents = file.read(-1) #lxml xpath doesn't seem to understand blogger export soup = BeautifulSoup(file_contents) entries = soup("entry") count = 0 def formatTime(timefield): time_obj = time.strptime(entry(timefield)[0].string[0:16], "%Y-%m-%dT%H:%M") return time.strftime("%Y%m%d%H%M%S", time_obj) for entry in entries: categories = entry("category") tags = [] post = False for category in categories: if category["term"] == "http://schemas.google.com/blogger/2008/kind#post": post = True if category["scheme"] == "http://www.blogger.com/atom/ns#" and category["term"]: tags.append(category["term"]) if post: pub = formatTime("published") updated = formatTime("updated") filename_xml = "%s.blogger.xml" % pub title = entry("title")[0].string content = entry("content")[0].string blog_file = io.open("Export/" + filename_xml, "w") blog_file.write("<blog>\n\t<title>%s</title>\n\t<content><![CDATA[%s]]></content>\n</blog>" % (title, content)) blog_file.close() count += 1 print "Found %d posts" % count print "done!"
Convert blogger to simple xmlfrom bs4 import BeautifulSoup import io import markdown2 import time import codecs file = io.open("Import/blog-03-03-2013.xml") file_contents = file.read(-1) #lxml xpath doesn't seem to understand blogger export soup = BeautifulSoup(file_contents) entries = soup("entry") count = 0 def formatTime(timefield): time_obj = time.strptime(entry(timefield)[0].string[0:16], "%Y-%m-%dT%H:%M") return time.strftime("%Y%m%d%H%M%S", time_obj) for entry in entries: categories = entry("category") tags = [] post = False for category in categories: if category["term"] == "http://schemas.google.com/blogger/2008/kind#post": post = True if category["scheme"] == "http://www.blogger.com/atom/ns#" and category["term"]: tags.append(category["term"]) if post: pub = formatTime("published") updated = formatTime("updated") filename_xml = "%s.blogger.xml" % pub title = entry("title")[0].string content = entry("content")[0].string blog_file = io.open("Export/" + filename_xml, "w") blog_file.write("<blog>\n\t<title>%s</title>\n\t<content><![CDATA[%s]]></content>\n</blog>" % (title, content)) blog_file.close() count += 1 print "Found %d posts" % count print "done!"
<commit_before><commit_msg>Convert blogger to simple xml<commit_after>from bs4 import BeautifulSoup import io import markdown2 import time import codecs file = io.open("Import/blog-03-03-2013.xml") file_contents = file.read(-1) #lxml xpath doesn't seem to understand blogger export soup = BeautifulSoup(file_contents) entries = soup("entry") count = 0 def formatTime(timefield): time_obj = time.strptime(entry(timefield)[0].string[0:16], "%Y-%m-%dT%H:%M") return time.strftime("%Y%m%d%H%M%S", time_obj) for entry in entries: categories = entry("category") tags = [] post = False for category in categories: if category["term"] == "http://schemas.google.com/blogger/2008/kind#post": post = True if category["scheme"] == "http://www.blogger.com/atom/ns#" and category["term"]: tags.append(category["term"]) if post: pub = formatTime("published") updated = formatTime("updated") filename_xml = "%s.blogger.xml" % pub title = entry("title")[0].string content = entry("content")[0].string blog_file = io.open("Export/" + filename_xml, "w") blog_file.write("<blog>\n\t<title>%s</title>\n\t<content><![CDATA[%s]]></content>\n</blog>" % (title, content)) blog_file.close() count += 1 print "Found %d posts" % count print "done!"
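The two-step timestamp conversion in formatTime above can be checked in isolation; the Blogger-style timestamp below is made up for the example:

import time

stamp = "2013-03-01T18:05:00.001-08:00"                 # invented Blogger-style timestamp
parsed = time.strptime(stamp[0:16], "%Y-%m-%dT%H:%M")   # keep only date, hour, minute
print(time.strftime("%Y%m%d%H%M%S", parsed))            # -> 20130301180500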
7d5dcaa0a72dbdd78e192f082bbdf261de1d8963
Codewars/DeleteOccurrencesOfElementOverNTimes.py
Codewars/DeleteOccurrencesOfElementOverNTimes.py
# implemented with list comprehension with side-effects and a global variable # there's a simpler way to do it with list appends that's probably no less efficient, since Python arrays are dynamic, but I wanted to try this out instead from collections import Counter c = Counter() # for use in list comprehensions with side effects! Naughty... def count_and_return(x): c[x] += 1 return x def delete_nth(arr,max_e): if max_e <= 0: return [] global c c = Counter() return [count_and_return(x) for x in arr if c[x] < max_e] # note: condition is evaluated before the function is applied to x, hence < instead of <=
Delete occurrences of an element if it occurs more than n times
Codewars: Delete occurrences of an element if it occurs more than n times
Python
unlicense
SelvorWhim/competitive,SelvorWhim/competitive,SelvorWhim/competitive,SelvorWhim/competitive
Codewars: Delete occurrences of an element if it occurs more than n times
# implemented with list comprehension with side-effects and a global variable # there's a simpler way to do it with list appends that's probably no less efficient, since Python arrays are dynamic, but I wanted to try this out instead from collections import Counter c = Counter() # for use in list comprehensions with side effects! Naughty... def count_and_return(x): c[x] += 1 return x def delete_nth(arr,max_e): if max_e <= 0: return [] global c c = Counter() return [count_and_return(x) for x in arr if c[x] < max_e] # note: condition is evaluated before the function is applied to x, hence < instead of <=
<commit_before><commit_msg>Codewars: Delete occurrences of an element if it occurs more than n times<commit_after>
# implemented with list comprehension with side-effects and a global variable # there's a simpler way to do it with list appends that's probably no less efficient, since Python arrays are dynamic, but I wanted to try this out instead from collections import Counter c = Counter() # for use in list comprehensions with side effects! Naughty... def count_and_return(x): c[x] += 1 return x def delete_nth(arr,max_e): if max_e <= 0: return [] global c c = Counter() return [count_and_return(x) for x in arr if c[x] < max_e] # note: condition is evaluated before the function is applied to x, hence < instead of <=
Codewars: Delete occurrences of an element if it occurs more than n times# implemented with list comprehension with side-effects and a global variable # there's a simpler way to do it with list appends that's probably no less efficient, since Python arrays are dynamic, but I wanted to try this out instead from collections import Counter c = Counter() # for use in list comprehensions with side effects! Naughty... def count_and_return(x): c[x] += 1 return x def delete_nth(arr,max_e): if max_e <= 0: return [] global c c = Counter() return [count_and_return(x) for x in arr if c[x] < max_e] # note: condition is evaluated before the function is applied to x, hence < instead of <=
<commit_before><commit_msg>Codewars: Delete occurrences of an element if it occurs more than n times<commit_after># implemented with list comprehension with side-effects and a global variable # there's a simpler way to do it with list appends that's probably no less efficient, since Python arrays are dynamic, but I wanted to try this out instead from collections import Counter c = Counter() # for use in list comprehensions with side effects! Naughty... def count_and_return(x): c[x] += 1 return x def delete_nth(arr,max_e): if max_e <= 0: return [] global c c = Counter() return [count_and_return(x) for x in arr if c[x] < max_e] # note: condition is evaluated before the function is applied to x, hence < instead of <=
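Assuming the delete_nth from the commit above is in scope, a quick check against the kata's usual examples:

print(delete_nth([1, 1, 3, 3, 7, 2, 2, 2, 2], 3))  # -> [1, 1, 3, 3, 7, 2, 2, 2]
print(delete_nth([20, 37, 20, 21], 1))             # -> [20, 37, 21]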
096c8165ec2beacbc4897285b8fed439765d3e01
test/integration/ggrc/models/test_document.py
test/integration/ggrc/models/test_document.py
# Copyright (C) 2017 Google Inc. # Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file> """Integration tests for Document""" from ggrc.models import all_models from integration.ggrc import TestCase from integration.ggrc.api_helper import Api from integration.ggrc.models import factories class TestDocument(TestCase): """Document test cases""" # pylint: disable=invalid-name def setUp(self): super(TestDocument, self).setUp() self.api = Api() def test_update_title(self): """Test update document title.""" create_title = "test_title" update_title = "update_test_title" document = factories.DocumentFactory(title=create_title) response = self.api.put(document, {"title": update_title}) self.assert200(response) self.assertEqual(all_models.Document.query.get(document.id).title, update_title)
Add test on update document title
Add test on update document title
Python
apache-2.0
AleksNeStu/ggrc-core,AleksNeStu/ggrc-core,AleksNeStu/ggrc-core,AleksNeStu/ggrc-core
Add test on update document title
# Copyright (C) 2017 Google Inc. # Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file> """Integration tests for Document""" from ggrc.models import all_models from integration.ggrc import TestCase from integration.ggrc.api_helper import Api from integration.ggrc.models import factories class TestDocument(TestCase): """Document test cases""" # pylint: disable=invalid-name def setUp(self): super(TestDocument, self).setUp() self.api = Api() def test_update_title(self): """Test update document title.""" create_title = "test_title" update_title = "update_test_title" document = factories.DocumentFactory(title=create_title) response = self.api.put(document, {"title": update_title}) self.assert200(response) self.assertEqual(all_models.Document.query.get(document.id).title, update_title)
<commit_before><commit_msg>Add test on update document title<commit_after>
# Copyright (C) 2017 Google Inc. # Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file> """Integration tests for Document""" from ggrc.models import all_models from integration.ggrc import TestCase from integration.ggrc.api_helper import Api from integration.ggrc.models import factories class TestDocument(TestCase): """Document test cases""" # pylint: disable=invalid-name def setUp(self): super(TestDocument, self).setUp() self.api = Api() def test_update_title(self): """Test update document title.""" create_title = "test_title" update_title = "update_test_title" document = factories.DocumentFactory(title=create_title) response = self.api.put(document, {"title": update_title}) self.assert200(response) self.assertEqual(all_models.Document.query.get(document.id).title, update_title)
Add test on update document title# Copyright (C) 2017 Google Inc. # Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file> """Integration tests for Document""" from ggrc.models import all_models from integration.ggrc import TestCase from integration.ggrc.api_helper import Api from integration.ggrc.models import factories class TestDocument(TestCase): """Document test cases""" # pylint: disable=invalid-name def setUp(self): super(TestDocument, self).setUp() self.api = Api() def test_update_title(self): """Test update document title.""" create_title = "test_title" update_title = "update_test_title" document = factories.DocumentFactory(title=create_title) response = self.api.put(document, {"title": update_title}) self.assert200(response) self.assertEqual(all_models.Document.query.get(document.id).title, update_title)
<commit_before><commit_msg>Add test on update document title<commit_after># Copyright (C) 2017 Google Inc. # Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file> """Integration tests for Document""" from ggrc.models import all_models from integration.ggrc import TestCase from integration.ggrc.api_helper import Api from integration.ggrc.models import factories class TestDocument(TestCase): """Document test cases""" # pylint: disable=invalid-name def setUp(self): super(TestDocument, self).setUp() self.api = Api() def test_update_title(self): """Test update document title.""" create_title = "test_title" update_title = "update_test_title" document = factories.DocumentFactory(title=create_title) response = self.api.put(document, {"title": update_title}) self.assert200(response) self.assertEqual(all_models.Document.query.get(document.id).title, update_title)
864bf2bb3bdb731d0725cc33891145f2a7da17d3
db/common.py
db/common.py
#!/usr/bin/env python # -*- coding: utf-8 -*- import os from contextlib import contextmanager from sqlalchemy import create_engine from sqlalchemy.orm.session import sessionmaker from sqlalchemy.schema import MetaData from sqlalchemy.ext.declarative import declarative_base from utils import get_connection_string_from_config_file cfg_src = os.path.join(os.path.dirname(__file__), "..", r"_config.ini") conn_string = get_connection_string_from_config_file(cfg_src, 'db_conn_3') Engine = create_engine(conn_string, echo=False, pool_size=5) Session = sessionmaker(bind=Engine) Base = declarative_base(metadata=MetaData(schema='nhl', bind=Engine)) @contextmanager def session_scope(): session = Session() try: yield session except: session.rollback() raise finally: session.close()
Add initialization functions for database connection
Add initialization functions for database connection
Python
mit
leaffan/pynhldb
Add initialization functions for database connection
#!/usr/bin/env python # -*- coding: utf-8 -*- import os from contextlib import contextmanager from sqlalchemy import create_engine from sqlalchemy.orm.session import sessionmaker from sqlalchemy.schema import MetaData from sqlalchemy.ext.declarative import declarative_base from utils import get_connection_string_from_config_file cfg_src = os.path.join(os.path.dirname(__file__), "..", r"_config.ini") conn_string = get_connection_string_from_config_file(cfg_src, 'db_conn_3') Engine = create_engine(conn_string, echo=False, pool_size=5) Session = sessionmaker(bind=Engine) Base = declarative_base(metadata=MetaData(schema='nhl', bind=Engine)) @contextmanager def session_scope(): session = Session() try: yield session except: session.rollback() raise finally: session.close()
<commit_before><commit_msg>Add initialization functions for database connection<commit_after>
#!/usr/bin/env python # -*- coding: utf-8 -*- import os from contextlib import contextmanager from sqlalchemy import create_engine from sqlalchemy.orm.session import sessionmaker from sqlalchemy.schema import MetaData from sqlalchemy.ext.declarative import declarative_base from utils import get_connection_string_from_config_file cfg_src = os.path.join(os.path.dirname(__file__), "..", r"_config.ini") conn_string = get_connection_string_from_config_file(cfg_src, 'db_conn_3') Engine = create_engine(conn_string, echo=False, pool_size=5) Session = sessionmaker(bind=Engine) Base = declarative_base(metadata=MetaData(schema='nhl', bind=Engine)) @contextmanager def session_scope(): session = Session() try: yield session except: session.rollback() raise finally: session.close()
Add initialization functions for database connection#!/usr/bin/env python # -*- coding: utf-8 -*- import os from contextlib import contextmanager from sqlalchemy import create_engine from sqlalchemy.orm.session import sessionmaker from sqlalchemy.schema import MetaData from sqlalchemy.ext.declarative import declarative_base from utils import get_connection_string_from_config_file cfg_src = os.path.join(os.path.dirname(__file__), "..", r"_config.ini") conn_string = get_connection_string_from_config_file(cfg_src, 'db_conn_3') Engine = create_engine(conn_string, echo=False, pool_size=5) Session = sessionmaker(bind=Engine) Base = declarative_base(metadata=MetaData(schema='nhl', bind=Engine)) @contextmanager def session_scope(): session = Session() try: yield session except: session.rollback() raise finally: session.close()
<commit_before><commit_msg>Add initialization functions for database connection<commit_after>#!/usr/bin/env python # -*- coding: utf-8 -*- import os from contextlib import contextmanager from sqlalchemy import create_engine from sqlalchemy.orm.session import sessionmaker from sqlalchemy.schema import MetaData from sqlalchemy.ext.declarative import declarative_base from utils import get_connection_string_from_config_file cfg_src = os.path.join(os.path.dirname(__file__), "..", r"_config.ini") conn_string = get_connection_string_from_config_file(cfg_src, 'db_conn_3') Engine = create_engine(conn_string, echo=False, pool_size=5) Session = sessionmaker(bind=Engine) Base = declarative_base(metadata=MetaData(schema='nhl', bind=Engine)) @contextmanager def session_scope(): session = Session() try: yield session except: session.rollback() raise finally: session.close()
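A usage sketch for the session_scope context manager above; Player is a hypothetical mapped class, not part of the commit, and note that this particular implementation rolls back on error but leaves committing to the caller:

with session_scope() as session:
    player = Player(name='Jaromir Jagr')  # hypothetical model class
    session.add(player)
    session.commit()                      # explicit: the manager itself never commits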
3fb3662e58e35ccb283074c1078e1c9e7aaf88ed
LendingClub/tests/live_session_test.py
LendingClub/tests/live_session_test.py
#!/usr/bin/env python import sys import unittest import getpass from logger import TestLogger sys.path.insert(0, '.') sys.path.insert(0, '../') sys.path.insert(0, '../../') from LendingClub import session class LiveTestSession(unittest.TestCase): http = None session = None logger = None def setUp(self): self.logger = TestLogger() self.session = session.Session(logger=self.logger) def tearDown(self): pass def test_login(self): """ test_valid_login Test login with credentials from the user """ print '\n\nEnter a valid LendingClub account information...' email = raw_input('Email:') password = getpass.getpass() self.assertTrue(self.session.authenticate(email, password)) print 'Authentication successful' def test_invalid_login(self): """ test_invalid_password Test login with the wrong password """ self.assertRaises( session.AuthenticationError, lambda: self.session.authenticate('test@test.com', 'wrongsecret') ) if __name__ == '__main__': unittest.main()
Add live test for session
Add live test for session
Python
mit
jgillick/LendingClub,carlosnasillo/LendingClub,jgillick/LendingClub,carlosnasillo/LendingClub
Add live test for session
#!/usr/bin/env python import sys import unittest import getpass from logger import TestLogger sys.path.insert(0, '.') sys.path.insert(0, '../') sys.path.insert(0, '../../') from LendingClub import session class LiveTestSession(unittest.TestCase): http = None session = None logger = None def setUp(self): self.logger = TestLogger() self.session = session.Session(logger=self.logger) def tearDown(self): pass def test_login(self): """ test_valid_login Test login with credentials from the user """ print '\n\nEnter a valid LendingClub account information...' email = raw_input('Email:') password = getpass.getpass() self.assertTrue(self.session.authenticate(email, password)) print 'Authentication successful' def test_invalid_login(self): """ test_invalid_password Test login with the wrong password """ self.assertRaises( session.AuthenticationError, lambda: self.session.authenticate('test@test.com', 'wrongsecret') ) if __name__ == '__main__': unittest.main()
<commit_before><commit_msg>Add live test for session<commit_after>
#!/usr/bin/env python import sys import unittest import getpass from logger import TestLogger sys.path.insert(0, '.') sys.path.insert(0, '../') sys.path.insert(0, '../../') from LendingClub import session class LiveTestSession(unittest.TestCase): http = None session = None logger = None def setUp(self): self.logger = TestLogger() self.session = session.Session(logger=self.logger) def tearDown(self): pass def test_login(self): """ test_valid_login Test login with credentials from the user """ print '\n\nEnter a valid LendingClub account information...' email = raw_input('Email:') password = getpass.getpass() self.assertTrue(self.session.authenticate(email, password)) print 'Authentication successful' def test_invalid_login(self): """ test_invalid_password Test login with the wrong password """ self.assertRaises( session.AuthenticationError, lambda: self.session.authenticate('test@test.com', 'wrongsecret') ) if __name__ == '__main__': unittest.main()
Add live test for session#!/usr/bin/env python import sys import unittest import getpass from logger import TestLogger sys.path.insert(0, '.') sys.path.insert(0, '../') sys.path.insert(0, '../../') from LendingClub import session class LiveTestSession(unittest.TestCase): http = None session = None logger = None def setUp(self): self.logger = TestLogger() self.session = session.Session(logger=self.logger) def tearDown(self): pass def test_login(self): """ test_valid_login Test login with credentials from the user """ print '\n\nEnter a valid LendingClub account information...' email = raw_input('Email:') password = getpass.getpass() self.assertTrue(self.session.authenticate(email, password)) print 'Authentication successful' def test_invalid_login(self): """ test_invalid_password Test login with the wrong password """ self.assertRaises( session.AuthenticationError, lambda: self.session.authenticate('test@test.com', 'wrongsecret') ) if __name__ == '__main__': unittest.main()
<commit_before><commit_msg>Add live test for session<commit_after>#!/usr/bin/env python import sys import unittest import getpass from logger import TestLogger sys.path.insert(0, '.') sys.path.insert(0, '../') sys.path.insert(0, '../../') from LendingClub import session class LiveTestSession(unittest.TestCase): http = None session = None logger = None def setUp(self): self.logger = TestLogger() self.session = session.Session(logger=self.logger) def tearDown(self): pass def test_login(self): """ test_valid_login Test login with credentials from the user """ print '\n\nEnter a valid LendingClub account information...' email = raw_input('Email:') password = getpass.getpass() self.assertTrue(self.session.authenticate(email, password)) print 'Authentication successful' def test_invalid_login(self): """ test_invalid_password Test login with the wrong password """ self.assertRaises( session.AuthenticationError, lambda: self.session.authenticate('test@test.com', 'wrongsecret') ) if __name__ == '__main__': unittest.main()
9e4858e652fba57f767a9c6d921853a6487301bd
epsilon/test/test_version.py
epsilon/test/test_version.py
""" Tests for turning simple version strings into twisted.python.versions.Version objects. """ from epsilon import asTwistedVersion from twisted.trial.unittest import SynchronousTestCase class AsTwistedVersionTests(SynchronousTestCase): def test_simple(self): """ A simple version string can be turned into a Version object. """ version = asTwistedVersion("package", "1.2.3") self.assertEqual(version.package, "package") self.assertEqual(version.major, 1) self.assertEqual(version.minor, 2) self.assertEqual(version.micro, 3)
Add a test for the version string parsing code
Add a test for the version string parsing code
Python
mit
twisted/epsilon
Add a test for the version string parsing code
""" Tests for turning simple version strings into twisted.python.versions.Version objects. """ from epsilon import asTwistedVersion from twisted.trial.unittest import SynchronousTestCase class AsTwistedVersionTests(SynchronousTestCase): def test_simple(self): """ A simple version string can be turned into a Version object. """ version = asTwistedVersion("package", "1.2.3") self.assertEqual(version.package, "package") self.assertEqual(version.major, 1) self.assertEqual(version.minor, 2) self.assertEqual(version.micro, 3)
<commit_before><commit_msg>Add a test for the version string parsing code<commit_after>
""" Tests for turning simple version strings into twisted.python.versions.Version objects. """ from epsilon import asTwistedVersion from twisted.trial.unittest import SynchronousTestCase class AsTwistedVersionTests(SynchronousTestCase): def test_simple(self): """ A simple version string can be turned into a Version object. """ version = asTwistedVersion("package", "1.2.3") self.assertEqual(version.package, "package") self.assertEqual(version.major, 1) self.assertEqual(version.minor, 2) self.assertEqual(version.micro, 3)
Add a test for the version string parsing code""" Tests for turning simple version strings into twisted.python.versions.Version objects. """ from epsilon import asTwistedVersion from twisted.trial.unittest import SynchronousTestCase class AsTwistedVersionTests(SynchronousTestCase): def test_simple(self): """ A simple version string can be turned into a Version object. """ version = asTwistedVersion("package", "1.2.3") self.assertEqual(version.package, "package") self.assertEqual(version.major, 1) self.assertEqual(version.minor, 2) self.assertEqual(version.micro, 3)
<commit_before><commit_msg>Add a test for the version string parsing code<commit_after>""" Tests for turning simple version strings into twisted.python.versions.Version objects. """ from epsilon import asTwistedVersion from twisted.trial.unittest import SynchronousTestCase class AsTwistedVersionTests(SynchronousTestCase): def test_simple(self): """ A simple version string can be turned into a Version object. """ version = asTwistedVersion("package", "1.2.3") self.assertEqual(version.package, "package") self.assertEqual(version.major, 1) self.assertEqual(version.minor, 2) self.assertEqual(version.micro, 3)
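A plausible implementation of asTwistedVersion consistent with the behavior the test above asserts — the real epsilon implementation may differ:

from twisted.python.versions import Version

def as_twisted_version(package, version_string):
    # split "1.2.3" into integer major/minor/micro, as the test expects
    return Version(package, *map(int, version_string.split('.')))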
9668580633a1a8baaa59030e5a52d2478222cbd2
nodeconductor/openstack/cost_tracking.py
nodeconductor/openstack/cost_tracking.py
from . import models from nodeconductor.cost_tracking import CostTrackingBackend class OpenStackCostTrackingBackend(CostTrackingBackend): @classmethod def get_monthly_cost_estimate(cls, resource): backend = resource.get_backend() return backend.get_monthly_cost_estimate()
Add cost tracking file to openstack
Add cost tracking file to openstack - saas-951
Python
mit
opennode/nodeconductor,opennode/nodeconductor,opennode/nodeconductor
Add cost tracking file to openstack - saas-951
from . import models from nodeconductor.cost_tracking import CostTrackingBackend class OpenStackCostTrackingBackend(CostTrackingBackend): @classmethod def get_monthly_cost_estimate(cls, resource): backend = resource.get_backend() return backend.get_monthly_cost_estimate()
<commit_before><commit_msg>Add cost tracking file to openstack - saas-951<commit_after>
from . import models from nodeconductor.cost_tracking import CostTrackingBackend class OpenStackCostTrackingBackend(CostTrackingBackend): @classmethod def get_monthly_cost_estimate(cls, resource): backend = resource.get_backend() return backend.get_monthly_cost_estimate()
Add cost tracking file to openstack - saas-951from . import models from nodeconductor.cost_tracking import CostTrackingBackend class OpenStackCostTrackingBackend(CostTrackingBackend): @classmethod def get_monthly_cost_estimate(cls, resource): backend = resource.get_backend() return backend.get_monthly_cost_estimate()
<commit_before><commit_msg>Add cost tracking file to openstack - saas-951<commit_after>from . import models from nodeconductor.cost_tracking import CostTrackingBackend class OpenStackCostTrackingBackend(CostTrackingBackend): @classmethod def get_monthly_cost_estimate(cls, resource): backend = resource.get_backend() return backend.get_monthly_cost_estimate()
a12dd320df30404df8c8ec196e21067376cc1e2c
astropy/table/tests/test_pickle.py
astropy/table/tests/test_pickle.py
import cPickle as pickle import numpy as np import pytest from ...table import Table, Column, MaskedColumn @pytest.fixture(params=[0, 1, -1]) def protocol(request): """ Fixture to run all the tests for protocols 0 and 1, and -1 (most advanced). """ return request.param def test_pickle_column(protocol): c = Column(data=[1, 2], name='a', format='%05d', description='col a', unit='cm', meta={'a': 1}) cs = pickle.dumps(c) cp = pickle.loads(cs) assert np.all(cp == c) assert cp.attrs_equal(c) def test_pickle_masked_column(protocol): c = MaskedColumn(data=[1, 2], name='a', format='%05d', description='col a', unit='cm', meta={'a': 1}) c.mask[1] = True c.fill_value = -99 cs = pickle.dumps(c) cp = pickle.loads(cs) assert np.all(cp._data == c._data) assert np.all(cp.mask == c.mask) assert cp.attrs_equal(c) assert cp.fill_value == -99 def test_pickle_table(protocol): a = Column(data=[1, 2], name='a', format='%05d', description='col a', unit='cm', meta={'a': 1}) b = Column(data=[3.0, 4.0], name='b', format='%05d', description='col b', unit='cm', meta={'b': 1}) t = Table([a, b], meta={'a': 1}) ts = pickle.dumps(t) tp = pickle.loads(ts) assert np.all(tp['a'] == t['a']) assert np.all(tp['b'] == t['b']) assert tp['a'].attrs_equal(t['a']) assert tp['b'].attrs_equal(t['b']) assert tp.meta == t.meta def test_pickle_masked_table(protocol): a = Column(data=[1, 2], name='a', format='%05d', description='col a', unit='cm', meta={'a': 1}) b = Column(data=[3.0, 4.0], name='b', format='%05d', description='col b', unit='cm', meta={'b': 1}) t = Table([a, b], meta={'a': 1}, masked=True) t['a'].mask[1] = True t['a'].fill_value = -99 ts = pickle.dumps(t) tp = pickle.loads(ts) for colname in ('a', 'b'): for attr in ('_data', 'mask', 'fill_value'): assert np.all(getattr(tp[colname], attr) == getattr(tp[colname], attr)) assert tp['a'].attrs_equal(t['a']) assert tp['b'].attrs_equal(t['b']) assert tp.meta == t.meta
Add tests of table and column pickling
Add tests of table and column pickling
Python
bsd-3-clause
mhvk/astropy,astropy/astropy,astropy/astropy,larrybradley/astropy,funbaker/astropy,joergdietrich/astropy,joergdietrich/astropy,dhomeier/astropy,larrybradley/astropy,pllim/astropy,MSeifert04/astropy,pllim/astropy,dhomeier/astropy,StuartLittlefair/astropy,tbabej/astropy,lpsinger/astropy,saimn/astropy,tbabej/astropy,dhomeier/astropy,AustereCuriosity/astropy,stargaser/astropy,AustereCuriosity/astropy,kelle/astropy,stargaser/astropy,bsipocz/astropy,tbabej/astropy,larrybradley/astropy,StuartLittlefair/astropy,mhvk/astropy,stargaser/astropy,joergdietrich/astropy,pllim/astropy,bsipocz/astropy,dhomeier/astropy,kelle/astropy,aleksandr-bakanov/astropy,lpsinger/astropy,bsipocz/astropy,DougBurke/astropy,StuartLittlefair/astropy,lpsinger/astropy,lpsinger/astropy,joergdietrich/astropy,funbaker/astropy,saimn/astropy,StuartLittlefair/astropy,mhvk/astropy,pllim/astropy,stargaser/astropy,aleksandr-bakanov/astropy,funbaker/astropy,joergdietrich/astropy,saimn/astropy,tbabej/astropy,bsipocz/astropy,astropy/astropy,kelle/astropy,DougBurke/astropy,lpsinger/astropy,larrybradley/astropy,aleksandr-bakanov/astropy,astropy/astropy,funbaker/astropy,AustereCuriosity/astropy,mhvk/astropy,pllim/astropy,larrybradley/astropy,saimn/astropy,StuartLittlefair/astropy,astropy/astropy,tbabej/astropy,DougBurke/astropy,kelle/astropy,AustereCuriosity/astropy,dhomeier/astropy,AustereCuriosity/astropy,MSeifert04/astropy,MSeifert04/astropy,DougBurke/astropy,saimn/astropy,MSeifert04/astropy,mhvk/astropy,aleksandr-bakanov/astropy,kelle/astropy
Add tests of table and column pickling
import cPickle as pickle import numpy as np import pytest from ...table import Table, Column, MaskedColumn @pytest.fixture(params=[0, 1, -1]) def protocol(request): """ Fixture to run all the tests for protocols 0 and 1, and -1 (most advanced). """ return request.param def test_pickle_column(protocol): c = Column(data=[1, 2], name='a', format='%05d', description='col a', unit='cm', meta={'a': 1}) cs = pickle.dumps(c) cp = pickle.loads(cs) assert np.all(cp == c) assert cp.attrs_equal(c) def test_pickle_masked_column(protocol): c = MaskedColumn(data=[1, 2], name='a', format='%05d', description='col a', unit='cm', meta={'a': 1}) c.mask[1] = True c.fill_value = -99 cs = pickle.dumps(c) cp = pickle.loads(cs) assert np.all(cp._data == c._data) assert np.all(cp.mask == c.mask) assert cp.attrs_equal(c) assert cp.fill_value == -99 def test_pickle_table(protocol): a = Column(data=[1, 2], name='a', format='%05d', description='col a', unit='cm', meta={'a': 1}) b = Column(data=[3.0, 4.0], name='b', format='%05d', description='col b', unit='cm', meta={'b': 1}) t = Table([a, b], meta={'a': 1}) ts = pickle.dumps(t) tp = pickle.loads(ts) assert np.all(tp['a'] == t['a']) assert np.all(tp['b'] == t['b']) assert tp['a'].attrs_equal(t['a']) assert tp['b'].attrs_equal(t['b']) assert tp.meta == t.meta def test_pickle_masked_table(protocol): a = Column(data=[1, 2], name='a', format='%05d', description='col a', unit='cm', meta={'a': 1}) b = Column(data=[3.0, 4.0], name='b', format='%05d', description='col b', unit='cm', meta={'b': 1}) t = Table([a, b], meta={'a': 1}, masked=True) t['a'].mask[1] = True t['a'].fill_value = -99 ts = pickle.dumps(t) tp = pickle.loads(ts) for colname in ('a', 'b'): for attr in ('_data', 'mask', 'fill_value'): assert np.all(getattr(tp[colname], attr) == getattr(tp[colname], attr)) assert tp['a'].attrs_equal(t['a']) assert tp['b'].attrs_equal(t['b']) assert tp.meta == t.meta
<commit_before><commit_msg>Add tests of table and column pickling<commit_after>
import cPickle as pickle import numpy as np import pytest from ...table import Table, Column, MaskedColumn @pytest.fixture(params=[0, 1, -1]) def protocol(request): """ Fixture to run all the tests for protocols 0 and 1, and -1 (most advanced). """ return request.param def test_pickle_column(protocol): c = Column(data=[1, 2], name='a', format='%05d', description='col a', unit='cm', meta={'a': 1}) cs = pickle.dumps(c) cp = pickle.loads(cs) assert np.all(cp == c) assert cp.attrs_equal(c) def test_pickle_masked_column(protocol): c = MaskedColumn(data=[1, 2], name='a', format='%05d', description='col a', unit='cm', meta={'a': 1}) c.mask[1] = True c.fill_value = -99 cs = pickle.dumps(c) cp = pickle.loads(cs) assert np.all(cp._data == c._data) assert np.all(cp.mask == c.mask) assert cp.attrs_equal(c) assert cp.fill_value == -99 def test_pickle_table(protocol): a = Column(data=[1, 2], name='a', format='%05d', description='col a', unit='cm', meta={'a': 1}) b = Column(data=[3.0, 4.0], name='b', format='%05d', description='col b', unit='cm', meta={'b': 1}) t = Table([a, b], meta={'a': 1}) ts = pickle.dumps(t) tp = pickle.loads(ts) assert np.all(tp['a'] == t['a']) assert np.all(tp['b'] == t['b']) assert tp['a'].attrs_equal(t['a']) assert tp['b'].attrs_equal(t['b']) assert tp.meta == t.meta def test_pickle_masked_table(protocol): a = Column(data=[1, 2], name='a', format='%05d', description='col a', unit='cm', meta={'a': 1}) b = Column(data=[3.0, 4.0], name='b', format='%05d', description='col b', unit='cm', meta={'b': 1}) t = Table([a, b], meta={'a': 1}, masked=True) t['a'].mask[1] = True t['a'].fill_value = -99 ts = pickle.dumps(t) tp = pickle.loads(ts) for colname in ('a', 'b'): for attr in ('_data', 'mask', 'fill_value'): assert np.all(getattr(tp[colname], attr) == getattr(tp[colname], attr)) assert tp['a'].attrs_equal(t['a']) assert tp['b'].attrs_equal(t['b']) assert tp.meta == t.meta
Add tests of table and column picklingimport cPickle as pickle import numpy as np import pytest from ...table import Table, Column, MaskedColumn @pytest.fixture(params=[0, 1, -1]) def protocol(request): """ Fixture to run all the tests for protocols 0 and 1, and -1 (most advanced). """ return request.param def test_pickle_column(protocol): c = Column(data=[1, 2], name='a', format='%05d', description='col a', unit='cm', meta={'a': 1}) cs = pickle.dumps(c) cp = pickle.loads(cs) assert np.all(cp == c) assert cp.attrs_equal(c) def test_pickle_masked_column(protocol): c = MaskedColumn(data=[1, 2], name='a', format='%05d', description='col a', unit='cm', meta={'a': 1}) c.mask[1] = True c.fill_value = -99 cs = pickle.dumps(c) cp = pickle.loads(cs) assert np.all(cp._data == c._data) assert np.all(cp.mask == c.mask) assert cp.attrs_equal(c) assert cp.fill_value == -99 def test_pickle_table(protocol): a = Column(data=[1, 2], name='a', format='%05d', description='col a', unit='cm', meta={'a': 1}) b = Column(data=[3.0, 4.0], name='b', format='%05d', description='col b', unit='cm', meta={'b': 1}) t = Table([a, b], meta={'a': 1}) ts = pickle.dumps(t) tp = pickle.loads(ts) assert np.all(tp['a'] == t['a']) assert np.all(tp['b'] == t['b']) assert tp['a'].attrs_equal(t['a']) assert tp['b'].attrs_equal(t['b']) assert tp.meta == t.meta def test_pickle_masked_table(protocol): a = Column(data=[1, 2], name='a', format='%05d', description='col a', unit='cm', meta={'a': 1}) b = Column(data=[3.0, 4.0], name='b', format='%05d', description='col b', unit='cm', meta={'b': 1}) t = Table([a, b], meta={'a': 1}, masked=True) t['a'].mask[1] = True t['a'].fill_value = -99 ts = pickle.dumps(t) tp = pickle.loads(ts) for colname in ('a', 'b'): for attr in ('_data', 'mask', 'fill_value'): assert np.all(getattr(tp[colname], attr) == getattr(tp[colname], attr)) assert tp['a'].attrs_equal(t['a']) assert tp['b'].attrs_equal(t['b']) assert tp.meta == t.meta
<commit_before><commit_msg>Add tests of table and column pickling<commit_after>import cPickle as pickle import numpy as np import pytest from ...table import Table, Column, MaskedColumn @pytest.fixture(params=[0, 1, -1]) def protocol(request): """ Fixture to run all the tests for protocols 0 and 1, and -1 (most advanced). """ return request.param def test_pickle_column(protocol): c = Column(data=[1, 2], name='a', format='%05d', description='col a', unit='cm', meta={'a': 1}) cs = pickle.dumps(c) cp = pickle.loads(cs) assert np.all(cp == c) assert cp.attrs_equal(c) def test_pickle_masked_column(protocol): c = MaskedColumn(data=[1, 2], name='a', format='%05d', description='col a', unit='cm', meta={'a': 1}) c.mask[1] = True c.fill_value = -99 cs = pickle.dumps(c) cp = pickle.loads(cs) assert np.all(cp._data == c._data) assert np.all(cp.mask == c.mask) assert cp.attrs_equal(c) assert cp.fill_value == -99 def test_pickle_table(protocol): a = Column(data=[1, 2], name='a', format='%05d', description='col a', unit='cm', meta={'a': 1}) b = Column(data=[3.0, 4.0], name='b', format='%05d', description='col b', unit='cm', meta={'b': 1}) t = Table([a, b], meta={'a': 1}) ts = pickle.dumps(t) tp = pickle.loads(ts) assert np.all(tp['a'] == t['a']) assert np.all(tp['b'] == t['b']) assert tp['a'].attrs_equal(t['a']) assert tp['b'].attrs_equal(t['b']) assert tp.meta == t.meta def test_pickle_masked_table(protocol): a = Column(data=[1, 2], name='a', format='%05d', description='col a', unit='cm', meta={'a': 1}) b = Column(data=[3.0, 4.0], name='b', format='%05d', description='col b', unit='cm', meta={'b': 1}) t = Table([a, b], meta={'a': 1}, masked=True) t['a'].mask[1] = True t['a'].fill_value = -99 ts = pickle.dumps(t) tp = pickle.loads(ts) for colname in ('a', 'b'): for attr in ('_data', 'mask', 'fill_value'): assert np.all(getattr(tp[colname], attr) == getattr(tp[colname], attr)) assert tp['a'].attrs_equal(t['a']) assert tp['b'].attrs_equal(t['b']) assert tp.meta == t.meta
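Worth noticing in the commit above: the protocol fixture is requested but never forwarded to pickle.dumps, so all three parameterizations exercise the default protocol. A sketch of how the parameter would typically be threaded through:

cs = pickle.dumps(c, protocol)  # serialize with the protocol under test
cp = pickle.loads(cs)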
0c6becaa179aba9408def1b3cce61d5ec1509942
python/main.py
python/main.py
from simul import * if __name__ == '__main__': # create a new simulation s = Simulation(Re=5) # initial conditions psi(0) = 0, Omega(0) = 0 s.psi.initial("null") s.omega.initial("null") # T_n(t=0) = sin(pi*k*dz) & T_0(t=0) = 1-k*dz s.T.initial(lambda n, k: T_0(n,k,s)) # main loop over time while s.step(): s.T.step() s.psi.step() s.omega.step() del s
Load the simul module and run a simulation
Load the simul module and run a simulation
Python
apache-2.0
cphyc/MHD_simulation,cphyc/MHD_simulation
Load the simul module and run a simulation
from simul import * if __name__ == '__main__': # create a new simulation s = Simulation(Re=5) # initial conditions psi(0) = 0, Omega(0) = 0 s.psi.initial("null") s.omega.initial("null") # T_n(t=0) = sin(pi*k*dz) & T_0(t=0) = 1-k*dz s.T.initial(lambda n, k: T_0(n,k,s)) # main loop over time while s.step(): s.T.step() s.psi.step() s.omega.step() del s
<commit_before><commit_msg>Load the simul module and run a simulation<commit_after>
from simul import * if __name__ == '__main__': # create a new simulation s = Simulation(Re=5) # initial conditions psi(0) = 0, Omega(0) = 0 s.psi.initial("null") s.omega.initial("null") # T_n(t=0) = sin(pi*k*dz) & T_0(t=0) = 1-k*dz s.T.initial(lambda n, k: T_0(n,k,s)) # main loop over time while s.step(): s.T.step() s.psi.step() s.omega.step() del s
Load the simul module and run a simulationfrom simul import * if __name__ == '__main__': # create a new simulation s = Simulation(Re=5) # initial conditions psi(0) = 0, Omega(0) = 0 s.psi.initial("null") s.omega.initial("null") # T_n(t=0) = sin(pi*k*dz) & T_0(t=0) = 1-k*dz s.T.initial(lambda n, k: T_0(n,k,s)) # main loop over time while s.step(): s.T.step() s.psi.step() s.omega.step() del s
<commit_before><commit_msg>Load the simul module and run a simulation<commit_after>from simul import * if __name__ == '__main__': # create a new simulation s = Simulation(Re=5) # initial conditions psi(0) = 0, Omega(0) = 0 s.psi.initial("null") s.omega.initial("null") # T_n(t=0) = sin(pi*k*dz) & T_0(t=0) = 1-k*dz s.T.initial(lambda n, k: T_0(n,k,s)) # main loop over time while s.step(): s.T.step() s.psi.step() s.omega.step() del s
989a94c81f74a17707e66f126960b6bb45e9b4d5
migrations/versions/3042d0ca43bf_index_job_project_id.py
migrations/versions/3042d0ca43bf_index_job_project_id.py
"""Index Job(project_id, status, date_created) where patch_id IS NULL Revision ID: 3042d0ca43bf Revises: 3a3366fb7822 Create Date: 2014-01-03 15:24:39.947813 """ # revision identifiers, used by Alembic. revision = '3042d0ca43bf' down_revision = '3a3366fb7822' from alembic import op def upgrade(): op.execute('CREATE INDEX idx_job_previous_runs ON job (project_id, status, date_created) WHERE patch_id IS NULL') def downgrade(): op.drop_index('idx_job_previous_runs', 'job')
Add index to cover testgroup_details (previous runs)
Add index to cover testgroup_details (previous runs)
Python
apache-2.0
dropbox/changes,bowlofstew/changes,dropbox/changes,dropbox/changes,bowlofstew/changes,bowlofstew/changes,wfxiang08/changes,bowlofstew/changes,wfxiang08/changes,wfxiang08/changes,wfxiang08/changes,dropbox/changes
Add index to cover testgroup_details (previous runs)
"""Index Job(project_id, status, date_created) where patch_id IS NULL Revision ID: 3042d0ca43bf Revises: 3a3366fb7822 Create Date: 2014-01-03 15:24:39.947813 """ # revision identifiers, used by Alembic. revision = '3042d0ca43bf' down_revision = '3a3366fb7822' from alembic import op def upgrade(): op.execute('CREATE INDEX idx_job_previous_runs ON job (project_id, status, date_created) WHERE patch_id IS NULL') def downgrade(): op.drop_index('idx_job_previous_runs', 'job')
<commit_before><commit_msg>Add index to cover testgroup_details (previous runs)<commit_after>
"""Index Job(project_id, status, date_created) where patch_id IS NULL Revision ID: 3042d0ca43bf Revises: 3a3366fb7822 Create Date: 2014-01-03 15:24:39.947813 """ # revision identifiers, used by Alembic. revision = '3042d0ca43bf' down_revision = '3a3366fb7822' from alembic import op def upgrade(): op.execute('CREATE INDEX idx_job_previous_runs ON job (project_id, status, date_created) WHERE patch_id IS NULL') def downgrade(): op.drop_index('idx_job_previous_runs', 'job')
Add index to cover testgroup_details (previous runs)"""Index Job(project_id, status, date_created) where patch_id IS NULL Revision ID: 3042d0ca43bf Revises: 3a3366fb7822 Create Date: 2014-01-03 15:24:39.947813 """ # revision identifiers, used by Alembic. revision = '3042d0ca43bf' down_revision = '3a3366fb7822' from alembic import op def upgrade(): op.execute('CREATE INDEX idx_job_previous_runs ON job (project_id, status, date_created) WHERE patch_id IS NULL') def downgrade(): op.drop_index('idx_job_previous_runs', 'job')
<commit_before><commit_msg>Add index to cover testgroup_details (previous runs)<commit_after>"""Index Job(project_id, status, date_created) where patch_id IS NULL Revision ID: 3042d0ca43bf Revises: 3a3366fb7822 Create Date: 2014-01-03 15:24:39.947813 """ # revision identifiers, used by Alembic. revision = '3042d0ca43bf' down_revision = '3a3366fb7822' from alembic import op def upgrade(): op.execute('CREATE INDEX idx_job_previous_runs ON job (project_id, status, date_created) WHERE patch_id IS NULL') def downgrade(): op.drop_index('idx_job_previous_runs', 'job')
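The upgrade above issues raw SQL for a PostgreSQL partial index; an approximate equivalent using Alembic's op.create_index is sketched below, assuming the postgresql_where dialect keyword is passed through to the underlying Index:

import sqlalchemy as sa

op.create_index(
    'idx_job_previous_runs', 'job',
    ['project_id', 'status', 'date_created'],
    postgresql_where=sa.text('patch_id IS NULL'),
)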
8b9fe74976d77df32d73792f74ef4ddea1eb525f
config.py
config.py
#! /usr/bin/env python import os import warnings import yaml class Config(object): config_fname = "configuration.yaml" def __init__(self, config_fname=None): config_fname = config_fname or self.config_fname fo = open(config_fname, "r") blob = fo.read() fo.close() self.config = yaml.load(blob) def __getattr__(self, attrname): if attrname == "slack_name": warnings.warn("The `slack_name` key in %s is deprecated in favor of the `SLACK_NAME` environment variable" % self.config_fname, DeprecationWarning) return self.config[attrname] # This deliberately isn't a `getenv` default so `.slack_name` isn't tried if there's a SLACK_NAME SLACK_NAME = os.getenv("SLACK_NAME") if SLACK_NAME is None: SLACK_NAME = Config().slack_name
#! /usr/bin/env python import os import warnings import yaml class Config(object): config_fname = "configuration.yaml" def __init__(self, config_fname=None): config_fname = config_fname or self.config_fname fo = open(config_fname, "r") blob = fo.read() fo.close() self.config = yaml.load(blob) def __getattr__(self, attrname): if attrname == "slack_name": warnings.warn("The `slack_name` key in %s is deprecated in favor of the `SLACK_NAME` environment variable" % self.config_fname, DeprecationWarning) return self.config[attrname] def get(self, attrname, fallback=None): try: return self.config[attrname] except KeyError: return fallback # This deliberately isn't a `getenv` default so `.slack_name` isn't tried if there's a SLACK_NAME SLACK_NAME = os.getenv("SLACK_NAME") if SLACK_NAME is None: SLACK_NAME = Config().slack_name
Add Config.get() to skip KeyErrors
Add Config.get() to skip KeyErrors Adds common `dict.get()` pattern to our own Config class, to enable use of fallbacks or `None`, as appropriate.
Python
apache-2.0
royrapoport/destalinator,TheConnMan/destalinator,royrapoport/destalinator,underarmour/destalinator,randsleadershipslack/destalinator,randsleadershipslack/destalinator,TheConnMan/destalinator
#! /usr/bin/env python import os import warnings import yaml class Config(object): config_fname = "configuration.yaml" def __init__(self, config_fname=None): config_fname = config_fname or self.config_fname fo = open(config_fname, "r") blob = fo.read() fo.close() self.config = yaml.load(blob) def __getattr__(self, attrname): if attrname == "slack_name": warnings.warn("The `slack_name` key in %s is deprecated in favor of the `SLACK_NAME` environment variable" % self.config_fname, DeprecationWarning) return self.config[attrname] # This deliberately isn't a `getenv` default so `.slack_name` isn't tried if there's a SLACK_NAME SLACK_NAME = os.getenv("SLACK_NAME") if SLACK_NAME is None: SLACK_NAME = Config().slack_name Add Config.get() to skip KeyErrors Adds common `dict.get()` pattern to our own Config class, to enable use of fallbacks or `None`, as appropriate.
#! /usr/bin/env python import os import warnings import yaml class Config(object): config_fname = "configuration.yaml" def __init__(self, config_fname=None): config_fname = config_fname or self.config_fname fo = open(config_fname, "r") blob = fo.read() fo.close() self.config = yaml.load(blob) def __getattr__(self, attrname): if attrname == "slack_name": warnings.warn("The `slack_name` key in %s is deprecated in favor of the `SLACK_NAME` environment variable" % self.config_fname, DeprecationWarning) return self.config[attrname] def get(self, attrname, fallback=None): try: return self.config[attrname] except KeyError: return fallback # This deliberately isn't a `getenv` default so `.slack_name` isn't tried if there's a SLACK_NAME SLACK_NAME = os.getenv("SLACK_NAME") if SLACK_NAME is None: SLACK_NAME = Config().slack_name
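A minimal usage sketch of the new accessor; the key name and fallback value here are hypothetical, not taken from destalinator's actual configuration:

    config = Config()
    # returns the configured value when the key exists, the fallback otherwise
    channel = config.get("announce_channel", fallback="#general")
    # with no fallback given, a missing key yields None instead of a KeyError
    maybe_channel = config.get("announce_channel")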
42297354f575e2c82346cf033202c5dfad5ddd99
lib/examples/nacl_amb/utils.py
lib/examples/nacl_amb/utils.py
#!/usr/bin/env python
import numpy


class TrajWriter(object):
    '''
    A class for writing out trajectory traces as an xyz file, for
    subsequent visualization.
    '''
    def __init__(self, trace, w, filename='trace.xyz'):
        self.trace = trace
        self.w = w
        self.filename = filename
        self._write()

    def _get_coords(self, iteration, seg_id):
        self.w.iteration = iteration
        coords = self.w.current.auxdata['coord'][seg_id]
        return coords

    def _write(self):
        all_coords = []
        starting_iteration = self.w.iteration
        for i, iteration in enumerate(self.trace.iteration):
            seg_id = self.trace.seg_id[i]
            coords = self._get_coords(iteration, seg_id)
            # The last timepoint of one iteration is the same as the first
            # timepoint of the last, so skip the last timepoint of each
            # iteration
            coords = coords[:-1]
            all_coords.append(coords)
        self.w.iteration = starting_iteration
        all_coords = numpy.concatenate(all_coords)
        with open(self.filename, 'w') as outfile:
            for i, frame in enumerate(all_coords):
                outfile.write("2\n")
                outfile.write("{0}\n".format(i))
                outfile.write("SOD {0:9.5f} {1:9.5f} {2:9.5f}\n".format(
                    float(frame[0,0]), float(frame[0,1]), float(frame[0,2])))
                outfile.write("CLA {0:9.5f} {1:9.5f} {2:9.5f}\n".format(
                    float(frame[1,0]), float(frame[1,1]), float(frame[1,2])))
Add python class for writing out xyz files of trajectory coordinates
Add python class for writing out xyz files of trajectory coordinates Former-commit-id: cecdece306ef516acf87bde03bbd04cb7f0c761b
Python
mit
westpa/westpa
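A sketch of how the class is meant to be driven, inferred from the constructor signature alone; the `trace` and `w` objects are assumed to be a westpa trace carrying `iteration`/`seg_id` arrays and a w_ipa-style analysis object, not a documented API:

    # writing trace.xyz happens as a side effect of construction
    TrajWriter(trace, w, filename='trace.xyz')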
c1e801798d3b7e8d4c9ba8a11f79ffa92bf182f5
test/test_logger.py
test/test_logger.py
# encoding: utf-8

"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""

from __future__ import print_function
from __future__ import unicode_literals

import logbook
from pingparsing import (
    set_logger,
    set_log_level,
)
import pytest


class Test_set_logger(object):

    @pytest.mark.parametrize(["value"], [
        [True],
        [False],
    ])
    def test_smoke(self, value):
        set_logger(value)


class Test_set_log_level(object):

    @pytest.mark.parametrize(["value"], [
        [logbook.CRITICAL],
        [logbook.ERROR],
        [logbook.WARNING],
        [logbook.NOTICE],
        [logbook.INFO],
        [logbook.DEBUG],
        [logbook.TRACE],
        [logbook.NOTSET],
    ])
    def test_smoke(self, value):
        set_log_level(value)

    @pytest.mark.parametrize(["value", "expected"], [
        [None, LookupError],
        ["unexpected", LookupError],
    ])
    def test_exception(self, value, expected):
        with pytest.raises(expected):
            set_log_level(value)
Add test cases for the logger
Add test cases for the logger
Python
mit
thombashi/pingparsing,thombashi/pingparsing
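The calls exercised by the smoke tests map one-to-one onto library usage; a minimal sketch:

    import logbook
    from pingparsing import set_logger, set_log_level

    set_logger(True)              # enable the library's logger
    set_log_level(logbook.DEBUG)  # any logbook level the tests accept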
74e24debf55b003f1d56d35f4b040d91a0698e0a
example/under-sampling/plot_cluster_centroids.py
example/under-sampling/plot_cluster_centroids.py
""" ================= Cluster centroids ================= An illustration of the cluster centroids method. """ print(__doc__) import matplotlib.pyplot as plt import seaborn as sns sns.set() # Define some color for the plotting almost_black = '#262626' palette = sns.color_palette() from sklearn.datasets import make_classification from sklearn.decomposition import PCA from unbalanced_dataset.under_sampling import ClusterCentroids # Generate the dataset X, y = make_classification(n_classes=2, class_sep=2, weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0, n_features=20, n_clusters_per_class=1, n_samples=5000, random_state=10) # Instanciate a PCA object for the sake of easy visualisation pca = PCA(n_components=2) # Fit and transform x to visualise inside a 2D feature space X_vis = pca.fit_transform(X) # Apply the random under-sampling cc = ClusterCentroids() X_resampled, y_resampled = cc.fit_transform(X, y) X_res_vis = pca.transform(X_resampled) # Two subplots, unpack the axes array immediately f, (ax1, ax2) = plt.subplots(1, 2) ax1.scatter(X_vis[y == 0, 0], X_vis[y == 0, 1], label="Class #0", alpha=0.5, edgecolor=almost_black, facecolor=palette[0], linewidth=0.15) ax1.scatter(X_vis[y == 1, 0], X_vis[y == 1, 1], label="Class #1", alpha=0.5, edgecolor=almost_black, facecolor=palette[2], linewidth=0.15) ax1.set_title('Original set') ax2.scatter(X_res_vis[y_resampled == 0, 0], X_res_vis[y_resampled == 0, 1], label="Class #0", alpha=.5, edgecolor=almost_black, facecolor=palette[0], linewidth=0.15) ax2.scatter(X_res_vis[y_resampled == 1, 0], X_res_vis[y_resampled == 1, 1], label="Class #1", alpha=.5, edgecolor=almost_black, facecolor=palette[2], linewidth=0.15) ax2.set_title('Cluster centroids') plt.show()
Add example for cluster centroids method
Add example for cluster centroids method
Python
mit
dvro/UnbalancedDataset,dvro/imbalanced-learn,fmfn/UnbalancedDataset,scikit-learn-contrib/imbalanced-learn,scikit-learn-contrib/imbalanced-learn,glemaitre/UnbalancedDataset,dvro/UnbalancedDataset,dvro/imbalanced-learn,fmfn/UnbalancedDataset,glemaitre/UnbalancedDataset
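One way to see the effect of the resampling step in this example is to compare class counts before and after; collections.Counter is standard library and not part of the original script:

    from collections import Counter

    print(Counter(y))            # roughly 500 vs 4500, per weights=[0.1, 0.9]
    print(Counter(y_resampled))  # balanced after ClusterCentroids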
8fa9a54c9a5ee683fc9e9d361a4eb7affe5e83ed
game_of_life.py
game_of_life.py
#!/usr/bin/env python

from curses import wrapper
from time import sleep


def enumerate_lines(matrix):
    on = '*'
    off = ' '
    for i, row in enumerate(matrix):
        yield i, ''.join(on if v else off for v in row)


def paint(stdscr, matrix):
    stdscr.clear()
    for i, line in enumerate_lines(matrix):
        stdscr.addstr(i, 0, line)
    stdscr.refresh()


size = 50

m1 = [
    [i == j or i == size - j for j in xrange(0, size + 1)]
    for i in xrange(0, size + 1)
]

m2 = [
    [i == size / 2 or j == size / 2 for j in xrange(0, size + 1)]
    for i in xrange(0, size + 1)
]


def main(stdscr):
    for i in xrange(0, 100):
        matrix = m1 if i % 2 else m2
        paint(stdscr, matrix)
        sleep(0.5)
    stdscr.getkey()

wrapper(main)
Add functions to paint game of life to screen
Add functions to paint game of life to screen
Python
mit
akud/stem-club-presentation,akud/stem-club-presentation,akud/stem-club-presentation
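A worked example of the row-to-string mapping, following the on='*', off=' ' convention in enumerate_lines:

    matrix = [[True, False, True],
              [False, True, False]]
    list(enumerate_lines(matrix))  # [(0, '* *'), (1, ' * ')]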
c98a744f5f436ae2c6266a7bb5d32173cfd0e4a9
scripts/socrata_scraper.py
scripts/socrata_scraper.py
#!/usr/bin/python3
"""
This is a basic script that downloads the catalog data from the smcgov.org
website and pulls out information about all the datasets. This is in python3

There is an optional download_all argument that will allow you to download all
of the datasets individually and in their entirety. I have included this as a
demonstration, but it should not be commonly used because it takes a while and
beats up on the smcgov data portal, which you should avoid.
"""

import sys
import json
import argparse
import collections
import urllib.request

URL = "https://data.smcgov.org/api/catalog?limit=999999999&only=datasets"


def main(args):
    category_data = collections.defaultdict(list)
    domain_data = collections.defaultdict(list)
    data_downloads = []
    datasets_with_location = []

    with urllib.request.urlopen(URL) as raw_data:
        data = json.loads(raw_data.read().decode('utf-8'))

    for result in data['results']:
        categories = result['classification']['categories']
        domain = result['classification']['domain_category']
        if categories is None or categories == []:
            categories = ['NULL']
        permalink = result['permalink']
        data_downloads.append('{}.json'.format(permalink))
        domain_data[domain].append(permalink)
        for category in categories:
            category_data[category].append(permalink)

    if args.download_all:
        for download_url in data_downloads:
            with urllib.request.urlopen(download_url) as dataset_file:
                print('Downloading {}'.format(download_url))
                dataset = json.loads(dataset_file.read().decode('utf-8'))
                if len(dataset) < 1:
                    continue
                if 'location_1' in dataset[0].keys():
                    # Our best guess on which datasets have location info.
                    datasets_with_location.append(download_url)

    if args.download_all:
        print('Datasets with location_1 key')
        print(datasets_with_location)
        print('----------------------------------------------------')

    print('Number of Datasets by Category')
    for key, values in category_data.items():
        print(key, len(values))
    print('----------------------------------------------------')
    print('Number of Datasets by Domain')
    for key, values in domain_data.items():
        print(key, len(values))


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--download_all', help='Download all datasets',
                        action='store_true')
    args = parser.parse_args()
    main(args=args)
Add a script that scrapes the Socrata catalog, just in case we need that in another format
Add a script that scrapes the Socrata catalog, just in case we need that in another format
Python
mit
opensmc/service-locator,opensmc/service-locator,opensmc/service-locator
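Invocation follows directly from the argparse definition at the bottom of the script:

    python3 socrata_scraper.py                 # print summary counts only
    python3 socrata_scraper.py --download_all  # also fetch every dataset (slow)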
0c11d2740e561586bb4f9d2b67bda2ccc87e146e
ixdjango/management/commands/newrelic_notify_deploy.py
ixdjango/management/commands/newrelic_notify_deploy.py
""" Management command to enable New Relic notification of deployments .. moduleauthor:: Infoxchange Development Team <development@infoxchange.net.au> """ import pwd import os from subprocess import Popen, PIPE from urllib import urlencode from httplib2 import Http from django.conf import settings from django.core.management.base import NoArgsCommand import newrelic.agent class Command(NoArgsCommand): """ Loads the fixtures contained inside IX_FIXTURES setting variable. See http://redmine.office.infoxchange.net.au/issues/8376 """ URL = 'https://rpm.newrelic.com/deployments.xml' def handle_noargs(self, **options): newrelic.agent.initialize( settings.NEW_RELIC_CONFIG, settings.NEW_RELIC_ENV ) config = newrelic.agent.global_settings() if not config.monitor_mode: return # get the current git version git = Popen(('git', 'describe'), stdout=PIPE) ver, _ = git.communicate() ver = ver.strip() # get the current user user = pwd.getpwuid(os.getuid()) headers = { 'x-api-key': config.license_key } post = { 'deployment[app_name]': config.app_name, 'deployment[revision]': ver, 'deployment[user]': '%s (%s)' % (user.pw_gecos, user.pw_name), } print "Informing New Relic...", # post this data http = Http() response, _ = http.request(self.URL, 'POST', headers=headers, body=urlencode(post)) print response['status']
Add new command to notify New Relic of deployment
Add new command to notify New Relic of deployment
Python
mit
infoxchange/ixdjango
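As a Django management command, the module name determines the invocation; assuming the ixdjango app is in INSTALLED_APPS, a deploy script would run something like:

    python manage.py newrelic_notify_deploy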
2e2ad49c7ada145b5a4a81bd8941cf5e72d2d81b
rst2pdf/tests/input/test_180.py
rst2pdf/tests/input/test_180.py
# -*- coding: utf-8 -*-

from reportlab.platypus import SimpleDocTemplate
from reportlab.platypus.paragraph import Paragraph
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.colors import Color
from reportlab.platypus.flowables import _listWrapOn, _FUZZ

# The wordaxe imports deliberately shadow the reportlab Paragraph and style
# helpers imported above, so the test exercises wordaxe's implementations.
from wordaxe.rl.NewParagraph import Paragraph
from wordaxe.rl.styles import ParagraphStyle, getSampleStyleSheet


def go():
    styles = getSampleStyleSheet()
    style = styles['Normal']
    p1 = Paragraph('This is a paragraph', style)
    print p1.wrap(500, 701)
    print p1._cache['avail']
    print len(p1.split(500, 701))
    print len(p1.split(500, 700))

go()
Test case for wordaxe bug
Test case for wordaxe bug
Python
mit
thomaspurchas/rst2pdf,thomaspurchas/rst2pdf
c851501cc8149685a9e9c023aa200b92c17a9078
pida_fields.py
pida_fields.py
def decode_name_fields(ida_fields):
    # ida_fields is read as a length-prefixed byte string: each chunk starts
    # with a single byte holding (name length + 1), followed by the name.
    i = -1
    stop = len(ida_fields)
    while True:
        i += 1
        if i == stop:
            break
        count = ord(ida_fields[i]) - 1
        if count == 0:
            # an empty chunk carries no name; skip it
            continue
        i += 1
        yield ida_fields[i:i + count]
        i += count - 1
Add decoder ida fields name
Add decoder ida fields name
Python
mit
goodwinxp/ATFGenerator,goodwinxp/ATFGenerator,goodwinxp/ATFGenerator
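A worked trace of the format this decoder assumes: each prefix byte holds (name length + 1), so ord('\x04') - 1 consumes three characters after it:

    list(decode_name_fields('\x04foo\x04bar'))  # ['foo', 'bar']
    list(decode_name_fields('\x01'))            # [] (empty chunks are skipped)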
e869c7ef9e3d19da4c98cda57b5e22fb5a35cba5
tests/test_validators.py
tests/test_validators.py
""" test_validators ~~~~~~~~~~~~~~ Unittests for bundled validators. :copyright: 2007-2008 by James Crasta, Thomas Johansson. :license: MIT, see LICENSE.txt for details. """ from py.test import raises from wtforms.validators import ValidationError, length, url, not_empty, email, ip_address class DummyForm(object): pass class DummyField(object): def __init__(self, data): self.data = data form = DummyForm() def test_email(): assert email(form, DummyField('foo@bar.dk')) == None assert email(form, DummyField('123@bar.dk')) == None assert email(form, DummyField('foo@456.dk')) == None assert email(form, DummyField('foo@bar456.info')) == None raises(ValidationError, email, form, DummyField('foo')) == None raises(ValidationError, email, form, DummyField('bar.dk')) == None raises(ValidationError, email, form, DummyField('foo@')) == None raises(ValidationError, email, form, DummyField('@bar.dk')) == None raises(ValidationError, email, form, DummyField('foo@bar')) == None raises(ValidationError, email, form, DummyField('foo@bar.ab12')) == None raises(ValidationError, email, form, DummyField('foo@bar.abcde')) == None def test_length(): field = DummyField('foobar') assert length(min=2, max=6)(form, field) == None raises(ValidationError, length(min=7), form, field) raises(ValidationError, length(max=5), form, field) def test_url(): assert url()(form, DummyField('http://foobar.dk')) == None assert url()(form, DummyField('http://foobar.dk/')) == None assert url()(form, DummyField('http://foobar.dk/foobar')) == None raises(ValidationError, url(), form, DummyField('http://foobar')) raises(ValidationError, url(), form, DummyField('foobar.dk')) raises(ValidationError, url(), form, DummyField('http://foobar.12')) def test_not_empty(): assert not_empty()(form, DummyField('foobar')) == None raises(ValidationError, not_empty(), form, DummyField('')) raises(ValidationError, not_empty(), form, DummyField(' ')) def test_ip_address(): assert ip_address(form, DummyField('127.0.0.1')) == None raises(ValidationError, ip_address, form, DummyField('abc.0.0.1')) raises(ValidationError, ip_address, form, DummyField('1278.0.0.1')) raises(ValidationError, ip_address, form, DummyField('127.0.0.abc'))
Add first basic unittests using py.test
Add first basic unittests using py.test
Python
bsd-3-clause
Khan/wtforms
Add first basic unittests using py.test
""" test_validators ~~~~~~~~~~~~~~ Unittests for bundled validators. :copyright: 2007-2008 by James Crasta, Thomas Johansson. :license: MIT, see LICENSE.txt for details. """ from py.test import raises from wtforms.validators import ValidationError, length, url, not_empty, email, ip_address class DummyForm(object): pass class DummyField(object): def __init__(self, data): self.data = data form = DummyForm() def test_email(): assert email(form, DummyField('foo@bar.dk')) == None assert email(form, DummyField('123@bar.dk')) == None assert email(form, DummyField('foo@456.dk')) == None assert email(form, DummyField('foo@bar456.info')) == None raises(ValidationError, email, form, DummyField('foo')) == None raises(ValidationError, email, form, DummyField('bar.dk')) == None raises(ValidationError, email, form, DummyField('foo@')) == None raises(ValidationError, email, form, DummyField('@bar.dk')) == None raises(ValidationError, email, form, DummyField('foo@bar')) == None raises(ValidationError, email, form, DummyField('foo@bar.ab12')) == None raises(ValidationError, email, form, DummyField('foo@bar.abcde')) == None def test_length(): field = DummyField('foobar') assert length(min=2, max=6)(form, field) == None raises(ValidationError, length(min=7), form, field) raises(ValidationError, length(max=5), form, field) def test_url(): assert url()(form, DummyField('http://foobar.dk')) == None assert url()(form, DummyField('http://foobar.dk/')) == None assert url()(form, DummyField('http://foobar.dk/foobar')) == None raises(ValidationError, url(), form, DummyField('http://foobar')) raises(ValidationError, url(), form, DummyField('foobar.dk')) raises(ValidationError, url(), form, DummyField('http://foobar.12')) def test_not_empty(): assert not_empty()(form, DummyField('foobar')) == None raises(ValidationError, not_empty(), form, DummyField('')) raises(ValidationError, not_empty(), form, DummyField(' ')) def test_ip_address(): assert ip_address(form, DummyField('127.0.0.1')) == None raises(ValidationError, ip_address, form, DummyField('abc.0.0.1')) raises(ValidationError, ip_address, form, DummyField('1278.0.0.1')) raises(ValidationError, ip_address, form, DummyField('127.0.0.abc'))
<commit_before><commit_msg>Add first basic unittests using py.test<commit_after>
""" test_validators ~~~~~~~~~~~~~~ Unittests for bundled validators. :copyright: 2007-2008 by James Crasta, Thomas Johansson. :license: MIT, see LICENSE.txt for details. """ from py.test import raises from wtforms.validators import ValidationError, length, url, not_empty, email, ip_address class DummyForm(object): pass class DummyField(object): def __init__(self, data): self.data = data form = DummyForm() def test_email(): assert email(form, DummyField('foo@bar.dk')) == None assert email(form, DummyField('123@bar.dk')) == None assert email(form, DummyField('foo@456.dk')) == None assert email(form, DummyField('foo@bar456.info')) == None raises(ValidationError, email, form, DummyField('foo')) == None raises(ValidationError, email, form, DummyField('bar.dk')) == None raises(ValidationError, email, form, DummyField('foo@')) == None raises(ValidationError, email, form, DummyField('@bar.dk')) == None raises(ValidationError, email, form, DummyField('foo@bar')) == None raises(ValidationError, email, form, DummyField('foo@bar.ab12')) == None raises(ValidationError, email, form, DummyField('foo@bar.abcde')) == None def test_length(): field = DummyField('foobar') assert length(min=2, max=6)(form, field) == None raises(ValidationError, length(min=7), form, field) raises(ValidationError, length(max=5), form, field) def test_url(): assert url()(form, DummyField('http://foobar.dk')) == None assert url()(form, DummyField('http://foobar.dk/')) == None assert url()(form, DummyField('http://foobar.dk/foobar')) == None raises(ValidationError, url(), form, DummyField('http://foobar')) raises(ValidationError, url(), form, DummyField('foobar.dk')) raises(ValidationError, url(), form, DummyField('http://foobar.12')) def test_not_empty(): assert not_empty()(form, DummyField('foobar')) == None raises(ValidationError, not_empty(), form, DummyField('')) raises(ValidationError, not_empty(), form, DummyField(' ')) def test_ip_address(): assert ip_address(form, DummyField('127.0.0.1')) == None raises(ValidationError, ip_address, form, DummyField('abc.0.0.1')) raises(ValidationError, ip_address, form, DummyField('1278.0.0.1')) raises(ValidationError, ip_address, form, DummyField('127.0.0.abc'))
Add first basic unittests using py.test
"""
    test_validators
    ~~~~~~~~~~~~~~~

    Unittests for bundled validators.

    :copyright: 2007-2008 by James Crasta, Thomas Johansson.
    :license: MIT, see LICENSE.txt for details.
"""

from py.test import raises
from wtforms.validators import ValidationError, length, url, not_empty, email, ip_address


class DummyForm(object):
    pass


class DummyField(object):
    def __init__(self, data):
        self.data = data


form = DummyForm()


def test_email():
    assert email(form, DummyField('foo@bar.dk')) == None
    assert email(form, DummyField('123@bar.dk')) == None
    assert email(form, DummyField('foo@456.dk')) == None
    assert email(form, DummyField('foo@bar456.info')) == None
    raises(ValidationError, email, form, DummyField('foo'))
    raises(ValidationError, email, form, DummyField('bar.dk'))
    raises(ValidationError, email, form, DummyField('foo@'))
    raises(ValidationError, email, form, DummyField('@bar.dk'))
    raises(ValidationError, email, form, DummyField('foo@bar'))
    raises(ValidationError, email, form, DummyField('foo@bar.ab12'))
    raises(ValidationError, email, form, DummyField('foo@bar.abcde'))


def test_length():
    field = DummyField('foobar')
    assert length(min=2, max=6)(form, field) == None
    raises(ValidationError, length(min=7), form, field)
    raises(ValidationError, length(max=5), form, field)


def test_url():
    assert url()(form, DummyField('http://foobar.dk')) == None
    assert url()(form, DummyField('http://foobar.dk/')) == None
    assert url()(form, DummyField('http://foobar.dk/foobar')) == None
    raises(ValidationError, url(), form, DummyField('http://foobar'))
    raises(ValidationError, url(), form, DummyField('foobar.dk'))
    raises(ValidationError, url(), form, DummyField('http://foobar.12'))


def test_not_empty():
    assert not_empty()(form, DummyField('foobar')) == None
    raises(ValidationError, not_empty(), form, DummyField(''))
    raises(ValidationError, not_empty(), form, DummyField(' '))


def test_ip_address():
    assert ip_address(form, DummyField('127.0.0.1')) == None
    raises(ValidationError, ip_address, form, DummyField('abc.0.0.1'))
    raises(ValidationError, ip_address, form, DummyField('1278.0.0.1'))
    raises(ValidationError, ip_address, form, DummyField('127.0.0.abc'))
<commit_before><commit_msg>Add first basic unittests using py.test<commit_after>
"""
    test_validators
    ~~~~~~~~~~~~~~~

    Unittests for bundled validators.

    :copyright: 2007-2008 by James Crasta, Thomas Johansson.
    :license: MIT, see LICENSE.txt for details.
"""

from py.test import raises
from wtforms.validators import ValidationError, length, url, not_empty, email, ip_address


class DummyForm(object):
    pass


class DummyField(object):
    def __init__(self, data):
        self.data = data


form = DummyForm()


def test_email():
    assert email(form, DummyField('foo@bar.dk')) == None
    assert email(form, DummyField('123@bar.dk')) == None
    assert email(form, DummyField('foo@456.dk')) == None
    assert email(form, DummyField('foo@bar456.info')) == None
    raises(ValidationError, email, form, DummyField('foo'))
    raises(ValidationError, email, form, DummyField('bar.dk'))
    raises(ValidationError, email, form, DummyField('foo@'))
    raises(ValidationError, email, form, DummyField('@bar.dk'))
    raises(ValidationError, email, form, DummyField('foo@bar'))
    raises(ValidationError, email, form, DummyField('foo@bar.ab12'))
    raises(ValidationError, email, form, DummyField('foo@bar.abcde'))


def test_length():
    field = DummyField('foobar')
    assert length(min=2, max=6)(form, field) == None
    raises(ValidationError, length(min=7), form, field)
    raises(ValidationError, length(max=5), form, field)


def test_url():
    assert url()(form, DummyField('http://foobar.dk')) == None
    assert url()(form, DummyField('http://foobar.dk/')) == None
    assert url()(form, DummyField('http://foobar.dk/foobar')) == None
    raises(ValidationError, url(), form, DummyField('http://foobar'))
    raises(ValidationError, url(), form, DummyField('foobar.dk'))
    raises(ValidationError, url(), form, DummyField('http://foobar.12'))


def test_not_empty():
    assert not_empty()(form, DummyField('foobar')) == None
    raises(ValidationError, not_empty(), form, DummyField(''))
    raises(ValidationError, not_empty(), form, DummyField(' '))


def test_ip_address():
    assert ip_address(form, DummyField('127.0.0.1')) == None
    raises(ValidationError, ip_address, form, DummyField('abc.0.0.1'))
    raises(ValidationError, ip_address, form, DummyField('1278.0.0.1'))
    raises(ValidationError, ip_address, form, DummyField('127.0.0.abc'))
11efa5583bbeeee7c7823264f6f73715ea81edc0
luigi/tests/ontologies/eco_test.py
luigi/tests/ontologies/eco_test.py
# -*- coding: utf-8 -*-

"""
Copyright [2009-2017] EMBL-European Bioinformatics Institute

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""

from ontologies import eco


def test_can_load_all_eco_terms():
    source = eco.TermSources(
        quickgo_file='data/quickgo/rna.gpa'
    )
    assert len(list(eco.to_load(source))) == 6
Add trivial test for ECO fetching
Add trivial test for ECO fetching
Python
apache-2.0
RNAcentral/rnacentral-import-pipeline,RNAcentral/rnacentral-import-pipeline,RNAcentral/rnacentral-import-pipeline,RNAcentral/rnacentral-import-pipeline
Add trivial test for ECO fetching
# -*- coding: utf-8 -*-

"""
Copyright [2009-2017] EMBL-European Bioinformatics Institute

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""

from ontologies import eco


def test_can_load_all_eco_terms():
    source = eco.TermSources(
        quickgo_file='data/quickgo/rna.gpa'
    )
    assert len(list(eco.to_load(source))) == 6
<commit_before><commit_msg>Add trivial test for ECO fetching<commit_after>
# -*- coding: utf-8 -*-

"""
Copyright [2009-2017] EMBL-European Bioinformatics Institute

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""

from ontologies import eco


def test_can_load_all_eco_terms():
    source = eco.TermSources(
        quickgo_file='data/quickgo/rna.gpa'
    )
    assert len(list(eco.to_load(source))) == 6
Add trivial test for ECO fetching
# -*- coding: utf-8 -*-

"""
Copyright [2009-2017] EMBL-European Bioinformatics Institute

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""

from ontologies import eco


def test_can_load_all_eco_terms():
    source = eco.TermSources(
        quickgo_file='data/quickgo/rna.gpa'
    )
    assert len(list(eco.to_load(source))) == 6
<commit_before><commit_msg>Add trivial test for ECO fetching<commit_after>
# -*- coding: utf-8 -*-

"""
Copyright [2009-2017] EMBL-European Bioinformatics Institute

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""

from ontologies import eco


def test_can_load_all_eco_terms():
    source = eco.TermSources(
        quickgo_file='data/quickgo/rna.gpa'
    )
    assert len(list(eco.to_load(source))) == 6
4fdba8a1a5a2123843cc9eefd8949fb8996f59b2
telemetry/telemetry/unittest/run_chromeos_tests.py
telemetry/telemetry/unittest/run_chromeos_tests.py
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import logging
import os
import sys

from telemetry.unittest import gtest_progress_reporter
from telemetry.unittest import run_tests
from telemetry.core import util


def RunTestsForChromeOS(browser_type, unit_tests, perf_tests):
  stream = _LoggingOutputStream()
  error_string = ''

  logging.info('Running telemetry unit tests with browser_type "%s".' %
               browser_type)
  ret = _RunOneSetOfTests(browser_type, 'telemetry',
                          os.path.join('telemetry', 'telemetry'),
                          unit_tests, stream)
  if ret:
    error_string += 'The unit tests failed.\n'

  logging.info('Running telemetry perf tests with browser_type "%s".' %
               browser_type)
  ret = _RunOneSetOfTests(browser_type, 'perf', 'perf', perf_tests, stream)
  if ret:
    # Append rather than assign so an earlier unit-test failure is kept.
    error_string += 'The perf tests failed.\n'
  return error_string


def _RunOneSetOfTests(browser_type, root_dir, sub_dir, tests, stream):
  top_level_dir = os.path.join(util.GetChromiumSrcDir(), 'tools', root_dir)
  sub_dir = os.path.join(util.GetChromiumSrcDir(), 'tools', sub_dir)
  sys.path.append(top_level_dir)
  output_formatters = [gtest_progress_reporter.GTestProgressReporter(stream)]
  run_tests.config = run_tests.Config(top_level_dir, [sub_dir],
                                      output_formatters)
  return run_tests.RunTestsCommand.main(['--browser', browser_type] + tests)


class _LoggingOutputStream(object):

  def __init__(self):
    self._buffer = []

  def write(self, s):
    """Buffer a string write. Log it when we encounter a newline."""
    if '\n' in s:
      segments = s.split('\n')
      segments[0] = ''.join(self._buffer + [segments[0]])
      log_level = logging.getLogger().getEffectiveLevel()
      try:
        # TODO(dtu): We need this because of crbug.com/394571
        logging.getLogger().setLevel(logging.INFO)
        for line in segments[:-1]:
          logging.info(line)
      finally:
        logging.getLogger().setLevel(log_level)
      self._buffer = [segments[-1]]
    else:
      self._buffer.append(s)

  def flush(self):  # pylint: disable=W0612
    pass
Add a wrapper for ChromeOS to call into telemetry.
Add a wrapper for ChromeOS to call into telemetry.

R=dtu@chromium.org, achuith@chromium.org
BUG=402172, 388256

Review URL: https://codereview.chromium.org/682953002

Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#301557}
Python
bsd-3-clause
SummerLW/Perf-Insight-Report,SummerLW/Perf-Insight-Report,catapult-project/catapult,catapult-project/catapult,catapult-project/catapult,catapult-project/catapult-csm,catapult-project/catapult,benschmaus/catapult,benschmaus/catapult,catapult-project/catapult-csm,catapult-project/catapult-csm,catapult-project/catapult,SummerLW/Perf-Insight-Report,sahiljain/catapult,catapult-project/catapult,SummerLW/Perf-Insight-Report,sahiljain/catapult,catapult-project/catapult-csm,catapult-project/catapult-csm,SummerLW/Perf-Insight-Report,sahiljain/catapult,SummerLW/Perf-Insight-Report,catapult-project/catapult,catapult-project/catapult-csm,benschmaus/catapult,sahiljain/catapult,benschmaus/catapult,benschmaus/catapult,sahiljain/catapult,benschmaus/catapult,sahiljain/catapult,catapult-project/catapult-csm,benschmaus/catapult
Add a wrapper for ChromeOS to call into telemetry.

R=dtu@chromium.org, achuith@chromium.org
BUG=402172, 388256

Review URL: https://codereview.chromium.org/682953002

Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#301557}
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import logging
import os
import sys

from telemetry.unittest import gtest_progress_reporter
from telemetry.unittest import run_tests
from telemetry.core import util


def RunTestsForChromeOS(browser_type, unit_tests, perf_tests):
  stream = _LoggingOutputStream()
  error_string = ''

  logging.info('Running telemetry unit tests with browser_type "%s".' %
               browser_type)
  ret = _RunOneSetOfTests(browser_type, 'telemetry',
                          os.path.join('telemetry', 'telemetry'),
                          unit_tests, stream)
  if ret:
    error_string += 'The unit tests failed.\n'

  logging.info('Running telemetry perf tests with browser_type "%s".' %
               browser_type)
  ret = _RunOneSetOfTests(browser_type, 'perf', 'perf', perf_tests, stream)
  if ret:
    # Append rather than assign so an earlier unit-test failure is kept.
    error_string += 'The perf tests failed.\n'
  return error_string


def _RunOneSetOfTests(browser_type, root_dir, sub_dir, tests, stream):
  top_level_dir = os.path.join(util.GetChromiumSrcDir(), 'tools', root_dir)
  sub_dir = os.path.join(util.GetChromiumSrcDir(), 'tools', sub_dir)
  sys.path.append(top_level_dir)
  output_formatters = [gtest_progress_reporter.GTestProgressReporter(stream)]
  run_tests.config = run_tests.Config(top_level_dir, [sub_dir],
                                      output_formatters)
  return run_tests.RunTestsCommand.main(['--browser', browser_type] + tests)


class _LoggingOutputStream(object):

  def __init__(self):
    self._buffer = []

  def write(self, s):
    """Buffer a string write. Log it when we encounter a newline."""
    if '\n' in s:
      segments = s.split('\n')
      segments[0] = ''.join(self._buffer + [segments[0]])
      log_level = logging.getLogger().getEffectiveLevel()
      try:
        # TODO(dtu): We need this because of crbug.com/394571
        logging.getLogger().setLevel(logging.INFO)
        for line in segments[:-1]:
          logging.info(line)
      finally:
        logging.getLogger().setLevel(log_level)
      self._buffer = [segments[-1]]
    else:
      self._buffer.append(s)

  def flush(self):  # pylint: disable=W0612
    pass
<commit_before><commit_msg>Add a wrapper for ChromeOS to call into telemetry.

R=dtu@chromium.org, achuith@chromium.org
BUG=402172, 388256

Review URL: https://codereview.chromium.org/682953002

Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#301557}<commit_after>
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import logging
import os
import sys

from telemetry.unittest import gtest_progress_reporter
from telemetry.unittest import run_tests
from telemetry.core import util


def RunTestsForChromeOS(browser_type, unit_tests, perf_tests):
  stream = _LoggingOutputStream()
  error_string = ''

  logging.info('Running telemetry unit tests with browser_type "%s".' %
               browser_type)
  ret = _RunOneSetOfTests(browser_type, 'telemetry',
                          os.path.join('telemetry', 'telemetry'),
                          unit_tests, stream)
  if ret:
    error_string += 'The unit tests failed.\n'

  logging.info('Running telemetry perf tests with browser_type "%s".' %
               browser_type)
  ret = _RunOneSetOfTests(browser_type, 'perf', 'perf', perf_tests, stream)
  if ret:
    # Append rather than assign so an earlier unit-test failure is kept.
    error_string += 'The perf tests failed.\n'
  return error_string


def _RunOneSetOfTests(browser_type, root_dir, sub_dir, tests, stream):
  top_level_dir = os.path.join(util.GetChromiumSrcDir(), 'tools', root_dir)
  sub_dir = os.path.join(util.GetChromiumSrcDir(), 'tools', sub_dir)
  sys.path.append(top_level_dir)
  output_formatters = [gtest_progress_reporter.GTestProgressReporter(stream)]
  run_tests.config = run_tests.Config(top_level_dir, [sub_dir],
                                      output_formatters)
  return run_tests.RunTestsCommand.main(['--browser', browser_type] + tests)


class _LoggingOutputStream(object):

  def __init__(self):
    self._buffer = []

  def write(self, s):
    """Buffer a string write. Log it when we encounter a newline."""
    if '\n' in s:
      segments = s.split('\n')
      segments[0] = ''.join(self._buffer + [segments[0]])
      log_level = logging.getLogger().getEffectiveLevel()
      try:
        # TODO(dtu): We need this because of crbug.com/394571
        logging.getLogger().setLevel(logging.INFO)
        for line in segments[:-1]:
          logging.info(line)
      finally:
        logging.getLogger().setLevel(log_level)
      self._buffer = [segments[-1]]
    else:
      self._buffer.append(s)

  def flush(self):  # pylint: disable=W0612
    pass
Add a wrapper for ChromeOS to call into telemetry.

R=dtu@chromium.org, achuith@chromium.org
BUG=402172, 388256

Review URL: https://codereview.chromium.org/682953002

Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#301557}
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import logging
import os
import sys

from telemetry.unittest import gtest_progress_reporter
from telemetry.unittest import run_tests
from telemetry.core import util


def RunTestsForChromeOS(browser_type, unit_tests, perf_tests):
  stream = _LoggingOutputStream()
  error_string = ''

  logging.info('Running telemetry unit tests with browser_type "%s".' %
               browser_type)
  ret = _RunOneSetOfTests(browser_type, 'telemetry',
                          os.path.join('telemetry', 'telemetry'),
                          unit_tests, stream)
  if ret:
    error_string += 'The unit tests failed.\n'

  logging.info('Running telemetry perf tests with browser_type "%s".' %
               browser_type)
  ret = _RunOneSetOfTests(browser_type, 'perf', 'perf', perf_tests, stream)
  if ret:
    # Append rather than assign so an earlier unit-test failure is kept.
    error_string += 'The perf tests failed.\n'
  return error_string


def _RunOneSetOfTests(browser_type, root_dir, sub_dir, tests, stream):
  top_level_dir = os.path.join(util.GetChromiumSrcDir(), 'tools', root_dir)
  sub_dir = os.path.join(util.GetChromiumSrcDir(), 'tools', sub_dir)
  sys.path.append(top_level_dir)
  output_formatters = [gtest_progress_reporter.GTestProgressReporter(stream)]
  run_tests.config = run_tests.Config(top_level_dir, [sub_dir],
                                      output_formatters)
  return run_tests.RunTestsCommand.main(['--browser', browser_type] + tests)


class _LoggingOutputStream(object):

  def __init__(self):
    self._buffer = []

  def write(self, s):
    """Buffer a string write. Log it when we encounter a newline."""
    if '\n' in s:
      segments = s.split('\n')
      segments[0] = ''.join(self._buffer + [segments[0]])
      log_level = logging.getLogger().getEffectiveLevel()
      try:
        # TODO(dtu): We need this because of crbug.com/394571
        logging.getLogger().setLevel(logging.INFO)
        for line in segments[:-1]:
          logging.info(line)
      finally:
        logging.getLogger().setLevel(log_level)
      self._buffer = [segments[-1]]
    else:
      self._buffer.append(s)

  def flush(self):  # pylint: disable=W0612
    pass
<commit_before><commit_msg>Add a wrapper for ChromeOS to call into telemetry.

R=dtu@chromium.org, achuith@chromium.org
BUG=402172, 388256

Review URL: https://codereview.chromium.org/682953002

Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#301557}<commit_after>
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import logging
import os
import sys

from telemetry.unittest import gtest_progress_reporter
from telemetry.unittest import run_tests
from telemetry.core import util


def RunTestsForChromeOS(browser_type, unit_tests, perf_tests):
  stream = _LoggingOutputStream()
  error_string = ''

  logging.info('Running telemetry unit tests with browser_type "%s".' %
               browser_type)
  ret = _RunOneSetOfTests(browser_type, 'telemetry',
                          os.path.join('telemetry', 'telemetry'),
                          unit_tests, stream)
  if ret:
    error_string += 'The unit tests failed.\n'

  logging.info('Running telemetry perf tests with browser_type "%s".' %
               browser_type)
  ret = _RunOneSetOfTests(browser_type, 'perf', 'perf', perf_tests, stream)
  if ret:
    # Append rather than assign so an earlier unit-test failure is kept.
    error_string += 'The perf tests failed.\n'
  return error_string


def _RunOneSetOfTests(browser_type, root_dir, sub_dir, tests, stream):
  top_level_dir = os.path.join(util.GetChromiumSrcDir(), 'tools', root_dir)
  sub_dir = os.path.join(util.GetChromiumSrcDir(), 'tools', sub_dir)
  sys.path.append(top_level_dir)
  output_formatters = [gtest_progress_reporter.GTestProgressReporter(stream)]
  run_tests.config = run_tests.Config(top_level_dir, [sub_dir],
                                      output_formatters)
  return run_tests.RunTestsCommand.main(['--browser', browser_type] + tests)


class _LoggingOutputStream(object):

  def __init__(self):
    self._buffer = []

  def write(self, s):
    """Buffer a string write. Log it when we encounter a newline."""
    if '\n' in s:
      segments = s.split('\n')
      segments[0] = ''.join(self._buffer + [segments[0]])
      log_level = logging.getLogger().getEffectiveLevel()
      try:
        # TODO(dtu): We need this because of crbug.com/394571
        logging.getLogger().setLevel(logging.INFO)
        for line in segments[:-1]:
          logging.info(line)
      finally:
        logging.getLogger().setLevel(log_level)
      self._buffer = [segments[-1]]
    else:
      self._buffer.append(s)

  def flush(self):  # pylint: disable=W0612
    pass
2ef9618e705bb293641674ca5e7cc1f14daf3483
migrations/versions/0285_default_org_branding.py
migrations/versions/0285_default_org_branding.py
"""empty message Revision ID: 0285_default_org_branding Revises: 0284_0283_retry Create Date: 2016-10-25 17:37:27.660723 """ # revision identifiers, used by Alembic. revision = '0285_default_org_branding' down_revision = '0284_0283_retry' from alembic import op import sqlalchemy as sa BRANDING_TABLES = ('email_branding', 'letter_branding') def upgrade(): for branding in BRANDING_TABLES: op.execute(""" UPDATE organisation SET {branding}_id = {branding}.id FROM {branding} WHERE {branding}.domain in ( SELECT domain FROM domain WHERE domain.organisation_id = organisation.id ) """.format(branding=branding)) def downgrade(): for branding in BRANDING_TABLES: op.execute(""" UPDATE organisation SET {branding}_id = null """.format(branding=branding))
Set default branding for all organisations
Set default branding for all organisations

Currently when someone creates a service we match them to an organisation.
Then if the organisation has a default branding set, their service gets
that branding. However none of the organisations yet have a default
branding set up.

This commit migrates the data about the relationship between an
organisation and its branding from being inferred from the `domain` field
on the branding, to being a proper database relationship.
Python
mit
alphagov/notifications-api,alphagov/notifications-api
Set default branding for all organisations

Currently when someone creates a service we match them to an organisation.
Then if the organisation has a default branding set, their service gets
that branding. However none of the organisations yet have a default
branding set up.

This commit migrates the data about the relationship between an
organisation and its branding from being inferred from the `domain` field
on the branding, to being a proper database relationship.
"""empty message Revision ID: 0285_default_org_branding Revises: 0284_0283_retry Create Date: 2016-10-25 17:37:27.660723 """ # revision identifiers, used by Alembic. revision = '0285_default_org_branding' down_revision = '0284_0283_retry' from alembic import op import sqlalchemy as sa BRANDING_TABLES = ('email_branding', 'letter_branding') def upgrade(): for branding in BRANDING_TABLES: op.execute(""" UPDATE organisation SET {branding}_id = {branding}.id FROM {branding} WHERE {branding}.domain in ( SELECT domain FROM domain WHERE domain.organisation_id = organisation.id ) """.format(branding=branding)) def downgrade(): for branding in BRANDING_TABLES: op.execute(""" UPDATE organisation SET {branding}_id = null """.format(branding=branding))
<commit_before><commit_msg>Set default branding for all organisations

Currently when someone creates a service we match them to an organisation.
Then if the organisation has a default branding set, their service gets
that branding. However none of the organisations yet have a default
branding set up.

This commit migrates the data about the relationship between an
organisation and its branding from being inferred from the `domain` field
on the branding, to being a proper database relationship.<commit_after>
"""empty message Revision ID: 0285_default_org_branding Revises: 0284_0283_retry Create Date: 2016-10-25 17:37:27.660723 """ # revision identifiers, used by Alembic. revision = '0285_default_org_branding' down_revision = '0284_0283_retry' from alembic import op import sqlalchemy as sa BRANDING_TABLES = ('email_branding', 'letter_branding') def upgrade(): for branding in BRANDING_TABLES: op.execute(""" UPDATE organisation SET {branding}_id = {branding}.id FROM {branding} WHERE {branding}.domain in ( SELECT domain FROM domain WHERE domain.organisation_id = organisation.id ) """.format(branding=branding)) def downgrade(): for branding in BRANDING_TABLES: op.execute(""" UPDATE organisation SET {branding}_id = null """.format(branding=branding))
Set default branding for all organisations

Currently when someone creates a service we match them to an organisation.
Then if the organisation has a default branding set, their service gets
that branding. However none of the organisations yet have a default
branding set up.

This commit migrates the data about the relationship between an
organisation and its branding from being inferred from the `domain` field
on the branding, to being a proper database relationship.
"""empty message

Revision ID: 0285_default_org_branding
Revises: 0284_0283_retry
Create Date: 2016-10-25 17:37:27.660723

"""

# revision identifiers, used by Alembic.
revision = '0285_default_org_branding'
down_revision = '0284_0283_retry'

from alembic import op
import sqlalchemy as sa

BRANDING_TABLES = ('email_branding', 'letter_branding')


def upgrade():
    for branding in BRANDING_TABLES:
        op.execute("""
            UPDATE organisation
            SET {branding}_id = {branding}.id
            FROM {branding}
            WHERE {branding}.domain in (
                SELECT domain
                FROM domain
                WHERE domain.organisation_id = organisation.id
            )
        """.format(branding=branding))


def downgrade():
    for branding in BRANDING_TABLES:
        op.execute("""
            UPDATE organisation
            SET {branding}_id = null
        """.format(branding=branding))
<commit_before><commit_msg>Set default branding for all organisations

Currently when someone creates a service we match them to an organisation.
Then if the organisation has a default branding set, their service gets
that branding. However none of the organisations yet have a default
branding set up.

This commit migrates the data about the relationship between an
organisation and its branding from being inferred from the `domain` field
on the branding, to being a proper database relationship.<commit_after>
"""empty message

Revision ID: 0285_default_org_branding
Revises: 0284_0283_retry
Create Date: 2016-10-25 17:37:27.660723

"""

# revision identifiers, used by Alembic.
revision = '0285_default_org_branding'
down_revision = '0284_0283_retry'

from alembic import op
import sqlalchemy as sa

BRANDING_TABLES = ('email_branding', 'letter_branding')


def upgrade():
    for branding in BRANDING_TABLES:
        op.execute("""
            UPDATE organisation
            SET {branding}_id = {branding}.id
            FROM {branding}
            WHERE {branding}.domain in (
                SELECT domain
                FROM domain
                WHERE domain.organisation_id = organisation.id
            )
        """.format(branding=branding))


def downgrade():
    for branding in BRANDING_TABLES:
        op.execute("""
            UPDATE organisation
            SET {branding}_id = null
        """.format(branding=branding))
1708eb17fb9c232414b0e162754ca31b6fd9366c
services/comprehension/main-api/comprehension/tests/management/commands/test_pre_filter_responses.py
services/comprehension/main-api/comprehension/tests/management/commands/test_pre_filter_responses.py
import csv
from io import StringIO
from unittest.mock import call, MagicMock, patch

from django.test import TestCase

from ....views.plagiarism import PlagiarismFeedbackView
from ....management.commands import pre_filter_responses

Command = pre_filter_responses.Command


class TestCommandBase(TestCase):
    def setUp(self):
        self.command = Command()


class TestPreFilterResponsesCommand(TestCommandBase):
    def test_add_arguments(self):
        mock_parser = MagicMock()
        self.command.add_arguments(mock_parser)

        self.assertEqual(mock_parser.add_argument.call_count, 2)
        mock_parser.assert_has_calls([
            call.add_argument('passage_source', metavar='PASSAGE_SOURCE',
                              help='The path to the file with the passage'),
            call.add_argument('csv_input', metavar='CSV_PATH',
                              help='The path to the input CSV file'),
        ])

    @patch.object(PlagiarismFeedbackView, '_check_is_plagiarism')
    @patch.object(Command, '_retrieve_passage')
    @patch.object(csv, 'reader')
    @patch.object(csv, 'writer')
    @patch(f'{pre_filter_responses.__name__}.open')
    def test_handle(self, mock_open, mock_writer, mock_reader,
                    mock_retrieve, mock_check_plagiarism):
        mock_csv_input = 'MOCK_CSV_INPUT'
        kwargs = {
            'passage_source': 'MOCK_PASSAGE_SOURCE',
            'csv_input': mock_csv_input,
        }
        mock_handler = mock_open.return_value
        mock_file_content = StringIO('HEADER\nVALUE')
        mock_handler.__enter__.return_value = mock_file_content
        mock_reader_row = 'MOCK_ROW'
        mock_reader.next.return_value = mock_reader_row
        mock_check_plagiarism.return_value = False

        self.command.handle(**kwargs)

        mock_open.assert_has_calls([
            call(mock_csv_input, 'r'),
            call().__enter__(),
            call(f'filtered_{mock_csv_input}', 'w'),
            call().__enter__(),
            call().__exit__(None, None, None),
            call().__exit__(None, None, None),
        ])
        mock_retrieve.assert_called_with(kwargs['passage_source'])
        mock_writer.assert_called()
Add tests for plagiarism filter command
Add tests for plagiarism filter command
Python
agpl-3.0
empirical-org/Empirical-Core,empirical-org/Empirical-Core,empirical-org/Empirical-Core,empirical-org/Empirical-Core,empirical-org/Empirical-Core,empirical-org/Empirical-Core,empirical-org/Empirical-Core
Add tests for plagiarism filter command
import csv
from io import StringIO
from unittest.mock import call, MagicMock, patch

from django.test import TestCase

from ....views.plagiarism import PlagiarismFeedbackView
from ....management.commands import pre_filter_responses

Command = pre_filter_responses.Command


class TestCommandBase(TestCase):
    def setUp(self):
        self.command = Command()


class TestPreFilterResponsesCommand(TestCommandBase):
    def test_add_arguments(self):
        mock_parser = MagicMock()
        self.command.add_arguments(mock_parser)

        self.assertEqual(mock_parser.add_argument.call_count, 2)
        mock_parser.assert_has_calls([
            call.add_argument('passage_source', metavar='PASSAGE_SOURCE',
                              help='The path to the file with the passage'),
            call.add_argument('csv_input', metavar='CSV_PATH',
                              help='The path to the input CSV file'),
        ])

    @patch.object(PlagiarismFeedbackView, '_check_is_plagiarism')
    @patch.object(Command, '_retrieve_passage')
    @patch.object(csv, 'reader')
    @patch.object(csv, 'writer')
    @patch(f'{pre_filter_responses.__name__}.open')
    def test_handle(self, mock_open, mock_writer, mock_reader,
                    mock_retrieve, mock_check_plagiarism):
        mock_csv_input = 'MOCK_CSV_INPUT'
        kwargs = {
            'passage_source': 'MOCK_PASSAGE_SOURCE',
            'csv_input': mock_csv_input,
        }
        mock_handler = mock_open.return_value
        mock_file_content = StringIO('HEADER\nVALUE')
        mock_handler.__enter__.return_value = mock_file_content
        mock_reader_row = 'MOCK_ROW'
        mock_reader.next.return_value = mock_reader_row
        mock_check_plagiarism.return_value = False

        self.command.handle(**kwargs)

        mock_open.assert_has_calls([
            call(mock_csv_input, 'r'),
            call().__enter__(),
            call(f'filtered_{mock_csv_input}', 'w'),
            call().__enter__(),
            call().__exit__(None, None, None),
            call().__exit__(None, None, None),
        ])
        mock_retrieve.assert_called_with(kwargs['passage_source'])
        mock_writer.assert_called()
<commit_before><commit_msg>Add tests for plagiarism filter command<commit_after>
import csv
from io import StringIO
from unittest.mock import call, MagicMock, patch

from django.test import TestCase

from ....views.plagiarism import PlagiarismFeedbackView
from ....management.commands import pre_filter_responses

Command = pre_filter_responses.Command


class TestCommandBase(TestCase):
    def setUp(self):
        self.command = Command()


class TestPreFilterResponsesCommand(TestCommandBase):
    def test_add_arguments(self):
        mock_parser = MagicMock()
        self.command.add_arguments(mock_parser)

        self.assertEqual(mock_parser.add_argument.call_count, 2)
        mock_parser.assert_has_calls([
            call.add_argument('passage_source', metavar='PASSAGE_SOURCE',
                              help='The path to the file with the passage'),
            call.add_argument('csv_input', metavar='CSV_PATH',
                              help='The path to the input CSV file'),
        ])

    @patch.object(PlagiarismFeedbackView, '_check_is_plagiarism')
    @patch.object(Command, '_retrieve_passage')
    @patch.object(csv, 'reader')
    @patch.object(csv, 'writer')
    @patch(f'{pre_filter_responses.__name__}.open')
    def test_handle(self, mock_open, mock_writer, mock_reader,
                    mock_retrieve, mock_check_plagiarism):
        mock_csv_input = 'MOCK_CSV_INPUT'
        kwargs = {
            'passage_source': 'MOCK_PASSAGE_SOURCE',
            'csv_input': mock_csv_input,
        }
        mock_handler = mock_open.return_value
        mock_file_content = StringIO('HEADER\nVALUE')
        mock_handler.__enter__.return_value = mock_file_content
        mock_reader_row = 'MOCK_ROW'
        mock_reader.next.return_value = mock_reader_row
        mock_check_plagiarism.return_value = False

        self.command.handle(**kwargs)

        mock_open.assert_has_calls([
            call(mock_csv_input, 'r'),
            call().__enter__(),
            call(f'filtered_{mock_csv_input}', 'w'),
            call().__enter__(),
            call().__exit__(None, None, None),
            call().__exit__(None, None, None),
        ])
        mock_retrieve.assert_called_with(kwargs['passage_source'])
        mock_writer.assert_called()
Add tests for plagiarism filter command
import csv
from io import StringIO
from unittest.mock import call, MagicMock, patch

from django.test import TestCase

from ....views.plagiarism import PlagiarismFeedbackView
from ....management.commands import pre_filter_responses

Command = pre_filter_responses.Command


class TestCommandBase(TestCase):
    def setUp(self):
        self.command = Command()


class TestPreFilterResponsesCommand(TestCommandBase):
    def test_add_arguments(self):
        mock_parser = MagicMock()
        self.command.add_arguments(mock_parser)

        self.assertEqual(mock_parser.add_argument.call_count, 2)
        mock_parser.assert_has_calls([
            call.add_argument('passage_source', metavar='PASSAGE_SOURCE',
                              help='The path to the file with the passage'),
            call.add_argument('csv_input', metavar='CSV_PATH',
                              help='The path to the input CSV file'),
        ])

    @patch.object(PlagiarismFeedbackView, '_check_is_plagiarism')
    @patch.object(Command, '_retrieve_passage')
    @patch.object(csv, 'reader')
    @patch.object(csv, 'writer')
    @patch(f'{pre_filter_responses.__name__}.open')
    def test_handle(self, mock_open, mock_writer, mock_reader,
                    mock_retrieve, mock_check_plagiarism):
        mock_csv_input = 'MOCK_CSV_INPUT'
        kwargs = {
            'passage_source': 'MOCK_PASSAGE_SOURCE',
            'csv_input': mock_csv_input,
        }
        mock_handler = mock_open.return_value
        mock_file_content = StringIO('HEADER\nVALUE')
        mock_handler.__enter__.return_value = mock_file_content
        mock_reader_row = 'MOCK_ROW'
        mock_reader.next.return_value = mock_reader_row
        mock_check_plagiarism.return_value = False

        self.command.handle(**kwargs)

        mock_open.assert_has_calls([
            call(mock_csv_input, 'r'),
            call().__enter__(),
            call(f'filtered_{mock_csv_input}', 'w'),
            call().__enter__(),
            call().__exit__(None, None, None),
            call().__exit__(None, None, None),
        ])
        mock_retrieve.assert_called_with(kwargs['passage_source'])
        mock_writer.assert_called()
<commit_before><commit_msg>Add tests for plagiarism filter command<commit_after>
import csv
from io import StringIO
from unittest.mock import call, MagicMock, patch

from django.test import TestCase

from ....views.plagiarism import PlagiarismFeedbackView
from ....management.commands import pre_filter_responses

Command = pre_filter_responses.Command


class TestCommandBase(TestCase):
    def setUp(self):
        self.command = Command()


class TestPreFilterResponsesCommand(TestCommandBase):
    def test_add_arguments(self):
        mock_parser = MagicMock()
        self.command.add_arguments(mock_parser)

        self.assertEqual(mock_parser.add_argument.call_count, 2)
        mock_parser.assert_has_calls([
            call.add_argument('passage_source', metavar='PASSAGE_SOURCE',
                              help='The path to the file with the passage'),
            call.add_argument('csv_input', metavar='CSV_PATH',
                              help='The path to the input CSV file'),
        ])

    @patch.object(PlagiarismFeedbackView, '_check_is_plagiarism')
    @patch.object(Command, '_retrieve_passage')
    @patch.object(csv, 'reader')
    @patch.object(csv, 'writer')
    @patch(f'{pre_filter_responses.__name__}.open')
    def test_handle(self, mock_open, mock_writer, mock_reader,
                    mock_retrieve, mock_check_plagiarism):
        mock_csv_input = 'MOCK_CSV_INPUT'
        kwargs = {
            'passage_source': 'MOCK_PASSAGE_SOURCE',
            'csv_input': mock_csv_input,
        }
        mock_handler = mock_open.return_value
        mock_file_content = StringIO('HEADER\nVALUE')
        mock_handler.__enter__.return_value = mock_file_content
        mock_reader_row = 'MOCK_ROW'
        mock_reader.next.return_value = mock_reader_row
        mock_check_plagiarism.return_value = False

        self.command.handle(**kwargs)

        mock_open.assert_has_calls([
            call(mock_csv_input, 'r'),
            call().__enter__(),
            call(f'filtered_{mock_csv_input}', 'w'),
            call().__enter__(),
            call().__exit__(None, None, None),
            call().__exit__(None, None, None),
        ])
        mock_retrieve.assert_called_with(kwargs['passage_source'])
        mock_writer.assert_called()
797114781ed4f31c265c58a76e39aa8ff6a16443
tensorpack/utils/compatible_serialize.py
tensorpack/utils/compatible_serialize.py
#!/usr/bin/env python

import os

from .serialize import loads_msgpack, loads_pyarrow, dumps_msgpack, dumps_pyarrow

"""
Serialization that has compatibility guarantee (therefore is safe to store to disk).
"""

__all__ = ['loads', 'dumps']


# pyarrow has no compatibility guarantee
# use msgpack for persistent serialization, unless explicitly set from envvar
if os.environ.get('TENSORPACK_COMPATIBLE_SERIALIZE', 'msgpack') == 'msgpack':
    loads = loads_msgpack
    dumps = dumps_msgpack
else:
    loads = loads_pyarrow
    dumps = dumps_pyarrow
Add missing file from last commit
Add missing file from last commit
Python
apache-2.0
ppwwyyxx/tensorpack,eyaler/tensorpack,eyaler/tensorpack,ppwwyyxx/tensorpack
Add missing file from last commit
#!/usr/bin/env python

import os

from .serialize import loads_msgpack, loads_pyarrow, dumps_msgpack, dumps_pyarrow

"""
Serialization that has compatibility guarantee (therefore is safe to store to disk).
"""

__all__ = ['loads', 'dumps']


# pyarrow has no compatibility guarantee
# use msgpack for persistent serialization, unless explicitly set from envvar
if os.environ.get('TENSORPACK_COMPATIBLE_SERIALIZE', 'msgpack') == 'msgpack':
    loads = loads_msgpack
    dumps = dumps_msgpack
else:
    loads = loads_pyarrow
    dumps = dumps_pyarrow
<commit_before><commit_msg>Add missing file from last commit<commit_after>
#!/usr/bin/env python

import os

from .serialize import loads_msgpack, loads_pyarrow, dumps_msgpack, dumps_pyarrow

"""
Serialization that has compatibility guarantee (therefore is safe to store to disk).
"""

__all__ = ['loads', 'dumps']


# pyarrow has no compatibility guarantee
# use msgpack for persistent serialization, unless explicitly set from envvar
if os.environ.get('TENSORPACK_COMPATIBLE_SERIALIZE', 'msgpack') == 'msgpack':
    loads = loads_msgpack
    dumps = dumps_msgpack
else:
    loads = loads_pyarrow
    dumps = dumps_pyarrow
Add missing file from last commit
#!/usr/bin/env python

import os

from .serialize import loads_msgpack, loads_pyarrow, dumps_msgpack, dumps_pyarrow

"""
Serialization that has compatibility guarantee (therefore is safe to store to disk).
"""

__all__ = ['loads', 'dumps']


# pyarrow has no compatibility guarantee
# use msgpack for persistent serialization, unless explicitly set from envvar
if os.environ.get('TENSORPACK_COMPATIBLE_SERIALIZE', 'msgpack') == 'msgpack':
    loads = loads_msgpack
    dumps = dumps_msgpack
else:
    loads = loads_pyarrow
    dumps = dumps_pyarrow
<commit_before><commit_msg>Add missing file from last commit<commit_after>
#!/usr/bin/env python

import os

from .serialize import loads_msgpack, loads_pyarrow, dumps_msgpack, dumps_pyarrow

"""
Serialization that has compatibility guarantee (therefore is safe to store to disk).
"""

__all__ = ['loads', 'dumps']


# pyarrow has no compatibility guarantee
# use msgpack for persistent serialization, unless explicitly set from envvar
if os.environ.get('TENSORPACK_COMPATIBLE_SERIALIZE', 'msgpack') == 'msgpack':
    loads = loads_msgpack
    dumps = dumps_msgpack
else:
    loads = loads_pyarrow
    dumps = dumps_pyarrow
cfb39d7389d63a293dc075d420f80276a34df193
examples/pygstc/simple_pipeline.py
examples/pygstc/simple_pipeline.py
import sys

from pygstc.gstc import *
from pygstc.logger import *

#Create a custom logger with loglevel=DEBUG
gstd_logger = CustomLogger('simple_pipeline', loglevel='DEBUG')
#Create the client with the logger
gstd_client = GstdClient(logger=gstd_logger)

def printError():
    print("To play run: python3 simple_pipeline.py play VIDEO_PATH")
    print("To stop run: python3 simple_pipeline.py stop")
    print("To play in reverse run: python3 simple_pipeline.py reverse")
    print("To play in slow motion run: python3 simple_pipeline.py slow_motion")

if(len(sys.argv) > 1):
    if(sys.argv[1]=="play"):
        FILE_SOURCE = sys.argv[2]
        #pipeline is the string with the pipeline description
        pipeline = "playbin uri=file:"+FILE_SOURCE
        #Following instructions create and play the pipeline
        gstd_client.pipeline_create ("p0", pipeline)
        gstd_client.pipeline_play ("p0")
        print("Playing")
    #reverse and slow_motion send a seek event to the running pipeline
    elif(sys.argv[1]== "reverse"):
        gstd_client.event_seek("p0", rate=-1.0, format=3, flags=1,
                               start_type=1, start=0, end_type=1, end=-1)
        print("Playing in reverse")
    elif(sys.argv[1]== "slow_motion"):
        gstd_client.event_seek("p0", rate=0.5, format=3, flags=1,
                               start_type=1, start=0, end_type=1, end=-1)
        print("Playing in slow motion")
    elif(sys.argv[1]== "stop"):
        #Following instructions stop and delete the pipeline
        gstd_client.pipeline_stop ("p0")
        gstd_client.pipeline_delete ("p0")
        print("Pipeline deleted")
    else:
        printError()
else:
    printError()
Add minimal pygstc example to play a video
Add minimal pygstc example to play a video
Python
lgpl-2.1
RidgeRun/gstd-1.x,RidgeRun/gstd-1.x,RidgeRun/gstd-1.x,RidgeRun/gstd-1.x
Add minimal pygstc example to play a video
import sys

from pygstc.gstc import *
from pygstc.logger import *

#Create a custom logger with loglevel=DEBUG
gstd_logger = CustomLogger('simple_pipeline', loglevel='DEBUG')
#Create the client with the logger
gstd_client = GstdClient(logger=gstd_logger)

def printError():
    print("To play run: python3 simple_pipeline.py play VIDEO_PATH")
    print("To stop run: python3 simple_pipeline.py stop")
    print("To play in reverse run: python3 simple_pipeline.py reverse")
    print("To play in slow motion run: python3 simple_pipeline.py slow_motion")

if(len(sys.argv) > 1):
    if(sys.argv[1]=="play"):
        FILE_SOURCE = sys.argv[2]
        #pipeline is the string with the pipeline description
        pipeline = "playbin uri=file:"+FILE_SOURCE
        #Following instructions create and play the pipeline
        gstd_client.pipeline_create ("p0", pipeline)
        gstd_client.pipeline_play ("p0")
        print("Playing")
    #reverse and slow_motion send a seek event to the running pipeline
    elif(sys.argv[1]== "reverse"):
        gstd_client.event_seek("p0", rate=-1.0, format=3, flags=1,
                               start_type=1, start=0, end_type=1, end=-1)
        print("Playing in reverse")
    elif(sys.argv[1]== "slow_motion"):
        gstd_client.event_seek("p0", rate=0.5, format=3, flags=1,
                               start_type=1, start=0, end_type=1, end=-1)
        print("Playing in slow motion")
    elif(sys.argv[1]== "stop"):
        #Following instructions stop and delete the pipeline
        gstd_client.pipeline_stop ("p0")
        gstd_client.pipeline_delete ("p0")
        print("Pipeline deleted")
    else:
        printError()
else:
    printError()
<commit_before><commit_msg>Add minimal pygstc example to play a video<commit_after>
import sys

from pygstc.gstc import *
from pygstc.logger import *

#Create a custom logger with loglevel=DEBUG
gstd_logger = CustomLogger('simple_pipeline', loglevel='DEBUG')
#Create the client with the logger
gstd_client = GstdClient(logger=gstd_logger)

def printError():
    print("To play run: python3 simple_pipeline.py play VIDEO_PATH")
    print("To stop run: python3 simple_pipeline.py stop")
    print("To play in reverse run: python3 simple_pipeline.py reverse")
    print("To play in slow motion run: python3 simple_pipeline.py slow_motion")

if(len(sys.argv) > 1):
    if(sys.argv[1]=="play"):
        FILE_SOURCE = sys.argv[2]
        #pipeline is the string with the pipeline description
        pipeline = "playbin uri=file:"+FILE_SOURCE
        #Following instructions create and play the pipeline
        gstd_client.pipeline_create ("p0", pipeline)
        gstd_client.pipeline_play ("p0")
        print("Playing")
    #reverse and slow_motion send a seek event to the running pipeline
    elif(sys.argv[1]== "reverse"):
        gstd_client.event_seek("p0", rate=-1.0, format=3, flags=1,
                               start_type=1, start=0, end_type=1, end=-1)
        print("Playing in reverse")
    elif(sys.argv[1]== "slow_motion"):
        gstd_client.event_seek("p0", rate=0.5, format=3, flags=1,
                               start_type=1, start=0, end_type=1, end=-1)
        print("Playing in slow motion")
    elif(sys.argv[1]== "stop"):
        #Following instructions stop and delete the pipeline
        gstd_client.pipeline_stop ("p0")
        gstd_client.pipeline_delete ("p0")
        print("Pipeline deleted")
    else:
        printError()
else:
    printError()
Add minimal pygstc example to play a video
import sys

from pygstc.gstc import *
from pygstc.logger import *

#Create a custom logger with loglevel=DEBUG
gstd_logger = CustomLogger('simple_pipeline', loglevel='DEBUG')
#Create the client with the logger
gstd_client = GstdClient(logger=gstd_logger)

def printError():
    print("To play run: python3 simple_pipeline.py play VIDEO_PATH")
    print("To stop run: python3 simple_pipeline.py stop")
    print("To play in reverse run: python3 simple_pipeline.py reverse")
    print("To play in slow motion run: python3 simple_pipeline.py slow_motion")

if(len(sys.argv) > 1):
    if(sys.argv[1]=="play"):
        FILE_SOURCE = sys.argv[2]
        #pipeline is the string with the pipeline description
        pipeline = "playbin uri=file:"+FILE_SOURCE
        #Following instructions create and play the pipeline
        gstd_client.pipeline_create ("p0", pipeline)
        gstd_client.pipeline_play ("p0")
        print("Playing")
    #reverse and slow_motion send a seek event to the running pipeline
    elif(sys.argv[1]== "reverse"):
        gstd_client.event_seek("p0", rate=-1.0, format=3, flags=1,
                               start_type=1, start=0, end_type=1, end=-1)
        print("Playing in reverse")
    elif(sys.argv[1]== "slow_motion"):
        gstd_client.event_seek("p0", rate=0.5, format=3, flags=1,
                               start_type=1, start=0, end_type=1, end=-1)
        print("Playing in slow motion")
    elif(sys.argv[1]== "stop"):
        #Following instructions stop and delete the pipeline
        gstd_client.pipeline_stop ("p0")
        gstd_client.pipeline_delete ("p0")
        print("Pipeline deleted")
    else:
        printError()
else:
    printError()
<commit_before><commit_msg>Add minimal pygstc example to play a video<commit_after>
import sys

from pygstc.gstc import *
from pygstc.logger import *

#Create a custom logger with loglevel=DEBUG
gstd_logger = CustomLogger('simple_pipeline', loglevel='DEBUG')
#Create the client with the logger
gstd_client = GstdClient(logger=gstd_logger)

def printError():
    print("To play run: python3 simple_pipeline.py play VIDEO_PATH")
    print("To stop run: python3 simple_pipeline.py stop")
    print("To play in reverse run: python3 simple_pipeline.py reverse")
    print("To play in slow motion run: python3 simple_pipeline.py slow_motion")

if(len(sys.argv) > 1):
    if(sys.argv[1]=="play"):
        FILE_SOURCE = sys.argv[2]
        #pipeline is the string with the pipeline description
        pipeline = "playbin uri=file:"+FILE_SOURCE
        #Following instructions create and play the pipeline
        gstd_client.pipeline_create ("p0", pipeline)
        gstd_client.pipeline_play ("p0")
        print("Playing")
    #reverse and slow_motion send a seek event to the running pipeline
    elif(sys.argv[1]== "reverse"):
        gstd_client.event_seek("p0", rate=-1.0, format=3, flags=1,
                               start_type=1, start=0, end_type=1, end=-1)
        print("Playing in reverse")
    elif(sys.argv[1]== "slow_motion"):
        gstd_client.event_seek("p0", rate=0.5, format=3, flags=1,
                               start_type=1, start=0, end_type=1, end=-1)
        print("Playing in slow motion")
    elif(sys.argv[1]== "stop"):
        #Following instructions stop and delete the pipeline
        gstd_client.pipeline_stop ("p0")
        gstd_client.pipeline_delete ("p0")
        print("Pipeline deleted")
    else:
        printError()
else:
    printError()
f8d06f85e896c1098f58667c161d920f6d255d7b
sendmail/log_mail.py
sendmail/log_mail.py
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from smtplib import SMTP_SSL
from datetime import datetime

# Parameters:
smtp_host = 'smtp.qboxmail.com'
smtp_port = 465  # implicit TLS port, so connect with SMTP_SSL
smtp_user = 'account@example.it'
smtp_password = 'password'
from_address = 'from@example.it'
to_address = 'dest@example.it'
subject = 'Subject'
body = 'body'

# Send mail:
smtp = SMTP_SSL(smtp_host, smtp_port)
smtp.set_debuglevel(0)
smtp.login(smtp_user, smtp_password)
date = datetime.now().strftime('%Y-%m-%d %H:%M')
smtp.sendmail(
    from_address, to_address,
    'From: %s\nTo: %s\nSubject: %s\nDate: %s\n\n%s' % (
        from_address, to_address, subject, date, body,
        ),
    )
smtp.quit()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
Add utility for sent mail
Add utility for sent mail
Python
agpl-3.0
Micronaet/micronaet-utility,Micronaet/micronaet-utility,Micronaet/micronaet-utility,Micronaet/micronaet-utility
Add utility for sent mail
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from smtplib import SMTP_SSL
from datetime import datetime

# Parameters:
smtp_host = 'smtp.qboxmail.com'
smtp_port = 465  # implicit TLS port, so connect with SMTP_SSL
smtp_user = 'account@example.it'
smtp_password = 'password'
from_address = 'from@example.it'
to_address = 'dest@example.it'
subject = 'Subject'
body = 'body'

# Send mail:
smtp = SMTP_SSL(smtp_host, smtp_port)
smtp.set_debuglevel(0)
smtp.login(smtp_user, smtp_password)
date = datetime.now().strftime('%Y-%m-%d %H:%M')
smtp.sendmail(
    from_address, to_address,
    'From: %s\nTo: %s\nSubject: %s\nDate: %s\n\n%s' % (
        from_address, to_address, subject, date, body,
        ),
    )
smtp.quit()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
<commit_before><commit_msg>Add utility for sent mail<commit_after>
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from smtplib import SMTP_SSL
from datetime import datetime

# Parameters:
smtp_host = 'smtp.qboxmail.com'
smtp_port = 465  # implicit TLS port, so connect with SMTP_SSL
smtp_user = 'account@example.it'
smtp_password = 'password'
from_address = 'from@example.it'
to_address = 'dest@example.it'
subject = 'Subject'
body = 'body'

# Send mail:
smtp = SMTP_SSL(smtp_host, smtp_port)
smtp.set_debuglevel(0)
smtp.login(smtp_user, smtp_password)
date = datetime.now().strftime('%Y-%m-%d %H:%M')
smtp.sendmail(
    from_address, to_address,
    'From: %s\nTo: %s\nSubject: %s\nDate: %s\n\n%s' % (
        from_address, to_address, subject, date, body,
        ),
    )
smtp.quit()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
Add utility for sent mail
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from smtplib import SMTP_SSL
from datetime import datetime

# Parameters:
smtp_host = 'smtp.qboxmail.com'
smtp_port = 465  # implicit TLS port, so connect with SMTP_SSL
smtp_user = 'account@example.it'
smtp_password = 'password'
from_address = 'from@example.it'
to_address = 'dest@example.it'
subject = 'Subject'
body = 'body'

# Send mail:
smtp = SMTP_SSL(smtp_host, smtp_port)
smtp.set_debuglevel(0)
smtp.login(smtp_user, smtp_password)
date = datetime.now().strftime('%Y-%m-%d %H:%M')
smtp.sendmail(
    from_address, to_address,
    'From: %s\nTo: %s\nSubject: %s\nDate: %s\n\n%s' % (
        from_address, to_address, subject, date, body,
        ),
    )
smtp.quit()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
<commit_before><commit_msg>Add utility for sent mail<commit_after># -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os
import sys
from smtplib import SMTP
from datetime import datetime

# Parameter:
smtp_host = 'smtp.qboxmail.com'
smtp_port = 465
smtp_user = 'account@example.it'
smtp_password = 'password'
from_address = 'from@example.it'
to_address = 'dest@example.it'
subject = 'Subject'
body = 'body'

# Send mail:
smtp = SMTP()
smtp.set_debuglevel(0)
smtp.connect(smtp_host, smtp_port)
smtp.login(smtp_user, smtp_password)

date = datetime.now().strftime('%Y-%m-%d %H:%M')
smtp.sendmail(
    from_address, to_address,
    'From: %s\nTo: %s\nSubject: %s\nDate: %s\n\n%s' % (
        from_address, to_address, subject, date, body,
        ),
    )
smtp.quit()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
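Note on the transport used above: port 465 conventionally expects an implicit-TLS connection, so a plain SMTP().connect() against it will usually stall before login() ever runs. A minimal sketch of the same send path over smtplib.SMTP_SSL, reusing the placeholder host, credentials and addresses from the commit (none of them real):

# Hypothetical implicit-TLS variant of the utility above.
from datetime import datetime
from smtplib import SMTP_SSL

smtp = SMTP_SSL('smtp.qboxmail.com', 465)  # TLS handshake happens on connect
smtp.login('account@example.it', 'password')
date = datetime.now().strftime('%Y-%m-%d %H:%M')
smtp.sendmail(
    'from@example.it', 'dest@example.it',
    'From: from@example.it\nTo: dest@example.it\nSubject: Subject\n'
    'Date: %s\n\nbody' % date)
smtp.quit()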
425a8e26d371038f6ebf7c80dd7faea0f1dd906e
nodeconductor/core/tests/unittests/test_admin.py
nodeconductor/core/tests/unittests/test_admin.py
from django.contrib import admin
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse

User = get_user_model()


class TestAdminEndpoints(TestCase):
    def setUp(self):
        user, _ = User.objects.get_or_create(username='username', is_staff=True)
        self.client.force_login(user)
        self.admin_site_name = admin.site.name

    def _reverse_url(self, path):
        return reverse('%s:%s' % (self.admin_site_name, path))

    def test_app_list_urls_can_be_queried(self):
        app_list_urls = dict()
        for model in admin.site._registry:
            app_list_url = reverse('%s:%s' % (self.admin_site_name, 'app_list'), args=(model._meta.app_label,))
            app_list_urls.update({model._meta.app_label: app_list_url})

        for url in app_list_urls.values():
            response = self.client.get(url)
            self.assertEqual(response.status_code, 200)

    def test_base_admin_site_urls_can_be_queried(self):
        pages = ['index', 'login', 'logout', 'password_change', 'password_change_done', 'jsi18n']
        for name in pages:
            url = self._reverse_url(name)
            response = self.client.get(url)
            self.assertIn(response.status_code, [200, 302])

    def test_changelist_urls_can_be_queried(self):
        for model in admin.site._registry:
            url = self._reverse_url('%s_%s_changelist' % (model._meta.app_label, model._meta.model_name))
            response = self.client.get(url)
            self.assertEqual(response.status_code, 200)

    def test_add_urls_can_be_queried(self):
        for model in admin.site._registry:
            model_fullname = '%s_%s' % (model._meta.app_label, model._meta.model_name)
            url = self._reverse_url('%s_add' % model_fullname)
            response = self.client.get(url)
            self.assertIn(response.status_code, [200, 403])
Add base test for admin endpoints
Add base test for admin endpoints [WAL-883]
Python
mit
opennode/nodeconductor,opennode/nodeconductor,opennode/nodeconductor
Add base test for admin endpoints [WAL-883]
from django.contrib import admin
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse

User = get_user_model()


class TestAdminEndpoints(TestCase):
    def setUp(self):
        user, _ = User.objects.get_or_create(username='username', is_staff=True)
        self.client.force_login(user)
        self.admin_site_name = admin.site.name

    def _reverse_url(self, path):
        return reverse('%s:%s' % (self.admin_site_name, path))

    def test_app_list_urls_can_be_queried(self):
        app_list_urls = dict()
        for model in admin.site._registry:
            app_list_url = reverse('%s:%s' % (self.admin_site_name, 'app_list'), args=(model._meta.app_label,))
            app_list_urls.update({model._meta.app_label: app_list_url})

        for url in app_list_urls.values():
            response = self.client.get(url)
            self.assertEqual(response.status_code, 200)

    def test_base_admin_site_urls_can_be_queried(self):
        pages = ['index', 'login', 'logout', 'password_change', 'password_change_done', 'jsi18n']
        for name in pages:
            url = self._reverse_url(name)
            response = self.client.get(url)
            self.assertIn(response.status_code, [200, 302])

    def test_changelist_urls_can_be_queried(self):
        for model in admin.site._registry:
            url = self._reverse_url('%s_%s_changelist' % (model._meta.app_label, model._meta.model_name))
            response = self.client.get(url)
            self.assertEqual(response.status_code, 200)

    def test_add_urls_can_be_queried(self):
        for model in admin.site._registry:
            model_fullname = '%s_%s' % (model._meta.app_label, model._meta.model_name)
            url = self._reverse_url('%s_add' % model_fullname)
            response = self.client.get(url)
            self.assertIn(response.status_code, [200, 403])
<commit_before><commit_msg>Add base test for admin endpoints [WAL-883]<commit_after>
from django.contrib import admin
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse

User = get_user_model()


class TestAdminEndpoints(TestCase):
    def setUp(self):
        user, _ = User.objects.get_or_create(username='username', is_staff=True)
        self.client.force_login(user)
        self.admin_site_name = admin.site.name

    def _reverse_url(self, path):
        return reverse('%s:%s' % (self.admin_site_name, path))

    def test_app_list_urls_can_be_queried(self):
        app_list_urls = dict()
        for model in admin.site._registry:
            app_list_url = reverse('%s:%s' % (self.admin_site_name, 'app_list'), args=(model._meta.app_label,))
            app_list_urls.update({model._meta.app_label: app_list_url})

        for url in app_list_urls.values():
            response = self.client.get(url)
            self.assertEqual(response.status_code, 200)

    def test_base_admin_site_urls_can_be_queried(self):
        pages = ['index', 'login', 'logout', 'password_change', 'password_change_done', 'jsi18n']
        for name in pages:
            url = self._reverse_url(name)
            response = self.client.get(url)
            self.assertIn(response.status_code, [200, 302])

    def test_changelist_urls_can_be_queried(self):
        for model in admin.site._registry:
            url = self._reverse_url('%s_%s_changelist' % (model._meta.app_label, model._meta.model_name))
            response = self.client.get(url)
            self.assertEqual(response.status_code, 200)

    def test_add_urls_can_be_queried(self):
        for model in admin.site._registry:
            model_fullname = '%s_%s' % (model._meta.app_label, model._meta.model_name)
            url = self._reverse_url('%s_add' % model_fullname)
            response = self.client.get(url)
            self.assertIn(response.status_code, [200, 403])
Add base test for admin endpoints [WAL-883]from django.contrib import admin
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse

User = get_user_model()


class TestAdminEndpoints(TestCase):
    def setUp(self):
        user, _ = User.objects.get_or_create(username='username', is_staff=True)
        self.client.force_login(user)
        self.admin_site_name = admin.site.name

    def _reverse_url(self, path):
        return reverse('%s:%s' % (self.admin_site_name, path))

    def test_app_list_urls_can_be_queried(self):
        app_list_urls = dict()
        for model in admin.site._registry:
            app_list_url = reverse('%s:%s' % (self.admin_site_name, 'app_list'), args=(model._meta.app_label,))
            app_list_urls.update({model._meta.app_label: app_list_url})

        for url in app_list_urls.values():
            response = self.client.get(url)
            self.assertEqual(response.status_code, 200)

    def test_base_admin_site_urls_can_be_queried(self):
        pages = ['index', 'login', 'logout', 'password_change', 'password_change_done', 'jsi18n']
        for name in pages:
            url = self._reverse_url(name)
            response = self.client.get(url)
            self.assertIn(response.status_code, [200, 302])

    def test_changelist_urls_can_be_queried(self):
        for model in admin.site._registry:
            url = self._reverse_url('%s_%s_changelist' % (model._meta.app_label, model._meta.model_name))
            response = self.client.get(url)
            self.assertEqual(response.status_code, 200)

    def test_add_urls_can_be_queried(self):
        for model in admin.site._registry:
            model_fullname = '%s_%s' % (model._meta.app_label, model._meta.model_name)
            url = self._reverse_url('%s_add' % model_fullname)
            response = self.client.get(url)
            self.assertIn(response.status_code, [200, 403])
<commit_before><commit_msg>Add base test for admin endpoints [WAL-883]<commit_after>from django.contrib import admin
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse

User = get_user_model()


class TestAdminEndpoints(TestCase):
    def setUp(self):
        user, _ = User.objects.get_or_create(username='username', is_staff=True)
        self.client.force_login(user)
        self.admin_site_name = admin.site.name

    def _reverse_url(self, path):
        return reverse('%s:%s' % (self.admin_site_name, path))

    def test_app_list_urls_can_be_queried(self):
        app_list_urls = dict()
        for model in admin.site._registry:
            app_list_url = reverse('%s:%s' % (self.admin_site_name, 'app_list'), args=(model._meta.app_label,))
            app_list_urls.update({model._meta.app_label: app_list_url})

        for url in app_list_urls.values():
            response = self.client.get(url)
            self.assertEqual(response.status_code, 200)

    def test_base_admin_site_urls_can_be_queried(self):
        pages = ['index', 'login', 'logout', 'password_change', 'password_change_done', 'jsi18n']
        for name in pages:
            url = self._reverse_url(name)
            response = self.client.get(url)
            self.assertIn(response.status_code, [200, 302])

    def test_changelist_urls_can_be_queried(self):
        for model in admin.site._registry:
            url = self._reverse_url('%s_%s_changelist' % (model._meta.app_label, model._meta.model_name))
            response = self.client.get(url)
            self.assertEqual(response.status_code, 200)

    def test_add_urls_can_be_queried(self):
        for model in admin.site._registry:
            model_fullname = '%s_%s' % (model._meta.app_label, model._meta.model_name)
            url = self._reverse_url('%s_add' % model_fullname)
            response = self.client.get(url)
            self.assertIn(response.status_code, [200, 403])
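These tests lean on two Django admin conventions: admin.site._registry maps every registered model class to its ModelAdmin instance, and each model's admin views get namespaced URL names of the form <app_label>_<model_name>_<action>. A short sketch of the same reversal for one concrete model; auth.User is assumed to be registered, as it is in a default project:

from django.contrib import admin
from django.contrib.auth.models import User
from django.urls import reverse

assert User in admin.site._registry  # registered by django.contrib.auth
opts = User._meta
changelist = reverse('admin:%s_%s_changelist' % (opts.app_label, opts.model_name))
add = reverse('admin:%s_%s_add' % (opts.app_label, opts.model_name))
# With the default URL config these resolve to
# '/admin/auth/user/' and '/admin/auth/user/add/'.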
8ce2da2ed2e445480ee2e10483a5fae1c7c677a0
lib/output_view.py
lib/output_view.py
import sublime
import sublime_plugin

###-----------------------------------------------------------------------------

def output_to_view(window, title, content, reuse=True, syntax=None, clear=True, settings=None):
    if not isinstance(content, str):
        content = "\n".join (content)

    view = None
    if reuse:
        for _view in window.views ():
            if _view.name () == title:
                view = _view
                break

    if view is None:
        view = window.new_file ()
        view.set_scratch (True)
        view.set_name (title)
        if syntax is not None:
            view.assign_syntax (syntax)
    else:
        view.set_read_only (False)
        if clear is True:
            view.sel ().clear ()
            view.sel ().add (sublime.Region (0, view.size ()))
            view.run_command ("left_delete")
        if window.active_view () != view:
            window.focus_view (view)

    if settings is not None:
        for setting in settings:
            view.settings ().set (setting, settings[setting])

    # Save current buffer size, selection information and view position
    saved_size = view.size ()
    saved_sel = list(view.sel ())
    saved_position = view.viewport_position ()

    # Single select, position cursor at end of file, insert the data
    view.sel ().clear ()
    view.sel ().add (sublime.Region (saved_size, saved_size))
    view.run_command ("insert", {"characters": content})

    # If the last selection was at the end of the buffer, replace that selection
    # with the new end of the buffer so the relative position remains the same.
    if sublime.Region (saved_size, saved_size) == saved_sel[-1]:
        saved_sel[-1] = sublime.Region (view.size (), view.size ())

    # Clear current selection and add original selection back
    view.sel ().clear ()
    for region in saved_sel:
        view.sel ().add (region)

    view.set_viewport_position (saved_position, False)
    view.set_read_only (True)

###-----------------------------------------------------------------------------
Include self contained method for output to a view
Include self contained method for output to a view This uses an enhanced version of the code that is currently in the override_audit command file, making it more general purpose. The new method can send output to a view, either reusing the one with the same title as is being used, or creating a new one. A syntax can be applied as well as arbitrary settings, and the buffer can be cleared if desired. In the case that the buffer is not cleared, the code takes care to retain the current selection information and viewport position and put it back to that state later.
Python
mit
OdatNurd/OverrideAudit
Include self contained method for output to a view This uses an enhanced version of the code that is currently in the override_audit command file, making it more general purpose. The new method can send output to a view, either reusing the one with the same title as is being used, or creating a new one. A syntax can be applied as well as arbitrary settings, and the buffer can be cleared if desired. In the case that the buffer is not cleared, the code takes care to retain the current selection information and viewport position and put it back to that state later.
import sublime
import sublime_plugin

###-----------------------------------------------------------------------------

def output_to_view(window, title, content, reuse=True, syntax=None, clear=True, settings=None):
    if not isinstance(content, str):
        content = "\n".join (content)

    view = None
    if reuse:
        for _view in window.views ():
            if _view.name () == title:
                view = _view
                break

    if view is None:
        view = window.new_file ()
        view.set_scratch (True)
        view.set_name (title)
        if syntax is not None:
            view.assign_syntax (syntax)
    else:
        view.set_read_only (False)
        if clear is True:
            view.sel ().clear ()
            view.sel ().add (sublime.Region (0, view.size ()))
            view.run_command ("left_delete")
        if window.active_view () != view:
            window.focus_view (view)

    if settings is not None:
        for setting in settings:
            view.settings ().set (setting, settings[setting])

    # Save current buffer size, selection information and view position
    saved_size = view.size ()
    saved_sel = list(view.sel ())
    saved_position = view.viewport_position ()

    # Single select, position cursor at end of file, insert the data
    view.sel ().clear ()
    view.sel ().add (sublime.Region (saved_size, saved_size))
    view.run_command ("insert", {"characters": content})

    # If the last selection was at the end of the buffer, replace that selection
    # with the new end of the buffer so the relative position remains the same.
    if sublime.Region (saved_size, saved_size) == saved_sel[-1]:
        saved_sel[-1] = sublime.Region (view.size (), view.size ())

    # Clear current selection and add original selection back
    view.sel ().clear ()
    for region in saved_sel:
        view.sel ().add (region)

    view.set_viewport_position (saved_position, False)
    view.set_read_only (True)

###-----------------------------------------------------------------------------
<commit_before><commit_msg>Include self contained method for output to a view This uses an enhanced version of the code that is currently in the override_audit command file, making it more general purpose. The new method can send output to a view, either reusing the one with the same title as is being used, or creating a new one. A syntax can be applied as well as arbitrary settings, and the buffer can be cleared if desired. In the case that the buffer is not cleared, the code takes care to retain the current selection information and viewport position and put it back to that state later.<commit_after>
import sublime
import sublime_plugin

###-----------------------------------------------------------------------------

def output_to_view(window, title, content, reuse=True, syntax=None, clear=True, settings=None):
    if not isinstance(content, str):
        content = "\n".join (content)

    view = None
    if reuse:
        for _view in window.views ():
            if _view.name () == title:
                view = _view
                break

    if view is None:
        view = window.new_file ()
        view.set_scratch (True)
        view.set_name (title)
        if syntax is not None:
            view.assign_syntax (syntax)
    else:
        view.set_read_only (False)
        if clear is True:
            view.sel ().clear ()
            view.sel ().add (sublime.Region (0, view.size ()))
            view.run_command ("left_delete")
        if window.active_view () != view:
            window.focus_view (view)

    if settings is not None:
        for setting in settings:
            view.settings ().set (setting, settings[setting])

    # Save current buffer size, selection information and view position
    saved_size = view.size ()
    saved_sel = list(view.sel ())
    saved_position = view.viewport_position ()

    # Single select, position cursor at end of file, insert the data
    view.sel ().clear ()
    view.sel ().add (sublime.Region (saved_size, saved_size))
    view.run_command ("insert", {"characters": content})

    # If the last selection was at the end of the buffer, replace that selection
    # with the new end of the buffer so the relative position remains the same.
    if sublime.Region (saved_size, saved_size) == saved_sel[-1]:
        saved_sel[-1] = sublime.Region (view.size (), view.size ())

    # Clear current selection and add original selection back
    view.sel ().clear ()
    for region in saved_sel:
        view.sel ().add (region)

    view.set_viewport_position (saved_position, False)
    view.set_read_only (True)

###-----------------------------------------------------------------------------
Include self contained method for output to a view

This uses an enhanced version of the code that is currently in the
override_audit command file, making it more general purpose.

The new method can send output to a view, either reusing the one with
the same title as is being used, or creating a new one. A syntax can be
applied as well as arbitrary settings, and the buffer can be cleared if
desired.

In the case that the buffer is not cleared, the code takes care to
retain the current selection information and viewport position and put
it back to that state later.import sublime
import sublime_plugin

###-----------------------------------------------------------------------------

def output_to_view(window, title, content, reuse=True, syntax=None, clear=True, settings=None):
    if not isinstance(content, str):
        content = "\n".join (content)

    view = None
    if reuse:
        for _view in window.views ():
            if _view.name () == title:
                view = _view
                break

    if view is None:
        view = window.new_file ()
        view.set_scratch (True)
        view.set_name (title)
        if syntax is not None:
            view.assign_syntax (syntax)
    else:
        view.set_read_only (False)
        if clear is True:
            view.sel ().clear ()
            view.sel ().add (sublime.Region (0, view.size ()))
            view.run_command ("left_delete")
        if window.active_view () != view:
            window.focus_view (view)

    if settings is not None:
        for setting in settings:
            view.settings ().set (setting, settings[setting])

    # Save current buffer size, selection information and view position
    saved_size = view.size ()
    saved_sel = list(view.sel ())
    saved_position = view.viewport_position ()

    # Single select, position cursor at end of file, insert the data
    view.sel ().clear ()
    view.sel ().add (sublime.Region (saved_size, saved_size))
    view.run_command ("insert", {"characters": content})

    # If the last selection was at the end of the buffer, replace that selection
    # with the new end of the buffer so the relative position remains the same.
    if sublime.Region (saved_size, saved_size) == saved_sel[-1]:
        saved_sel[-1] = sublime.Region (view.size (), view.size ())

    # Clear current selection and add original selection back
    view.sel ().clear ()
    for region in saved_sel:
        view.sel ().add (region)

    view.set_viewport_position (saved_position, False)
    view.set_read_only (True)

###-----------------------------------------------------------------------------
<commit_before><commit_msg>Include self contained method for output to a view

This uses an enhanced version of the code that is currently in the
override_audit command file, making it more general purpose.

The new method can send output to a view, either reusing the one with
the same title as is being used, or creating a new one. A syntax can be
applied as well as arbitrary settings, and the buffer can be cleared if
desired.

In the case that the buffer is not cleared, the code takes care to
retain the current selection information and viewport position and put
it back to that state later.<commit_after>import sublime
import sublime_plugin

###-----------------------------------------------------------------------------

def output_to_view(window, title, content, reuse=True, syntax=None, clear=True, settings=None):
    if not isinstance(content, str):
        content = "\n".join (content)

    view = None
    if reuse:
        for _view in window.views ():
            if _view.name () == title:
                view = _view
                break

    if view is None:
        view = window.new_file ()
        view.set_scratch (True)
        view.set_name (title)
        if syntax is not None:
            view.assign_syntax (syntax)
    else:
        view.set_read_only (False)
        if clear is True:
            view.sel ().clear ()
            view.sel ().add (sublime.Region (0, view.size ()))
            view.run_command ("left_delete")
        if window.active_view () != view:
            window.focus_view (view)

    if settings is not None:
        for setting in settings:
            view.settings ().set (setting, settings[setting])

    # Save current buffer size, selection information and view position
    saved_size = view.size ()
    saved_sel = list(view.sel ())
    saved_position = view.viewport_position ()

    # Single select, position cursor at end of file, insert the data
    view.sel ().clear ()
    view.sel ().add (sublime.Region (saved_size, saved_size))
    view.run_command ("insert", {"characters": content})

    # If the last selection was at the end of the buffer, replace that selection
    # with the new end of the buffer so the relative position remains the same.
    if sublime.Region (saved_size, saved_size) == saved_sel[-1]:
        saved_sel[-1] = sublime.Region (view.size (), view.size ())

    # Clear current selection and add original selection back
    view.sel ().clear ()
    for region in saved_sel:
        view.sel ().add (region)

    view.set_viewport_position (saved_position, False)
    view.set_read_only (True)

###-----------------------------------------------------------------------------
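A typical caller of output_to_view would be a WindowCommand that regenerates a report view on demand. The sketch below is hypothetical (the command name, import path and report lines are made up), but each argument matches the signature above:

import sublime_plugin
from .output_view import output_to_view  # import path is assumed

class ShowReportCommand(sublime_plugin.WindowCommand):
    def run(self):
        lines = ["Report", "======", "3 overrides found"]
        # Reuse an open "Report" view if there is one; clear=False appends
        # below the previous run instead of wiping it.
        output_to_view(self.window, "Report", lines,
                       reuse=True, clear=False,
                       settings={"word_wrap": False})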
50e24b0445f259d975e5dd78dd34a8e760e4ed88
DB.py
DB.py
# Create a database import sqlite3 import csv from datetime import datetime import sys reload(sys) sys.setdefaultencoding('utf8') class createDB(): def readCSV(self, filename): conn = sqlite3.connect('CIUK.db') print 'DB Creation Successful!' cur = conn.cursor() # cur.execute('''DROP TABLE PRODUCTS;''') cur.execute('''CREATE TABLE PRODUCTS (ID INTEGER PRIMARY KEY AUTOINCREMENT, TITLE TEXT NOT NULL, DESCRIPTION TEXT NOT NULL, PRICE INTEGER NOT NULL, CREATED_AT TIMESTAMP, UPDATED_AT TIMESTAMP);''') print 'Table Creation Successful!' with open(filename) as f: reader = csv.reader(f) for row in reader: cur.execute("INSERT INTO PRODUCTS VALUES (null, ?, ?, ?, ?, ?);", (unicode(row[0]), unicode(row[1]), unicode(row[2]), datetime.now(), datetime.now())) print 'Successfully read data from CSV file!' conn.commit() conn.close() c = createDB().readCSV('products.csv')
Create SQLite database and table and insert data from CSV file
Create SQLite database and table and insert data from CSV file
Python
mit
joykuotw/python-endpoints,joykuotw/python-endpoints,joykuotw/python-endpoints
Create SQLite database and table and insert data from CSV file
# Create a database import sqlite3 import csv from datetime import datetime import sys reload(sys) sys.setdefaultencoding('utf8') class createDB(): def readCSV(self, filename): conn = sqlite3.connect('CIUK.db') print 'DB Creation Successful!' cur = conn.cursor() # cur.execute('''DROP TABLE PRODUCTS;''') cur.execute('''CREATE TABLE PRODUCTS (ID INTEGER PRIMARY KEY AUTOINCREMENT, TITLE TEXT NOT NULL, DESCRIPTION TEXT NOT NULL, PRICE INTEGER NOT NULL, CREATED_AT TIMESTAMP, UPDATED_AT TIMESTAMP);''') print 'Table Creation Successful!' with open(filename) as f: reader = csv.reader(f) for row in reader: cur.execute("INSERT INTO PRODUCTS VALUES (null, ?, ?, ?, ?, ?);", (unicode(row[0]), unicode(row[1]), unicode(row[2]), datetime.now(), datetime.now())) print 'Successfully read data from CSV file!' conn.commit() conn.close() c = createDB().readCSV('products.csv')
<commit_before><commit_msg>Create SQLite database and table and insert data from CSV file<commit_after>
# Create a database import sqlite3 import csv from datetime import datetime import sys reload(sys) sys.setdefaultencoding('utf8') class createDB(): def readCSV(self, filename): conn = sqlite3.connect('CIUK.db') print 'DB Creation Successful!' cur = conn.cursor() # cur.execute('''DROP TABLE PRODUCTS;''') cur.execute('''CREATE TABLE PRODUCTS (ID INTEGER PRIMARY KEY AUTOINCREMENT, TITLE TEXT NOT NULL, DESCRIPTION TEXT NOT NULL, PRICE INTEGER NOT NULL, CREATED_AT TIMESTAMP, UPDATED_AT TIMESTAMP);''') print 'Table Creation Successful!' with open(filename) as f: reader = csv.reader(f) for row in reader: cur.execute("INSERT INTO PRODUCTS VALUES (null, ?, ?, ?, ?, ?);", (unicode(row[0]), unicode(row[1]), unicode(row[2]), datetime.now(), datetime.now())) print 'Successfully read data from CSV file!' conn.commit() conn.close() c = createDB().readCSV('products.csv')
Create SQLite database and table and insert data from CSV file# Create a database import sqlite3 import csv from datetime import datetime import sys reload(sys) sys.setdefaultencoding('utf8') class createDB(): def readCSV(self, filename): conn = sqlite3.connect('CIUK.db') print 'DB Creation Successful!' cur = conn.cursor() # cur.execute('''DROP TABLE PRODUCTS;''') cur.execute('''CREATE TABLE PRODUCTS (ID INTEGER PRIMARY KEY AUTOINCREMENT, TITLE TEXT NOT NULL, DESCRIPTION TEXT NOT NULL, PRICE INTEGER NOT NULL, CREATED_AT TIMESTAMP, UPDATED_AT TIMESTAMP);''') print 'Table Creation Successful!' with open(filename) as f: reader = csv.reader(f) for row in reader: cur.execute("INSERT INTO PRODUCTS VALUES (null, ?, ?, ?, ?, ?);", (unicode(row[0]), unicode(row[1]), unicode(row[2]), datetime.now(), datetime.now())) print 'Successfully read data from CSV file!' conn.commit() conn.close() c = createDB().readCSV('products.csv')
<commit_before><commit_msg>Create SQLite database and table and insert data from CSV file<commit_after># Create a database import sqlite3 import csv from datetime import datetime import sys reload(sys) sys.setdefaultencoding('utf8') class createDB(): def readCSV(self, filename): conn = sqlite3.connect('CIUK.db') print 'DB Creation Successful!' cur = conn.cursor() # cur.execute('''DROP TABLE PRODUCTS;''') cur.execute('''CREATE TABLE PRODUCTS (ID INTEGER PRIMARY KEY AUTOINCREMENT, TITLE TEXT NOT NULL, DESCRIPTION TEXT NOT NULL, PRICE INTEGER NOT NULL, CREATED_AT TIMESTAMP, UPDATED_AT TIMESTAMP);''') print 'Table Creation Successful!' with open(filename) as f: reader = csv.reader(f) for row in reader: cur.execute("INSERT INTO PRODUCTS VALUES (null, ?, ?, ?, ?, ?);", (unicode(row[0]), unicode(row[1]), unicode(row[2]), datetime.now(), datetime.now())) print 'Successfully read data from CSV file!' conn.commit() conn.close() c = createDB().readCSV('products.csv')
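Reading the table back uses the same parameterized style as the inserts. A short sketch against the CIUK.db file the script creates (the price threshold is arbitrary):

import sqlite3

conn = sqlite3.connect('CIUK.db')
cur = conn.cursor()
# '?' placeholders keep values out of the SQL text, as in the inserts above.
for row in cur.execute(
        "SELECT ID, TITLE, PRICE FROM PRODUCTS WHERE PRICE <= ? ORDER BY PRICE;",
        (100,)):
    print row  # Python 2 print, matching the script
conn.close()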
874c01374397014e7c99afd67f5680ed32f1c5c6
bn.py
bn.py
import sys from time import gmtime year, mon, mday, hour, min, sec, wday, yday, isdst = gmtime() bld = ((year - 2000) * 12 + mon - 1) * 100 + mday rev = hour * 100 + min print 'Your build and revision number for today is %d.%d.' % (bld, rev)
Build and revision number script
Build and revision number script
Python
apache-2.0
atifaziz/NCrontab,atifaziz/NCrontab
Build and revision number script
import sys from time import gmtime year, mon, mday, hour, min, sec, wday, yday, isdst = gmtime() bld = ((year - 2000) * 12 + mon - 1) * 100 + mday rev = hour * 100 + min print 'Your build and revision number for today is %d.%d.' % (bld, rev)
<commit_before><commit_msg>Build and revision number script<commit_after>
import sys from time import gmtime year, mon, mday, hour, min, sec, wday, yday, isdst = gmtime() bld = ((year - 2000) * 12 + mon - 1) * 100 + mday rev = hour * 100 + min print 'Your build and revision number for today is %d.%d.' % (bld, rev)
Build and revision number scriptimport sys from time import gmtime year, mon, mday, hour, min, sec, wday, yday, isdst = gmtime() bld = ((year - 2000) * 12 + mon - 1) * 100 + mday rev = hour * 100 + min print 'Your build and revision number for today is %d.%d.' % (bld, rev)
<commit_before><commit_msg>Build and revision number script<commit_after>import sys from time import gmtime year, mon, mday, hour, min, sec, wday, yday, isdst = gmtime() bld = ((year - 2000) * 12 + mon - 1) * 100 + mday rev = hour * 100 + min print 'Your build and revision number for today is %d.%d.' % (bld, rev)
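Worked through for a concrete moment, 2007-06-15 14:30 UTC, the scheme gives bld = ((2007 - 2000) * 12 + 6 - 1) * 100 + 15 = 8915 and rev = 14 * 100 + 30 = 1430, i.e. version 8915.1430. The same computation pinned to that fixed timestamp instead of the current gmtime():

from time import gmtime

# Epoch 1181917800 is 2007-06-15 14:30:00 UTC.
year, mon, mday, hour, min, sec, wday, yday, isdst = gmtime(1181917800)
bld = ((year - 2000) * 12 + mon - 1) * 100 + mday
rev = hour * 100 + min
assert (bld, rev) == (8915, 1430)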
780e4eb03420d75c18d0b21b5e616f2952aeda41
test/test_basic_logic.py
test/test_basic_logic.py
# -*- coding: utf-8 -*- """ test_basic_logic ~~~~~~~~~~~~~~~~ Test the basic logic of the h2 state machines. """ import h2.connection from hyperframe import frame class TestBasicConnection(object): """ Basic connection tests. """ example_request_headers = [ (':authority', 'example.com'), (':path', '/'), (':scheme', 'https'), (':method', 'GET'), ] def test_begin_connection(self): c = h2.connection.H2Connection() frames = c.send_headers_on_stream(1, self.example_request_headers) assert len(frames) == 1 def test_sending_some_data(self): c = h2.connection.H2Connection() frames = c.send_headers_on_stream(1, self.example_request_headers) frames.append(c.send_data_on_stream(1, b'test', end_stream=True)) assert len(frames) == 2 def test_receive_headers_frame(self): f = frame.HeadersFrame(1) f.data = b'fake headers' f.flags = set(['END_STREAM', 'END_HEADERS']) c = h2.connection.H2Connection() assert c.receive_frame(f) is None
# -*- coding: utf-8 -*- """ test_basic_logic ~~~~~~~~~~~~~~~~ Test the basic logic of the h2 state machines. """ import h2.connection from hyperframe import frame class TestBasicConnection(object): """ Basic connection tests. """ example_request_headers = [ (':authority', 'example.com'), (':path', '/'), (':scheme', 'https'), (':method', 'GET'), ] def test_begin_connection(self): c = h2.connection.H2Connection() frames = c.send_headers_on_stream(1, self.example_request_headers) assert len(frames) == 1 def test_sending_some_data(self): c = h2.connection.H2Connection() frames = c.send_headers_on_stream(1, self.example_request_headers) frames.append(c.send_data_on_stream(1, b'test', end_stream=True)) assert len(frames) == 2 def test_receive_headers_frame(self): f = frame.HeadersFrame(1) f.data = b'fake headers' f.flags = set(['END_STREAM', 'END_HEADERS']) c = h2.connection.H2Connection() assert c.receive_frame(f) is None def test_send_headers_end_stream(self): c = h2.connection.H2Connection() frames = c.send_headers_on_stream( 1, self.example_request_headers, end_stream=True ) assert len(frames) == 1 assert frames[-1].flags == set(['END_STREAM', 'END_HEADERS'])
Test sending headers with end stream.
Test sending headers with end stream.
Python
mit
python-hyper/hyper-h2,bhavishyagopesh/hyper-h2,Kriechi/hyper-h2,Kriechi/hyper-h2,mhils/hyper-h2,vladmunteanu/hyper-h2,vladmunteanu/hyper-h2,python-hyper/hyper-h2
# -*- coding: utf-8 -*- """ test_basic_logic ~~~~~~~~~~~~~~~~ Test the basic logic of the h2 state machines. """ import h2.connection from hyperframe import frame class TestBasicConnection(object): """ Basic connection tests. """ example_request_headers = [ (':authority', 'example.com'), (':path', '/'), (':scheme', 'https'), (':method', 'GET'), ] def test_begin_connection(self): c = h2.connection.H2Connection() frames = c.send_headers_on_stream(1, self.example_request_headers) assert len(frames) == 1 def test_sending_some_data(self): c = h2.connection.H2Connection() frames = c.send_headers_on_stream(1, self.example_request_headers) frames.append(c.send_data_on_stream(1, b'test', end_stream=True)) assert len(frames) == 2 def test_receive_headers_frame(self): f = frame.HeadersFrame(1) f.data = b'fake headers' f.flags = set(['END_STREAM', 'END_HEADERS']) c = h2.connection.H2Connection() assert c.receive_frame(f) is None Test sending headers with end stream.
# -*- coding: utf-8 -*- """ test_basic_logic ~~~~~~~~~~~~~~~~ Test the basic logic of the h2 state machines. """ import h2.connection from hyperframe import frame class TestBasicConnection(object): """ Basic connection tests. """ example_request_headers = [ (':authority', 'example.com'), (':path', '/'), (':scheme', 'https'), (':method', 'GET'), ] def test_begin_connection(self): c = h2.connection.H2Connection() frames = c.send_headers_on_stream(1, self.example_request_headers) assert len(frames) == 1 def test_sending_some_data(self): c = h2.connection.H2Connection() frames = c.send_headers_on_stream(1, self.example_request_headers) frames.append(c.send_data_on_stream(1, b'test', end_stream=True)) assert len(frames) == 2 def test_receive_headers_frame(self): f = frame.HeadersFrame(1) f.data = b'fake headers' f.flags = set(['END_STREAM', 'END_HEADERS']) c = h2.connection.H2Connection() assert c.receive_frame(f) is None def test_send_headers_end_stream(self): c = h2.connection.H2Connection() frames = c.send_headers_on_stream( 1, self.example_request_headers, end_stream=True ) assert len(frames) == 1 assert frames[-1].flags == set(['END_STREAM', 'END_HEADERS'])
<commit_before># -*- coding: utf-8 -*- """ test_basic_logic ~~~~~~~~~~~~~~~~ Test the basic logic of the h2 state machines. """ import h2.connection from hyperframe import frame class TestBasicConnection(object): """ Basic connection tests. """ example_request_headers = [ (':authority', 'example.com'), (':path', '/'), (':scheme', 'https'), (':method', 'GET'), ] def test_begin_connection(self): c = h2.connection.H2Connection() frames = c.send_headers_on_stream(1, self.example_request_headers) assert len(frames) == 1 def test_sending_some_data(self): c = h2.connection.H2Connection() frames = c.send_headers_on_stream(1, self.example_request_headers) frames.append(c.send_data_on_stream(1, b'test', end_stream=True)) assert len(frames) == 2 def test_receive_headers_frame(self): f = frame.HeadersFrame(1) f.data = b'fake headers' f.flags = set(['END_STREAM', 'END_HEADERS']) c = h2.connection.H2Connection() assert c.receive_frame(f) is None <commit_msg>Test sending headers with end stream.<commit_after>
# -*- coding: utf-8 -*- """ test_basic_logic ~~~~~~~~~~~~~~~~ Test the basic logic of the h2 state machines. """ import h2.connection from hyperframe import frame class TestBasicConnection(object): """ Basic connection tests. """ example_request_headers = [ (':authority', 'example.com'), (':path', '/'), (':scheme', 'https'), (':method', 'GET'), ] def test_begin_connection(self): c = h2.connection.H2Connection() frames = c.send_headers_on_stream(1, self.example_request_headers) assert len(frames) == 1 def test_sending_some_data(self): c = h2.connection.H2Connection() frames = c.send_headers_on_stream(1, self.example_request_headers) frames.append(c.send_data_on_stream(1, b'test', end_stream=True)) assert len(frames) == 2 def test_receive_headers_frame(self): f = frame.HeadersFrame(1) f.data = b'fake headers' f.flags = set(['END_STREAM', 'END_HEADERS']) c = h2.connection.H2Connection() assert c.receive_frame(f) is None def test_send_headers_end_stream(self): c = h2.connection.H2Connection() frames = c.send_headers_on_stream( 1, self.example_request_headers, end_stream=True ) assert len(frames) == 1 assert frames[-1].flags == set(['END_STREAM', 'END_HEADERS'])
# -*- coding: utf-8 -*- """ test_basic_logic ~~~~~~~~~~~~~~~~ Test the basic logic of the h2 state machines. """ import h2.connection from hyperframe import frame class TestBasicConnection(object): """ Basic connection tests. """ example_request_headers = [ (':authority', 'example.com'), (':path', '/'), (':scheme', 'https'), (':method', 'GET'), ] def test_begin_connection(self): c = h2.connection.H2Connection() frames = c.send_headers_on_stream(1, self.example_request_headers) assert len(frames) == 1 def test_sending_some_data(self): c = h2.connection.H2Connection() frames = c.send_headers_on_stream(1, self.example_request_headers) frames.append(c.send_data_on_stream(1, b'test', end_stream=True)) assert len(frames) == 2 def test_receive_headers_frame(self): f = frame.HeadersFrame(1) f.data = b'fake headers' f.flags = set(['END_STREAM', 'END_HEADERS']) c = h2.connection.H2Connection() assert c.receive_frame(f) is None Test sending headers with end stream.# -*- coding: utf-8 -*- """ test_basic_logic ~~~~~~~~~~~~~~~~ Test the basic logic of the h2 state machines. """ import h2.connection from hyperframe import frame class TestBasicConnection(object): """ Basic connection tests. """ example_request_headers = [ (':authority', 'example.com'), (':path', '/'), (':scheme', 'https'), (':method', 'GET'), ] def test_begin_connection(self): c = h2.connection.H2Connection() frames = c.send_headers_on_stream(1, self.example_request_headers) assert len(frames) == 1 def test_sending_some_data(self): c = h2.connection.H2Connection() frames = c.send_headers_on_stream(1, self.example_request_headers) frames.append(c.send_data_on_stream(1, b'test', end_stream=True)) assert len(frames) == 2 def test_receive_headers_frame(self): f = frame.HeadersFrame(1) f.data = b'fake headers' f.flags = set(['END_STREAM', 'END_HEADERS']) c = h2.connection.H2Connection() assert c.receive_frame(f) is None def test_send_headers_end_stream(self): c = h2.connection.H2Connection() frames = c.send_headers_on_stream( 1, self.example_request_headers, end_stream=True ) assert len(frames) == 1 assert frames[-1].flags == set(['END_STREAM', 'END_HEADERS'])
<commit_before># -*- coding: utf-8 -*- """ test_basic_logic ~~~~~~~~~~~~~~~~ Test the basic logic of the h2 state machines. """ import h2.connection from hyperframe import frame class TestBasicConnection(object): """ Basic connection tests. """ example_request_headers = [ (':authority', 'example.com'), (':path', '/'), (':scheme', 'https'), (':method', 'GET'), ] def test_begin_connection(self): c = h2.connection.H2Connection() frames = c.send_headers_on_stream(1, self.example_request_headers) assert len(frames) == 1 def test_sending_some_data(self): c = h2.connection.H2Connection() frames = c.send_headers_on_stream(1, self.example_request_headers) frames.append(c.send_data_on_stream(1, b'test', end_stream=True)) assert len(frames) == 2 def test_receive_headers_frame(self): f = frame.HeadersFrame(1) f.data = b'fake headers' f.flags = set(['END_STREAM', 'END_HEADERS']) c = h2.connection.H2Connection() assert c.receive_frame(f) is None <commit_msg>Test sending headers with end stream.<commit_after># -*- coding: utf-8 -*- """ test_basic_logic ~~~~~~~~~~~~~~~~ Test the basic logic of the h2 state machines. """ import h2.connection from hyperframe import frame class TestBasicConnection(object): """ Basic connection tests. """ example_request_headers = [ (':authority', 'example.com'), (':path', '/'), (':scheme', 'https'), (':method', 'GET'), ] def test_begin_connection(self): c = h2.connection.H2Connection() frames = c.send_headers_on_stream(1, self.example_request_headers) assert len(frames) == 1 def test_sending_some_data(self): c = h2.connection.H2Connection() frames = c.send_headers_on_stream(1, self.example_request_headers) frames.append(c.send_data_on_stream(1, b'test', end_stream=True)) assert len(frames) == 2 def test_receive_headers_frame(self): f = frame.HeadersFrame(1) f.data = b'fake headers' f.flags = set(['END_STREAM', 'END_HEADERS']) c = h2.connection.H2Connection() assert c.receive_frame(f) is None def test_send_headers_end_stream(self): c = h2.connection.H2Connection() frames = c.send_headers_on_stream( 1, self.example_request_headers, end_stream=True ) assert len(frames) == 1 assert frames[-1].flags == set(['END_STREAM', 'END_HEADERS'])
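The flag assertion at the end works because hyperframe exposes flags as a set-like collection of flag names on every frame object, which is also why the tests can compare against set([...]) directly. A minimal sketch of building the same frame by hand, independent of the h2 connection machinery:

from hyperframe.frame import HeadersFrame

f = HeadersFrame(1)
f.data = b'fake headers'  # opaque header block, as in the tests above
f.flags.add('END_STREAM')
f.flags.add('END_HEADERS')
assert f.flags == set(['END_STREAM', 'END_HEADERS'])
wire = f.serialize()  # 9-byte frame header followed by the payload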
3cf30bac4d20dbebf6185351ba0c10426a489de9
tools/run_tests/sanity/check_channel_arg_usage.py
tools/run_tests/sanity/check_channel_arg_usage.py
#!/usr/bin/env python # Copyright 2018 gRPC authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import os import sys os.chdir(os.path.join(os.path.dirname(sys.argv[0]), '../../..')) # set of files that are allowed to use the raw GRPC_ARG_* types _EXCEPTIONS = set([ 'src/core/lib/channel/channel_args.cc', 'src/core/lib/channel/channel_args.h', ]) _BANNED = set([ "GRPC_ARG_POINTER", ]) errors = 0 num_files = 0 for root, dirs, files in os.walk('src/core'): for filename in files: num_files += 1 path = os.path.join(root, filename) if path in _EXCEPTIONS: continue with open(path) as f: text = f.read() for banned in _BANNED: if banned in text: print('Illegal use of "%s" in %s' % (banned, path)) errors += 1 assert errors == 0 # This check comes about from this issue: # https://github.com/grpc/grpc/issues/15381 # Basically, a change rendered this script useless and we did not realize it. # This dumb check ensures that this type of issue doesn't occur again. assert num_files > 300 # we definitely have more than 300 files
Add sanity linter to catch future use
Add sanity linter to catch future use
Python
apache-2.0
Vizerai/grpc,jboeuf/grpc,vjpai/grpc,carl-mastrangelo/grpc,carl-mastrangelo/grpc,ncteisen/grpc,grpc/grpc,dgquintas/grpc,chrisdunelm/grpc,ctiller/grpc,nicolasnoble/grpc,jtattermusch/grpc,mehrdada/grpc,sreecha/grpc,chrisdunelm/grpc,firebase/grpc,ncteisen/grpc,jboeuf/grpc,jboeuf/grpc,jboeuf/grpc,pszemus/grpc,thinkerou/grpc,nicolasnoble/grpc,sreecha/grpc,chrisdunelm/grpc,muxi/grpc,firebase/grpc,muxi/grpc,nicolasnoble/grpc,chrisdunelm/grpc,jtattermusch/grpc,grpc/grpc,thinkerou/grpc,firebase/grpc,ejona86/grpc,nicolasnoble/grpc,jboeuf/grpc,jtattermusch/grpc,thinkerou/grpc,firebase/grpc,grpc/grpc,muxi/grpc,pszemus/grpc,donnadionne/grpc,pszemus/grpc,donnadionne/grpc,mehrdada/grpc,jtattermusch/grpc,vjpai/grpc,sreecha/grpc,nicolasnoble/grpc,stanley-cheung/grpc,muxi/grpc,grpc/grpc,muxi/grpc,sreecha/grpc,stanley-cheung/grpc,donnadionne/grpc,jtattermusch/grpc,nicolasnoble/grpc,sreecha/grpc,carl-mastrangelo/grpc,carl-mastrangelo/grpc,dgquintas/grpc,Vizerai/grpc,ejona86/grpc,firebase/grpc,pszemus/grpc,mehrdada/grpc,Vizerai/grpc,pszemus/grpc,vjpai/grpc,muxi/grpc,pszemus/grpc,muxi/grpc,vjpai/grpc,mehrdada/grpc,sreecha/grpc,sreecha/grpc,Vizerai/grpc,firebase/grpc,dgquintas/grpc,pszemus/grpc,vjpai/grpc,ejona86/grpc,ejona86/grpc,nicolasnoble/grpc,dgquintas/grpc,jboeuf/grpc,jtattermusch/grpc,thinkerou/grpc,ctiller/grpc,firebase/grpc,sreecha/grpc,dgquintas/grpc,jboeuf/grpc,ncteisen/grpc,thinkerou/grpc,Vizerai/grpc,muxi/grpc,pszemus/grpc,Vizerai/grpc,thinkerou/grpc,nicolasnoble/grpc,grpc/grpc,sreecha/grpc,ctiller/grpc,jtattermusch/grpc,stanley-cheung/grpc,mehrdada/grpc,donnadionne/grpc,vjpai/grpc,nicolasnoble/grpc,ctiller/grpc,chrisdunelm/grpc,Vizerai/grpc,chrisdunelm/grpc,carl-mastrangelo/grpc,stanley-cheung/grpc,jtattermusch/grpc,ncteisen/grpc,grpc/grpc,carl-mastrangelo/grpc,pszemus/grpc,mehrdada/grpc,stanley-cheung/grpc,jtattermusch/grpc,dgquintas/grpc,pszemus/grpc,chrisdunelm/grpc,ctiller/grpc,donnadionne/grpc,ejona86/grpc,dgquintas/grpc,firebase/grpc,Vizerai/grpc,jboeuf/grpc,Vizerai/grpc,grpc/grpc,sreecha/grpc,nicolasnoble/grpc,jtattermusch/grpc,jboeuf/grpc,thinkerou/grpc,ejona86/grpc,jtattermusch/grpc,ncteisen/grpc,ejona86/grpc,jboeuf/grpc,carl-mastrangelo/grpc,firebase/grpc,donnadionne/grpc,stanley-cheung/grpc,grpc/grpc,ejona86/grpc,donnadionne/grpc,donnadionne/grpc,vjpai/grpc,mehrdada/grpc,stanley-cheung/grpc,carl-mastrangelo/grpc,Vizerai/grpc,vjpai/grpc,grpc/grpc,ejona86/grpc,sreecha/grpc,donnadionne/grpc,mehrdada/grpc,ejona86/grpc,stanley-cheung/grpc,stanley-cheung/grpc,ctiller/grpc,mehrdada/grpc,stanley-cheung/grpc,ejona86/grpc,muxi/grpc,ncteisen/grpc,thinkerou/grpc,firebase/grpc,stanley-cheung/grpc,mehrdada/grpc,ncteisen/grpc,dgquintas/grpc,ncteisen/grpc,chrisdunelm/grpc,carl-mastrangelo/grpc,mehrdada/grpc,chrisdunelm/grpc,sreecha/grpc,jtattermusch/grpc,chrisdunelm/grpc,pszemus/grpc,stanley-cheung/grpc,Vizerai/grpc,donnadionne/grpc,mehrdada/grpc,grpc/grpc,carl-mastrangelo/grpc,thinkerou/grpc,grpc/grpc,ctiller/grpc,dgquintas/grpc,firebase/grpc,ctiller/grpc,muxi/grpc,carl-mastrangelo/grpc,vjpai/grpc,donnadionne/grpc,jboeuf/grpc,dgquintas/grpc,vjpai/grpc,carl-mastrangelo/grpc,jboeuf/grpc,ncteisen/grpc,ejona86/grpc,nicolasnoble/grpc,vjpai/grpc,ctiller/grpc,thinkerou/grpc,vjpai/grpc,dgquintas/grpc,pszemus/grpc,nicolasnoble/grpc,thinkerou/grpc,thinkerou/grpc,ncteisen/grpc,firebase/grpc,ncteisen/grpc,ctiller/grpc,ncteisen/grpc,chrisdunelm/grpc,ctiller/grpc,muxi/grpc,muxi/grpc,donnadionne/grpc,ctiller/grpc,grpc/grpc
Add sanity linter to catch future use
#!/usr/bin/env python # Copyright 2018 gRPC authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import os import sys os.chdir(os.path.join(os.path.dirname(sys.argv[0]), '../../..')) # set of files that are allowed to use the raw GRPC_ARG_* types _EXCEPTIONS = set([ 'src/core/lib/channel/channel_args.cc', 'src/core/lib/channel/channel_args.h', ]) _BANNED = set([ "GRPC_ARG_POINTER", ]) errors = 0 num_files = 0 for root, dirs, files in os.walk('src/core'): for filename in files: num_files += 1 path = os.path.join(root, filename) if path in _EXCEPTIONS: continue with open(path) as f: text = f.read() for banned in _BANNED: if banned in text: print('Illegal use of "%s" in %s' % (banned, path)) errors += 1 assert errors == 0 # This check comes about from this issue: # https://github.com/grpc/grpc/issues/15381 # Basically, a change rendered this script useless and we did not realize it. # This dumb check ensures that this type of issue doesn't occur again. assert num_files > 300 # we definitely have more than 300 files
<commit_before><commit_msg>Add sanity linter to catch future use<commit_after>
#!/usr/bin/env python # Copyright 2018 gRPC authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import os import sys os.chdir(os.path.join(os.path.dirname(sys.argv[0]), '../../..')) # set of files that are allowed to use the raw GRPC_ARG_* types _EXCEPTIONS = set([ 'src/core/lib/channel/channel_args.cc', 'src/core/lib/channel/channel_args.h', ]) _BANNED = set([ "GRPC_ARG_POINTER", ]) errors = 0 num_files = 0 for root, dirs, files in os.walk('src/core'): for filename in files: num_files += 1 path = os.path.join(root, filename) if path in _EXCEPTIONS: continue with open(path) as f: text = f.read() for banned in _BANNED: if banned in text: print('Illegal use of "%s" in %s' % (banned, path)) errors += 1 assert errors == 0 # This check comes about from this issue: # https://github.com/grpc/grpc/issues/15381 # Basically, a change rendered this script useless and we did not realize it. # This dumb check ensures that this type of issue doesn't occur again. assert num_files > 300 # we definitely have more than 300 files
Add sanity linter to catch future use#!/usr/bin/env python # Copyright 2018 gRPC authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import os import sys os.chdir(os.path.join(os.path.dirname(sys.argv[0]), '../../..')) # set of files that are allowed to use the raw GRPC_ARG_* types _EXCEPTIONS = set([ 'src/core/lib/channel/channel_args.cc', 'src/core/lib/channel/channel_args.h', ]) _BANNED = set([ "GRPC_ARG_POINTER", ]) errors = 0 num_files = 0 for root, dirs, files in os.walk('src/core'): for filename in files: num_files += 1 path = os.path.join(root, filename) if path in _EXCEPTIONS: continue with open(path) as f: text = f.read() for banned in _BANNED: if banned in text: print('Illegal use of "%s" in %s' % (banned, path)) errors += 1 assert errors == 0 # This check comes about from this issue: # https://github.com/grpc/grpc/issues/15381 # Basically, a change rendered this script useless and we did not realize it. # This dumb check ensures that this type of issue doesn't occur again. assert num_files > 300 # we definitely have more than 300 files
<commit_before><commit_msg>Add sanity linter to catch future use<commit_after>#!/usr/bin/env python # Copyright 2018 gRPC authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import os import sys os.chdir(os.path.join(os.path.dirname(sys.argv[0]), '../../..')) # set of files that are allowed to use the raw GRPC_ARG_* types _EXCEPTIONS = set([ 'src/core/lib/channel/channel_args.cc', 'src/core/lib/channel/channel_args.h', ]) _BANNED = set([ "GRPC_ARG_POINTER", ]) errors = 0 num_files = 0 for root, dirs, files in os.walk('src/core'): for filename in files: num_files += 1 path = os.path.join(root, filename) if path in _EXCEPTIONS: continue with open(path) as f: text = f.read() for banned in _BANNED: if banned in text: print('Illegal use of "%s" in %s' % (banned, path)) errors += 1 assert errors == 0 # This check comes about from this issue: # https://github.com/grpc/grpc/issues/15381 # Basically, a change rendered this script useless and we did not realize it. # This dumb check ensures that this type of issue doesn't occur again. assert num_files > 300 # we definitely have more than 300 files
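Extending the check is just set maintenance: each newly banned symbol usually needs a matching _EXCEPTIONS entry for any file still allowed to use it. A hypothetical extension (the symbol and path are illustrative only, not part of the commit):

# Hypothetical: ban another raw channel-arg symbol while whitelisting the
# single file that may keep referencing it.
_BANNED.add("GRPC_ARG_SOME_POINTER_ARG")          # illustrative name
_EXCEPTIONS.add("src/core/lib/iomgr/some_file.cc")  # illustrative path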
245879ce699b275edc3ee17e4cba1146241f25de
wizbit/xmlrpcdeferred.py
wizbit/xmlrpcdeferred.py
import gobject
import xmlrpclib

class XMLRPCDeferred (gobject.GObject):
    """Object representing the delayed result of an XML-RPC request.

    .is_ready: bool
        True when the result is received; False before then.
    .value : any
        Once is_ready=True, this attribute contains the result of the
        request. If this value is an instance of the xmlrpclib.Fault
        class, then some exception occurred during the request's
        processing.
    """
    __gsignals__ = {
        'ready': (gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE, ())
    }

    def __init__ (self, transport, http):
        self.__gobject_init__()
        self.transport = transport
        self.http = http
        self.value = None
        self.is_ready = False
        sock = self.http._conn.sock
        self.src_id = gobject.io_add_watch(sock,
                                           gobject.IO_IN | gobject.IO_HUP,
                                           self.handle_io)

    def handle_io (self, source, condition):
        # Triggered when there's input available on the socket.
        # The assumption is that all the input will be available
        # relatively quickly.
        self.read()

        # Returning false prevents this callback from being triggered
        # again. We also remove the monitoring of this file
        # descriptor.
        gobject.source_remove(self.src_id)
        return False

    def read (self):
        errcode, errmsg, headers = self.http.getreply()

        if errcode != 200:
            # The exception lives in xmlrpclib, and host/handler are not in
            # scope here, so report the connection's host as the target.
            raise xmlrpclib.ProtocolError(
                self.http._conn.host,
                errcode, errmsg,
                headers
                )

        try:
            result = xmlrpclib.Transport._parse_response(self.transport,
                                                         self.http.getfile(), None)
        except xmlrpclib.Fault, exc:
            result = exc
        self.value = result
        self.is_ready = True
        self.emit('ready')

    def __len__ (self):
        # XXX egregious hack!!!
        # The code in xmlrpclib.ServerProxy calls len() on the object
        # returned by the transport, and if it's of length 1 returns
        # the contained object. Therefore, this __len__ method
        # returns a completely fake length of 2.
        return 2

class GXMLRPCTransport (xmlrpclib.Transport):
    def request(self, host, handler, request_body, verbose=0):
        # issue XML-RPC request

        h = self.make_connection(host)
        if verbose:
            h.set_debuglevel(1)

        self.send_request(h, handler, request_body)
        self.send_host(h, host)
        self.send_user_agent(h)
        self.send_content(h, request_body)

        self.verbose = verbose

        return XMLRPCDeferred(self, h)
Add GLib mainloop transport for xmlrpcserver
Add GLib mainloop transport for xmlrpcserver
Python
lgpl-2.1
wizbit-archive/wizbit,wizbit-archive/wizbit
Add GLib mainloop transport for xmlrpcserver
import gobject
import xmlrpclib

class XMLRPCDeferred (gobject.GObject):
    """Object representing the delayed result of an XML-RPC request.

    .is_ready: bool
        True when the result is received; False before then.
    .value : any
        Once is_ready=True, this attribute contains the result of the
        request. If this value is an instance of the xmlrpclib.Fault
        class, then some exception occurred during the request's
        processing.
    """
    __gsignals__ = {
        'ready': (gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE, ())
    }

    def __init__ (self, transport, http):
        self.__gobject_init__()
        self.transport = transport
        self.http = http
        self.value = None
        self.is_ready = False
        sock = self.http._conn.sock
        self.src_id = gobject.io_add_watch(sock,
                                           gobject.IO_IN | gobject.IO_HUP,
                                           self.handle_io)

    def handle_io (self, source, condition):
        # Triggered when there's input available on the socket.
        # The assumption is that all the input will be available
        # relatively quickly.
        self.read()

        # Returning false prevents this callback from being triggered
        # again. We also remove the monitoring of this file
        # descriptor.
        gobject.source_remove(self.src_id)
        return False

    def read (self):
        errcode, errmsg, headers = self.http.getreply()

        if errcode != 200:
            # The exception lives in xmlrpclib, and host/handler are not in
            # scope here, so report the connection's host as the target.
            raise xmlrpclib.ProtocolError(
                self.http._conn.host,
                errcode, errmsg,
                headers
                )

        try:
            result = xmlrpclib.Transport._parse_response(self.transport,
                                                         self.http.getfile(), None)
        except xmlrpclib.Fault, exc:
            result = exc
        self.value = result
        self.is_ready = True
        self.emit('ready')

    def __len__ (self):
        # XXX egregious hack!!!
        # The code in xmlrpclib.ServerProxy calls len() on the object
        # returned by the transport, and if it's of length 1 returns
        # the contained object. Therefore, this __len__ method
        # returns a completely fake length of 2.
        return 2

class GXMLRPCTransport (xmlrpclib.Transport):
    def request(self, host, handler, request_body, verbose=0):
        # issue XML-RPC request

        h = self.make_connection(host)
        if verbose:
            h.set_debuglevel(1)

        self.send_request(h, handler, request_body)
        self.send_host(h, host)
        self.send_user_agent(h)
        self.send_content(h, request_body)

        self.verbose = verbose

        return XMLRPCDeferred(self, h)
<commit_before><commit_msg>Add GLib mainloop transport for xmlrpcserver<commit_after>
import gobject
import xmlrpclib

class XMLRPCDeferred (gobject.GObject):
    """Object representing the delayed result of an XML-RPC request.

    .is_ready: bool
        True when the result is received; False before then.
    .value : any
        Once is_ready=True, this attribute contains the result of the
        request. If this value is an instance of the xmlrpclib.Fault
        class, then some exception occurred during the request's
        processing.
    """
    __gsignals__ = {
        'ready': (gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE, ())
    }

    def __init__ (self, transport, http):
        self.__gobject_init__()
        self.transport = transport
        self.http = http
        self.value = None
        self.is_ready = False
        sock = self.http._conn.sock
        self.src_id = gobject.io_add_watch(sock,
                                           gobject.IO_IN | gobject.IO_HUP,
                                           self.handle_io)

    def handle_io (self, source, condition):
        # Triggered when there's input available on the socket.
        # The assumption is that all the input will be available
        # relatively quickly.
        self.read()

        # Returning false prevents this callback from being triggered
        # again. We also remove the monitoring of this file
        # descriptor.
        gobject.source_remove(self.src_id)
        return False

    def read (self):
        errcode, errmsg, headers = self.http.getreply()

        if errcode != 200:
            # The exception lives in xmlrpclib, and host/handler are not in
            # scope here, so report the connection's host as the target.
            raise xmlrpclib.ProtocolError(
                self.http._conn.host,
                errcode, errmsg,
                headers
                )

        try:
            result = xmlrpclib.Transport._parse_response(self.transport,
                                                         self.http.getfile(), None)
        except xmlrpclib.Fault, exc:
            result = exc
        self.value = result
        self.is_ready = True
        self.emit('ready')

    def __len__ (self):
        # XXX egregious hack!!!
        # The code in xmlrpclib.ServerProxy calls len() on the object
        # returned by the transport, and if it's of length 1 returns
        # the contained object. Therefore, this __len__ method
        # returns a completely fake length of 2.
        return 2

class GXMLRPCTransport (xmlrpclib.Transport):
    def request(self, host, handler, request_body, verbose=0):
        # issue XML-RPC request

        h = self.make_connection(host)
        if verbose:
            h.set_debuglevel(1)

        self.send_request(h, handler, request_body)
        self.send_host(h, host)
        self.send_user_agent(h)
        self.send_content(h, request_body)

        self.verbose = verbose

        return XMLRPCDeferred(self, h)
Add GLib mainloop transport for xmlrpcserverimport gobject import xmlrpclib class XMLRPCDeferred (gobject.GObject): """Object representing the delayed result of an XML-RPC request. .is_ready: bool True when the result is received; False before then. .value : any Once is_ready=True, this attribute contains the result of the request. If this value is an instance of the xmlrpclib.Fault class, then some exception occurred during the request's processing. """ __gsignals__ = { 'ready': (gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE, ()) } def __init__ (self, transport, http): self.__gobject_init__() self.transport = transport self.http = http self.value = None self.is_ready = False sock = self.http._conn.sock self.src_id = gobject.io_add_watch(sock, gobject.IO_IN | gobject.IO_HUP, self.handle_io) def handle_io (self, source, condition): # Triggered when there's input available on the socket. # The assumption is that all the input will be available # relatively quickly. self.read() # Returning false prevents this callback from being triggered # again. We also remove the monitoring of this file # descriptor. gobject.source_remove(self.src_id) return False def read (self): errcode, errmsg, headers = self.http.getreply() if errcode != 200: raise ProtocolError( host + handler, errcode, errmsg, headers ) try: result = xmlrpclib.Transport._parse_response(self.transport, self.http.getfile(), None) except xmlrpclib.Fault, exc: result = exc self.value = result self.is_ready = True self.emit('ready') def __len__ (self): # XXX egregious hack!!! # The code in xmlrpclib.ServerProxy calls len() on the object # returned by the transport, and if it's of length 1 returns # the contained object. Therefore, this __len__ method # returns a completely fake length of 2. return 2 class GXMLRPCTransport (xmlrpclib.Transport): def request(self, host, handler, request_body, verbose=0): # issue XML-RPC request h = self.make_connection(host) if verbose: h.set_debuglevel(1) self.send_request(h, handler, request_body) self.send_host(h, host) self.send_user_agent(h) self.send_content(h, request_body) self.verbose = verbose return XMLRPCDeferred(self, h)
<commit_before><commit_msg>Add GLib mainloop transport for xmlrpcserver<commit_after>import gobject import xmlrpclib class XMLRPCDeferred (gobject.GObject): """Object representing the delayed result of an XML-RPC request. .is_ready: bool True when the result is received; False before then. .value : any Once is_ready=True, this attribute contains the result of the request. If this value is an instance of the xmlrpclib.Fault class, then some exception occurred during the request's processing. """ __gsignals__ = { 'ready': (gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE, ()) } def __init__ (self, transport, http): self.__gobject_init__() self.transport = transport self.http = http self.value = None self.is_ready = False sock = self.http._conn.sock self.src_id = gobject.io_add_watch(sock, gobject.IO_IN | gobject.IO_HUP, self.handle_io) def handle_io (self, source, condition): # Triggered when there's input available on the socket. # The assumption is that all the input will be available # relatively quickly. self.read() # Returning false prevents this callback from being triggered # again. We also remove the monitoring of this file # descriptor. gobject.source_remove(self.src_id) return False def read (self): errcode, errmsg, headers = self.http.getreply() if errcode != 200: raise ProtocolError( host + handler, errcode, errmsg, headers ) try: result = xmlrpclib.Transport._parse_response(self.transport, self.http.getfile(), None) except xmlrpclib.Fault, exc: result = exc self.value = result self.is_ready = True self.emit('ready') def __len__ (self): # XXX egregious hack!!! # The code in xmlrpclib.ServerProxy calls len() on the object # returned by the transport, and if it's of length 1 returns # the contained object. Therefore, this __len__ method # returns a completely fake length of 2. return 2 class GXMLRPCTransport (xmlrpclib.Transport): def request(self, host, handler, request_body, verbose=0): # issue XML-RPC request h = self.make_connection(host) if verbose: h.set_debuglevel(1) self.send_request(h, handler, request_body) self.send_host(h, host) self.send_user_agent(h) self.send_content(h, request_body) self.verbose = verbose return XMLRPCDeferred(self, h)
0880d067f478ba6474e433e620a1e48e23ed9c34
wsgi/setup_nginxuwsgi.py
wsgi/setup_nginxuwsgi.py
import subprocess import multiprocessing import os bin_dir = os.path.expanduser('~/FrameworkBenchmarks/installs/py2/bin') config_dir = os.path.expanduser('~/FrameworkBenchmarks/config') NCPU = multiprocessing.cpu_count() def start(args): try: subprocess.check_call('sudo /usr/local/nginx/sbin/nginx -c ' + config_dir + '/nginx_uwsgi.conf', shell=True) # Run in the background, but keep stdout/stderr for easy debugging subprocess.Popen(bin_dir + '/uwsgi --ini ' + config_dir + '/uwsgi.ini' + ' --processes ' + str(NCPU) + ' --wsgi hello:app', shell=True, cwd='wsgi') return 0 except subprocess.CalledProcessError: return 1 def stop(): subprocess.call('sudo /usr/local/nginx/sbin/nginx -s stop', shell=True) subprocess.call(bin_dir + '/uwsgi --ini ' + config_dir + '/uwsgi_stop.ini', shell=True) return 0
Add nginx+uWSGI for 10% perf improvement over gunicorn
wsgi: Add nginx+uWSGI for 10% perf improvement over gunicorn nginx+uWSGI can be a killer performance combination as described here: http://lists.unbit.it/pipermail/uwsgi/2013-September/006431.html
Python
bsd-3-clause
hamiltont/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,sxend/FrameworkBenchmarks,leafo/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,valyala/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,grob/FrameworkBenchmarks,sxend/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,torhve/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,herloct/FrameworkBenchmarks,doom369/FrameworkBenchmarks,joshk/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,denkab/FrameworkBenchmarks,methane/FrameworkBenchmarks,jamming/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,doom369/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,methane/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,Ocramius/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,julienschmidt/FrameworkBenchmarks,jamming/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,actframework/FrameworkBenchmarks,sgml/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,herloct/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,khellang/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,Ocramius/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,sgml/FrameworkBenchmarks,actframework/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,valyala/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,valyala/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,zloster/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,zloster/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,julienschmidt/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,doom369/FrameworkBenchmarks,denkab/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,zloster/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,leafo/FrameworkBenchmarks,julienschmidt/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,Ocramius/FrameworkBenchmarks,valyala/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,sgml/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,zloster/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,Verber/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,leafo/FrameworkBenchmarks,methane/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,khellang/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,sxend/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,khellang/FrameworkBenchmarks,testn/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,sxend/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,herloct/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,grob/FrameworkBenchmarks,methane/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,zapov/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,torhve/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,julienschmidt/FrameworkBenchmarks,dmacd/FB-try1,mfirry/FrameworkBenchmarks,khellang/FrameworkBenchmarks,denkab/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,zloster/FrameworkBenchmarks,actframework/FrameworkBenchmarks,sxend/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,dmacd/FB-try1,sanjoydesk/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,grob/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,sxend/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,grob/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,torhve/FrameworkBenchmarks,Verber/FrameworkBenchmarks,actframework/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,zloster/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,herloct/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,julienschmidt/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,herloct/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,zapov/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,zloster/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,testn/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,dmacd/FB-try1,diablonhn/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,jamming/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,khellang/FrameworkBenchmarks,jamming/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,jamming/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,jamming/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,actframework/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,leafo/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,methane/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,khellang/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,leafo/FrameworkBenchmarks,testn/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,torhve/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,actframework/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,torhve/FrameworkBenchmarks,valyala/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,herloct/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,zloster/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,khellang/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,zapov/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,doom369/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,zapov/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,actframework/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,Verber/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,grob/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,grob/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,denkab/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,denkab/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,sgml/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,doom369/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,sgml/FrameworkBenchmarks,doom369/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,sxend/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,jamming/FrameworkBenchmarks,zloster/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,valyala/FrameworkBenchmarks,methane/FrameworkBenchmarks,torhve/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,testn/FrameworkBenchmarks,sxend/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,denkab/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,zapov/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,dmacd/FB-try1,Verber/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,denkab/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,testn/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,zapov/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,sgml/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,valyala/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,methane/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,torhve/FrameworkBenchmarks,grob/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,zapov/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,herloct/FrameworkBenchmarks,zloster/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,khellang/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,testn/FrameworkBenchmarks,sxend/FrameworkBenchmarks,valyala/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,doom369/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,joshk/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,torhve/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,zloster/FrameworkBenchmarks,actframework/FrameworkBenchmarks,testn/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,sgml/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,testn/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,doom369/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,testn/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,joshk/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,Verber/FrameworkBenchmarks,dmacd/FB-try1,k-r-g/FrameworkBenchmarks,methane/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,torhve/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,sgml/FrameworkBenchmarks,actframework/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,Ocramius/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,leafo/FrameworkBenchmarks,doom369/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,actframework/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,actframework/FrameworkBenchmarks,dmacd/FB-try1,ratpack/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,zapov/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,testn/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,joshk/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,zloster/FrameworkBenchmarks,grob/FrameworkBenchmarks,khellang/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,dmacd/FB-try1,valyala/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,Verber/FrameworkBenchmarks,methane/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,grob/FrameworkBenchmarks,jamming/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,herloct/FrameworkBenchmarks,Verber/FrameworkBenchmarks,actframework/FrameworkBenchmarks,doom369/FrameworkBenchmarks,herloct/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,jamming/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,Verber/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,dmacd/FB-try1,zdanek/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,khellang/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,leafo/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,zloster/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,joshk/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,leafo/FrameworkBenchmarks,joshk/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,sxend/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,actframework/FrameworkBenchmarks,grob/FrameworkBenchmarks,Ocramius/FrameworkBenchmarks,julienschmidt/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,leafo/FrameworkBenchmarks,jamming/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,joshk/FrameworkBenchmarks,valyala/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,testn/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,herloct/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,zloster/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,joshk/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,Verber/FrameworkBenchmarks,denkab/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,denkab/FrameworkBenchmarks,leafo/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,torhve/FrameworkBenchmarks,grob/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,dmacd/FB-try1,Rydgel/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,julienschmidt/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,julienschmidt/FrameworkBenchmarks,Verber/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,methane/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,torhve/FrameworkBenchmarks,leafo/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,zloster/FrameworkBenchmarks,zapov/FrameworkBenchmarks,zapov/FrameworkBenchmarks,doom369/FrameworkBenchmarks,dmacd/FB-try1,circlespainter/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,testn/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,methane/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,Verber/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,denkab/FrameworkBenchmarks,denkab/FrameworkBenchmarks,sgml/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,zapov/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,sgml/FrameworkBenchmarks,valyala/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,zloster/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,joshk/FrameworkBenchmarks,sxend/FrameworkBenchmarks,grob/FrameworkBenchmarks,herloct/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,grob/FrameworkBenchmarks,doom369/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,denkab/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,methane/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,herloct/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,valyala/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,sxend/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,zloster/FrameworkBenchmarks,jamming/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,Ocramius/FrameworkBenchmarks,doom369/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,testn/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,Ocramius/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,khellang/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,actframework/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,sgml/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,doom369/FrameworkBenchmarks,jamming/FrameworkBenchmarks,khellang/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,doom369/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,khellang/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,Verber/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,testn/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,sgml/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,actframework/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,dmacd/FB-try1,alubbe/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,sxend/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,valyala/FrameworkBenchmarks,Verber/FrameworkBenchmarks,Ocramius/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,herloct/FrameworkBenchmarks,denkab/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,zapov/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,jamming/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,sxend/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,sgml/FrameworkBenchmarks,joshk/FrameworkBenchmarks,sxend/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,Ocramius/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,leafo/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,Ocramius/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,zloster/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,joshk/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,actframework/FrameworkBenchmarks,denkab/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,torhve/FrameworkBenchmarks,doom369/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,joshk/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,Ocramius/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,zapov/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,julienschmidt/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,joshk/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,sxend/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,jamming/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,doom369/FrameworkBenchmarks,valyala/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,sgml/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,julienschmidt/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,methane/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,sxend/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,zapov/FrameworkBenchmarks,Ocramius/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,grob/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,herloct/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,joshk/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,julienschmidt/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,sxend/FrameworkBenchmarks,herloct/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,zapov/FrameworkBenchmarks,methane/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,zapov/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,Verber/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,julienschmidt/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,khellang/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,dmacd/FB-try1,nkasvosve/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks
wsgi: Add nginx+uWSGI for 10% perf improvement over gunicorn nginx+uWSGI can be a killer performance combination as described here: http://lists.unbit.it/pipermail/uwsgi/2013-September/006431.html
import subprocess import multiprocessing import os bin_dir = os.path.expanduser('~/FrameworkBenchmarks/installs/py2/bin') config_dir = os.path.expanduser('~/FrameworkBenchmarks/config') NCPU = multiprocessing.cpu_count() def start(args): try: subprocess.check_call('sudo /usr/local/nginx/sbin/nginx -c ' + config_dir + '/nginx_uwsgi.conf', shell=True) # Run in the background, but keep stdout/stderr for easy debugging subprocess.Popen(bin_dir + '/uwsgi --ini ' + config_dir + '/uwsgi.ini' + ' --processes ' + str(NCPU) + ' --wsgi hello:app', shell=True, cwd='wsgi') return 0 except subprocess.CalledProcessError: return 1 def stop(): subprocess.call('sudo /usr/local/nginx/sbin/nginx -s stop', shell=True) subprocess.call(bin_dir + '/uwsgi --ini ' + config_dir + '/uwsgi_stop.ini', shell=True) return 0
<commit_before><commit_msg>wsgi: Add nginx+uWSGI for 10% perf improvement over gunicorn nginx+uWSGI can be a killer performance combination as described here: http://lists.unbit.it/pipermail/uwsgi/2013-September/006431.html<commit_after>
import subprocess import multiprocessing import os bin_dir = os.path.expanduser('~/FrameworkBenchmarks/installs/py2/bin') config_dir = os.path.expanduser('~/FrameworkBenchmarks/config') NCPU = multiprocessing.cpu_count() def start(args): try: subprocess.check_call('sudo /usr/local/nginx/sbin/nginx -c ' + config_dir + '/nginx_uwsgi.conf', shell=True) # Run in the background, but keep stdout/stderr for easy debugging subprocess.Popen(bin_dir + '/uwsgi --ini ' + config_dir + '/uwsgi.ini' + ' --processes ' + str(NCPU) + ' --wsgi hello:app', shell=True, cwd='wsgi') return 0 except subprocess.CalledProcessError: return 1 def stop(): subprocess.call('sudo /usr/local/nginx/sbin/nginx -s stop', shell=True) subprocess.call(bin_dir + '/uwsgi --ini ' + config_dir + '/uwsgi_stop.ini', shell=True) return 0
wsgi: Add nginx+uWSGI for 10% perf improvement over gunicorn nginx+uWSGI can be a killer performance combination as described here: http://lists.unbit.it/pipermail/uwsgi/2013-September/006431.htmlimport subprocess import multiprocessing import os bin_dir = os.path.expanduser('~/FrameworkBenchmarks/installs/py2/bin') config_dir = os.path.expanduser('~/FrameworkBenchmarks/config') NCPU = multiprocessing.cpu_count() def start(args): try: subprocess.check_call('sudo /usr/local/nginx/sbin/nginx -c ' + config_dir + '/nginx_uwsgi.conf', shell=True) # Run in the background, but keep stdout/stderr for easy debugging subprocess.Popen(bin_dir + '/uwsgi --ini ' + config_dir + '/uwsgi.ini' + ' --processes ' + str(NCPU) + ' --wsgi hello:app', shell=True, cwd='wsgi') return 0 except subprocess.CalledProcessError: return 1 def stop(): subprocess.call('sudo /usr/local/nginx/sbin/nginx -s stop', shell=True) subprocess.call(bin_dir + '/uwsgi --ini ' + config_dir + '/uwsgi_stop.ini', shell=True) return 0
<commit_before><commit_msg>wsgi: Add nginx+uWSGI for 10% perf improvement over gunicorn nginx+uWSGI can be a killer performance combination as described here: http://lists.unbit.it/pipermail/uwsgi/2013-September/006431.html<commit_after>import subprocess import multiprocessing import os bin_dir = os.path.expanduser('~/FrameworkBenchmarks/installs/py2/bin') config_dir = os.path.expanduser('~/FrameworkBenchmarks/config') NCPU = multiprocessing.cpu_count() def start(args): try: subprocess.check_call('sudo /usr/local/nginx/sbin/nginx -c ' + config_dir + '/nginx_uwsgi.conf', shell=True) # Run in the background, but keep stdout/stderr for easy debugging subprocess.Popen(bin_dir + '/uwsgi --ini ' + config_dir + '/uwsgi.ini' + ' --processes ' + str(NCPU) + ' --wsgi hello:app', shell=True, cwd='wsgi') return 0 except subprocess.CalledProcessError: return 1 def stop(): subprocess.call('sudo /usr/local/nginx/sbin/nginx -s stop', shell=True) subprocess.call(bin_dir + '/uwsgi --ini ' + config_dir + '/uwsgi_stop.ini', shell=True) return 0
3709bcbd421d82f9404ab3b054989546d95c006f
sc2reader/scripts/sc2json.py
sc2reader/scripts/sc2json.py
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import absolute_import, print_function, unicode_literals, division import sc2reader from sc2reader.plugins.replay import toJSON def main(): import argparse parser = argparse.ArgumentParser(description="Prints replay data to a json string.") parser.add_argument('--indent', '-i', type=int, default=None, help="The per-line indent to use when printing a human readable json string") parser.add_argument('--encoding', '-e', type=str, default='UTF-8', help="The character encoding use..") parser.add_argument('path', metavar='path', type=str, nargs=1, help="Path to the replay to serialize.") args = parser.parse_args() factory = sc2reader.factories.SC2Factory() factory.register_plugin("Replay", toJSON(encoding=args.encoding, indent=args.indent)) replay_json = factory.load_replay(args.path[0]) print(replay_json) if __name__ == '__main__': main()
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import absolute_import, print_function, unicode_literals, division import sc2reader from sc2reader.factories.plugins.replay import toJSON def main(): import argparse parser = argparse.ArgumentParser(description="Prints replay data to a json string.") parser.add_argument('--indent', '-i', type=int, default=None, help="The per-line indent to use when printing a human readable json string") parser.add_argument('--encoding', '-e', type=str, default='UTF-8', help="The character encoding use..") parser.add_argument('path', metavar='path', type=str, nargs=1, help="Path to the replay to serialize.") args = parser.parse_args() factory = sc2reader.factories.SC2Factory() factory.register_plugin("Replay", toJSON(encoding=args.encoding, indent=args.indent)) replay_json = factory.load_replay(args.path[0]) print(replay_json) if __name__ == '__main__': main()
Fix another broken sc2reader.plugins reference.
Fix another broken sc2reader.plugins reference.
Python
mit
ggtracker/sc2reader,GraylinKim/sc2reader,vlaufer/sc2reader,StoicLoofah/sc2reader,GraylinKim/sc2reader,StoicLoofah/sc2reader,ggtracker/sc2reader,vlaufer/sc2reader
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import absolute_import, print_function, unicode_literals, division import sc2reader from sc2reader.plugins.replay import toJSON def main(): import argparse parser = argparse.ArgumentParser(description="Prints replay data to a json string.") parser.add_argument('--indent', '-i', type=int, default=None, help="The per-line indent to use when printing a human readable json string") parser.add_argument('--encoding', '-e', type=str, default='UTF-8', help="The character encoding use..") parser.add_argument('path', metavar='path', type=str, nargs=1, help="Path to the replay to serialize.") args = parser.parse_args() factory = sc2reader.factories.SC2Factory() factory.register_plugin("Replay", toJSON(encoding=args.encoding, indent=args.indent)) replay_json = factory.load_replay(args.path[0]) print(replay_json) if __name__ == '__main__': main() Fix another broken sc2reader.plugins reference.
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import absolute_import, print_function, unicode_literals, division import sc2reader from sc2reader.factories.plugins.replay import toJSON def main(): import argparse parser = argparse.ArgumentParser(description="Prints replay data to a json string.") parser.add_argument('--indent', '-i', type=int, default=None, help="The per-line indent to use when printing a human readable json string") parser.add_argument('--encoding', '-e', type=str, default='UTF-8', help="The character encoding use..") parser.add_argument('path', metavar='path', type=str, nargs=1, help="Path to the replay to serialize.") args = parser.parse_args() factory = sc2reader.factories.SC2Factory() factory.register_plugin("Replay", toJSON(encoding=args.encoding, indent=args.indent)) replay_json = factory.load_replay(args.path[0]) print(replay_json) if __name__ == '__main__': main()
<commit_before>#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import absolute_import, print_function, unicode_literals, division import sc2reader from sc2reader.plugins.replay import toJSON def main(): import argparse parser = argparse.ArgumentParser(description="Prints replay data to a json string.") parser.add_argument('--indent', '-i', type=int, default=None, help="The per-line indent to use when printing a human readable json string") parser.add_argument('--encoding', '-e', type=str, default='UTF-8', help="The character encoding use..") parser.add_argument('path', metavar='path', type=str, nargs=1, help="Path to the replay to serialize.") args = parser.parse_args() factory = sc2reader.factories.SC2Factory() factory.register_plugin("Replay", toJSON(encoding=args.encoding, indent=args.indent)) replay_json = factory.load_replay(args.path[0]) print(replay_json) if __name__ == '__main__': main() <commit_msg>Fix another broken sc2reader.plugins reference.<commit_after>
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import absolute_import, print_function, unicode_literals, division import sc2reader from sc2reader.factories.plugins.replay import toJSON def main(): import argparse parser = argparse.ArgumentParser(description="Prints replay data to a json string.") parser.add_argument('--indent', '-i', type=int, default=None, help="The per-line indent to use when printing a human readable json string") parser.add_argument('--encoding', '-e', type=str, default='UTF-8', help="The character encoding use..") parser.add_argument('path', metavar='path', type=str, nargs=1, help="Path to the replay to serialize.") args = parser.parse_args() factory = sc2reader.factories.SC2Factory() factory.register_plugin("Replay", toJSON(encoding=args.encoding, indent=args.indent)) replay_json = factory.load_replay(args.path[0]) print(replay_json) if __name__ == '__main__': main()
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import absolute_import, print_function, unicode_literals, division import sc2reader from sc2reader.plugins.replay import toJSON def main(): import argparse parser = argparse.ArgumentParser(description="Prints replay data to a json string.") parser.add_argument('--indent', '-i', type=int, default=None, help="The per-line indent to use when printing a human readable json string") parser.add_argument('--encoding', '-e', type=str, default='UTF-8', help="The character encoding use..") parser.add_argument('path', metavar='path', type=str, nargs=1, help="Path to the replay to serialize.") args = parser.parse_args() factory = sc2reader.factories.SC2Factory() factory.register_plugin("Replay", toJSON(encoding=args.encoding, indent=args.indent)) replay_json = factory.load_replay(args.path[0]) print(replay_json) if __name__ == '__main__': main() Fix another broken sc2reader.plugins reference.#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import absolute_import, print_function, unicode_literals, division import sc2reader from sc2reader.factories.plugins.replay import toJSON def main(): import argparse parser = argparse.ArgumentParser(description="Prints replay data to a json string.") parser.add_argument('--indent', '-i', type=int, default=None, help="The per-line indent to use when printing a human readable json string") parser.add_argument('--encoding', '-e', type=str, default='UTF-8', help="The character encoding use..") parser.add_argument('path', metavar='path', type=str, nargs=1, help="Path to the replay to serialize.") args = parser.parse_args() factory = sc2reader.factories.SC2Factory() factory.register_plugin("Replay", toJSON(encoding=args.encoding, indent=args.indent)) replay_json = factory.load_replay(args.path[0]) print(replay_json) if __name__ == '__main__': main()
<commit_before>#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import absolute_import, print_function, unicode_literals, division import sc2reader from sc2reader.plugins.replay import toJSON def main(): import argparse parser = argparse.ArgumentParser(description="Prints replay data to a json string.") parser.add_argument('--indent', '-i', type=int, default=None, help="The per-line indent to use when printing a human readable json string") parser.add_argument('--encoding', '-e', type=str, default='UTF-8', help="The character encoding use..") parser.add_argument('path', metavar='path', type=str, nargs=1, help="Path to the replay to serialize.") args = parser.parse_args() factory = sc2reader.factories.SC2Factory() factory.register_plugin("Replay", toJSON(encoding=args.encoding, indent=args.indent)) replay_json = factory.load_replay(args.path[0]) print(replay_json) if __name__ == '__main__': main() <commit_msg>Fix another broken sc2reader.plugins reference.<commit_after>#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import absolute_import, print_function, unicode_literals, division import sc2reader from sc2reader.factories.plugins.replay import toJSON def main(): import argparse parser = argparse.ArgumentParser(description="Prints replay data to a json string.") parser.add_argument('--indent', '-i', type=int, default=None, help="The per-line indent to use when printing a human readable json string") parser.add_argument('--encoding', '-e', type=str, default='UTF-8', help="The character encoding use..") parser.add_argument('path', metavar='path', type=str, nargs=1, help="Path to the replay to serialize.") args = parser.parse_args() factory = sc2reader.factories.SC2Factory() factory.register_plugin("Replay", toJSON(encoding=args.encoding, indent=args.indent)) replay_json = factory.load_replay(args.path[0]) print(replay_json) if __name__ == '__main__': main()
c3c559f893e31e728a429cf446039781cea1f25d
tests/test_tensorflow_magics.py
tests/test_tensorflow_magics.py
# Copyright 2019 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for the `%tensorflow_version` magic.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import sys import unittest from google.colab import _tensorflow_magics class TensorflowMagicsTest(unittest.TestCase): @classmethod def setUpClass(cls): super(TensorflowMagicsTest, cls).setUpClass() cls._original_version = _tensorflow_magics._tf_version cls._original_sys_path = sys.path[:] def setUp(self): super(TensorflowMagicsTest, self).setUp() _tensorflow_magics._tf_version = self._original_version sys.path[:] = self._original_sys_path def test_switch_1x_to_2x(self): _tensorflow_magics._tensorflow_version("2.x") tf2_path = _tensorflow_magics._available_versions["2.x"] self.assertEqual(sys.path[1:], self._original_sys_path) self.assertTrue(sys.path[0].startswith(tf2_path), (sys.path[0], tf2_path)) def test_switch_back(self): _tensorflow_magics._tensorflow_version("2.x") _tensorflow_magics._tensorflow_version("1.x") self.assertEqual(sys.path, self._original_sys_path) if __name__ == "__main__": unittest.main()
Add unit tests for `%tensorflow_version`
Add unit tests for `%tensorflow_version` In preparation for changes to this file that will additionally configure the `PATH` and `PYTHONPATH` environment variables, which is required for proper TensorBoard support. PiperOrigin-RevId: 259611678
Python
apache-2.0
googlecolab/colabtools,googlecolab/colabtools
Add unit tests for `%tensorflow_version` In preparation for changes to this file that will additionally configure the `PATH` and `PYTHONPATH` environment variables, which is required for proper TensorBoard support. PiperOrigin-RevId: 259611678
# Copyright 2019 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for the `%tensorflow_version` magic.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import sys import unittest from google.colab import _tensorflow_magics class TensorflowMagicsTest(unittest.TestCase): @classmethod def setUpClass(cls): super(TensorflowMagicsTest, cls).setUpClass() cls._original_version = _tensorflow_magics._tf_version cls._original_sys_path = sys.path[:] def setUp(self): super(TensorflowMagicsTest, self).setUp() _tensorflow_magics._tf_version = self._original_version sys.path[:] = self._original_sys_path def test_switch_1x_to_2x(self): _tensorflow_magics._tensorflow_version("2.x") tf2_path = _tensorflow_magics._available_versions["2.x"] self.assertEqual(sys.path[1:], self._original_sys_path) self.assertTrue(sys.path[0].startswith(tf2_path), (sys.path[0], tf2_path)) def test_switch_back(self): _tensorflow_magics._tensorflow_version("2.x") _tensorflow_magics._tensorflow_version("1.x") self.assertEqual(sys.path, self._original_sys_path) if __name__ == "__main__": unittest.main()
<commit_before><commit_msg>Add unit tests for `%tensorflow_version` In preparation for changes to this file that will additionally configure the `PATH` and `PYTHONPATH` environment variables, which is required for proper TensorBoard support. PiperOrigin-RevId: 259611678<commit_after>
# Copyright 2019 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for the `%tensorflow_version` magic.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import sys import unittest from google.colab import _tensorflow_magics class TensorflowMagicsTest(unittest.TestCase): @classmethod def setUpClass(cls): super(TensorflowMagicsTest, cls).setUpClass() cls._original_version = _tensorflow_magics._tf_version cls._original_sys_path = sys.path[:] def setUp(self): super(TensorflowMagicsTest, self).setUp() _tensorflow_magics._tf_version = self._original_version sys.path[:] = self._original_sys_path def test_switch_1x_to_2x(self): _tensorflow_magics._tensorflow_version("2.x") tf2_path = _tensorflow_magics._available_versions["2.x"] self.assertEqual(sys.path[1:], self._original_sys_path) self.assertTrue(sys.path[0].startswith(tf2_path), (sys.path[0], tf2_path)) def test_switch_back(self): _tensorflow_magics._tensorflow_version("2.x") _tensorflow_magics._tensorflow_version("1.x") self.assertEqual(sys.path, self._original_sys_path) if __name__ == "__main__": unittest.main()
Add unit tests for `%tensorflow_version` In preparation for changes to this file that will additionally configure the `PATH` and `PYTHONPATH` environment variables, which is required for proper TensorBoard support. PiperOrigin-RevId: 259611678# Copyright 2019 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for the `%tensorflow_version` magic.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import sys import unittest from google.colab import _tensorflow_magics class TensorflowMagicsTest(unittest.TestCase): @classmethod def setUpClass(cls): super(TensorflowMagicsTest, cls).setUpClass() cls._original_version = _tensorflow_magics._tf_version cls._original_sys_path = sys.path[:] def setUp(self): super(TensorflowMagicsTest, self).setUp() _tensorflow_magics._tf_version = self._original_version sys.path[:] = self._original_sys_path def test_switch_1x_to_2x(self): _tensorflow_magics._tensorflow_version("2.x") tf2_path = _tensorflow_magics._available_versions["2.x"] self.assertEqual(sys.path[1:], self._original_sys_path) self.assertTrue(sys.path[0].startswith(tf2_path), (sys.path[0], tf2_path)) def test_switch_back(self): _tensorflow_magics._tensorflow_version("2.x") _tensorflow_magics._tensorflow_version("1.x") self.assertEqual(sys.path, self._original_sys_path) if __name__ == "__main__": unittest.main()
<commit_before><commit_msg>Add unit tests for `%tensorflow_version` In preparation for changes to this file that will additionally configure the `PATH` and `PYTHONPATH` environment variables, which is required for proper TensorBoard support. PiperOrigin-RevId: 259611678<commit_after># Copyright 2019 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for the `%tensorflow_version` magic.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import sys import unittest from google.colab import _tensorflow_magics class TensorflowMagicsTest(unittest.TestCase): @classmethod def setUpClass(cls): super(TensorflowMagicsTest, cls).setUpClass() cls._original_version = _tensorflow_magics._tf_version cls._original_sys_path = sys.path[:] def setUp(self): super(TensorflowMagicsTest, self).setUp() _tensorflow_magics._tf_version = self._original_version sys.path[:] = self._original_sys_path def test_switch_1x_to_2x(self): _tensorflow_magics._tensorflow_version("2.x") tf2_path = _tensorflow_magics._available_versions["2.x"] self.assertEqual(sys.path[1:], self._original_sys_path) self.assertTrue(sys.path[0].startswith(tf2_path), (sys.path[0], tf2_path)) def test_switch_back(self): _tensorflow_magics._tensorflow_version("2.x") _tensorflow_magics._tensorflow_version("1.x") self.assertEqual(sys.path, self._original_sys_path) if __name__ == "__main__": unittest.main()
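A side note on the record above, offered as editorial commentary rather than part of the dataset: the tests restore sys.path with slice assignment (sys.path[:] = ...) instead of rebinding the name, so any module that already holds a reference to the same list object sees the restored state. A minimal sketch of that save/restore idiom:

import sys
import unittest

class PathIsolationExample(unittest.TestCase):
    def setUp(self):
        self._saved_path = sys.path[:]      # snapshot a copy of the current path

    def tearDown(self):
        sys.path[:] = self._saved_path      # restore in place, keeping aliases valid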
be01980afe4b1dbd5a1d5b07651cd7a54c771d01
tests/modules/test_disk.py
tests/modules/test_disk.py
# pylint: disable=C0103,C0111 import mock import unittest import tests.mocks as mocks from bumblebee.input import LEFT_MOUSE from bumblebee.modules.disk import Module class MockVFS(object): def __init__(self, perc): self.f_blocks = 1024*1024 self.f_frsize = 1 self.f_bavail = self.f_blocks - self.f_blocks*(perc/100.0) class TestDiskModule(unittest.TestCase): def setUp(self): mocks.setup_test(self, Module) self._os = mock.patch("bumblebee.modules.disk.os") self.os = self._os.start() self.config.set("disk.path", "somepath") def tearDown(self): self._os.stop() mocks.teardown_test(self) def test_leftclick(self): module = Module(engine=self.engine, config={"config":self.config}) mocks.mouseEvent(stdin=self.stdin, button=LEFT_MOUSE, inp=self.input, module=module) self.popen.assert_call("nautilus {}".format(self.module.parameter("path"))) def test_warning(self): self.config.set("disk.critical", "80") self.config.set("disk.warning", "70") self.os.statvfs.return_value = MockVFS(75.0) self.module.update_all() self.assertTrue("warning" in self.module.state(self.anyWidget)) def test_critical(self): self.config.set("disk.critical", "80") self.config.set("disk.warning", "70") self.os.statvfs.return_value = MockVFS(85.0) self.module.update_all() self.assertTrue("critical" in self.module.state(self.anyWidget)) # vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
Add unit tests for disk module
[tests] Add unit tests for disk module
Python
mit
tobi-wan-kenobi/bumblebee-status,tobi-wan-kenobi/bumblebee-status
[tests] Add unit tests for disk module
# pylint: disable=C0103,C0111 import mock import unittest import tests.mocks as mocks from bumblebee.input import LEFT_MOUSE from bumblebee.modules.disk import Module class MockVFS(object): def __init__(self, perc): self.f_blocks = 1024*1024 self.f_frsize = 1 self.f_bavail = self.f_blocks - self.f_blocks*(perc/100.0) class TestDiskModule(unittest.TestCase): def setUp(self): mocks.setup_test(self, Module) self._os = mock.patch("bumblebee.modules.disk.os") self.os = self._os.start() self.config.set("disk.path", "somepath") def tearDown(self): self._os.stop() mocks.teardown_test(self) def test_leftclick(self): module = Module(engine=self.engine, config={"config":self.config}) mocks.mouseEvent(stdin=self.stdin, button=LEFT_MOUSE, inp=self.input, module=module) self.popen.assert_call("nautilus {}".format(self.module.parameter("path"))) def test_warning(self): self.config.set("disk.critical", "80") self.config.set("disk.warning", "70") self.os.statvfs.return_value = MockVFS(75.0) self.module.update_all() self.assertTrue("warning" in self.module.state(self.anyWidget)) def test_critical(self): self.config.set("disk.critical", "80") self.config.set("disk.warning", "70") self.os.statvfs.return_value = MockVFS(85.0) self.module.update_all() self.assertTrue("critical" in self.module.state(self.anyWidget)) # vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
<commit_before><commit_msg>[tests] Add unit tests for disk module<commit_after>
# pylint: disable=C0103,C0111 import mock import unittest import tests.mocks as mocks from bumblebee.input import LEFT_MOUSE from bumblebee.modules.disk import Module class MockVFS(object): def __init__(self, perc): self.f_blocks = 1024*1024 self.f_frsize = 1 self.f_bavail = self.f_blocks - self.f_blocks*(perc/100.0) class TestDiskModule(unittest.TestCase): def setUp(self): mocks.setup_test(self, Module) self._os = mock.patch("bumblebee.modules.disk.os") self.os = self._os.start() self.config.set("disk.path", "somepath") def tearDown(self): self._os.stop() mocks.teardown_test(self) def test_leftclick(self): module = Module(engine=self.engine, config={"config":self.config}) mocks.mouseEvent(stdin=self.stdin, button=LEFT_MOUSE, inp=self.input, module=module) self.popen.assert_call("nautilus {}".format(self.module.parameter("path"))) def test_warning(self): self.config.set("disk.critical", "80") self.config.set("disk.warning", "70") self.os.statvfs.return_value = MockVFS(75.0) self.module.update_all() self.assertTrue("warning" in self.module.state(self.anyWidget)) def test_critical(self): self.config.set("disk.critical", "80") self.config.set("disk.warning", "70") self.os.statvfs.return_value = MockVFS(85.0) self.module.update_all() self.assertTrue("critical" in self.module.state(self.anyWidget)) # vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
[tests] Add unit tests for disk module# pylint: disable=C0103,C0111 import mock import unittest import tests.mocks as mocks from bumblebee.input import LEFT_MOUSE from bumblebee.modules.disk import Module class MockVFS(object): def __init__(self, perc): self.f_blocks = 1024*1024 self.f_frsize = 1 self.f_bavail = self.f_blocks - self.f_blocks*(perc/100.0) class TestDiskModule(unittest.TestCase): def setUp(self): mocks.setup_test(self, Module) self._os = mock.patch("bumblebee.modules.disk.os") self.os = self._os.start() self.config.set("disk.path", "somepath") def tearDown(self): self._os.stop() mocks.teardown_test(self) def test_leftclick(self): module = Module(engine=self.engine, config={"config":self.config}) mocks.mouseEvent(stdin=self.stdin, button=LEFT_MOUSE, inp=self.input, module=module) self.popen.assert_call("nautilus {}".format(self.module.parameter("path"))) def test_warning(self): self.config.set("disk.critical", "80") self.config.set("disk.warning", "70") self.os.statvfs.return_value = MockVFS(75.0) self.module.update_all() self.assertTrue("warning" in self.module.state(self.anyWidget)) def test_critical(self): self.config.set("disk.critical", "80") self.config.set("disk.warning", "70") self.os.statvfs.return_value = MockVFS(85.0) self.module.update_all() self.assertTrue("critical" in self.module.state(self.anyWidget)) # vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
<commit_before><commit_msg>[tests] Add unit tests for disk module<commit_after># pylint: disable=C0103,C0111 import mock import unittest import tests.mocks as mocks from bumblebee.input import LEFT_MOUSE from bumblebee.modules.disk import Module class MockVFS(object): def __init__(self, perc): self.f_blocks = 1024*1024 self.f_frsize = 1 self.f_bavail = self.f_blocks - self.f_blocks*(perc/100.0) class TestDiskModule(unittest.TestCase): def setUp(self): mocks.setup_test(self, Module) self._os = mock.patch("bumblebee.modules.disk.os") self.os = self._os.start() self.config.set("disk.path", "somepath") def tearDown(self): self._os.stop() mocks.teardown_test(self) def test_leftclick(self): module = Module(engine=self.engine, config={"config":self.config}) mocks.mouseEvent(stdin=self.stdin, button=LEFT_MOUSE, inp=self.input, module=module) self.popen.assert_call("nautilus {}".format(self.module.parameter("path"))) def test_warning(self): self.config.set("disk.critical", "80") self.config.set("disk.warning", "70") self.os.statvfs.return_value = MockVFS(75.0) self.module.update_all() self.assertTrue("warning" in self.module.state(self.anyWidget)) def test_critical(self): self.config.set("disk.critical", "80") self.config.set("disk.warning", "70") self.os.statvfs.return_value = MockVFS(85.0) self.module.update_all() self.assertTrue("critical" in self.module.state(self.anyWidget)) # vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
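One caveat for readers of the test above: assert_call is not part of the standard unittest.mock Mock API (the built-in assertions are assert_called_with, assert_any_call, and friends), so unless the project's mocks helper defines it, the attribute access is silently absorbed by the Mock and nothing is actually checked. A sketch of the standard form, reusing the record's "nautilus somepath" command string as a placeholder:

from unittest import mock

popen = mock.Mock()
popen("nautilus somepath")
popen.assert_any_call("nautilus somepath")   # raises AssertionError if no such call was made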
76468926c1efe4d18477a70d767f91d4c6e38768
tests/test_dottedcircle.py
tests/test_dottedcircle.py
import uharfbuzz as hb import gflanguages import pytest langs = gflanguages.LoadLanguages() @pytest.fixture def hb_font(): # Persuade Harfbuzz we have a font that supports # every codepoint. face = hb.Face(b"") font = hb.Font(face) funcs = hb.FontFuncs.create() funcs.set_nominal_glyph_func((lambda font,cp,data: cp), None) font.funcs = funcs return font @pytest.mark.parametrize("lang", langs.keys()) def test_dotted_circle(lang, hb_font): item = langs[lang] samples = [x for (_,x) in item.sample_text.ListFields()] for sample in sorted(samples, key=lambda x:len(x)): buf = hb.Buffer() buf.add_str(sample) buf.guess_segment_properties() hb.shape(hb_font, buf) ok = not any(info.codepoint == 0x25CC for info in buf.glyph_infos) assert ok, f"Dotted circle found in {sample} ({lang})"
Add test for dotted circles in sample text
Add test for dotted circles in sample text
Python
apache-2.0
googlefonts/lang
Add test for dotted circles in sample text
import uharfbuzz as hb import gflanguages import pytest langs = gflanguages.LoadLanguages() @pytest.fixture def hb_font(): # Persuade Harfbuzz we have a font that supports # every codepoint. face = hb.Face(b"") font = hb.Font(face) funcs = hb.FontFuncs.create() funcs.set_nominal_glyph_func((lambda font,cp,data: cp), None) font.funcs = funcs return font @pytest.mark.parametrize("lang", langs.keys()) def test_dotted_circle(lang, hb_font): item = langs[lang] samples = [x for (_,x) in item.sample_text.ListFields()] for sample in sorted(samples, key=lambda x:len(x)): buf = hb.Buffer() buf.add_str(sample) buf.guess_segment_properties() hb.shape(hb_font, buf) ok = not any(info.codepoint == 0x25CC for info in buf.glyph_infos) assert ok, f"Dotted circle found in {sample} ({lang})"
<commit_before><commit_msg>Add test for dotted circles in sample text<commit_after>
import uharfbuzz as hb import gflanguages import pytest langs = gflanguages.LoadLanguages() @pytest.fixture def hb_font(): # Persuade Harfbuzz we have a font that supports # every codepoint. face = hb.Face(b"") font = hb.Font(face) funcs = hb.FontFuncs.create() funcs.set_nominal_glyph_func((lambda font,cp,data: cp), None) font.funcs = funcs return font @pytest.mark.parametrize("lang", langs.keys()) def test_dotted_circle(lang, hb_font): item = langs[lang] samples = [x for (_,x) in item.sample_text.ListFields()] for sample in sorted(samples, key=lambda x:len(x)): buf = hb.Buffer() buf.add_str(sample) buf.guess_segment_properties() hb.shape(hb_font, buf) ok = not any(info.codepoint == 0x25CC for info in buf.glyph_infos) assert ok, f"Dotted circle found in {sample} ({lang})"
Add test for dotted circles in sample textimport uharfbuzz as hb import gflanguages import pytest langs = gflanguages.LoadLanguages() @pytest.fixture def hb_font(): # Persuade Harfbuzz we have a font that supports # every codepoint. face = hb.Face(b"") font = hb.Font(face) funcs = hb.FontFuncs.create() funcs.set_nominal_glyph_func((lambda font,cp,data: cp), None) font.funcs = funcs return font @pytest.mark.parametrize("lang", langs.keys()) def test_dotted_circle(lang, hb_font): item = langs[lang] samples = [x for (_,x) in item.sample_text.ListFields()] for sample in sorted(samples, key=lambda x:len(x)): buf = hb.Buffer() buf.add_str(sample) buf.guess_segment_properties() hb.shape(hb_font, buf) ok = not any(info.codepoint == 0x25CC for info in buf.glyph_infos) assert ok, f"Dotted circle found in {sample} ({lang})"
<commit_before><commit_msg>Add test for dotted circles in sample text<commit_after>import uharfbuzz as hb import gflanguages import pytest langs = gflanguages.LoadLanguages() @pytest.fixture def hb_font(): # Persuade Harfbuzz we have a font that supports # every codepoint. face = hb.Face(b"") font = hb.Font(face) funcs = hb.FontFuncs.create() funcs.set_nominal_glyph_func((lambda font,cp,data: cp), None) font.funcs = funcs return font @pytest.mark.parametrize("lang", langs.keys()) def test_dotted_circle(lang, hb_font): item = langs[lang] samples = [x for (_,x) in item.sample_text.ListFields()] for sample in sorted(samples, key=lambda x:len(x)): buf = hb.Buffer() buf.add_str(sample) buf.guess_segment_properties() hb.shape(hb_font, buf) ok = not any(info.codepoint == 0x25CC for info in buf.glyph_infos) assert ok, f"Dotted circle found in {sample} ({lang})"
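How the synthetic font in the record above works, as a brief aside: set_nominal_glyph_func with lambda font, cp, data: cp maps every codepoint to a glyph id equal to the codepoint itself, so the empty face "supports" all characters and the only way U+25CC can appear in the output is if the shaper inserted it for an invalid cluster. Under that assumption the check reduces to:

# glyph_infos[i].codepoint holds the glyph id after shaping; with the identity
# mapping above, a shaper-inserted dotted circle surfaces as glyph id 0x25CC.
has_dotted_circle = any(info.codepoint == 0x25CC for info in buf.glyph_infos)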
17f71bfb81393241759e38fb9dce01561aeca3d5
tests/test_product_tags.py
tests/test_product_tags.py
from mock import Mock from saleor.product.templatetags.product_images import get_thumbnail, product_first_image def test_get_thumbnail(): instance = Mock() cropped_value = Mock(url='crop.jpg') thumbnail_value = Mock(url='thumb.jpg') instance.crop = {'10x10': cropped_value} instance.thumbnail = {'10x10': thumbnail_value} cropped = get_thumbnail(instance, '10x10', method='crop') assert cropped == cropped_value.url thumb = get_thumbnail(instance, '10x10', method='thumbnail') assert thumb == thumbnail_value.url def test_get_thumbnail_no_instance(): output = get_thumbnail(instance=None, size='10x10', method='crop') assert output == '/static/images/product-image-placeholder.png' def test_product_first_image(): mock_product_image = Mock() mock_product_image.image = Mock() mock_product_image.image.crop = {'10x10': Mock(url='crop.jpg')} mock_queryset = Mock() mock_queryset.all.return_value = [mock_product_image] mock_product = Mock(images=mock_queryset) out = product_first_image(mock_product, '10x10', method='crop') assert out == 'crop.jpg'
Add tests to product tags
Add tests to product tags
Python
bsd-3-clause
car3oon/saleor,itbabu/saleor,UITools/saleor,KenMutemi/saleor,maferelo/saleor,KenMutemi/saleor,jreigel/saleor,mociepka/saleor,mociepka/saleor,itbabu/saleor,tfroehlich82/saleor,HyperManTT/ECommerceSaleor,UITools/saleor,HyperManTT/ECommerceSaleor,jreigel/saleor,UITools/saleor,UITools/saleor,tfroehlich82/saleor,itbabu/saleor,UITools/saleor,tfroehlich82/saleor,car3oon/saleor,KenMutemi/saleor,maferelo/saleor,HyperManTT/ECommerceSaleor,mociepka/saleor,car3oon/saleor,jreigel/saleor,maferelo/saleor
Add tests to product tags
from mock import Mock from saleor.product.templatetags.product_images import get_thumbnail, product_first_image def test_get_thumbnail(): instance = Mock() cropped_value = Mock(url='crop.jpg') thumbnail_value = Mock(url='thumb.jpg') instance.crop = {'10x10': cropped_value} instance.thumbnail = {'10x10': thumbnail_value} cropped = get_thumbnail(instance, '10x10', method='crop') assert cropped == cropped_value.url thumb = get_thumbnail(instance, '10x10', method='thumbnail') assert thumb == thumbnail_value.url def test_get_thumbnail_no_instance(): output = get_thumbnail(instance=None, size='10x10', method='crop') assert output == '/static/images/product-image-placeholder.png' def test_product_first_image(): mock_product_image = Mock() mock_product_image.image = Mock() mock_product_image.image.crop = {'10x10': Mock(url='crop.jpg')} mock_queryset = Mock() mock_queryset.all.return_value = [mock_product_image] mock_product = Mock(images=mock_queryset) out = product_first_image(mock_product, '10x10', method='crop') assert out == 'crop.jpg'
<commit_before><commit_msg>Add tests to product tags<commit_after>
from mock import Mock from saleor.product.templatetags.product_images import get_thumbnail, product_first_image def test_get_thumbnail(): instance = Mock() cropped_value = Mock(url='crop.jpg') thumbnail_value = Mock(url='thumb.jpg') instance.crop = {'10x10': cropped_value} instance.thumbnail = {'10x10': thumbnail_value} cropped = get_thumbnail(instance, '10x10', method='crop') assert cropped == cropped_value.url thumb = get_thumbnail(instance, '10x10', method='thumbnail') assert thumb == thumbnail_value.url def test_get_thumbnail_no_instance(): output = get_thumbnail(instance=None, size='10x10', method='crop') assert output == '/static/images/product-image-placeholder.png' def test_product_first_image(): mock_product_image = Mock() mock_product_image.image = Mock() mock_product_image.image.crop = {'10x10': Mock(url='crop.jpg')} mock_queryset = Mock() mock_queryset.all.return_value = [mock_product_image] mock_product = Mock(images=mock_queryset) out = product_first_image(mock_product, '10x10', method='crop') assert out == 'crop.jpg'
Add tests to product tagsfrom mock import Mock from saleor.product.templatetags.product_images import get_thumbnail, product_first_image def test_get_thumbnail(): instance = Mock() cropped_value = Mock(url='crop.jpg') thumbnail_value = Mock(url='thumb.jpg') instance.crop = {'10x10': cropped_value} instance.thumbnail = {'10x10': thumbnail_value} cropped = get_thumbnail(instance, '10x10', method='crop') assert cropped == cropped_value.url thumb = get_thumbnail(instance, '10x10', method='thumbnail') assert thumb == thumbnail_value.url def test_get_thumbnail_no_instance(): output = get_thumbnail(instance=None, size='10x10', method='crop') assert output == '/static/images/product-image-placeholder.png' def test_product_first_image(): mock_product_image = Mock() mock_product_image.image = Mock() mock_product_image.image.crop = {'10x10': Mock(url='crop.jpg')} mock_queryset = Mock() mock_queryset.all.return_value = [mock_product_image] mock_product = Mock(images=mock_queryset) out = product_first_image(mock_product, '10x10', method='crop') assert out == 'crop.jpg'
<commit_before><commit_msg>Add tests to product tags<commit_after>from mock import Mock from saleor.product.templatetags.product_images import get_thumbnail, product_first_image def test_get_thumbnail(): instance = Mock() cropped_value = Mock(url='crop.jpg') thumbnail_value = Mock(url='thumb.jpg') instance.crop = {'10x10': cropped_value} instance.thumbnail = {'10x10': thumbnail_value} cropped = get_thumbnail(instance, '10x10', method='crop') assert cropped == cropped_value.url thumb = get_thumbnail(instance, '10x10', method='thumbnail') assert thumb == thumbnail_value.url def test_get_thumbnail_no_instance(): output = get_thumbnail(instance=None, size='10x10', method='crop') assert output == '/static/images/product-image-placeholder.png' def test_product_first_image(): mock_product_image = Mock() mock_product_image.image = Mock() mock_product_image.image.crop = {'10x10': Mock(url='crop.jpg')} mock_queryset = Mock() mock_queryset.all.return_value = [mock_product_image] mock_product = Mock(images=mock_queryset) out = product_first_image(mock_product, '10x10', method='crop') assert out == 'crop.jpg'
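A small aside on the mock plumbing above: setting mock_queryset.all.return_value is the standard way to stub a Django-style queryset, because configuring return_value on the auto-created child mock makes the later qs.all() call deterministic. A self-contained sketch, with "first-image" as a placeholder value:

from unittest import mock

qs = mock.Mock()
qs.all.return_value = ["first-image"]   # stub what the queryset's all() returns
assert qs.all() == ["first-image"]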
e455d459590a4f2b16b9a9360b6e33640f5ec7bf
python/allcheck.py
python/allcheck.py
#!/usr/bin/env python
import sys
import re
import glob

import phonenumbers

INTERNAL_FILES = ['phonenumbers/util.py', 'phonenumbers/re_util.py', 'phonenumbers/unicode_util.py']
CLASS_RE = re.compile(r"^class +([A-Za-z][_A-Za-z0-9]+)[ \(:]")
FUNCTION_RE = re.compile("^def +([A-Za-z][_A-Za-z0-9]+)[ \(]")
CONSTANT_RE = re.compile("^([A-Z][_A-Z0-9]+) *= *")

grepped_all = set()
for filename in glob.glob('phonenumbers/*.py'):
    if filename in INTERNAL_FILES:
        continue
    with file(filename, "r") as infile:
        for line in infile:
            m = CLASS_RE.match(line)
            if m:
                grepped_all.add(m.group(1))
            m = FUNCTION_RE.match(line)
            if m:
                grepped_all.add(m.group(1))
            m = CONSTANT_RE.match(line)
            if m:
                grepped_all.add(m.group(1))

code_all = set(phonenumbers.__all__)
code_not_grepped = (code_all - grepped_all)
grepped_not_code = (grepped_all - code_all)
if len(code_not_grepped) > 0:
    print >> sys.stderr, "Found the following in __all__ but not in grepped code:"
    for identifier in code_not_grepped:
        print >> sys.stderr, " %s" % identifier
if len(grepped_not_code) > 0:
    print >> sys.stderr, "Found the following in grepped code but not in __all__:"
    for identifier in grepped_not_code:
        print >> sys.stderr, " %s" % identifier
Add a script to check that __all__ in __init__.py is correct
Add a script to check that __all__ in __init__.py is correct
Python
apache-2.0
daviddrysdale/python-phonenumbers,roubert/python-phonenumbers,daodaoliang/python-phonenumbers,titansgroup/python-phonenumbers,dongguangming/python-phonenumbers,daviddrysdale/python-phonenumbers,agentr13/python-phonenumbers,gencer/python-phonenumbers,SergiuMir/python-phonenumbers,shikigit/python-phonenumbers,daviddrysdale/python-phonenumbers
Add a script to check that __all__ in __init__.py is correct
#!/usr/bin/env python
import sys
import re
import glob

import phonenumbers

INTERNAL_FILES = ['phonenumbers/util.py', 'phonenumbers/re_util.py', 'phonenumbers/unicode_util.py']
CLASS_RE = re.compile(r"^class +([A-Za-z][_A-Za-z0-9]+)[ \(:]")
FUNCTION_RE = re.compile("^def +([A-Za-z][_A-Za-z0-9]+)[ \(]")
CONSTANT_RE = re.compile("^([A-Z][_A-Z0-9]+) *= *")

grepped_all = set()
for filename in glob.glob('phonenumbers/*.py'):
    if filename in INTERNAL_FILES:
        continue
    with file(filename, "r") as infile:
        for line in infile:
            m = CLASS_RE.match(line)
            if m:
                grepped_all.add(m.group(1))
            m = FUNCTION_RE.match(line)
            if m:
                grepped_all.add(m.group(1))
            m = CONSTANT_RE.match(line)
            if m:
                grepped_all.add(m.group(1))

code_all = set(phonenumbers.__all__)
code_not_grepped = (code_all - grepped_all)
grepped_not_code = (grepped_all - code_all)
if len(code_not_grepped) > 0:
    print >> sys.stderr, "Found the following in __all__ but not in grepped code:"
    for identifier in code_not_grepped:
        print >> sys.stderr, " %s" % identifier
if len(grepped_not_code) > 0:
    print >> sys.stderr, "Found the following in grepped code but not in __all__:"
    for identifier in grepped_not_code:
        print >> sys.stderr, " %s" % identifier
<commit_before><commit_msg>Add a script to check that __all__ in __init__.py is correct<commit_after>
#!/usr/bin/env python
import sys
import re
import glob

import phonenumbers

INTERNAL_FILES = ['phonenumbers/util.py', 'phonenumbers/re_util.py', 'phonenumbers/unicode_util.py']
CLASS_RE = re.compile(r"^class +([A-Za-z][_A-Za-z0-9]+)[ \(:]")
FUNCTION_RE = re.compile("^def +([A-Za-z][_A-Za-z0-9]+)[ \(]")
CONSTANT_RE = re.compile("^([A-Z][_A-Z0-9]+) *= *")

grepped_all = set()
for filename in glob.glob('phonenumbers/*.py'):
    if filename in INTERNAL_FILES:
        continue
    with file(filename, "r") as infile:
        for line in infile:
            m = CLASS_RE.match(line)
            if m:
                grepped_all.add(m.group(1))
            m = FUNCTION_RE.match(line)
            if m:
                grepped_all.add(m.group(1))
            m = CONSTANT_RE.match(line)
            if m:
                grepped_all.add(m.group(1))

code_all = set(phonenumbers.__all__)
code_not_grepped = (code_all - grepped_all)
grepped_not_code = (grepped_all - code_all)
if len(code_not_grepped) > 0:
    print >> sys.stderr, "Found the following in __all__ but not in grepped code:"
    for identifier in code_not_grepped:
        print >> sys.stderr, " %s" % identifier
if len(grepped_not_code) > 0:
    print >> sys.stderr, "Found the following in grepped code but not in __all__:"
    for identifier in grepped_not_code:
        print >> sys.stderr, " %s" % identifier
Add a script to check that __all__ in __init__.py is correct#!/usr/bin/env python
import sys
import re
import glob

import phonenumbers

INTERNAL_FILES = ['phonenumbers/util.py', 'phonenumbers/re_util.py', 'phonenumbers/unicode_util.py']
CLASS_RE = re.compile(r"^class +([A-Za-z][_A-Za-z0-9]+)[ \(:]")
FUNCTION_RE = re.compile("^def +([A-Za-z][_A-Za-z0-9]+)[ \(]")
CONSTANT_RE = re.compile("^([A-Z][_A-Z0-9]+) *= *")

grepped_all = set()
for filename in glob.glob('phonenumbers/*.py'):
    if filename in INTERNAL_FILES:
        continue
    with file(filename, "r") as infile:
        for line in infile:
            m = CLASS_RE.match(line)
            if m:
                grepped_all.add(m.group(1))
            m = FUNCTION_RE.match(line)
            if m:
                grepped_all.add(m.group(1))
            m = CONSTANT_RE.match(line)
            if m:
                grepped_all.add(m.group(1))

code_all = set(phonenumbers.__all__)
code_not_grepped = (code_all - grepped_all)
grepped_not_code = (grepped_all - code_all)
if len(code_not_grepped) > 0:
    print >> sys.stderr, "Found the following in __all__ but not in grepped code:"
    for identifier in code_not_grepped:
        print >> sys.stderr, " %s" % identifier
if len(grepped_not_code) > 0:
    print >> sys.stderr, "Found the following in grepped code but not in __all__:"
    for identifier in grepped_not_code:
        print >> sys.stderr, " %s" % identifier
<commit_before><commit_msg>Add a script to check that __all__ in __init__.py is correct<commit_after>#!/usr/bin/env python
import sys
import re
import glob

import phonenumbers

INTERNAL_FILES = ['phonenumbers/util.py', 'phonenumbers/re_util.py', 'phonenumbers/unicode_util.py']
CLASS_RE = re.compile(r"^class +([A-Za-z][_A-Za-z0-9]+)[ \(:]")
FUNCTION_RE = re.compile("^def +([A-Za-z][_A-Za-z0-9]+)[ \(]")
CONSTANT_RE = re.compile("^([A-Z][_A-Z0-9]+) *= *")

grepped_all = set()
for filename in glob.glob('phonenumbers/*.py'):
    if filename in INTERNAL_FILES:
        continue
    with file(filename, "r") as infile:
        for line in infile:
            m = CLASS_RE.match(line)
            if m:
                grepped_all.add(m.group(1))
            m = FUNCTION_RE.match(line)
            if m:
                grepped_all.add(m.group(1))
            m = CONSTANT_RE.match(line)
            if m:
                grepped_all.add(m.group(1))

code_all = set(phonenumbers.__all__)
code_not_grepped = (code_all - grepped_all)
grepped_not_code = (grepped_all - code_all)
if len(code_not_grepped) > 0:
    print >> sys.stderr, "Found the following in __all__ but not in grepped code:"
    for identifier in code_not_grepped:
        print >> sys.stderr, " %s" % identifier
if len(grepped_not_code) > 0:
    print >> sys.stderr, "Found the following in grepped code but not in __all__:"
    for identifier in grepped_not_code:
        print >> sys.stderr, " %s" % identifier
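An editorial note on the script above: it is Python 2 only, relying on the removed file() builtin and the print >> sys.stderr chevron syntax. A minimal Python 3 rendering of the read loop and the stderr reporting, assuming the same file layout:

import glob
import sys

for filename in glob.glob('phonenumbers/*.py'):
    with open(filename, "r") as infile:   # file() no longer exists in Python 3
        for line in infile:
            pass                          # regex matching as in the script above

print("done", file=sys.stderr)            # print >> sys.stderr becomes print(..., file=...)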
48c844e602eaa182c4efaaa0b977765f4248d0a0
tools/network_migration.py
tools/network_migration.py
import argparse, shelve def renameDictKeys(storageDict): for key in storageDict.iterkeys(): if isinstance(storageDict[key], dict): renameDictKeys(storageDict[key]) if key == options.oldnetwork: storageDict[options.newnetwork] = storageDict[options.oldnetwork] del storageDict[options.oldnetwork] if __name__ == "__main__": # Parse the command line arguments parser = argparse.ArgumentParser(description="A tool for PyHeufyBot to migrate all storage data from one network " "to another.") parser.add_argument("-s", "--storage", help="The storage file to use", type=str, default="../heufybot.db") parser.add_argument("-o", "--oldnetwork", help="The name of the old network that the data should be migrated " "from.", type=str, required=True) parser.add_argument("-n", "--newnetwork", help="The name of the new network that the data should be migrated to.", type=str, required=True) options = parser.parse_args() storage = shelve.open(options.storage) d = dict(storage) renameDictKeys(d) storage.clear() storage.update(d) storage.close() print "Data has been migrated from '{}' to '{}'.".format(options.oldnetwork, options.newnetwork)
Add a data migration tool
Add a data migration tool
Python
mit
Heufneutje/PyHeufyBot,Heufneutje/PyHeufyBot
Add a data migration tool
import argparse, shelve def renameDictKeys(storageDict): for key in storageDict.iterkeys(): if isinstance(storageDict[key], dict): renameDictKeys(storageDict[key]) if key == options.oldnetwork: storageDict[options.newnetwork] = storageDict[options.oldnetwork] del storageDict[options.oldnetwork] if __name__ == "__main__": # Parse the command line arguments parser = argparse.ArgumentParser(description="A tool for PyHeufyBot to migrate all storage data from one network " "to another.") parser.add_argument("-s", "--storage", help="The storage file to use", type=str, default="../heufybot.db") parser.add_argument("-o", "--oldnetwork", help="The name of the old network that the data should be migrated " "from.", type=str, required=True) parser.add_argument("-n", "--newnetwork", help="The name of the new network that the data should be migrated to.", type=str, required=True) options = parser.parse_args() storage = shelve.open(options.storage) d = dict(storage) renameDictKeys(d) storage.clear() storage.update(d) storage.close() print "Data has been migrated from '{}' to '{}'.".format(options.oldnetwork, options.newnetwork)
<commit_before><commit_msg>Add a data migration tool<commit_after>
import argparse, shelve def renameDictKeys(storageDict): for key in storageDict.iterkeys(): if isinstance(storageDict[key], dict): renameDictKeys(storageDict[key]) if key == options.oldnetwork: storageDict[options.newnetwork] = storageDict[options.oldnetwork] del storageDict[options.oldnetwork] if __name__ == "__main__": # Parse the command line arguments parser = argparse.ArgumentParser(description="A tool for PyHeufyBot to migrate all storage data from one network " "to another.") parser.add_argument("-s", "--storage", help="The storage file to use", type=str, default="../heufybot.db") parser.add_argument("-o", "--oldnetwork", help="The name of the old network that the data should be migrated " "from.", type=str, required=True) parser.add_argument("-n", "--newnetwork", help="The name of the new network that the data should be migrated to.", type=str, required=True) options = parser.parse_args() storage = shelve.open(options.storage) d = dict(storage) renameDictKeys(d) storage.clear() storage.update(d) storage.close() print "Data has been migrated from '{}' to '{}'.".format(options.oldnetwork, options.newnetwork)
Add a data migration toolimport argparse, shelve def renameDictKeys(storageDict): for key in storageDict.iterkeys(): if isinstance(storageDict[key], dict): renameDictKeys(storageDict[key]) if key == options.oldnetwork: storageDict[options.newnetwork] = storageDict[options.oldnetwork] del storageDict[options.oldnetwork] if __name__ == "__main__": # Parse the command line arguments parser = argparse.ArgumentParser(description="A tool for PyHeufyBot to migrate all storage data from one network " "to another.") parser.add_argument("-s", "--storage", help="The storage file to use", type=str, default="../heufybot.db") parser.add_argument("-o", "--oldnetwork", help="The name of the old network that the data should be migrated " "from.", type=str, required=True) parser.add_argument("-n", "--newnetwork", help="The name of the new network that the data should be migrated to.", type=str, required=True) options = parser.parse_args() storage = shelve.open(options.storage) d = dict(storage) renameDictKeys(d) storage.clear() storage.update(d) storage.close() print "Data has been migrated from '{}' to '{}'.".format(options.oldnetwork, options.newnetwork)
<commit_before><commit_msg>Add a data migration tool<commit_after>import argparse, shelve def renameDictKeys(storageDict): for key in storageDict.iterkeys(): if isinstance(storageDict[key], dict): renameDictKeys(storageDict[key]) if key == options.oldnetwork: storageDict[options.newnetwork] = storageDict[options.oldnetwork] del storageDict[options.oldnetwork] if __name__ == "__main__": # Parse the command line arguments parser = argparse.ArgumentParser(description="A tool for PyHeufyBot to migrate all storage data from one network " "to another.") parser.add_argument("-s", "--storage", help="The storage file to use", type=str, default="../heufybot.db") parser.add_argument("-o", "--oldnetwork", help="The name of the old network that the data should be migrated " "from.", type=str, required=True) parser.add_argument("-n", "--newnetwork", help="The name of the new network that the data should be migrated to.", type=str, required=True) options = parser.parse_args() storage = shelve.open(options.storage) d = dict(storage) renameDictKeys(d) storage.clear() storage.update(d) storage.close() print "Data has been migrated from '{}' to '{}'.".format(options.oldnetwork, options.newnetwork)
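Worth noting about the shelve pattern above, as commentary: the tool copies the shelf into a plain dict, mutates that, then clear()/update()s the shelf because by default shelve persists only top-level key assignments, not in-place mutations of nested objects. The writeback option is the usual alternative; the database name and keys below are illustrative only:

import shelve

# writeback=True keeps a cache of loaded values and flushes nested
# mutations back to disk when the shelf is closed.
with shelve.open("example.db", writeback=True) as db:
    db.setdefault("channels", {})
    db["channels"]["NewNet"] = db["channels"].pop("OldNet", {})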
d72d2e38d177476470b22ded061dd06b2be3ee88
turbustat/tests/helpers.py
turbustat/tests/helpers.py
from __future__ import print_function, absolute_import, division from astropy import units as u from numpy.testing import assert_allclose as assert_allclose_numpy, assert_array_equal def assert_allclose(q1, q2, **kwargs): """ Quantity-safe version of Numpy's assert_allclose Copyright (c) 2014, spectral-cube developers All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ if isinstance(q1, u.Quantity) and isinstance(q2, u.Quantity): assert_allclose_numpy(q1.to(q2.unit).value, q2.value, **kwargs) elif isinstance(q1, u.Quantity): assert_allclose_numpy(q1.value, q2, **kwargs) elif isinstance(q2, u.Quantity): assert_allclose_numpy(q1, q2.value, **kwargs) else: assert_allclose_numpy(q1, q2, **kwargs)
Add the quantity-safe allclose from spectral-cube
Add the quantity-safe allclose from spectral-cube
Python
mit
e-koch/TurbuStat,Astroua/TurbuStat
Add the quantity-safe allclose from spectral-cube
from __future__ import print_function, absolute_import, division from astropy import units as u from numpy.testing import assert_allclose as assert_allclose_numpy, assert_array_equal def assert_allclose(q1, q2, **kwargs): """ Quantity-safe version of Numpy's assert_allclose Copyright (c) 2014, spectral-cube developers All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ if isinstance(q1, u.Quantity) and isinstance(q2, u.Quantity): assert_allclose_numpy(q1.to(q2.unit).value, q2.value, **kwargs) elif isinstance(q1, u.Quantity): assert_allclose_numpy(q1.value, q2, **kwargs) elif isinstance(q2, u.Quantity): assert_allclose_numpy(q1, q2.value, **kwargs) else: assert_allclose_numpy(q1, q2, **kwargs)
<commit_before><commit_msg>Add the quantity-safe allclose from spectral-cube<commit_after>
from __future__ import print_function, absolute_import, division from astropy import units as u from numpy.testing import assert_allclose as assert_allclose_numpy, assert_array_equal def assert_allclose(q1, q2, **kwargs): """ Quantity-safe version of Numpy's assert_allclose Copyright (c) 2014, spectral-cube developers All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ if isinstance(q1, u.Quantity) and isinstance(q2, u.Quantity): assert_allclose_numpy(q1.to(q2.unit).value, q2.value, **kwargs) elif isinstance(q1, u.Quantity): assert_allclose_numpy(q1.value, q2, **kwargs) elif isinstance(q2, u.Quantity): assert_allclose_numpy(q1, q2.value, **kwargs) else: assert_allclose_numpy(q1, q2, **kwargs)
Add the quantity-safe allclose from spectral-cubefrom __future__ import print_function, absolute_import, division from astropy import units as u from numpy.testing import assert_allclose as assert_allclose_numpy, assert_array_equal def assert_allclose(q1, q2, **kwargs): """ Quantity-safe version of Numpy's assert_allclose Copyright (c) 2014, spectral-cube developers All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ if isinstance(q1, u.Quantity) and isinstance(q2, u.Quantity): assert_allclose_numpy(q1.to(q2.unit).value, q2.value, **kwargs) elif isinstance(q1, u.Quantity): assert_allclose_numpy(q1.value, q2, **kwargs) elif isinstance(q2, u.Quantity): assert_allclose_numpy(q1, q2.value, **kwargs) else: assert_allclose_numpy(q1, q2, **kwargs)
<commit_before><commit_msg>Add the quantity-safe allclose from spectral-cube<commit_after>from __future__ import print_function, absolute_import, division from astropy import units as u from numpy.testing import assert_allclose as assert_allclose_numpy, assert_array_equal def assert_allclose(q1, q2, **kwargs): """ Quantity-safe version of Numpy's assert_allclose Copyright (c) 2014, spectral-cube developers All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ if isinstance(q1, u.Quantity) and isinstance(q2, u.Quantity): assert_allclose_numpy(q1.to(q2.unit).value, q2.value, **kwargs) elif isinstance(q1, u.Quantity): assert_allclose_numpy(q1.value, q2, **kwargs) elif isinstance(q2, u.Quantity): assert_allclose_numpy(q1, q2.value, **kwargs) else: assert_allclose_numpy(q1, q2, **kwargs)
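A usage note on the helper above, with one caveat a reader should know: in the mixed branches the unit is dropped rather than checked, so a bare number silently compares against the Quantity's raw value.

from astropy import units as u

assert_allclose(1 * u.km, 1000.0 * u.m)   # passes: 1 km is converted to 1000 m first
assert_allclose(3.0, 3.0 * u.m)           # also passes: the bare float meets q2.value, unit ignored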
835b5f20061033b6fcf2a8b86203a42c5d4835ee
spotpy/unittests/test_parameter.py
spotpy/unittests/test_parameter.py
import unittest try: import spotpy except ImportError: import sys sys.path.append(".") import spotpy from spotpy import parameter import numpy as np #https://docs.python.org/3/library/unittest.html class TestListParameterDistribution(unittest.TestCase): def setUp(self): self.values = [1, 2, 3, 4, 5] self.list_param = parameter.List('test', self.values) self.list_param_repeat = parameter.List('test2', self.values, repeat=True) def test_list_is_callable(self): self.assertTrue(callable(self.list_param), "List instance should be callable") def test_list_gives_throwaway_value_on_first_call(self): v = self.list_param() self.assertNotEqual(self.values[0], v) def test_list_gives_1_value_when_size_is_not_specified(self): throwaway = self.list_param() v = self.list_param() self.assertEqual(self.values[0], v) def test_list_gives_n_values_when_size_is_n(self): throwaway = self.list_param() v = self.list_param(len(self.values)) self.assertEqual(self.values, list(v)) if __name__ == '__main__': unittest.main()
Add initial unit tests for parameter.py (List)
Add initial unit tests for parameter.py (List)
Python
mit
bees4ever/spotpy,bees4ever/spotpy,thouska/spotpy,thouska/spotpy,thouska/spotpy,bees4ever/spotpy
Add initial unit tests for parameter.py (List)
import unittest try: import spotpy except ImportError: import sys sys.path.append(".") import spotpy from spotpy import parameter import numpy as np #https://docs.python.org/3/library/unittest.html class TestListParameterDistribution(unittest.TestCase): def setUp(self): self.values = [1, 2, 3, 4, 5] self.list_param = parameter.List('test', self.values) self.list_param_repeat = parameter.List('test2', self.values, repeat=True) def test_list_is_callable(self): self.assertTrue(callable(self.list_param), "List instance should be callable") def test_list_gives_throwaway_value_on_first_call(self): v = self.list_param() self.assertNotEqual(self.values[0], v) def test_list_gives_1_value_when_size_is_not_specified(self): throwaway = self.list_param() v = self.list_param() self.assertEqual(self.values[0], v) def test_list_gives_n_values_when_size_is_n(self): throwaway = self.list_param() v = self.list_param(len(self.values)) self.assertEqual(self.values, list(v)) if __name__ == '__main__': unittest.main()
<commit_before><commit_msg>Add initial unit tests for parameter.py (List)<commit_after>
import unittest try: import spotpy except ImportError: import sys sys.path.append(".") import spotpy from spotpy import parameter import numpy as np #https://docs.python.org/3/library/unittest.html class TestListParameterDistribution(unittest.TestCase): def setUp(self): self.values = [1, 2, 3, 4, 5] self.list_param = parameter.List('test', self.values) self.list_param_repeat = parameter.List('test2', self.values, repeat=True) def test_list_is_callable(self): self.assertTrue(callable(self.list_param), "List instance should be callable") def test_list_gives_throwaway_value_on_first_call(self): v = self.list_param() self.assertNotEqual(self.values[0], v) def test_list_gives_1_value_when_size_is_not_specified(self): throwaway = self.list_param() v = self.list_param() self.assertEqual(self.values[0], v) def test_list_gives_n_values_when_size_is_n(self): throwaway = self.list_param() v = self.list_param(len(self.values)) self.assertEqual(self.values, list(v)) if __name__ == '__main__': unittest.main()
Add initial unit tests for parameter.py (List)import unittest try: import spotpy except ImportError: import sys sys.path.append(".") import spotpy from spotpy import parameter import numpy as np #https://docs.python.org/3/library/unittest.html class TestListParameterDistribution(unittest.TestCase): def setUp(self): self.values = [1, 2, 3, 4, 5] self.list_param = parameter.List('test', self.values) self.list_param_repeat = parameter.List('test2', self.values, repeat=True) def test_list_is_callable(self): self.assertTrue(callable(self.list_param), "List instance should be callable") def test_list_gives_throwaway_value_on_first_call(self): v = self.list_param() self.assertNotEqual(self.values[0], v) def test_list_gives_1_value_when_size_is_not_specified(self): throwaway = self.list_param() v = self.list_param() self.assertEqual(self.values[0], v) def test_list_gives_n_values_when_size_is_n(self): throwaway = self.list_param() v = self.list_param(len(self.values)) self.assertEqual(self.values, list(v)) if __name__ == '__main__': unittest.main()
<commit_before><commit_msg>Add initial unit tests for parameter.py (List)<commit_after>import unittest try: import spotpy except ImportError: import sys sys.path.append(".") import spotpy from spotpy import parameter import numpy as np #https://docs.python.org/3/library/unittest.html class TestListParameterDistribution(unittest.TestCase): def setUp(self): self.values = [1, 2, 3, 4, 5] self.list_param = parameter.List('test', self.values) self.list_param_repeat = parameter.List('test2', self.values, repeat=True) def test_list_is_callable(self): self.assertTrue(callable(self.list_param), "List instance should be callable") def test_list_gives_throwaway_value_on_first_call(self): v = self.list_param() self.assertNotEqual(self.values[0], v) def test_list_gives_1_value_when_size_is_not_specified(self): throwaway = self.list_param() v = self.list_param() self.assertEqual(self.values[0], v) def test_list_gives_n_values_when_size_is_n(self): throwaway = self.list_param() v = self.list_param(len(self.values)) self.assertEqual(self.values, list(v)) if __name__ == '__main__': unittest.main()
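Mirroring the tests above, the List parameter's burn-in behaviour can be summarised in a short sketch, assuming spotpy.parameter is importable as in the test module:

from spotpy import parameter

values = [1, 2, 3, 4, 5]
p = parameter.List('x', values)
_ = p()                                 # first draw is a throwaway value, not values[0]
assert list(p(len(values))) == values   # a sized draw afterwards replays the list in order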
b1feed0ced6d1328cc39bc9bba36331ec6da7803
pre_commit_hooks/detect_private_key.py
pre_commit_hooks/detect_private_key.py
from __future__ import print_function import argparse import sys BLACKLIST = [ b'BEGIN RSA PRIVATE KEY', b'BEGIN DSA PRIVATE KEY', b'BEGIN EC PRIVATE KEY', b'BEGIN OPENSSH PRIVATE KEY', b'BEGIN PRIVATE KEY', b'PuTTY-User-Key-File-2', b'BEGIN SSH2 ENCRYPTED PRIVATE KEY', ] def detect_private_key(argv=None): parser = argparse.ArgumentParser() parser.add_argument('filenames', nargs='*', help='Filenames to check') args = parser.parse_args(argv) private_key_files = [] for filename in args.filenames: with open(filename, 'rb') as f: content = f.read() if any(line in content for line in BLACKLIST): private_key_files.append(filename) if private_key_files: for private_key_file in private_key_files: print('Private key found: {}'.format(private_key_file)) return 1 else: return 0 if __name__ == '__main__': sys.exit(detect_private_key())
from __future__ import print_function import argparse import sys BLACKLIST = [ b'BEGIN RSA PRIVATE KEY', b'BEGIN DSA PRIVATE KEY', b'BEGIN EC PRIVATE KEY', b'BEGIN OPENSSH PRIVATE KEY', b'BEGIN PRIVATE KEY', b'PuTTY-User-Key-File-2', b'BEGIN SSH2 ENCRYPTED PRIVATE KEY', b'BEGIN PGP PRIVATE KEY BLOCK', ] def detect_private_key(argv=None): parser = argparse.ArgumentParser() parser.add_argument('filenames', nargs='*', help='Filenames to check') args = parser.parse_args(argv) private_key_files = [] for filename in args.filenames: with open(filename, 'rb') as f: content = f.read() if any(line in content for line in BLACKLIST): private_key_files.append(filename) if private_key_files: for private_key_file in private_key_files: print('Private key found: {}'.format(private_key_file)) return 1 else: return 0 if __name__ == '__main__': sys.exit(detect_private_key())
Add ban for pgp/gpg private key blocks
Add ban for pgp/gpg private key blocks
Python
mit
pre-commit/pre-commit-hooks,Harwood/pre-commit-hooks
from __future__ import print_function import argparse import sys BLACKLIST = [ b'BEGIN RSA PRIVATE KEY', b'BEGIN DSA PRIVATE KEY', b'BEGIN EC PRIVATE KEY', b'BEGIN OPENSSH PRIVATE KEY', b'BEGIN PRIVATE KEY', b'PuTTY-User-Key-File-2', b'BEGIN SSH2 ENCRYPTED PRIVATE KEY', ] def detect_private_key(argv=None): parser = argparse.ArgumentParser() parser.add_argument('filenames', nargs='*', help='Filenames to check') args = parser.parse_args(argv) private_key_files = [] for filename in args.filenames: with open(filename, 'rb') as f: content = f.read() if any(line in content for line in BLACKLIST): private_key_files.append(filename) if private_key_files: for private_key_file in private_key_files: print('Private key found: {}'.format(private_key_file)) return 1 else: return 0 if __name__ == '__main__': sys.exit(detect_private_key()) Add ban for pgp/gpg private key blocks
from __future__ import print_function import argparse import sys BLACKLIST = [ b'BEGIN RSA PRIVATE KEY', b'BEGIN DSA PRIVATE KEY', b'BEGIN EC PRIVATE KEY', b'BEGIN OPENSSH PRIVATE KEY', b'BEGIN PRIVATE KEY', b'PuTTY-User-Key-File-2', b'BEGIN SSH2 ENCRYPTED PRIVATE KEY', b'BEGIN PGP PRIVATE KEY BLOCK', ] def detect_private_key(argv=None): parser = argparse.ArgumentParser() parser.add_argument('filenames', nargs='*', help='Filenames to check') args = parser.parse_args(argv) private_key_files = [] for filename in args.filenames: with open(filename, 'rb') as f: content = f.read() if any(line in content for line in BLACKLIST): private_key_files.append(filename) if private_key_files: for private_key_file in private_key_files: print('Private key found: {}'.format(private_key_file)) return 1 else: return 0 if __name__ == '__main__': sys.exit(detect_private_key())
<commit_before>from __future__ import print_function import argparse import sys BLACKLIST = [ b'BEGIN RSA PRIVATE KEY', b'BEGIN DSA PRIVATE KEY', b'BEGIN EC PRIVATE KEY', b'BEGIN OPENSSH PRIVATE KEY', b'BEGIN PRIVATE KEY', b'PuTTY-User-Key-File-2', b'BEGIN SSH2 ENCRYPTED PRIVATE KEY', ] def detect_private_key(argv=None): parser = argparse.ArgumentParser() parser.add_argument('filenames', nargs='*', help='Filenames to check') args = parser.parse_args(argv) private_key_files = [] for filename in args.filenames: with open(filename, 'rb') as f: content = f.read() if any(line in content for line in BLACKLIST): private_key_files.append(filename) if private_key_files: for private_key_file in private_key_files: print('Private key found: {}'.format(private_key_file)) return 1 else: return 0 if __name__ == '__main__': sys.exit(detect_private_key()) <commit_msg>Add ban for pgp/gpg private key blocks<commit_after>
from __future__ import print_function import argparse import sys BLACKLIST = [ b'BEGIN RSA PRIVATE KEY', b'BEGIN DSA PRIVATE KEY', b'BEGIN EC PRIVATE KEY', b'BEGIN OPENSSH PRIVATE KEY', b'BEGIN PRIVATE KEY', b'PuTTY-User-Key-File-2', b'BEGIN SSH2 ENCRYPTED PRIVATE KEY', b'BEGIN PGP PRIVATE KEY BLOCK', ] def detect_private_key(argv=None): parser = argparse.ArgumentParser() parser.add_argument('filenames', nargs='*', help='Filenames to check') args = parser.parse_args(argv) private_key_files = [] for filename in args.filenames: with open(filename, 'rb') as f: content = f.read() if any(line in content for line in BLACKLIST): private_key_files.append(filename) if private_key_files: for private_key_file in private_key_files: print('Private key found: {}'.format(private_key_file)) return 1 else: return 0 if __name__ == '__main__': sys.exit(detect_private_key())
from __future__ import print_function import argparse import sys BLACKLIST = [ b'BEGIN RSA PRIVATE KEY', b'BEGIN DSA PRIVATE KEY', b'BEGIN EC PRIVATE KEY', b'BEGIN OPENSSH PRIVATE KEY', b'BEGIN PRIVATE KEY', b'PuTTY-User-Key-File-2', b'BEGIN SSH2 ENCRYPTED PRIVATE KEY', ] def detect_private_key(argv=None): parser = argparse.ArgumentParser() parser.add_argument('filenames', nargs='*', help='Filenames to check') args = parser.parse_args(argv) private_key_files = [] for filename in args.filenames: with open(filename, 'rb') as f: content = f.read() if any(line in content for line in BLACKLIST): private_key_files.append(filename) if private_key_files: for private_key_file in private_key_files: print('Private key found: {}'.format(private_key_file)) return 1 else: return 0 if __name__ == '__main__': sys.exit(detect_private_key()) Add ban for pgp/gpg private key blocksfrom __future__ import print_function import argparse import sys BLACKLIST = [ b'BEGIN RSA PRIVATE KEY', b'BEGIN DSA PRIVATE KEY', b'BEGIN EC PRIVATE KEY', b'BEGIN OPENSSH PRIVATE KEY', b'BEGIN PRIVATE KEY', b'PuTTY-User-Key-File-2', b'BEGIN SSH2 ENCRYPTED PRIVATE KEY', b'BEGIN PGP PRIVATE KEY BLOCK', ] def detect_private_key(argv=None): parser = argparse.ArgumentParser() parser.add_argument('filenames', nargs='*', help='Filenames to check') args = parser.parse_args(argv) private_key_files = [] for filename in args.filenames: with open(filename, 'rb') as f: content = f.read() if any(line in content for line in BLACKLIST): private_key_files.append(filename) if private_key_files: for private_key_file in private_key_files: print('Private key found: {}'.format(private_key_file)) return 1 else: return 0 if __name__ == '__main__': sys.exit(detect_private_key())
<commit_before>from __future__ import print_function import argparse import sys BLACKLIST = [ b'BEGIN RSA PRIVATE KEY', b'BEGIN DSA PRIVATE KEY', b'BEGIN EC PRIVATE KEY', b'BEGIN OPENSSH PRIVATE KEY', b'BEGIN PRIVATE KEY', b'PuTTY-User-Key-File-2', b'BEGIN SSH2 ENCRYPTED PRIVATE KEY', ] def detect_private_key(argv=None): parser = argparse.ArgumentParser() parser.add_argument('filenames', nargs='*', help='Filenames to check') args = parser.parse_args(argv) private_key_files = [] for filename in args.filenames: with open(filename, 'rb') as f: content = f.read() if any(line in content for line in BLACKLIST): private_key_files.append(filename) if private_key_files: for private_key_file in private_key_files: print('Private key found: {}'.format(private_key_file)) return 1 else: return 0 if __name__ == '__main__': sys.exit(detect_private_key()) <commit_msg>Add ban for pgp/gpg private key blocks<commit_after>from __future__ import print_function import argparse import sys BLACKLIST = [ b'BEGIN RSA PRIVATE KEY', b'BEGIN DSA PRIVATE KEY', b'BEGIN EC PRIVATE KEY', b'BEGIN OPENSSH PRIVATE KEY', b'BEGIN PRIVATE KEY', b'PuTTY-User-Key-File-2', b'BEGIN SSH2 ENCRYPTED PRIVATE KEY', b'BEGIN PGP PRIVATE KEY BLOCK', ] def detect_private_key(argv=None): parser = argparse.ArgumentParser() parser.add_argument('filenames', nargs='*', help='Filenames to check') args = parser.parse_args(argv) private_key_files = [] for filename in args.filenames: with open(filename, 'rb') as f: content = f.read() if any(line in content for line in BLACKLIST): private_key_files.append(filename) if private_key_files: for private_key_file in private_key_files: print('Private key found: {}'.format(private_key_file)) return 1 else: return 0 if __name__ == '__main__': sys.exit(detect_private_key())
4883bd13c6e07a0568c29fd26a141888b52292b7
utils/player_draft_retriever.py
utils/player_draft_retriever.py
#!/usr/bin/env python # -*- coding: utf-8 -*- import re import requests from lxml import html from db.team import Team from db.player_draft import PlayerDraft class PlayerDraftRetriever(): NHL_PLAYER_DRAFT_PREFIX = "https://www.nhl.com/player" DRAFT_INFO_REGEX = re.compile( "(\d{4})\s(.+),\s(\d+).+\srd,.+\((\d+).+\soverall\)") def __init__(self): pass def retrieve_draft_information(self, player_id): url = "/".join((self.NHL_PLAYER_DRAFT_PREFIX, str(player_id))) r = requests.get(url) doc = html.fromstring(r.text) raw_draft_info = doc.xpath( "//li[@class='player-bio__item']/span[text() = " + "'Draft:']/parent::li/text()") if not raw_draft_info: print("No draft information found") return raw_draft_info = raw_draft_info.pop() print(raw_draft_info) match = re.search(self.DRAFT_INFO_REGEX, raw_draft_info) if match: year = int(match.group(1)) team = Team.find_by_orig_abbr(match.group(2)) round = int(match.group(3)) overall = int(match.group(4)) draft_info = PlayerDraft( player_id, team.team_id, year, round, overall) draft_info_db = PlayerDraft.find_by_player_id(player_id) if draft_info_db: if draft_info_db != draft_info: draft_info_db.update(draft_info)
Add retriever object for player draft information
Add retriever object for player draft information
Python
mit
leaffan/pynhldb
Add retriever object for player draft information
#!/usr/bin/env python # -*- coding: utf-8 -*- import re import requests from lxml import html from db.team import Team from db.player_draft import PlayerDraft class PlayerDraftRetriever(): NHL_PLAYER_DRAFT_PREFIX = "https://www.nhl.com/player" DRAFT_INFO_REGEX = re.compile( "(\d{4})\s(.+),\s(\d+).+\srd,.+\((\d+).+\soverall\)") def __init__(self): pass def retrieve_draft_information(self, player_id): url = "/".join((self.NHL_PLAYER_DRAFT_PREFIX, str(player_id))) r = requests.get(url) doc = html.fromstring(r.text) raw_draft_info = doc.xpath( "//li[@class='player-bio__item']/span[text() = " + "'Draft:']/parent::li/text()") if not raw_draft_info: print("No draft information found") return raw_draft_info = raw_draft_info.pop() print(raw_draft_info) match = re.search(self.DRAFT_INFO_REGEX, raw_draft_info) if match: year = int(match.group(1)) team = Team.find_by_orig_abbr(match.group(2)) round = int(match.group(3)) overall = int(match.group(4)) draft_info = PlayerDraft( player_id, team.team_id, year, round, overall) draft_info_db = PlayerDraft.find_by_player_id(player_id) if draft_info_db: if draft_info_db != draft_info: draft_info_db.update(draft_info)
<commit_before><commit_msg>Add retriever object for player draft information<commit_after>
#!/usr/bin/env python # -*- coding: utf-8 -*- import re import requests from lxml import html from db.team import Team from db.player_draft import PlayerDraft class PlayerDraftRetriever(): NHL_PLAYER_DRAFT_PREFIX = "https://www.nhl.com/player" DRAFT_INFO_REGEX = re.compile( "(\d{4})\s(.+),\s(\d+).+\srd,.+\((\d+).+\soverall\)") def __init__(self): pass def retrieve_draft_information(self, player_id): url = "/".join((self.NHL_PLAYER_DRAFT_PREFIX, str(player_id))) r = requests.get(url) doc = html.fromstring(r.text) raw_draft_info = doc.xpath( "//li[@class='player-bio__item']/span[text() = " + "'Draft:']/parent::li/text()") if not raw_draft_info: print("No draft information found") return raw_draft_info = raw_draft_info.pop() print(raw_draft_info) match = re.search(self.DRAFT_INFO_REGEX, raw_draft_info) if match: year = int(match.group(1)) team = Team.find_by_orig_abbr(match.group(2)) round = int(match.group(3)) overall = int(match.group(4)) draft_info = PlayerDraft( player_id, team.team_id, year, round, overall) draft_info_db = PlayerDraft.find_by_player_id(player_id) if draft_info_db: if draft_info_db != draft_info: draft_info_db.update(draft_info)
Add retriever object for player draft information#!/usr/bin/env python # -*- coding: utf-8 -*- import re import requests from lxml import html from db.team import Team from db.player_draft import PlayerDraft class PlayerDraftRetriever(): NHL_PLAYER_DRAFT_PREFIX = "https://www.nhl.com/player" DRAFT_INFO_REGEX = re.compile( "(\d{4})\s(.+),\s(\d+).+\srd,.+\((\d+).+\soverall\)") def __init__(self): pass def retrieve_draft_information(self, player_id): url = "/".join((self.NHL_PLAYER_DRAFT_PREFIX, str(player_id))) r = requests.get(url) doc = html.fromstring(r.text) raw_draft_info = doc.xpath( "//li[@class='player-bio__item']/span[text() = " + "'Draft:']/parent::li/text()") if not raw_draft_info: print("No draft information found") return raw_draft_info = raw_draft_info.pop() print(raw_draft_info) match = re.search(self.DRAFT_INFO_REGEX, raw_draft_info) if match: year = int(match.group(1)) team = Team.find_by_orig_abbr(match.group(2)) round = int(match.group(3)) overall = int(match.group(4)) draft_info = PlayerDraft( player_id, team.team_id, year, round, overall) draft_info_db = PlayerDraft.find_by_player_id(player_id) if draft_info_db: if draft_info_db != draft_info: draft_info_db.update(draft_info)
<commit_before><commit_msg>Add retriever object for player draft information<commit_after>#!/usr/bin/env python # -*- coding: utf-8 -*- import re import requests from lxml import html from db.team import Team from db.player_draft import PlayerDraft class PlayerDraftRetriever(): NHL_PLAYER_DRAFT_PREFIX = "https://www.nhl.com/player" DRAFT_INFO_REGEX = re.compile( "(\d{4})\s(.+),\s(\d+).+\srd,.+\((\d+).+\soverall\)") def __init__(self): pass def retrieve_draft_information(self, player_id): url = "/".join((self.NHL_PLAYER_DRAFT_PREFIX, str(player_id))) r = requests.get(url) doc = html.fromstring(r.text) raw_draft_info = doc.xpath( "//li[@class='player-bio__item']/span[text() = " + "'Draft:']/parent::li/text()") if not raw_draft_info: print("No draft information found") return raw_draft_info = raw_draft_info.pop() print(raw_draft_info) match = re.search(self.DRAFT_INFO_REGEX, raw_draft_info) if match: year = int(match.group(1)) team = Team.find_by_orig_abbr(match.group(2)) round = int(match.group(3)) overall = int(match.group(4)) draft_info = PlayerDraft( player_id, team.team_id, year, round, overall) draft_info_db = PlayerDraft.find_by_player_id(player_id) if draft_info_db: if draft_info_db != draft_info: draft_info_db.update(draft_info)
bc34d530f4a21b5f06228d626f446c617b9c8876
examples/defconfig_oldconfig.py
examples/defconfig_oldconfig.py
# Produces exactly the same output as the following script: # # make defconfig # echo CONFIG_ETHERNET=n >> .config # make oldconfig # echo CONFIG_ETHERNET=y >> .config # yes n | make oldconfig # # This came up in https://github.com/ulfalizer/Kconfiglib/issues/15. import kconfiglib import sys conf = kconfiglib.Config(sys.argv[1]) # Mirrors defconfig conf.load_config("arch/x86/configs/x86_64_defconfig") conf.write_config(".config") # Mirrors the first oldconfig conf.load_config(".config") conf["ETHERNET"].set_user_value('n') conf.write_config(".config") # Mirrors the second oldconfig conf.load_config(".config") conf["ETHERNET"].set_user_value('y') for s in conf: if s.get_user_value() is None and 'n' in s.get_assignable_values(): s.set_user_value('n') # Write the final configuration conf.write_config(".config")
Add example that mirrors defconfig and oldconfig.
Add example that mirrors defconfig and oldconfig. From https://github.com/ulfalizer/Kconfiglib/issues/15. Getting the output to match up exactly requires emulating each step, due to Kconfig subtleties related to which symbols have been assigned values by the user. The output might differ with other approaches, but this is not a bug.
Python
isc
ulfalizer/Kconfiglib,ulfalizer/Kconfiglib
Add example that mirrors defconfig and oldconfig. From https://github.com/ulfalizer/Kconfiglib/issues/15. Getting the output to match up exactly requires emulating each step, due to Kconfig subtleties related to which symbols have been assigned values by the user. The output might differ with other approaches, but this is not a bug.
# Produces exactly the same output as the following script: # # make defconfig # echo CONFIG_ETHERNET=n >> .config # make oldconfig # echo CONFIG_ETHERNET=y >> .config # yes n | make oldconfig # # This came up in https://github.com/ulfalizer/Kconfiglib/issues/15. import kconfiglib import sys conf = kconfiglib.Config(sys.argv[1]) # Mirrors defconfig conf.load_config("arch/x86/configs/x86_64_defconfig") conf.write_config(".config") # Mirrors the first oldconfig conf.load_config(".config") conf["ETHERNET"].set_user_value('n') conf.write_config(".config") # Mirrors the second oldconfig conf.load_config(".config") conf["ETHERNET"].set_user_value('y') for s in conf: if s.get_user_value() is None and 'n' in s.get_assignable_values(): s.set_user_value('n') # Write the final configuration conf.write_config(".config")
<commit_before><commit_msg>Add example that mirrors defconfig and oldconfig. From https://github.com/ulfalizer/Kconfiglib/issues/15. Getting the output to match up exactly requires emulating each step, due to Kconfig subtleties related to which symbols have been assigned values by the user. The output might differ with other approaches, but this is not a bug.<commit_after>
# Produces exactly the same output as the following script: # # make defconfig # echo CONFIG_ETHERNET=n >> .config # make oldconfig # echo CONFIG_ETHERNET=y >> .config # yes n | make oldconfig # # This came up in https://github.com/ulfalizer/Kconfiglib/issues/15. import kconfiglib import sys conf = kconfiglib.Config(sys.argv[1]) # Mirrors defconfig conf.load_config("arch/x86/configs/x86_64_defconfig") conf.write_config(".config") # Mirrors the first oldconfig conf.load_config(".config") conf["ETHERNET"].set_user_value('n') conf.write_config(".config") # Mirrors the second oldconfig conf.load_config(".config") conf["ETHERNET"].set_user_value('y') for s in conf: if s.get_user_value() is None and 'n' in s.get_assignable_values(): s.set_user_value('n') # Write the final configuration conf.write_config(".config")
Add example that mirrors defconfig and oldconfig. From https://github.com/ulfalizer/Kconfiglib/issues/15. Getting the output to match up exactly requires emulating each step, due to Kconfig subtleties related to which symbols have been assigned values by the user. The output might differ with other approaches, but this is not a bug.# Produces exactly the same output as the following script: # # make defconfig # echo CONFIG_ETHERNET=n >> .config # make oldconfig # echo CONFIG_ETHERNET=y >> .config # yes n | make oldconfig # # This came up in https://github.com/ulfalizer/Kconfiglib/issues/15. import kconfiglib import sys conf = kconfiglib.Config(sys.argv[1]) # Mirrors defconfig conf.load_config("arch/x86/configs/x86_64_defconfig") conf.write_config(".config") # Mirrors the first oldconfig conf.load_config(".config") conf["ETHERNET"].set_user_value('n') conf.write_config(".config") # Mirrors the second oldconfig conf.load_config(".config") conf["ETHERNET"].set_user_value('y') for s in conf: if s.get_user_value() is None and 'n' in s.get_assignable_values(): s.set_user_value('n') # Write the final configuration conf.write_config(".config")
<commit_before><commit_msg>Add example that mirrors defconfig and oldconfig. From https://github.com/ulfalizer/Kconfiglib/issues/15. Getting the output to match up exactly requires emulating each step, due to Kconfig subtleties related to which symbols have been assigned values by the user. The output might differ with other approaches, but this is not a bug.<commit_after># Produces exactly the same output as the following script: # # make defconfig # echo CONFIG_ETHERNET=n >> .config # make oldconfig # echo CONFIG_ETHERNET=y >> .config # yes n | make oldconfig # # This came up in https://github.com/ulfalizer/Kconfiglib/issues/15. import kconfiglib import sys conf = kconfiglib.Config(sys.argv[1]) # Mirrors defconfig conf.load_config("arch/x86/configs/x86_64_defconfig") conf.write_config(".config") # Mirrors the first oldconfig conf.load_config(".config") conf["ETHERNET"].set_user_value('n') conf.write_config(".config") # Mirrors the second oldconfig conf.load_config(".config") conf["ETHERNET"].set_user_value('y') for s in conf: if s.get_user_value() is None and 'n' in s.get_assignable_values(): s.set_user_value('n') # Write the final configuration conf.write_config(".config")
06cd9e8e5006d68d7656b7f147442e54aaf9d7a1
clubs/migrations/0035_add_public_health_college.py
clubs/migrations/0035_add_public_health_college.py
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations def add_college(apps, schema_editor): Club = apps.get_model('clubs', 'Club') College = apps.get_model('clubs', 'College') StudentClubYear = apps.get_model('core', 'StudentClubYear') year_2015_2016 = StudentClubYear.objects.get(start_date__year=2015, end_date__year=2016) female_presidency = Club.objects.get(english_name="Presidency (Riyadh/Female)", year=year_2015_2016) r_i_f = College.objects.create(city='R', section='NG', name='I', gender='F') Club.objects.create(name="كلية الصحة العامة والمعلوماتية الصحية", english_name="College of Public Health and Health Informatics", description="", email="pending@ksau-hs.edu.sa", parent=female_presidency, gender="F", year=year_2015_2016, city="R", college=r_i_f) def remove_college(apps, schema_editor): Club = apps.get_model('clubs', 'Club') College = apps.get_model('clubs', 'College') StudentClubYear = apps.get_model('core', 'StudentClubYear') year_2015_2016 = StudentClubYear.objects.get(start_date__year=2015, end_date__year=2016) College.objects.get(city='R', section='NG', name='I', gender='F').delete() Club.objects.get(english_name="College of Public Health and Health Informatics", city='R', gender='F', year=year_2015_2016).delete() class Migration(migrations.Migration): dependencies = [ ('clubs', '0034_club_media_assessor'), ] operations = [ migrations.RunPython( add_college, reverse_code=remove_college), ]
Add female Public Health College and Club
Add female Public Health College and Club
Python
agpl-3.0
enjaz/enjaz,osamak/student-portal,osamak/student-portal,enjaz/enjaz,osamak/student-portal,osamak/student-portal,enjaz/enjaz,osamak/student-portal,enjaz/enjaz,enjaz/enjaz
Add female Public Health College and Club
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations def add_college(apps, schema_editor): Club = apps.get_model('clubs', 'Club') College = apps.get_model('clubs', 'College') StudentClubYear = apps.get_model('core', 'StudentClubYear') year_2015_2016 = StudentClubYear.objects.get(start_date__year=2015, end_date__year=2016) female_presidency = Club.objects.get(english_name="Presidency (Riyadh/Female)", year=year_2015_2016) r_i_f = College.objects.create(city='R', section='NG', name='I', gender='F') Club.objects.create(name="كلية الصحة العامة والمعلوماتية الصحية", english_name="College of Public Health and Health Informatics", description="", email="pending@ksau-hs.edu.sa", parent=female_presidency, gender="F", year=year_2015_2016, city="R", college=r_i_f) def remove_college(apps, schema_editor): Club = apps.get_model('clubs', 'Club') College = apps.get_model('clubs', 'College') StudentClubYear = apps.get_model('core', 'StudentClubYear') year_2015_2016 = StudentClubYear.objects.get(start_date__year=2015, end_date__year=2016) College.objects.get(city='R', section='NG', name='I', gender='F').delete() Club.objects.get(english_name="College of Public Health and Health Informatics", city='R', gender='F', year=year_2015_2016).delete() class Migration(migrations.Migration): dependencies = [ ('clubs', '0034_club_media_assessor'), ] operations = [ migrations.RunPython( add_college, reverse_code=remove_college), ]
<commit_before><commit_msg>Add female Public Health College and Club<commit_after>
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations def add_college(apps, schema_editor): Club = apps.get_model('clubs', 'Club') College = apps.get_model('clubs', 'College') StudentClubYear = apps.get_model('core', 'StudentClubYear') year_2015_2016 = StudentClubYear.objects.get(start_date__year=2015, end_date__year=2016) female_presidency = Club.objects.get(english_name="Presidency (Riyadh/Female)", year=year_2015_2016) r_i_f = College.objects.create(city='R', section='NG', name='I', gender='F') Club.objects.create(name="كلية الصحة العامة والمعلوماتية الصحية", english_name="College of Public Health and Health Informatics", description="", email="pending@ksau-hs.edu.sa", parent=female_presidency, gender="F", year=year_2015_2016, city="R", college=r_i_f) def remove_college(apps, schema_editor): Club = apps.get_model('clubs', 'Club') College = apps.get_model('clubs', 'College') StudentClubYear = apps.get_model('core', 'StudentClubYear') year_2015_2016 = StudentClubYear.objects.get(start_date__year=2015, end_date__year=2016) College.objects.get(city='R', section='NG', name='I', gender='F').delete() Club.objects.get(english_name="College of Public Health and Health Informatics", city='R', gender='F', year=year_2015_2016).delete() class Migration(migrations.Migration): dependencies = [ ('clubs', '0034_club_media_assessor'), ] operations = [ migrations.RunPython( add_college, reverse_code=remove_college), ]
Add female Public Health College and Club# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations def add_college(apps, schema_editor): Club = apps.get_model('clubs', 'Club') College = apps.get_model('clubs', 'College') StudentClubYear = apps.get_model('core', 'StudentClubYear') year_2015_2016 = StudentClubYear.objects.get(start_date__year=2015, end_date__year=2016) female_presidency = Club.objects.get(english_name="Presidency (Riyadh/Female)", year=year_2015_2016) r_i_f = College.objects.create(city='R', section='NG', name='I', gender='F') Club.objects.create(name="كلية الصحة العامة والمعلوماتية الصحية", english_name="College of Public Health and Health Informatics", description="", email="pending@ksau-hs.edu.sa", parent=female_presidency, gender="F", year=year_2015_2016, city="R", college=r_i_f) def remove_college(apps, schema_editor): Club = apps.get_model('clubs', 'Club') College = apps.get_model('clubs', 'College') StudentClubYear = apps.get_model('core', 'StudentClubYear') year_2015_2016 = StudentClubYear.objects.get(start_date__year=2015, end_date__year=2016) College.objects.get(city='R', section='NG', name='I', gender='F').delete() Club.objects.get(english_name="College of Public Health and Health Informatics", city='R', gender='F', year=year_2015_2016).delete() class Migration(migrations.Migration): dependencies = [ ('clubs', '0034_club_media_assessor'), ] operations = [ migrations.RunPython( add_college, reverse_code=remove_college), ]
<commit_before><commit_msg>Add female Public Health College and Club<commit_after># -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations def add_college(apps, schema_editor): Club = apps.get_model('clubs', 'Club') College = apps.get_model('clubs', 'College') StudentClubYear = apps.get_model('core', 'StudentClubYear') year_2015_2016 = StudentClubYear.objects.get(start_date__year=2015, end_date__year=2016) female_presidency = Club.objects.get(english_name="Presidency (Riyadh/Female)", year=year_2015_2016) r_i_f = College.objects.create(city='R', section='NG', name='I', gender='F') Club.objects.create(name="كلية الصحة العامة والمعلوماتية الصحية", english_name="College of Public Health and Health Informatics", description="", email="pending@ksau-hs.edu.sa", parent=female_presidency, gender="F", year=year_2015_2016, city="R", college=r_i_f) def remove_college(apps, schema_editor): Club = apps.get_model('clubs', 'Club') College = apps.get_model('clubs', 'College') StudentClubYear = apps.get_model('core', 'StudentClubYear') year_2015_2016 = StudentClubYear.objects.get(start_date__year=2015, end_date__year=2016) College.objects.get(city='R', section='NG', name='I', gender='F').delete() Club.objects.get(english_name="College of Public Health and Health Informatics", city='R', gender='F', year=year_2015_2016).delete() class Migration(migrations.Migration): dependencies = [ ('clubs', '0034_club_media_assessor'), ] operations = [ migrations.RunPython( add_college, reverse_code=remove_college), ]
17cdae7f50a7ed15c4e8a84cdb0000a32f824c5f
examples/outh/getaccesstoken.py
examples/outh/getaccesstoken.py
import webbrowser import tweepy """ Query the user for their consumer key/secret then attempt to fetch a valid access token. """ if __name__ == "__main__": consumer_key = raw_input('Consumer key: ').strip() consumer_secret = raw_input('Consumer secret: ').strip() auth = tweepy.OAuthHandler(consumer_key, consumer_secret) # Open authorization URL in browser webbrowser.open(auth.get_authorization_url()) # Ask user for verifier pin pin = raw_input('Verification pin number from twitter.com: ').strip() # Get access token token = auth.get_access_token(verifier=pin) # Give user the access token print 'Access token:' print ' Key: %s' % token.key print ' Secret: %s' % token.secret
Add an oauth example script.
Add an oauth example script.
Python
mit
xrg/tweepy,raymondethan/tweepy,damchilly/tweepy,mlinsey/tweepy,ze-phyr-us/tweepy,obskyr/tweepy,markunsworth/tweepy,edsu/tweepy,kcompher/tweepy,yared-bezum/tweepy,tuxos/tweepy,alexhanna/tweepy,nickmalleson/tweepy,iamjakob/tweepy,cogniteev/tweepy,cinemapub/bright-response,nickmalleson/tweepy,conversocial/tweepy,cinemapub/bright-response,bconnelly/tweepy,alexhanna/tweepy,techieshark/tweepy,vivek8943/tweepy,abhishekgahlot/tweepy,nickmalleson/tweepy,takeshineshiro/tweepy,srimanthd/tweepy,thelostscientist/tweepy,abhishekgahlot/tweepy,sidewire/tweepy,vikasgorur/tweepy,kylemanna/tweepy,svven/tweepy,robbiewoods05/tweepy,aleczadikian/tweepy,vishnugonela/tweepy,atomicjets/tweepy,xrg/tweepy,markunsworth/tweepy,ze-phyr-us/tweepy,tweepy/tweepy,truekonrads/tweepy,wjt/tweepy,tsablic/tweepy,hackebrot/tweepy,jperecharla/tweepy,Choko256/tweepy,elijah513/tweepy,awangga/tweepy,arunxarun/tweepy,zhenv5/tweepy,nickmalleson/tweepy,kskk02/tweepy,IsaacHaze/tweepy,aganzha/tweepy,LikeABird/tweepy,arpithparikh/tweepy,sa8/tweepy,rudraksh125/tweepy
Add an oauth example script.
import webbrowser import tweepy """ Query the user for their consumer key/secret then attempt to fetch a valid access token. """ if __name__ == "__main__": consumer_key = raw_input('Consumer key: ').strip() consumer_secret = raw_input('Consumer secret: ').strip() auth = tweepy.OAuthHandler(consumer_key, consumer_secret) # Open authorization URL in browser webbrowser.open(auth.get_authorization_url()) # Ask user for verifier pin pin = raw_input('Verification pin number from twitter.com: ').strip() # Get access token token = auth.get_access_token(verifier=pin) # Give user the access token print 'Access token:' print ' Key: %s' % token.key print ' Secret: %s' % token.secret
<commit_before><commit_msg>Add an oauth example script.<commit_after>
import webbrowser import tweepy """ Query the user for their consumer key/secret then attempt to fetch a valid access token. """ if __name__ == "__main__": consumer_key = raw_input('Consumer key: ').strip() consumer_secret = raw_input('Consumer secret: ').strip() auth = tweepy.OAuthHandler(consumer_key, consumer_secret) # Open authorization URL in browser webbrowser.open(auth.get_authorization_url()) # Ask user for verifier pin pin = raw_input('Verification pin number from twitter.com: ').strip() # Get access token token = auth.get_access_token(verifier=pin) # Give user the access token print 'Access token:' print ' Key: %s' % token.key print ' Secret: %s' % token.secret
Add an oauth example script.import webbrowser import tweepy """ Query the user for their consumer key/secret then attempt to fetch a valid access token. """ if __name__ == "__main__": consumer_key = raw_input('Consumer key: ').strip() consumer_secret = raw_input('Consumer secret: ').strip() auth = tweepy.OAuthHandler(consumer_key, consumer_secret) # Open authorization URL in browser webbrowser.open(auth.get_authorization_url()) # Ask user for verifier pin pin = raw_input('Verification pin number from twitter.com: ').strip() # Get access token token = auth.get_access_token(verifier=pin) # Give user the access token print 'Access token:' print ' Key: %s' % token.key print ' Secret: %s' % token.secret
<commit_before><commit_msg>Add an oauth example script.<commit_after>import webbrowser import tweepy """ Query the user for their consumer key/secret then attempt to fetch a valid access token. """ if __name__ == "__main__": consumer_key = raw_input('Consumer key: ').strip() consumer_secret = raw_input('Consumer secret: ').strip() auth = tweepy.OAuthHandler(consumer_key, consumer_secret) # Open authorization URL in browser webbrowser.open(auth.get_authorization_url()) # Ask user for verifier pin pin = raw_input('Verification pin number from twitter.com: ').strip() # Get access token token = auth.get_access_token(verifier=pin) # Give user the access token print 'Access token:' print ' Key: %s' % token.key print ' Secret: %s' % token.secret
bc32b2bccc82caecea0cf936e13c3ae70d0e9486
utils/check.py
utils/check.py
from pathlib import Path from PIL import Image from concurrent.futures import ProcessPoolExecutor import os import sys def verify_or_delete(filename): try: Image.open(filename).load() except OSError: return False return True if __name__ == '__main__': if len(sys.argv) < 2: print('Remove Broken Images\nUsage: python check.py <dir>') exit(-1) filenames = list(Path(sys.argv[1]).rglob('*.*')) with ProcessPoolExecutor() as executor: broken, total = 0, len(filenames) jobs = executor.map(verify_or_delete, filenames) for i, (filename, verified) in enumerate(zip(filenames, jobs)): if not verified: broken += 1 os.system('rm "%s"' % filename) print('Checking %d/%d, %d deleted...' % (i + 1, total, broken), end='\r') print('\nDone.')
Add script to remove broken images.
Add script to remove broken images.
Python
mit
Lodour/Weibo-Album-Crawler
Add script to remove broken images.
from pathlib import Path from PIL import Image from concurrent.futures import ProcessPoolExecutor import os import sys def verify_or_delete(filename): try: Image.open(filename).load() except OSError: return False return True if __name__ == '__main__': if len(sys.argv) < 2: print('Remove Broken Images\nUsage: python check.py <dir>') exit(-1) filenames = list(Path(sys.argv[1]).rglob('*.*')) with ProcessPoolExecutor() as executor: broken, total = 0, len(filenames) jobs = executor.map(verify_or_delete, filenames) for i, (filename, verified) in enumerate(zip(filenames, jobs)): if not verified: broken += 1 os.system('rm "%s"' % filename) print('Checking %d/%d, %d deleted...' % (i + 1, total, broken), end='\r') print('\nDone.')
<commit_before><commit_msg>Add script to remove broken images.<commit_after>
from pathlib import Path from PIL import Image from concurrent.futures import ProcessPoolExecutor import os import sys def verify_or_delete(filename): try: Image.open(filename).load() except OSError: return False return True if __name__ == '__main__': if len(sys.argv) < 2: print('Remove Broken Images\nUsage: python check.py <dir>') exit(-1) filenames = list(Path(sys.argv[1]).rglob('*.*')) with ProcessPoolExecutor() as executor: broken, total = 0, len(filenames) jobs = executor.map(verify_or_delete, filenames) for i, (filename, verified) in enumerate(zip(filenames, jobs)): if not verified: broken += 1 os.system('rm "%s"' % filename) print('Checking %d/%d, %d deleted...' % (i + 1, total, broken), end='\r') print('\nDone.')
Add script to remove broken images.from pathlib import Path from PIL import Image from concurrent.futures import ProcessPoolExecutor import os import sys def verify_or_delete(filename): try: Image.open(filename).load() except OSError: return False return True if __name__ == '__main__': if len(sys.argv) < 2: print('Remove Broken Images\nUsage: python check.py <dir>') exit(-1) filenames = list(Path(sys.argv[1]).rglob('*.*')) with ProcessPoolExecutor() as executor: broken, total = 0, len(filenames) jobs = executor.map(verify_or_delete, filenames) for i, (filename, verified) in enumerate(zip(filenames, jobs)): if not verified: broken += 1 os.system('rm "%s"' % filename) print('Checking %d/%d, %d deleted...' % (i + 1, total, broken), end='\r') print('\nDone.')
<commit_before><commit_msg>Add script to remove broken images.<commit_after>from pathlib import Path from PIL import Image from concurrent.futures import ProcessPoolExecutor import os import sys def verify_or_delete(filename): try: Image.open(filename).load() except OSError: return False return True if __name__ == '__main__': if len(sys.argv) < 2: print('Remove Broken Images\nUsage: python check.py <dir>') exit(-1) filenames = list(Path(sys.argv[1]).rglob('*.*')) with ProcessPoolExecutor() as executor: broken, total = 0, len(filenames) jobs = executor.map(verify_or_delete, filenames) for i, (filename, verified) in enumerate(zip(filenames, jobs)): if not verified: broken += 1 os.system('rm "%s"' % filename) print('Checking %d/%d, %d deleted...' % (i + 1, total, broken), end='\r') print('\nDone.')
388bbd915a5e40a2e096eb22ab294ffcbd3db936
bananas/model.py
bananas/model.py
import numpy # FIXME: copy the functions here from sklearn.mixture.gmm import log_multivariate_normal_density, logsumexp class GMM(object): def __init__(self, weights, means, covs): self.weights = numpy.array(weights) self.means = numpy.array(means) self.covs = numpy.array(covs) def score(self, X, return_responsibilities=False): nc = len(self.weights) X = numpy.array(X) if X.ndim == 1: X = X[:, None] if X.size == 0: return numpy.array([]), numpy.empty((0, len(self.weights))) if X.shape[1] != self.means.shape[1]: raise ValueError('The shape of X is not compatible with self') lpr = numpy.log(self.weights) + \ log_multivariate_normal_density(X, self.means, self.covs, 'full') logprob = logsumexp(lpr, axis=1) if return_responsibilities: responsibilities = numpy.exp(lpr - logprob[:, None]) return logprob, responsibilities return logprob @classmethod def fit(kls, nc, X): # FIXME: get rid of this and add weights support from sklearn import mixture model = mixture.GMM(nc, covariance_type='full', n_iter=100) model.fit(X) if not model.converged_: raise ValueError("Your data is strange. Gaussian mixture failed to converge") return kls(model.weights_, model.means_, model.covars_) class Confidence(object): def __init__(self, model, confidence_table): self.model = model self.confidence_table = confidence_table def score(self, X): x, y = self.confidence_table sc = self.model.score(X) return numpy.interp(sc, x, y, left=1., right=0.) @classmethod def fit(kls, model, X, vmin=-5, vmax=0, nb=100): sc = model.score(X) confidence_levels = 1 - numpy.logspace(vmin, vmax, num=nb) # FIXME: add weight support here sc_cl = numpy.percentile(sc, 100. - confidence_levels * 100.) confidence_table = numpy.array([sc_cl, confidence_levels]) return kls(model, confidence_table)
Add a gmm, currently wrapping sklearn
Add a gmm, currently wrapping sklearn
Python
apache-2.0
bccp/bananaplots,bccp/bananaplots
Add a gmm, currently wrapping sklearn
import numpy # FIXME: copy the functions here from sklearn.mixture.gmm import log_multivariate_normal_density, logsumexp class GMM(object): def __init__(self, weights, means, covs): self.weights = numpy.array(weights) self.means = numpy.array(means) self.covs = numpy.array(covs) def score(self, X, return_responsibilities=False): nc = len(self.weights) X = numpy.array(X) if X.ndim == 1: X = X[:, None] if X.size == 0: return numpy.array([]), numpy.empty((0, len(self.weights))) if X.shape[1] != self.means.shape[1]: raise ValueError('The shape of X is not compatible with self') lpr = numpy.log(self.weights) + \ log_multivariate_normal_density(X, self.means, self.covs, 'full') logprob = logsumexp(lpr, axis=1) if return_responsibilities: responsibilities = numpy.exp(lpr - logprob[:, None]) return logprob, responsibilities return logprob @classmethod def fit(kls, nc, X): # FIXME: get rid of this and add weights support from sklearn import mixture model = mixture.GMM(nc, covariance_type='full', n_iter=100) model.fit(X) if not model.converged_: raise ValueError("Your data is strange. Gaussian mixture failed to converge") return kls(model.weights_, model.means_, model.covars_) class Confidence(object): def __init__(self, model, confidence_table): self.model = model self.confidence_table = confidence_table def score(self, X): x, y = self.confidence_table sc = self.model.score(X) return numpy.interp(sc, x, y, left=1., right=0.) @classmethod def fit(kls, model, X, vmin=-5, vmax=0, nb=100): sc = model.score(X) confidence_levels = 1 - numpy.logspace(vmin, vmax, num=nb) # FIXME: add weight support here sc_cl = numpy.percentile(sc, 100. - confidence_levels * 100.) confidence_table = numpy.array([sc_cl, confidence_levels]) return kls(model, confidence_table)
<commit_before><commit_msg>Add a gmm, currently wrapping sklearn<commit_after>
import numpy # FIXME: copy the functions here from sklearn.mixture.gmm import log_multivariate_normal_density, logsumexp class GMM(object): def __init__(self, weights, means, covs): self.weights = numpy.array(weights) self.means = numpy.array(means) self.covs = numpy.array(covs) def score(self, X, return_responsibilities=False): nc = len(self.weights) X = numpy.array(X) if X.ndim == 1: X = X[:, None] if X.size == 0: return numpy.array([]), numpy.empty((0, len(self.weights))) if X.shape[1] != self.means.shape[1]: raise ValueError('The shape of X is not compatible with self') lpr = numpy.log(self.weights) + \ log_multivariate_normal_density(X, self.means, self.covs, 'full') logprob = logsumexp(lpr, axis=1) if return_responsibilities: responsibilities = numpy.exp(lpr - logprob[:, None]) return logprob, responsibilities return logprob @classmethod def fit(kls, nc, X): # FIXME: get rid of this and add weights support from sklearn import mixture model = mixture.GMM(nc, covariance_type='full', n_iter=100) model.fit(X) if not model.converged_: raise ValueError("Your data is strange. Gaussian mixture failed to converge") return kls(model.weights_, model.means_, model.covars_) class Confidence(object): def __init__(self, model, confidence_table): self.model = model self.confidence_table = confidence_table def score(self, X): x, y = self.confidence_table sc = self.model.score(X) return numpy.interp(sc, x, y, left=1., right=0.) @classmethod def fit(kls, model, X, vmin=-5, vmax=0, nb=100): sc = model.score(X) confidence_levels = 1 - numpy.logspace(vmin, vmax, num=nb) # FIXME: add weight support here sc_cl = numpy.percentile(sc, 100. - confidence_levels * 100.) confidence_table = numpy.array([sc_cl, confidence_levels]) return kls(model, confidence_table)
Add a gmm, currently wrapping sklearnimport numpy # FIXME: copy the functions here from sklearn.mixture.gmm import log_multivariate_normal_density, logsumexp class GMM(object): def __init__(self, weights, means, covs): self.weights = numpy.array(weights) self.means = numpy.array(means) self.covs = numpy.array(covs) def score(self, X, return_responsibilities=False): nc = len(self.weights) X = numpy.array(X) if X.ndim == 1: X = X[:, None] if X.size == 0: return numpy.array([]), numpy.empty((0, len(self.weights))) if X.shape[1] != self.means.shape[1]: raise ValueError('The shape of X is not compatible with self') lpr = numpy.log(self.weights) + \ log_multivariate_normal_density(X, self.means, self.covs, 'full') logprob = logsumexp(lpr, axis=1) if return_responsibilities: responsibilities = numpy.exp(lpr - logprob[:, None]) return logprob, responsibilities return logprob @classmethod def fit(kls, nc, X): # FIXME: get rid of this and add weights support from sklearn import mixture model = mixture.GMM(nc, covariance_type='full', n_iter=100) model.fit(X) if not model.converged_: raise ValueError("Your data is strange. Gaussian mixture failed to converge") return kls(model.weights_, model.means_, model.covars_) class Confidence(object): def __init__(self, model, confidence_table): self.model = model self.confidence_table = confidence_table def score(self, X): x, y = self.confidence_table sc = self.model.score(X) return numpy.interp(sc, x, y, left=1., right=0.) @classmethod def fit(kls, model, X, vmin=-5, vmax=0, nb=100): sc = model.score(X) confidence_levels = 1 - numpy.logspace(vmin, vmax, num=nb) # FIXME: add weight support here sc_cl = numpy.percentile(sc, 100. - confidence_levels * 100.) confidence_table = numpy.array([sc_cl, confidence_levels]) return kls(model, confidence_table)
<commit_before><commit_msg>Add a gmm, currently wrapping sklearn<commit_after>import numpy # FIXME: copy the functions here from sklearn.mixture.gmm import log_multivariate_normal_density, logsumexp class GMM(object): def __init__(self, weights, means, covs): self.weights = numpy.array(weights) self.means = numpy.array(means) self.covs = numpy.array(covs) def score(self, X, return_responsibilities=False): nc = len(self.weights) X = numpy.array(X) if X.ndim == 1: X = X[:, None] if X.size == 0: return numpy.array([]), numpy.empty((0, len(self.weights))) if X.shape[1] != self.means.shape[1]: raise ValueError('The shape of X is not compatible with self') lpr = numpy.log(self.weights) + \ log_multivariate_normal_density(X, self.means, self.covs, 'full') logprob = logsumexp(lpr, axis=1) if return_responsibilities: responsibilities = numpy.exp(lpr - logprob[:, None]) return logprob, responsibilities return logprob @classmethod def fit(kls, nc, X): # FIXME: get rid of this and add weights support from sklearn import mixture model = mixture.GMM(nc, covariance_type='full', n_iter=100) model.fit(X) if not model.converged_: raise ValueError("Your data is strange. Gaussian mixture failed to converge") return kls(model.weights_, model.means_, model.covars_) class Confidence(object): def __init__(self, model, confidence_table): self.model = model self.confidence_table = confidence_table def score(self, X): x, y = self.confidence_table sc = self.model.score(X) return numpy.interp(sc, x, y, left=1., right=0.) @classmethod def fit(kls, model, X, vmin=-5, vmax=0, nb=100): sc = model.score(X) confidence_levels = 1 - numpy.logspace(vmin, vmax, num=nb) # FIXME: add weight support here sc_cl = numpy.percentile(sc, 100. - confidence_levels * 100.) confidence_table = numpy.array([sc_cl, confidence_levels]) return kls(model, confidence_table)
7e9794dc98a268479f0f57128effc67f88586c8f
bvspca/core/migrations/0025_auto_20180202_1214.py
bvspca/core/migrations/0025_auto_20180202_1214.py
# -*- coding: utf-8 -*- # Generated by Django 1.11.9 on 2018-02-02 19:14 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('core', '0024_contentindexpage_empty_message'), ] operations = [ migrations.AlterField( model_name='contentindexpage', name='empty_message', field=models.CharField(default='Empty', max_length=200), ), ]
Add default message for list pages
Add default message for list pages
Python
mit
nfletton/bvspca,nfletton/bvspca,nfletton/bvspca,nfletton/bvspca
Add default message for list pages
# -*- coding: utf-8 -*- # Generated by Django 1.11.9 on 2018-02-02 19:14 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('core', '0024_contentindexpage_empty_message'), ] operations = [ migrations.AlterField( model_name='contentindexpage', name='empty_message', field=models.CharField(default='Empty', max_length=200), ), ]
<commit_before><commit_msg>Add default message for list pages<commit_after>
# -*- coding: utf-8 -*- # Generated by Django 1.11.9 on 2018-02-02 19:14 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('core', '0024_contentindexpage_empty_message'), ] operations = [ migrations.AlterField( model_name='contentindexpage', name='empty_message', field=models.CharField(default='Empty', max_length=200), ), ]
Add default message for list pages# -*- coding: utf-8 -*- # Generated by Django 1.11.9 on 2018-02-02 19:14 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('core', '0024_contentindexpage_empty_message'), ] operations = [ migrations.AlterField( model_name='contentindexpage', name='empty_message', field=models.CharField(default='Empty', max_length=200), ), ]
<commit_before><commit_msg>Add default message for list pages<commit_after># -*- coding: utf-8 -*- # Generated by Django 1.11.9 on 2018-02-02 19:14 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('core', '0024_contentindexpage_empty_message'), ] operations = [ migrations.AlterField( model_name='contentindexpage', name='empty_message', field=models.CharField(default='Empty', max_length=200), ), ]
9ce90bc43cfcc5a56be958671f304e7929eb0446
cmsplugin_collapse/migrations/0002_auto_20160210_0651.py
cmsplugin_collapse/migrations/0002_auto_20160210_0651.py
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('cmsplugin_collapse', '0001_initial'), ] operations = [ migrations.AlterField( model_name='accordionheader', name='show_first', field=models.BooleanField(default=True, help_text='If selected, the first collapsible will be displayed in the open state.'), preserve_default=True, ), ]
Add missing migration step due to changes in model
Add missing migration step due to changes in model This should fix #5.
Python
bsd-3-clause
nimbis/cmsplugin-collapse,nimbis/cmsplugin-collapse
Add missing migration step due to changes in model This should fix #5.
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('cmsplugin_collapse', '0001_initial'), ] operations = [ migrations.AlterField( model_name='accordionheader', name='show_first', field=models.BooleanField(default=True, help_text='If selected, the first collapsible will be displayed in the open state.'), preserve_default=True, ), ]
<commit_before><commit_msg>Add missing migration step due to changes in model This should fix #5.<commit_after>
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('cmsplugin_collapse', '0001_initial'), ] operations = [ migrations.AlterField( model_name='accordionheader', name='show_first', field=models.BooleanField(default=True, help_text='If selected, the first collapsible will be displayed in the open state.'), preserve_default=True, ), ]
Add missing migration step due to changes in model This should fix #5.# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('cmsplugin_collapse', '0001_initial'), ] operations = [ migrations.AlterField( model_name='accordionheader', name='show_first', field=models.BooleanField(default=True, help_text='If selected, the first collapsible will be displayed in the open state.'), preserve_default=True, ), ]
<commit_before><commit_msg>Add missing migration step due to changes in model This should fix #5.<commit_after># -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('cmsplugin_collapse', '0001_initial'), ] operations = [ migrations.AlterField( model_name='accordionheader', name='show_first', field=models.BooleanField(default=True, help_text='If selected, the first collapsible will be displayed in the open state.'), preserve_default=True, ), ]
842c796a223ee9cb78c69ccb59416a2afe0fcee0
tests/permissions.py
tests/permissions.py
import unittest from permission import Permission, PERMISSION_DELIMITER class BasicPermissionTests(unittest.TestCase): def setUp(self): self.p1 = Permission("test{0}1{0}hello".format(PERMISSION_DELIMITER)) self.p2 = Permission("test{0}2{0}hello".format(PERMISSION_DELIMITER)) self.p3 = Permission("test") self.p4 = Permission("test{0}1{0}hello".format(PERMISSION_DELIMITER)) self.ps1 = {self.p1, self.p2} self.ps2 = {self.p1, self.p4} self.ps3 = {self.p1} def test_equal(self): self.assertEqual(self.p1, self.p4) self.assertNotEqual(self.p1, self.p2) self.assertNotEqual(self.p1, self.p3) self.assertEqual(self.ps2, self.ps3) def test_grants_permission(self): self.assertTrue(self.p1.grants_permission(self.p1)) self.assertTrue(self.p1.grants_permission(self.p4)) self.assertFalse(self.p1.grants_permission(self.p2)) self.assertFalse(self.p1.grants_permission(self.p3)) self.assertFalse(self.p3.grants_permission(self.p1)) def test_grants_any_permission(self): self.assertTrue(self.p1.grants_any_permission(self.ps1)) self.assertTrue(self.p2.grants_any_permission(self.ps1)) self.assertFalse(self.p3.grants_any_permission(self.ps1)) self.assertTrue(self.p4.grants_any_permission(self.ps1)) def test_segments(self): self.assertEqual(self.p1.segments, ["test", "1", "hello"]) self.assertEqual(self.p2.segments, ["test", "2", "hello"]) self.assertEqual(self.p3.segments, ["test"]) self.assertEqual(self.p1.segments, self.p4.segments) if __name__ == "__main__": unittest.main()
Add tests for Permission class.
Add tests for Permission class. Signed-off-by: Tyler O'Meara <c794bd35db97c1cf0b8edc21ac218cd202f68ca7@TylerOMeara.com>
Python
mit
Acidity/PyPermissions
Add tests for Permission class. Signed-off-by: Tyler O'Meara <c794bd35db97c1cf0b8edc21ac218cd202f68ca7@TylerOMeara.com>
import unittest from permission import Permission, PERMISSION_DELIMITER class BasicPermissionTests(unittest.TestCase): def setUp(self): self.p1 = Permission("test{0}1{0}hello".format(PERMISSION_DELIMITER)) self.p2 = Permission("test{0}2{0}hello".format(PERMISSION_DELIMITER)) self.p3 = Permission("test") self.p4 = Permission("test{0}1{0}hello".format(PERMISSION_DELIMITER)) self.ps1 = {self.p1, self.p2} self.ps2 = {self.p1, self.p4} self.ps3 = {self.p1} def test_equal(self): self.assertEqual(self.p1, self.p4) self.assertNotEqual(self.p1, self.p2) self.assertNotEqual(self.p1, self.p3) self.assertEqual(self.ps2, self.ps3) def test_grants_permission(self): self.assertTrue(self.p1.grants_permission(self.p1)) self.assertTrue(self.p1.grants_permission(self.p4)) self.assertFalse(self.p1.grants_permission(self.p2)) self.assertFalse(self.p1.grants_permission(self.p3)) self.assertFalse(self.p3.grants_permission(self.p1)) def test_grants_any_permission(self): self.assertTrue(self.p1.grants_any_permission(self.ps1)) self.assertTrue(self.p2.grants_any_permission(self.ps1)) self.assertFalse(self.p3.grants_any_permission(self.ps1)) self.assertTrue(self.p4.grants_any_permission(self.ps1)) def test_segments(self): self.assertEqual(self.p1.segments, ["test", "1", "hello"]) self.assertEqual(self.p2.segments, ["test", "2", "hello"]) self.assertEqual(self.p3.segments, ["test"]) self.assertEqual(self.p1.segments, self.p4.segments) if __name__ == "__main__": unittest.main()
<commit_before><commit_msg>Add tests for Permission class. Signed-off-by: Tyler O'Meara <c794bd35db97c1cf0b8edc21ac218cd202f68ca7@TylerOMeara.com><commit_after>
import unittest from permission import Permission, PERMISSION_DELIMITER class BasicPermissionTests(unittest.TestCase): def setUp(self): self.p1 = Permission("test{0}1{0}hello".format(PERMISSION_DELIMITER)) self.p2 = Permission("test{0}2{0}hello".format(PERMISSION_DELIMITER)) self.p3 = Permission("test") self.p4 = Permission("test{0}1{0}hello".format(PERMISSION_DELIMITER)) self.ps1 = {self.p1, self.p2} self.ps2 = {self.p1, self.p4} self.ps3 = {self.p1} def test_equal(self): self.assertEqual(self.p1, self.p4) self.assertNotEqual(self.p1, self.p2) self.assertNotEqual(self.p1, self.p3) self.assertEqual(self.ps2, self.ps3) def test_grants_permission(self): self.assertTrue(self.p1.grants_permission(self.p1)) self.assertTrue(self.p1.grants_permission(self.p4)) self.assertFalse(self.p1.grants_permission(self.p2)) self.assertFalse(self.p1.grants_permission(self.p3)) self.assertFalse(self.p3.grants_permission(self.p1)) def test_grants_any_permission(self): self.assertTrue(self.p1.grants_any_permission(self.ps1)) self.assertTrue(self.p2.grants_any_permission(self.ps1)) self.assertFalse(self.p3.grants_any_permission(self.ps1)) self.assertTrue(self.p4.grants_any_permission(self.ps1)) def test_segments(self): self.assertEqual(self.p1.segments, ["test", "1", "hello"]) self.assertEqual(self.p2.segments, ["test", "2", "hello"]) self.assertEqual(self.p3.segments, ["test"]) self.assertEqual(self.p1.segments, self.p4.segments) if __name__ == "__main__": unittest.main()
Add tests for Permission class. Signed-off-by: Tyler O'Meara <c794bd35db97c1cf0b8edc21ac218cd202f68ca7@TylerOMeara.com>import unittest from permission import Permission, PERMISSION_DELIMITER class BasicPermissionTests(unittest.TestCase): def setUp(self): self.p1 = Permission("test{0}1{0}hello".format(PERMISSION_DELIMITER)) self.p2 = Permission("test{0}2{0}hello".format(PERMISSION_DELIMITER)) self.p3 = Permission("test") self.p4 = Permission("test{0}1{0}hello".format(PERMISSION_DELIMITER)) self.ps1 = {self.p1, self.p2} self.ps2 = {self.p1, self.p4} self.ps3 = {self.p1} def test_equal(self): self.assertEqual(self.p1, self.p4) self.assertNotEqual(self.p1, self.p2) self.assertNotEqual(self.p1, self.p3) self.assertEqual(self.ps2, self.ps3) def test_grants_permission(self): self.assertTrue(self.p1.grants_permission(self.p1)) self.assertTrue(self.p1.grants_permission(self.p4)) self.assertFalse(self.p1.grants_permission(self.p2)) self.assertFalse(self.p1.grants_permission(self.p3)) self.assertFalse(self.p3.grants_permission(self.p1)) def test_grants_any_permission(self): self.assertTrue(self.p1.grants_any_permission(self.ps1)) self.assertTrue(self.p2.grants_any_permission(self.ps1)) self.assertFalse(self.p3.grants_any_permission(self.ps1)) self.assertTrue(self.p4.grants_any_permission(self.ps1)) def test_segments(self): self.assertEqual(self.p1.segments, ["test", "1", "hello"]) self.assertEqual(self.p2.segments, ["test", "2", "hello"]) self.assertEqual(self.p3.segments, ["test"]) self.assertEqual(self.p1.segments, self.p4.segments) if __name__ == "__main__": unittest.main()
<commit_before><commit_msg>Add tests for Permission class. Signed-off-by: Tyler O'Meara <c794bd35db97c1cf0b8edc21ac218cd202f68ca7@TylerOMeara.com><commit_after>import unittest from permission import Permission, PERMISSION_DELIMITER class BasicPermissionTests(unittest.TestCase): def setUp(self): self.p1 = Permission("test{0}1{0}hello".format(PERMISSION_DELIMITER)) self.p2 = Permission("test{0}2{0}hello".format(PERMISSION_DELIMITER)) self.p3 = Permission("test") self.p4 = Permission("test{0}1{0}hello".format(PERMISSION_DELIMITER)) self.ps1 = {self.p1, self.p2} self.ps2 = {self.p1, self.p4} self.ps3 = {self.p1} def test_equal(self): self.assertEqual(self.p1, self.p4) self.assertNotEqual(self.p1, self.p2) self.assertNotEqual(self.p1, self.p3) self.assertEqual(self.ps2, self.ps3) def test_grants_permission(self): self.assertTrue(self.p1.grants_permission(self.p1)) self.assertTrue(self.p1.grants_permission(self.p4)) self.assertFalse(self.p1.grants_permission(self.p2)) self.assertFalse(self.p1.grants_permission(self.p3)) self.assertFalse(self.p3.grants_permission(self.p1)) def test_grants_any_permission(self): self.assertTrue(self.p1.grants_any_permission(self.ps1)) self.assertTrue(self.p2.grants_any_permission(self.ps1)) self.assertFalse(self.p3.grants_any_permission(self.ps1)) self.assertTrue(self.p4.grants_any_permission(self.ps1)) def test_segments(self): self.assertEqual(self.p1.segments, ["test", "1", "hello"]) self.assertEqual(self.p2.segments, ["test", "2", "hello"]) self.assertEqual(self.p3.segments, ["test"]) self.assertEqual(self.p1.segments, self.p4.segments) if __name__ == "__main__": unittest.main()
f5d2b17371dbd974820b9b8ab1fcdb11ad8fa646
backend/scripts/countdups.py
backend/scripts/countdups.py
#!/usr/bin/env python import rethinkdb as r conn = r.connect('localhost', 30815, db='materialscommons') rql = r.table('datafiles').filter(r.row['usesid'].match("^[0-9a-f]")).pluck('size') total_bytes = 0 total_files = 0 for doc in rql.run(conn): total_bytes = total_bytes + doc['size'] total_files = total_files + 1 print "Total bytes = %s for %d dups" %(format(total_bytes, ",d"), total_files)
Add in script to count duplicates.
Add in script to count duplicates.
Python
mit
materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org
Add in script to count duplicates.
#!/usr/bin/env python import rethinkdb as r conn = r.connect('localhost', 30815, db='materialscommons') rql = r.table('datafiles').filter(r.row['usesid'].match("^[0-9a-f]")).pluck('size') total_bytes = 0 total_files = 0 for doc in rql.run(conn): total_bytes = total_bytes + doc['size'] total_files = total_files + 1 print "Total bytes = %s for %d dups" %(format(total_bytes, ",d"), total_files)
<commit_before><commit_msg>Add in script to count duplicates.<commit_after>
#!/usr/bin/env python import rethinkdb as r conn = r.connect('localhost', 30815, db='materialscommons') rql = r.table('datafiles').filter(r.row['usesid'].match("^[0-9a-f]")).pluck('size') total_bytes = 0 total_files = 0 for doc in rql.run(conn): total_bytes = total_bytes + doc['size'] total_files = total_files + 1 print "Total bytes = %s for %d dups" %(format(total_bytes, ",d"), total_files)
Add in script to count duplicates.#!/usr/bin/env python import rethinkdb as r conn = r.connect('localhost', 30815, db='materialscommons') rql = r.table('datafiles').filter(r.row['usesid'].match("^[0-9a-f]")).pluck('size') total_bytes = 0 total_files = 0 for doc in rql.run(conn): total_bytes = total_bytes + doc['size'] total_files = total_files + 1 print "Total bytes = %s for %d dups" %(format(total_bytes, ",d"), total_files)
<commit_before><commit_msg>Add in script to count duplicates.<commit_after>#!/usr/bin/env python import rethinkdb as r conn = r.connect('localhost', 30815, db='materialscommons') rql = r.table('datafiles').filter(r.row['usesid'].match("^[0-9a-f]")).pluck('size') total_bytes = 0 total_files = 0 for doc in rql.run(conn): total_bytes = total_bytes + doc['size'] total_files = total_files + 1 print "Total bytes = %s for %d dups" %(format(total_bytes, ",d"), total_files)
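The script in this record fetches every matching document just to add up one field on the client. A sketch of the same count pushed into the database with ReQL aggregations follows; the connection parameters are copied from the script, and sum()/count() are real ReQL terms in the rethinkdb Python driver, but treat this as untested against that schema (it also uses a Python 3 print, unlike the Python 2 original).

#!/usr/bin/env python
import rethinkdb as r

conn = r.connect('localhost', 30815, db='materialscommons')
# Same selection as the original: datafiles whose usesid looks like a hex id.
dups = r.table('datafiles').filter(r.row['usesid'].match("^[0-9a-f]"))
total_bytes = dups.sum('size').run(conn)  # aggregate server-side
total_files = dups.count().run(conn)
print("Total bytes = {:,} for {} dups".format(total_bytes, total_files))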
8488e7c5245758e4651e6d723f93d52f3ff54d73
tools/submit_awcy.py
tools/submit_awcy.py
#!/usr/bin/env python from __future__ import print_function import requests import argparse import os import subprocess import sys if 'DAALA_ROOT' not in os.environ: print("Please specify the DAALA_ROOT environment variable to use this tool.") sys.exit(1) keyfile = open('secret_key','r') key = keyfile.read().strip() daala_root = os.environ['DAALA_ROOT'] os.chdir(daala_root) branch = subprocess.check_output('git symbolic-ref -q --short HEAD',shell=True).strip() parser = argparse.ArgumentParser(description='Submit test to arewecompressedyet.com') parser.add_argument('-prefix',default=branch) args = parser.parse_args() commit = subprocess.check_output('git rev-parse HEAD',shell=True).strip() short = subprocess.check_output('git rev-parse --short HEAD',shell=True).strip() date = subprocess.check_output(['git','show','-s','--format=%ci',commit]).strip() date_short = date.split()[0]; user = args.prefix run_id = user+'-'+date_short+'-'+short print('Creating run '+run_id) r = requests.post("https://arewecompressedyet.com/submit/job", {'run_id': run_id, 'commit': commit, 'key': key}) print(r)
Add tool for submitting jobs to AreWeCompressedYet
Add tool for submitting jobs to AreWeCompressedYet
Python
bsd-2-clause
luctrudeau/daala,kodabb/daala,vr000m/daala,nvoron23/daala,iankronquist/daala,kustom666/daala,kustom666/daala,jmvalin/daala,kbara/daala,xiph/daala,kbara/daala,xiph/daala,jmvalin/daala,ascent12/daala,tribouille/daala,tribouille/daala,xiph/daala,xiphmont/daala,ycho/daala,xiph/daala,HeadhunterXamd/daala,nvoron23/daala,HeadhunterXamd/daala,xiphmont/daala,kustom666/daala,vr000m/daala,iankronquist/daala,luctrudeau/daala,KyleSiefring/daala,luctrudeau/daala,ascent12/daala,iankronquist/daala,kodabb/daala,nvoron23/daala,nvoron23/daala,kbara/daala,KyleSiefring/daala,ycho/daala,tribouille/daala,felipebetancur/daala,ascent12/daala,KyleSiefring/daala,iankronquist/daala,iankronquist/daala,mbebenita/daala,vr000m/daala,felipebetancur/daala,xiph/daala,kodabb/daala,jmvalin/daala,kustom666/daala,luctrudeau/daala,jmvalin/daala,ascent12/daala,kustom666/daala,xiphmont/daala,felipebetancur/daala,ycho/daala,tribouille/daala,mbebenita/daala,vr000m/daala,xiphmont/daala,nvoron23/daala,felipebetancur/daala,ycho/daala,ascent12/daala,KyleSiefring/daala,tribouille/daala,HeadhunterXamd/daala,luctrudeau/daala,mbebenita/daala,mbebenita/daala,vr000m/daala,kbara/daala,ycho/daala,kodabb/daala,xiphmont/daala,HeadhunterXamd/daala,kodabb/daala,KyleSiefring/daala,jmvalin/daala,felipebetancur/daala,HeadhunterXamd/daala,mbebenita/daala
Add tool for submitting jobs to AreWeCompressedYet
#!/usr/bin/env python from __future__ import print_function import requests import argparse import os import subprocess import sys if 'DAALA_ROOT' not in os.environ: print("Please specify the DAALA_ROOT environment variable to use this tool.") sys.exit(1) keyfile = open('secret_key','r') key = keyfile.read().strip() daala_root = os.environ['DAALA_ROOT'] os.chdir(daala_root) branch = subprocess.check_output('git symbolic-ref -q --short HEAD',shell=True).strip() parser = argparse.ArgumentParser(description='Submit test to arewecompressedyet.com') parser.add_argument('-prefix',default=branch) args = parser.parse_args() commit = subprocess.check_output('git rev-parse HEAD',shell=True).strip() short = subprocess.check_output('git rev-parse --short HEAD',shell=True).strip() date = subprocess.check_output(['git','show','-s','--format=%ci',commit]).strip() date_short = date.split()[0]; user = args.prefix run_id = user+'-'+date_short+'-'+short print('Creating run '+run_id) r = requests.post("https://arewecompressedyet.com/submit/job", {'run_id': run_id, 'commit': commit, 'key': key}) print(r)
<commit_before><commit_msg>Add tool for submitting jobs to AreWeCompressedYet<commit_after>
#!/usr/bin/env python from __future__ import print_function import requests import argparse import os import subprocess import sys if 'DAALA_ROOT' not in os.environ: print("Please specify the DAALA_ROOT environment variable to use this tool.") sys.exit(1) keyfile = open('secret_key','r') key = keyfile.read().strip() daala_root = os.environ['DAALA_ROOT'] os.chdir(daala_root) branch = subprocess.check_output('git symbolic-ref -q --short HEAD',shell=True).strip() parser = argparse.ArgumentParser(description='Submit test to arewecompressedyet.com') parser.add_argument('-prefix',default=branch) args = parser.parse_args() commit = subprocess.check_output('git rev-parse HEAD',shell=True).strip() short = subprocess.check_output('git rev-parse --short HEAD',shell=True).strip() date = subprocess.check_output(['git','show','-s','--format=%ci',commit]).strip() date_short = date.split()[0]; user = args.prefix run_id = user+'-'+date_short+'-'+short print('Creating run '+run_id) r = requests.post("https://arewecompressedyet.com/submit/job", {'run_id': run_id, 'commit': commit, 'key': key}) print(r)
Add tool for submitting jobs to AreWeCompressedYet#!/usr/bin/env python from __future__ import print_function import requests import argparse import os import subprocess import sys if 'DAALA_ROOT' not in os.environ: print("Please specify the DAALA_ROOT environment variable to use this tool.") sys.exit(1) keyfile = open('secret_key','r') key = keyfile.read().strip() daala_root = os.environ['DAALA_ROOT'] os.chdir(daala_root) branch = subprocess.check_output('git symbolic-ref -q --short HEAD',shell=True).strip() parser = argparse.ArgumentParser(description='Submit test to arewecompressedyet.com') parser.add_argument('-prefix',default=branch) args = parser.parse_args() commit = subprocess.check_output('git rev-parse HEAD',shell=True).strip() short = subprocess.check_output('git rev-parse --short HEAD',shell=True).strip() date = subprocess.check_output(['git','show','-s','--format=%ci',commit]).strip() date_short = date.split()[0]; user = args.prefix run_id = user+'-'+date_short+'-'+short print('Creating run '+run_id) r = requests.post("https://arewecompressedyet.com/submit/job", {'run_id': run_id, 'commit': commit, 'key': key}) print(r)
<commit_before><commit_msg>Add tool for submitting jobs to AreWeCompressedYet<commit_after>#!/usr/bin/env python from __future__ import print_function import requests import argparse import os import subprocess import sys if 'DAALA_ROOT' not in os.environ: print("Please specify the DAALA_ROOT environment variable to use this tool.") sys.exit(1) keyfile = open('secret_key','r') key = keyfile.read().strip() daala_root = os.environ['DAALA_ROOT'] os.chdir(daala_root) branch = subprocess.check_output('git symbolic-ref -q --short HEAD',shell=True).strip() parser = argparse.ArgumentParser(description='Submit test to arewecompressedyet.com') parser.add_argument('-prefix',default=branch) args = parser.parse_args() commit = subprocess.check_output('git rev-parse HEAD',shell=True).strip() short = subprocess.check_output('git rev-parse --short HEAD',shell=True).strip() date = subprocess.check_output(['git','show','-s','--format=%ci',commit]).strip() date_short = date.split()[0]; user = args.prefix run_id = user+'-'+date_short+'-'+short print('Creating run '+run_id) r = requests.post("https://arewecompressedyet.com/submit/job", {'run_id': run_id, 'commit': commit, 'key': key}) print(r)
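The submission script above prints the Response object and exits 0 even when the server rejects the job. A small hardening sketch using only standard requests calls; the endpoint and payload are taken verbatim from the script, and the helper name is illustrative.

import requests

def submit_run(run_id, commit, key):
    r = requests.post("https://arewecompressedyet.com/submit/job",
                      {'run_id': run_id, 'commit': commit, 'key': key})
    r.raise_for_status()  # turn a 4xx/5xx into a visible failure
    return r.text

# usage, after building run_id/commit/key as the script does:
# print(submit_run(run_id, commit, key))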
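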
4d30756e722cafa40fa449e48c967eeebc58500a
zerver/management/commands/import_realm_filters.py
zerver/management/commands/import_realm_filters.py
from __future__ import absolute_import from django.core.management.base import BaseCommand from zerver.models import RealmFilter, get_realm import logging class Command(BaseCommand): help = """Imports realm filters to database""" def handle(self, *args, **options): realm_filters = { "zulip.com": [ ("#(?P<id>[0-9]{2,8})", "https://trac.zulip.net/ticket/%(id)s"), ], "mit.edu/zephyr_mirror": [], } for domain, filters in realm_filters.iteritems(): realm = get_realm(domain) if realm is None: logging.error("Failed to get realm for domain %s" % (domain,)) continue for filter in filters: RealmFilter(realm=realm, pattern=filter[0], url_format_string=filter[1]).save() logging.info("Created realm filter %s for %s" % (filter[0], domain))
Add a manage.py command to import realm filters
[manual] Add a manage.py command to import realm filters This must be run manually on staging after deployment. Once it has been run, it can be deleted. It only needs to be run on staging, not prod. (imported from commit 79252c23ba8cda93500a18aa7b02575f406dd379)
Python
apache-2.0
vabs22/zulip,calvinleenyc/zulip,sup95/zulip,deer-hope/zulip,ryanbackman/zulip,kokoar/zulip,bkbilly/Tvheadend-EPG,shaunstanislaus/zulip,Gabriel0402/zulip,aps-sids/zulip,xuxiao/zulip,tommyip/zulip,JPJPJPOPOP/zulip,andersk/zulip,synicalsyntax/zulip,Vallher/zulip,eeshangarg/zulip,eastlhu/zulip,m1ssou/zulip,rht/zulip,ericzhou2008/zulip,hj3938/zulip,easyfmxu/zulip,amanharitsh123/zulip,Qgap/zulip,babbage/zulip,joshisa/zulip,Diptanshu8/zulip,lfranchi/zulip,tbutter/zulip,krtkmj/zulip,jimmy54/zulip,cosmicAsymmetry/zulip,jphilipsen05/zulip,showell/zulip,johnnygaddarr/zulip,christi3k/zulip,amallia/zulip,praveenaki/zulip,zulip/zulip,fletchto99/pebblejs,yuvipanda/zulip,reyha/zulip,itnihao/zulip,tdr130/zulip,vabs22/zulip,Batterfii/zulip,bastianh/zulip,aliceriot/zulip,ryansnowboarder/zulip,Frouk/zulip,tommyip/zulip,thomasboyt/zulip,mansilladev/zulip,punchagan/zulip,zachallaun/zulip,reyha/zulip,DazWorrall/zulip,zacps/zulip,ikasumiwt/zulip,brockwhittaker/zulip,amyliu345/zulip,hengqujushi/zulip,showell/zulip,jeffcao/zulip,Juanvulcano/zulip,gigawhitlocks/zulip,MariaFaBella85/zulip,glovebx/zulip,aps-sids/zulip,luyifan/zulip,peiwei/zulip,praveenaki/zulip,xuxiao/zulip,showell/zulip,saitodisse/zulip,j831/zulip,hengqujushi/zulip,mdavid/zulip,dotcool/zulip,MayB/zulip,levixie/zulip,verma-varsha/zulip,hustlzp/zulip,luyifan/zulip,johnny9/zulip,codeKonami/zulip,voidException/zulip,moria/zulip,firstblade/zulip,RobotCaleb/zulip,j831/zulip,jainayush975/zulip,amyliu345/zulip,hackerkid/zulip,mansilladev/zulip,gkotian/zulip,qq1012803704/zulip,fw1121/zulip,jackrzhang/zulip,eeshangarg/zulip,susansls/zulip,Batterfii/zulip,amanharitsh123/zulip,karamcnair/zulip,kou/zulip,themass/zulip,shrikrishnaholla/zulip,jrowan/zulip,dawran6/zulip,dattatreya303/zulip,pradiptad/zulip,joyhchen/zulip,zulip/zulip,saitodisse/zulip,PhilSk/zulip,Suninus/zulip,armooo/zulip,zwily/zulip,hengqujushi/zulip,vabs22/zulip,nicholasbs/zulip,Vallher/zulip,Cheppers/zulip,joyhchen/zulip,RobotCaleb/zulip,RobotCaleb/zulip,so0k/zulip,johnny9/zulip,Juanvulcano/zulip,zofuthan/zulip,akuseru/zulip,yocome/zulip,jerryge/zulip,zhaoweigg/zulip,MayB/zulip,littledogboy/zulip,Frouk/zulip,jrowan/zulip,zorojean/zulip,rishig/zulip,andersk/zulip,wavelets/zulip,tommyip/zulip,gigawhitlocks/zulip,bastianh/zulip,susansls/zulip,eeshangarg/zulip,glovebx/zulip,schatt/zulip,amanharitsh123/zulip,Galexrt/zulip,yocome/zulip,gigawhitlocks/zulip,jeffcao/zulip,Suninus/zulip,hackerkid/zulip,esander91/zulip,tiansiyuan/zulip,huangkebo/zulip,avastu/zulip,eeshangarg/zulip,m1ssou/zulip,niftynei/zulip,timabbott/zulip,christi3k/zulip,arpith/zulip,cosmicAsymmetry/zulip,dotcool/zulip,jainayush975/zulip,lfranchi/zulip,TigorC/zulip,zofuthan/zulip,mansilladev/zulip,niftynei/zulip,KJin99/zulip,j831/zulip,seapasulli/zulip,stamhe/zulip,krtkmj/zulip,adnanh/zulip,jeffcao/zulip,vaidap/zulip,isht3/zulip,dotcool/zulip,eeshangarg/zulip,jackrzhang/zulip,ahmadassaf/zulip,Gabriel0402/zulip,synicalsyntax/zulip,vakila/zulip,babbage/zulip,ericzhou2008/zulip,jainayush975/zulip,bssrdf/zulip,Frouk/zulip,dotcool/zulip,dwrpayne/zulip,wavelets/zulip,esander91/zulip,mdavid/zulip,dxq-git/zulip,showell/zulip,kokoar/zulip,jerryge/zulip,vikas-parashar/zulip,calvinleenyc/zulip,KJin99/zulip,dxq-git/zulip,armooo/zulip,johnny9/zulip,hackerkid/zulip,samatdav/zulip,m1ssou/zulip,so0k/zulip,sonali0901/zulip,ryanbackman/zulip,Jianchun1/zulip,akuseru/zulip,saitodisse/zulip,grave-w-grave/zulip,littledogboy/zulip,guiquanz/zulip,bssrdf/zulip,MayB/zulip,umkay/zulip,fw1121/zulip,ryanbackman/zulip,dhcrzf/zulip,yocome/zulip,jphilipsen05/zulip,eeshangarg/zulip,zacps/zulip,MayB/zulip,Drooids/zulip,schatt/zulip,amyliu345/zulip,ApsOps/zulip,suxinde2009/zulip,zacps/zulip,praveenaki/zulip,vaidap/zulip,esander91/zulip,mdavid/zulip,Suninus/zulip,hackerkid/zulip,wdaher/zulip,ericzhou2008/zulip,sharmaeklavya2/zulip,ipernet/zulip,SmartPeople/zulip,vaidap/zulip,j831/zulip,PhilSk/zulip,saitodisse/zulip,DazWorrall/zulip,Batterfii/zulip,willingc/zulip,wangdeshui/zulip,esander91/zulip,KJin99/zulip,firstblade/zulip,Gabriel0402/zulip,dattatreya303/zulip,EasonYi/zulip,amyliu345/zulip,stamhe/zulip,Diptanshu8/zulip,Gabriel0402/zulip,ericzhou2008/zulip,atomic-labs/zulip,DazWorrall/zulip,umkay/zulip,alliejones/zulip,vakila/zulip,amallia/zulip,so0k/zulip,LAndreas/zulip,dhcrzf/zulip,LeeRisk/zulip,ashwinirudrappa/zulip,mdavid/zulip,technicalpickles/zulip,praveenaki/zulip,joyhchen/zulip,eastlhu/zulip,saitodisse/zulip,ipernet/zulip,Drooids/zulip,arpitpanwar/zulip,shubhamdhama/zulip,ipernet/zulip,shubhamdhama/zulip,wangdeshui/zulip,fw1121/zulip,Gabriel0402/zulip,Cheppers/zulip,umkay/zulip,krtkmj/zulip,lfranchi/zulip,suxinde2009/zulip,schatt/zulip,shubhamdhama/zulip,Qgap/zulip,jerryge/zulip,bowlofstew/zulip,jrowan/zulip,vabs22/zulip,jainayush975/zulip,AZtheAsian/zulip,TigorC/zulip,RobotCaleb/zulip,paxapy/zulip,brockwhittaker/zulip,gkotian/zulip,avastu/zulip,vakila/zulip,punchagan/zulip,seapasulli/zulip,ikasumiwt/zulip,susansls/zulip,ashwinirudrappa/zulip,natanovia/zulip,sonali0901/zulip,codeKonami/zulip,MariaFaBella85/zulip,Qgap/zulip,MariaFaBella85/zulip,zachallaun/zulip,jimmy54/zulip,stamhe/zulip,hj3938/zulip,DazWorrall/zulip,vikas-parashar/zulip,andersk/zulip,natanovia/zulip,mansilladev/zulip,udxxabp/zulip,dhcrzf/zulip,LAndreas/zulip,voidException/zulip,littledogboy/zulip,bssrdf/zulip,peguin40/zulip,wavelets/zulip,dxq-git/zulip,bastianh/zulip,Vallher/zulip,pradiptad/zulip,alliejones/zulip,alliejones/zulip,aliceriot/zulip,jessedhillon/zulip,zachallaun/zulip,natanovia/zulip,hafeez3000/zulip,akuseru/zulip,LAndreas/zulip,udxxabp/zulip,brainwane/zulip,littledogboy/zulip,ahmadassaf/zulip,m1ssou/zulip,souravbadami/zulip,vakila/zulip,umkay/zulip,samatdav/zulip,MariaFaBella85/zulip,dwrpayne/zulip,itnihao/zulip,PhilSk/zulip,synicalsyntax/zulip,zhaoweigg/zulip,codeKonami/zulip,gkotian/zulip,bssrdf/zulip,jphilipsen05/zulip,johnnygaddarr/zulip,bastianh/zulip,LAndreas/zulip,zhaoweigg/zulip,zacps/zulip,suxinde2009/zulip,swinghu/zulip,zwily/zulip,natanovia/zulip,TigorC/zulip,ApsOps/zulip,yocome/zulip,jonesgithub/zulip,ufosky-server/zulip,jessedhillon/zulip,zwily/zulip,tdr130/zulip,Cheppers/zulip,avastu/zulip,udxxabp/zulip,he15his/zulip,JanzTam/zulip,gkotian/zulip,easyfmxu/zulip,TigorC/zulip,aakash-cr7/zulip,nicholasbs/zulip,deer-hope/zulip,dawran6/zulip,noroot/zulip,Diptanshu8/zulip,Frouk/zulip,zorojean/zulip,sharmaeklavya2/zulip,saitodisse/zulip,timabbott/zulip,LeeRisk/zulip,peguin40/zulip,jessedhillon/zulip,KJin99/zulip,alliejones/zulip,atomic-labs/zulip,shubhamdhama/zulip,samatdav/zulip,krtkmj/zulip,kaiyuanheshang/zulip,themass/zulip,Qgap/zulip,johnny9/zulip,nicholasbs/zulip,moria/zulip,Galexrt/zulip,Suninus/zulip,ipernet/zulip,technicalpickles/zulip,vabs22/zulip,bitemyapp/zulip,praveenaki/zulip,joyhchen/zulip,avastu/zulip,qq1012803704/zulip,rishig/zulip,ufosky-server/zulip,proliming/zulip,mdavid/zulip,jackrzhang/zulip,jimmy54/zulip,jeffcao/zulip,ipernet/zulip,m1ssou/zulip,PhilSk/zulip,Cheppers/zulip,Cheppers/zulip,PaulPetring/zulip,tiansiyuan/zulip,jerryge/zulip,qq1012803704/zulip,nicholasbs/zulip,bowlofstew/zulip,shrikrishnaholla/zulip,ApsOps/zulip,zofuthan/zulip,bssrdf/zulip,Cheppers/zulip,AZtheAsian/zulip,vabs22/zulip,dwrpayne/zulip,calvinleenyc/zulip,wangdeshui/zulip,hengqujushi/zulip,mohsenSy/zulip,huangkebo/zulip,mahim97/zulip,bitemyapp/zulip,kou/zulip,zachallaun/zulip,lfranchi/zulip,mdavid/zulip,he15his/zulip,Frouk/zulip,johnnygaddarr/zulip,zorojean/zulip,arpitpanwar/zulip,showell/zulip,gkotian/zulip,zorojean/zulip,sharmaeklavya2/zulip,Jianchun1/zulip,yocome/zulip,babbage/zulip,krtkmj/zulip,nicholasbs/zulip,Vallher/zulip,stamhe/zulip,mohsenSy/zulip,Suninus/zulip,ryansnowboarder/zulip,huangkebo/zulip,adnanh/zulip,jerryge/zulip,wweiradio/zulip,amyliu345/zulip,xuxiao/zulip,hackerkid/zulip,shrikrishnaholla/zulip,LAndreas/zulip,itnihao/zulip,glovebx/zulip,seapasulli/zulip,RobotCaleb/zulip,vaidap/zulip,joshisa/zulip,samatdav/zulip,karamcnair/zulip,wangdeshui/zulip,johnny9/zulip,sharmaeklavya2/zulip,avastu/zulip,rht/zulip,johnnygaddarr/zulip,technicalpickles/zulip,xuanhan863/zulip,arpith/zulip,shrikrishnaholla/zulip,aakash-cr7/zulip,dnmfarrell/zulip,moria/zulip,seapasulli/zulip,proliming/zulip,sup95/zulip,arpitpanwar/zulip,tdr130/zulip,jackrzhang/zulip,xuanhan863/zulip,aliceriot/zulip,bitemyapp/zulip,bitemyapp/zulip,synicalsyntax/zulip,vikas-parashar/zulip,isht3/zulip,wweiradio/zulip,pradiptad/zulip,codeKonami/zulip,wavelets/zulip,lfranchi/zulip,shaunstanislaus/zulip,sharmaeklavya2/zulip,souravbadami/zulip,grave-w-grave/zulip,udxxabp/zulip,natanovia/zulip,armooo/zulip,aps-sids/zulip,tdr130/zulip,LAndreas/zulip,johnny9/zulip,ufosky-server/zulip,ericzhou2008/zulip,jackrzhang/zulip,zorojean/zulip,vakila/zulip,JanzTam/zulip,mohsenSy/zulip,dattatreya303/zulip,PaulPetring/zulip,deer-hope/zulip,willingc/zulip,qq1012803704/zulip,ikasumiwt/zulip,jonesgithub/zulip,blaze225/zulip,KingxBanana/zulip,firstblade/zulip,shaunstanislaus/zulip,hayderimran7/zulip,joyhchen/zulip,ApsOps/zulip,tbutter/zulip,fw1121/zulip,hj3938/zulip,wangdeshui/zulip,thomasboyt/zulip,hustlzp/zulip,peiwei/zulip,cosmicAsymmetry/zulip,ashwinirudrappa/zulip,wdaher/zulip,Drooids/zulip,Qgap/zulip,wweiradio/zulip,yuvipanda/zulip,Juanvulcano/zulip,zofuthan/zulip,KJin99/zulip,paxapy/zulip,niftynei/zulip,Jianchun1/zulip,brainwane/zulip,aliceriot/zulip,KingxBanana/zulip,suxinde2009/zulip,EasonYi/zulip,jessedhillon/zulip,dhcrzf/zulip,levixie/zulip,nicholasbs/zulip,proliming/zulip,brainwane/zulip,amallia/zulip,tbutter/zulip,shubhamdhama/zulip,brockwhittaker/zulip,proliming/zulip,hackerkid/zulip,bitemyapp/zulip,KingxBanana/zulip,ApsOps/zulip,zhaoweigg/zulip,guiquanz/zulip,wdaher/zulip,pradiptad/zulip,umkay/zulip,qq1012803704/zulip,Suninus/zulip,hafeez3000/zulip,dotcool/zulip,mansilladev/zulip,moria/zulip,mohsenSy/zulip,zhaoweigg/zulip,jerryge/zulip,babbage/zulip,hayderimran7/zulip,PaulPetring/zulip,jimmy54/zulip,peiwei/zulip,SmartPeople/zulip,verma-varsha/zulip,Jianchun1/zulip,grave-w-grave/zulip,Batterfii/zulip,ikasumiwt/zulip,AZtheAsian/zulip,akuseru/zulip,peguin40/zulip,armooo/zulip,xuanhan863/zulip,AZtheAsian/zulip,andersk/zulip,ryanbackman/zulip,moria/zulip,ericzhou2008/zulip,SmartPeople/zulip,reyha/zulip,MariaFaBella85/zulip,timabbott/zulip,schatt/zulip,zulip/zulip,Drooids/zulip,karamcnair/zulip,noroot/zulip,easyfmxu/zulip,qq1012803704/zulip,sonali0901/zulip,jrowan/zulip,bssrdf/zulip,LeeRisk/zulip,armooo/zulip,KJin99/zulip,Juanvulcano/zulip,timabbott/zulip,dattatreya303/zulip,praveenaki/zulip,xuanhan863/zulip,zofuthan/zulip,atomic-labs/zulip,rht/zulip,hengqujushi/zulip,m1ssou/zulip,qq1012803704/zulip,EasonYi/zulip,hayderimran7/zulip,brainwane/zulip,j831/zulip,kou/zulip,huangkebo/zulip,TigorC/zulip,dxq-git/zulip,samatdav/zulip,ikasumiwt/zulip,ikasumiwt/zulip,SmartPeople/zulip,alliejones/zulip,JanzTam/zulip,Batterfii/zulip,DazWorrall/zulip,AZtheAsian/zulip,ikasumiwt/zulip,johnnygaddarr/zulip,brockwhittaker/zulip,bluesea/zulip,ryansnowboarder/zulip,jonesgithub/zulip,JPJPJPOPOP/zulip,eastlhu/zulip,eastlhu/zulip,gigawhitlocks/zulip,zwily/zulip,armooo/zulip,mahim97/zulip,jimmy54/zulip,reyha/zulip,JanzTam/zulip,tiansiyuan/zulip,dhcrzf/zulip,KingxBanana/zulip,jonesgithub/zulip,developerfm/zulip,dnmfarrell/zulip,ericzhou2008/zulip,christi3k/zulip,eastlhu/zulip,kokoar/zulip,hj3938/zulip,dnmfarrell/zulip,deer-hope/zulip,verma-varsha/zulip,zorojean/zulip,peiwei/zulip,cosmicAsymmetry/zulip,bluesea/zulip,guiquanz/zulip,umkay/zulip,RobotCaleb/zulip,hengqujushi/zulip,levixie/zulip,shrikrishnaholla/zulip,adnanh/zulip,technicalpickles/zulip,JanzTam/zulip,hafeez3000/zulip,gkotian/zulip,esander91/zulip,kaiyuanheshang/zulip,Gabriel0402/zulip,themass/zulip,jphilipsen05/zulip,EasonYi/zulip,mahim97/zulip,susansls/zulip,Galexrt/zulip,AZtheAsian/zulip,brockwhittaker/zulip,zorojean/zulip,bastianh/zulip,themass/zulip,jphilipsen05/zulip,brainwane/zulip,arpitpanwar/zulip,kaiyuanheshang/zulip,LAndreas/zulip,xuanhan863/zulip,kokoar/zulip,Jianchun1/zulip,aliceriot/zulip,suxinde2009/zulip,glovebx/zulip,kokoar/zulip,zachallaun/zulip,ApsOps/zulip,grave-w-grave/zulip,DazWorrall/zulip,paxapy/zulip,zulip/zulip,zachallaun/zulip,zhaoweigg/zulip,rht/zulip,punchagan/zulip,andersk/zulip,joshisa/zulip,jackrzhang/zulip,levixie/zulip,thomasboyt/zulip,luyifan/zulip,PaulPetring/zulip,johnny9/zulip,dawran6/zulip,bluesea/zulip,suxinde2009/zulip,PhilSk/zulip,niftynei/zulip,showell/zulip,fw1121/zulip,tbutter/zulip,lfranchi/zulip,dnmfarrell/zulip,blaze225/zulip,niftynei/zulip,amanharitsh123/zulip,amallia/zulip,dhcrzf/zulip,souravbadami/zulip,vikas-parashar/zulip,dattatreya303/zulip,adnanh/zulip,Frouk/zulip,blaze225/zulip,hafeez3000/zulip,deer-hope/zulip,hayderimran7/zulip,hayderimran7/zulip,atomic-labs/zulip,Diptanshu8/zulip,kou/zulip,levixie/zulip,pradiptad/zulip,firstblade/zulip,littledogboy/zulip,voidException/zulip,easyfmxu/zulip,developerfm/zulip,akuseru/zulip,jonesgithub/zulip,Juanvulcano/zulip,dnmfarrell/zulip,zwily/zulip,mansilladev/zulip,MariaFaBella85/zulip,vikas-parashar/zulip,ufosky-server/zulip,TigorC/zulip,kou/zulip,bowlofstew/zulip,shaunstanislaus/zulip,littledogboy/zulip,peguin40/zulip,bluesea/zulip,shaunstanislaus/zulip,hafeez3000/zulip,avastu/zulip,SmartPeople/zulip,gigawhitlocks/zulip,Vallher/zulip,hustlzp/zulip,hj3938/zulip,dwrpayne/zulip,moria/zulip,gkotian/zulip,Gabriel0402/zulip,showell/zulip,dattatreya303/zulip,yuvipanda/zulip,PhilSk/zulip,MayB/zulip,proliming/zulip,amanharitsh123/zulip,xuanhan863/zulip,developerfm/zulip,luyifan/zulip,jainayush975/zulip,so0k/zulip,technicalpickles/zulip,joshisa/zulip,guiquanz/zulip,aliceriot/zulip,ryansnowboarder/zulip,yuvipanda/zulip,dawran6/zulip,huangkebo/zulip,karamcnair/zulip,karamcnair/zulip,seapasulli/zulip,Cheppers/zulip,reyha/zulip,johnnygaddarr/zulip,moria/zulip,mahim97/zulip,voidException/zulip,hustlzp/zulip,guiquanz/zulip,Frouk/zulip,paxapy/zulip,zulip/zulip,RobotCaleb/zulip,dxq-git/zulip,levixie/zulip,wangdeshui/zulip,Drooids/zulip,paxapy/zulip,udxxabp/zulip,Batterfii/zulip,brainwane/zulip,zacps/zulip,dnmfarrell/zulip,EasonYi/zulip,christi3k/zulip,aakash-cr7/zulip,codeKonami/zulip,ashwinirudrappa/zulip,SmartPeople/zulip,sup95/zulip,LeeRisk/zulip,themass/zulip,jerryge/zulip,wavelets/zulip,vaidap/zulip,arpith/zulip,jeffcao/zulip,dnmfarrell/zulip,Galexrt/zulip,itnihao/zulip,JanzTam/zulip,bluesea/zulip,andersk/zulip,stamhe/zulip,stamhe/zulip,jeffcao/zulip,voidException/zulip,adnanh/zulip,bitemyapp/zulip,itnihao/zulip,natanovia/zulip,andersk/zulip,Diptanshu8/zulip,luyifan/zulip,dwrpayne/zulip,Qgap/zulip,esander91/zulip,yuvipanda/zulip,johnnygaddarr/zulip,rht/zulip,hengqujushi/zulip,jonesgithub/zulip,peiwei/zulip,rishig/zulip,JPJPJPOPOP/zulip,shaunstanislaus/zulip,littledogboy/zulip,vaidap/zulip,thomasboyt/zulip,zwily/zulip,wweiradio/zulip,peiwei/zulip,Drooids/zulip,souravbadami/zulip,noroot/zulip,he15his/zulip,developerfm/zulip,bssrdf/zulip,willingc/zulip,PaulPetring/zulip,firstblade/zulip,eastlhu/zulip,joshisa/zulip,m1ssou/zulip,babbage/zulip,jackrzhang/zulip,sup95/zulip,kokoar/zulip,brockwhittaker/zulip,hj3938/zulip,ryansnowboarder/zulip,grave-w-grave/zulip,huangkebo/zulip,mdavid/zulip,bowlofstew/zulip,jrowan/zulip,aps-sids/zulip,calvinleenyc/zulip,thomasboyt/zulip,Jianchun1/zulip,wdaher/zulip,Galexrt/zulip,peguin40/zulip,jeffcao/zulip,willingc/zulip,deer-hope/zulip,udxxabp/zulip,mohsenSy/zulip,gigawhitlocks/zulip,ufosky-server/zulip,PaulPetring/zulip,isht3/zulip,shaunstanislaus/zulip,atomic-labs/zulip,mohsenSy/zulip,ashwinirudrappa/zulip,jimmy54/zulip,hafeez3000/zulip,Galexrt/zulip,easyfmxu/zulip,zulip/zulip,guiquanz/zulip,wweiradio/zulip,xuxiao/zulip,guiquanz/zulip,isht3/zulip,aps-sids/zulip,zulip/zulip,peguin40/zulip,swinghu/zulip,xuxiao/zulip,technicalpickles/zulip,aps-sids/zulip,glovebx/zulip,developerfm/zulip,christi3k/zulip,jphilipsen05/zulip,timabbott/zulip
[manual] Add a manage.py command to import realm filters This must be run manually on staging after deployment. Once it has been run, it can be deleted. It only needs to be run on staging, not prod. (imported from commit 79252c23ba8cda93500a18aa7b02575f406dd379)
from __future__ import absolute_import from django.core.management.base import BaseCommand from zerver.models import RealmFilter, get_realm import logging class Command(BaseCommand): help = """Imports realm filters to database""" def handle(self, *args, **options): realm_filters = { "zulip.com": [ ("#(?P<id>[0-9]{2,8})", "https://trac.zulip.net/ticket/%(id)s"), ], "mit.edu/zephyr_mirror": [], } for domain, filters in realm_filters.iteritems(): realm = get_realm(domain) if realm is None: logging.error("Failed to get realm for domain %s" % (domain,)) continue for filter in filters: RealmFilter(realm=realm, pattern=filter[0], url_format_string=filter[1]).save() logging.info("Created realm filter %s for %s" % (filter[0], domain))
<commit_before><commit_msg>[manual] Add a manage.py command to import realm filters This must be run manually on staging after deployment. Once it has been run, it can be deleted. It only needs to be run on staging, not prod. (imported from commit 79252c23ba8cda93500a18aa7b02575f406dd379)<commit_after>
from __future__ import absolute_import from django.core.management.base import BaseCommand from zerver.models import RealmFilter, get_realm import logging class Command(BaseCommand): help = """Imports realm filters to database""" def handle(self, *args, **options): realm_filters = { "zulip.com": [ ("#(?P<id>[0-9]{2,8})", "https://trac.zulip.net/ticket/%(id)s"), ], "mit.edu/zephyr_mirror": [], } for domain, filters in realm_filters.iteritems(): realm = get_realm(domain) if realm is None: logging.error("Failed to get realm for domain %s" % (domain,)) continue for filter in filters: RealmFilter(realm=realm, pattern=filter[0], url_format_string=filter[1]).save() logging.info("Created realm filter %s for %s" % (filter[0], domain))
[manual] Add a manage.py command to import realm filters This must be run manually on staging after deployment. Once it has been run, it can be deleted. It only needs to be run on staging, not prod. (imported from commit 79252c23ba8cda93500a18aa7b02575f406dd379)from __future__ import absolute_import from django.core.management.base import BaseCommand from zerver.models import RealmFilter, get_realm import logging class Command(BaseCommand): help = """Imports realm filters to database""" def handle(self, *args, **options): realm_filters = { "zulip.com": [ ("#(?P<id>[0-9]{2,8})", "https://trac.zulip.net/ticket/%(id)s"), ], "mit.edu/zephyr_mirror": [], } for domain, filters in realm_filters.iteritems(): realm = get_realm(domain) if realm is None: logging.error("Failed to get realm for domain %s" % (domain,)) continue for filter in filters: RealmFilter(realm=realm, pattern=filter[0], url_format_string=filter[1]).save() logging.info("Created realm filter %s for %s" % (filter[0], domain))
<commit_before><commit_msg>[manual] Add a manage.py command to import realm filters This must be run manually on staging after deployment. Once it has been run, it can be deleted. It only needs to be run on staging, not prod. (imported from commit 79252c23ba8cda93500a18aa7b02575f406dd379)<commit_after>from __future__ import absolute_import from django.core.management.base import BaseCommand from zerver.models import RealmFilter, get_realm import logging class Command(BaseCommand): help = """Imports realm filters to database""" def handle(self, *args, **options): realm_filters = { "zulip.com": [ ("#(?P<id>[0-9]{2,8})", "https://trac.zulip.net/ticket/%(id)s"), ], "mit.edu/zephyr_mirror": [], } for domain, filters in realm_filters.iteritems(): realm = get_realm(domain) if realm is None: logging.error("Failed to get realm for domain %s" % (domain,)) continue for filter in filters: RealmFilter(realm=realm, pattern=filter[0], url_format_string=filter[1]).save() logging.info("Created realm filter %s for %s" % (filter[0], domain))
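The management command in this record inserts unconditionally, so running it twice would create duplicate rows. A sketch of an idempotent variant using Django's standard get_or_create follows; whether (realm, pattern) is a sensible uniqueness key for RealmFilter is an assumption here, and the helper name is invented for illustration.

import logging
from zerver.models import RealmFilter

def import_filters_idempotently(realm, domain, filters):
    for pattern, url_format in filters:
        # get_or_create makes a repeat run a no-op instead of a duplicate insert.
        _, created = RealmFilter.objects.get_or_create(
            realm=realm, pattern=pattern,
            defaults={'url_format_string': url_format})
        if created:
            logging.info("Created realm filter %s for %s" % (pattern, domain))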