The preview describes sixteen string columns with the following length statistics:

| Column | Type | Min length | Max length |
|---|---|---|---|
| `commit` | string | 40 | 40 |
| `old_file` | string | 4 | 118 |
| `new_file` | string | 4 | 118 |
| `old_contents` | string | 0 | 2.94k |
| `new_contents` | string | 1 | 4.43k |
| `subject` | string | 15 | 444 |
| `message` | string | 16 | 3.45k |
| `lang` | string (1 distinct class) | | |
| `license` | string (13 distinct classes) | | |
| `repos` | string | 5 | 43.2k |
| `prompt` | string | 17 | 4.58k |
| `response` | string | 1 | 4.43k |
| `prompt_tagged` | string | 58 | 4.62k |
| `response_tagged` | string | 1 | 4.43k |
| `text` | string | 132 | 7.29k |
| `text_tagged` | string | 173 | 7.33k |

In every row shown below, `prompt` equals `message`, `response` equals `new_contents`, the `*_tagged` variants wrap the same content in `<commit_before>`, `<commit_msg>`, and `<commit_after>` markers, and `text` / `text_tagged` concatenate the corresponding prompt and response. Each record below therefore lists the underlying fields once.
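Such a dataset can be streamed and inspected record by record with the `datasets` library. A minimal sketch, assuming the data is published on the Hugging Face Hub; the identifier `user/python-commit-dataset` is a placeholder, not a real path:

```python
# Sketch: stream a dataset with the schema above and print the raw
# fields of the first record. The dataset identifier is hypothetical.
from datasets import load_dataset

ds = load_dataset("user/python-commit-dataset", split="train", streaming=True)

row = next(iter(ds))
print(row["commit"], row["new_file"])  # commit hash and the file it touches
print(row["subject"])                  # one-line commit subject
print(row["new_contents"][:120])       # file contents after the commit
```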
---

- **commit:** `42a287d23a1153df636c193695615d99b7c75e4d`
- **old_file:** `test/stop_all.py`
- **new_file:** `test/stop_all.py`
- **subject:** Test stopping all running file backups
- **message:** Test stopping all running file backups
- **lang:** Python
- **license:** apache-2.0
- **old_contents:** *(empty)*
- **repos:** uroni/urbackup-server-python-web-api-wrapper
- **new_contents:**

```python
import urbackup_api

server = urbackup_api.urbackup_server("http://127.0.0.1:55414/x", "admin", "foo")

for action in server.get_actions():
    a = action["action"]
    if a == server.action_full_file or a == server.action_resumed_full_file:
        print("Running full file backup: " + action["name"])
        print("Stopping...")
        server.stop_action(action)
```
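The `*_tagged` columns are assembled from the raw fields with a fixed template. A minimal sketch of that assembly, using the record above; the helper name is illustrative, not part of the dataset:

```python
def make_tagged(old_contents, message, new_contents):
    # Template observed in prompt_tagged / text_tagged:
    #   <commit_before>{old_contents}<commit_msg>{message}<commit_after>{new_contents}
    prompt_tagged = "<commit_before>{}<commit_msg>{}<commit_after>".format(
        old_contents, message)
    return prompt_tagged, prompt_tagged + new_contents

# For the record above, old_contents is empty:
p, t = make_tagged("", "Test stopping all running file backups",
                   "import urbackup_api\n...")
print(p)  # <commit_before><commit_msg>Test stopping all running file backups<commit_after>
```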
---

- **commit:** `3a9445c6b3053d492c12bbf808d251c6da55632a`
- **old_file:** `tests/import/builtin_import.py`
- **new_file:** `tests/import/builtin_import.py`
- **subject:** Add a test for the builtin `__import__` function.
- **message:** tests/import: Add a test for the builtin `__import__` function.
- **lang:** Python
- **license:** mit
- **old_contents:** *(empty)*
- **repos:**
pozetroninc/micropython,pfalcon/micropython,micropython/micropython-esp32,AriZuu/micropython,torwag/micropython,HenrikSolver/micropython,Timmenem/micropython,pozetroninc/micropython,ryannathans/micropython,tralamazza/micropython,blazewicz/micropython,oopy/micropython,lowRISC/micropython,alex-robbins/micropython,bvernoux/micropython,bvernoux/micropython,pramasoul/micropython,tralamazza/micropython,henriknelson/micropython,lowRISC/micropython,chrisdearman/micropython,blazewicz/micropython,Timmenem/micropython,dmazzella/micropython,AriZuu/micropython,adafruit/circuitpython,MrSurly/micropython,HenrikSolver/micropython,henriknelson/micropython,adafruit/circuitpython,bvernoux/micropython,selste/micropython,pramasoul/micropython,selste/micropython,MrSurly/micropython-esp32,adafruit/circuitpython,Timmenem/micropython,oopy/micropython,MrSurly/micropython-esp32,ryannathans/micropython,blazewicz/micropython,deshipu/micropython,MrSurly/micropython-esp32,infinnovation/micropython,tobbad/micropython,adafruit/micropython,trezor/micropython,blazewicz/micropython,micropython/micropython-esp32,tralamazza/micropython,adafruit/circuitpython,SHA2017-badge/micropython-esp32,PappaPeppar/micropython,pozetroninc/micropython,selste/micropython,SHA2017-badge/micropython-esp32,selste/micropython,pramasoul/micropython,ryannathans/micropython,lowRISC/micropython,pfalcon/micropython,kerneltask/micropython,MrSurly/micropython,micropython/micropython-esp32,selste/micropython,kerneltask/micropython,torwag/micropython,adafruit/micropython,swegener/micropython,alex-robbins/micropython,pfalcon/micropython,tobbad/micropython,torwag/micropython,oopy/micropython,deshipu/micropython,infinnovation/micropython,micropython/micropython-esp32,trezor/micropython,MrSurly/micropython,TDAbboud/micropython,TDAbboud/micropython,lowRISC/micropython,Timmenem/micropython,lowRISC/micropython,pozetroninc/micropython,blazewicz/micropython,dmazzella/micropython,alex-robbins/micropython,tobbad/micropython,torwag/micropython,SHA2017-badge/micropython-esp32,pramasoul/micropython,adafruit/circuitpython,TDAbboud/micropython,adafruit/micropython,Timmenem/micropython,chrisdearman/micropython,alex-robbins/micropython,pozetroninc/micropython,trezor/micropython,oopy/micropython,swegener/micropython,PappaPeppar/micropython,swegener/micropython,AriZuu/micropython,henriknelson/micropython,bvernoux/micropython,tralamazza/micropython,alex-robbins/micropython,PappaPeppar/micropython,adafruit/circuitpython,swegener/micropython,bvernoux/micropython,infinnovation/micropython,MrSurly/micropython-esp32,deshipu/micropython,infinnovation/micropython,henriknelson/micropython,infinnovation/micropython,kerneltask/micropython,torwag/micropython,dmazzella/micropython,kerneltask/micropython,HenrikSolver/micropython,ryannathans/micropython,chrisdearman/micropython,AriZuu/micropython,pfalcon/micropython,MrSurly/micropython,tobbad/micropython,HenrikSolver/micropython,micropython/micropython-esp32,trezor/micropython,chrisdearman/micropython,tobbad/micropython,MrSurly/micropython,SHA2017-badge/micropython-esp32,HenrikSolver/micropython,trezor/micropython,pfalcon/micropython,oopy/micropython,TDAbboud/micropython,PappaPeppar/micropython,ryannathans/micropython,PappaPeppar/micropython,TDAbboud/micropython,chrisdearman/micropython,adafruit/micropython,deshipu/micropython,deshipu/micropython,MrSurly/micropython-esp32,kerneltask/micropython,henriknelson/micropython,adafruit/micropython,SHA2017-badge/micropython-esp32,dmazzella/micropython,swegener/micropython,AriZuu/micropython,pramasoul
/micropython
- **new_contents:**

```python
# test calling builtin import function

# basic test
__import__('builtins')

# first arg should be a string
try:
    __import__(1)
except TypeError:
    print('TypeError')

# level argument should be non-negative
try:
    __import__('xyz', None, None, None, -1)
except ValueError:
    print('ValueError')
```
---

- **commit:** `a72a0674a6db3880ed699101be3c9c46671989f0`
- **old_file:** `xxdata_11.py`
- **new_file:** `xxdata_11.py`
- **subject:** Add a primitive pythonic wrapper.
- **message:** Add a primitive pythonic wrapper.
- **lang:** Python
- **license:** mit
- **old_contents:** *(empty)*
- **repos:** cfe316/atomic,ezekial4/atomic_neu,ezekial4/atomic_neu
- **new_contents:**

```python
import os

import _xxdata_11

parameters = {
    'isdimd': 200,
    'iddimd': 40,
    'itdimd': 50,
    'ndptnl': 4,
    'ndptn': 128,
    'ndptnc': 256,
    'ndcnct': 100
}


def read_scd(filename):
    fd = open(filename, 'r')
    fortran_filename = 'fort.%d' % fd.fileno()
    os.symlink(filename, fortran_filename)

    iclass = 2  # class number for scd files
    ret = _xxdata_11.xxdata_11(fd.fileno(), iclass, **parameters)

    os.unlink(fortran_filename)
    return ret


if __name__ == '__main__':
    out = read_scd('scd96_c.dat')
    print out[0]
```
---

- **commit:** `b6cd59f800b254d91da76083546ab7c10689df5f`
- **old_file:** `tests/test_no_dup_filenames.py`
- **new_file:** `tests/test_no_dup_filenames.py`
- **subject:** Add unit test to enforce unique file names.
- **message:** Add unit test to enforce unique file names. This patch adds a unit test to ensure we have uniquely named files across each of the element/target directories. Because DIB copies these files into a common namespace files can by cryptically overwritten thus causing a variety of failures. Simply checking for uniquely named files when running unit tests should help eliminate future issues here. Change-Id: Ic22f9eb5e002a0cfe94942e7210eb73231560e82 Partial-bug: #1321849
- **lang:** Python
- **license:** apache-2.0
- **old_contents:** *(empty)*
- **repos:** rdo-management/tripleo-image-elements,rdo-management/tripleo-image-elements,openstack/tripleo-image-elements,radez/tripleo-image-elements,radez/tripleo-image-elements,openstack/tripleo-image-elements
- **new_contents:**

```python
# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import glob
import os

import testtools


class TestNoDupFilenames(testtools.TestCase):

    def test_no_dup_filenames(self):
        topdir = os.path.normpath(os.path.dirname(__file__) + '/../')
        elements_glob = os.path.join(topdir, "elements", "*")

        filenames = []
        dirs_to_check = ['block-device.d', 'cleanup.d', 'extra-data.d',
                         'finalise.d', 'install.d', 'post-install.d',
                         'pre-install.d', 'root.d']

        for element_dir in glob.iglob(elements_glob):
            for dir_to_check in dirs_to_check:
                target_dir = os.path.join(element_dir, dir_to_check, "*")
                for target in glob.iglob(target_dir):
                    short_path = target[len(element_dir) + 1:]
                    if not os.path.isdir(target):
                        err_msg = 'Duplicate file name found %s' % short_path
                        self.assertFalse(short_path in filenames, err_msg)
                        filenames.append(short_path)
```
---

- **commit:** `e81fd02cc7431ea01416126b88a22b4bba9b755e`
- **old_file:** `tests/test_tools/test_cmake.py`
- **new_file:** `tests/test_tools/test_cmake.py`
- **subject:** Test - add cmake test tool
- **message:** Test - add cmake test tool
- **lang:** Python
- **license:** apache-2.0
- **old_contents:** *(empty)*
- **repos:** sarahmarshy/project_generator,ohagendorf/project_generator,0xc0170/project_generator,project-generator/project_generator
- **new_contents:**

```python
# Copyright 2015 0xc0170
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import yaml
import shutil

from unittest import TestCase

from project_generator.generate import Generator
from project_generator.project import Project
from project_generator.settings import ProjectSettings
from project_generator.tools.cmake import CMakeGccArm

from .simple_project import project_1_yaml, projects_1_yaml


class TestProject(TestCase):
    """test things related to the cmake tool"""

    def setUp(self):
        if not os.path.exists('test_workspace'):
            os.makedirs('test_workspace')
        # write project file
        with open(os.path.join(os.getcwd(), 'test_workspace/project_1.yaml'), 'wt') as f:
            f.write(yaml.dump(project_1_yaml, default_flow_style=False))
        # write projects file
        with open(os.path.join(os.getcwd(), 'test_workspace/projects.yaml'), 'wt') as f:
            f.write(yaml.dump(projects_1_yaml, default_flow_style=False))
        self.project = next(Generator(projects_1_yaml).generate('project_1'))
        self.cmake = CMakeGccArm(self.project.project, ProjectSettings())

    def tearDown(self):
        # remove created directory
        shutil.rmtree('test_workspace', ignore_errors=True)
        shutil.rmtree('generated_projects', ignore_errors=True)

    def test_export_project(self):
        result = self.project.generate('cmake_gcc_arm', False)
        projectfiles = self.project.get_generated_project_files('cmake_gcc_arm')
        assert result == 0
        assert projectfiles
```
---

- **commit:** `abe6ead4f93f98406fe197b6884e51015c200ca1`
- **old_file:** `test/test_searchentities.py`
- **new_file:** `test/test_searchentities.py`
- **subject:** Add a test for query_result_to_dict
- **message:** Add a test for query_result_to_dict
- **lang:** Python
- **license:** mit
- **old_contents:** *(empty)*
- **repos:** jeffweeksio/sir
- **new_contents:**

```python
import unittest

from . import models
from sir.schema.searchentities import SearchEntity as E, SearchField as F


class QueryResultToDictTest(unittest.TestCase):
    def setUp(self):
        self.entity = E(models.B,
                        [
                            F("id", "id"),
                            F("c_bar", "c.bar"),
                            F("c_bar_trans", "c.bar", transformfunc=lambda v:
                                v.union(set(["yay"])))
                        ],
                        1.1)
        self.expected = {
            "id": 1,
            "c_bar": "foo",
            "c_bar_trans": set(["foo", "yay"]),
        }
        c = models.C(id=2, bar="foo")
        self.val = models.B(id=1, c=c)

    def test_fields(self):
        res = self.entity.query_result_to_dict(self.val)
        self.assertDictEqual(self.expected, res)
```
---

- **commit:** `257a328745b9622713afa218940d2cd820987e93`
- **old_file:** `examples/color-correction-ui.py`
- **new_file:** `examples/color-correction-ui.py`
- **subject:** Add a super simple color correction client example
- **message:** Add a super simple color correction client example
- **lang:** Python
- **license:** mit
- **old_contents:** *(empty)*
- **repos:**
PimentNoir/fadecandy,fragmede/fadecandy,fragmede/fadecandy,Jorgen-VikingGod/fadecandy,PimentNoir/fadecandy,poe/fadecandy,pixelmatix/fadecandy,pixelmatix/fadecandy,adam-back/fadecandy,jsestrich/fadecandy,Protoneer/fadecandy,Protoneer/fadecandy,hakan42/fadecandy,adam-back/fadecandy,Protoneer/fadecandy,piers7/fadecandy,piers7/fadecandy,adam-back/fadecandy,hakan42/fadecandy,pixelmatix/fadecandy,poe/fadecandy,adam-back/fadecandy,nomis52/fadecandy,fragmede/fadecandy,pixelmatix/fadecandy,Jorgen-VikingGod/fadecandy,fragmede/fadecandy,jsestrich/fadecandy,nomis52/fadecandy,scanlime/fadecandy,Jorgen-VikingGod/fadecandy,nomis52/fadecandy,nomis52/fadecandy,lincomatic/fadecandy,lincomatic/fadecandy,fragmede/fadecandy,poe/fadecandy,nomis52/fadecandy,Protoneer/fadecandy,poe/fadecandy,hakan42/fadecandy,piers7/fadecandy,lincomatic/fadecandy,PimentNoir/fadecandy,fragmede/fadecandy,poe/fadecandy,nomis52/fadecandy,adam-back/fadecandy,fragmede/fadecandy,pixelmatix/fadecandy,PimentNoir/fadecandy,Jorgen-VikingGod/fadecandy,poe/fadecandy,PimentNoir/fadecandy,scanlime/fadecandy,hakan42/fadecandy,piers7/fadecandy,lincomatic/fadecandy,poe/fadecandy,scanlime/fadecandy,poe/fadecandy,PimentNoir/fadecandy,Jorgen-VikingGod/fadecandy,nomis52/fadecandy,scanlime/fadecandy,jsestrich/fadecandy,lincomatic/fadecandy,scanlime/fadecandy,Protoneer/fadecandy,jsestrich/fadecandy,nomis52/fadecandy,jsestrich/fadecandy,lincomatic/fadecandy,piers7/fadecandy,lincomatic/fadecandy,hakan42/fadecandy,fragmede/fadecandy,scanlime/fadecandy,lincomatic/fadecandy
- **new_contents:**

```python
#!/usr/bin/env python
#
# Simple example color correction UI.
# Talks to an fcserver running on localhost.
#
# Micah Elizabeth Scott
# This example code is released into the public domain.
#

import Tkinter as tk
import socket
import json
import struct

s = socket.socket()
s.connect(('localhost', 7890))
print "Connected to OPC server"


def setGlobalColorCorrection(**obj):
    msg = json.dumps(obj)
    s.send(struct.pack(">BBH", 0, 0xF0, len(msg)) + msg)


def update(_):
    setGlobalColorCorrection(
        gamma = gamma.get(),
        whitepoint = [
            red.get(),
            green.get(),
            blue.get(),
        ])


def slider(name, from_, to):
    s = tk.Scale(root, label=name, from_=from_, to=to, resolution=0.01,
                 showvalue='yes', orient='horizontal', length=400, command=update)
    s.set(1.0)
    s.pack()
    return s


root = tk.Tk()
root.title("Fadecandy Color Correction Example")

gamma = slider("Gamma", 0.2, 3.0)
red = slider("Red", 0.0, 1.5)
green = slider("Green", 0.0, 1.5)
blue = slider("Blue", 0.0, 1.5)

root.mainloop()
```
---

- **commit:** `c68c5bf488cb7224d675bec333c6b7a4992574ed`
- **old_file:** `apl_exception.py`
- **new_file:** `apl_exception.py`
- **subject:** Add a simple APL exception class
- **message:** Add a simple APL exception class
- **lang:** Python
- **license:** apache-2.0
- **old_contents:** *(empty)*
- **repos:** NewForester/apl-py,NewForester/apl-py
- **new_contents:**

```python
"""
A simple APL exception class
"""


class APL_Exception (BaseException):
    """
    APL Exception Class
    """
    def __init__(self, message, line=None):
        self.message = message
        self.line = line

# EOF
```
---

- **commit:** `274e7a93bac93461f07dd43f3f84f1f00e229ffd`
- **old_file:** `hr_employee_relative/migrations/12.0.1.0.0/post-migration.py`
- **new_file:** `hr_employee_relative/migrations/12.0.1.0.0/post-migration.py`
- **subject:** Add migration script hr_family -> hr_employee_relative
- **message:** [12.0][IMP] Add migration script hr_family -> hr_employee_relative
- **lang:** Python
- **license:** agpl-3.0
- **old_contents:** *(empty)*
- **repos:** OCA/hr,OCA/hr,OCA/hr
- **new_contents:**

```python
# Copyright 2019 Creu Blanca
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).

from openupgradelib import openupgrade


@openupgrade.migrate()
def migrate(env, version):
    cr = env.cr
    columns = 'fam_spouse, fam_spouse_employer, fam_spouse_tel, fam_father,' \
              ' fam_father_date_of_birth, fam_mother, fam_mother_date_of_birth'
    cr.execute('SELECT id, %s FROM hr_employee' % columns)
    relation_spouse = env.ref('hr_employee_relative.relation_spouse').id
    relation_parent = env.ref('hr_employee_relative.relation_parent').id
    relation_child = env.ref('hr_employee_relative.relation_child').id
    for employee in cr.fetchall():
        if employee[1] or employee[2] or employee[3]:
            env['hr.employee.relative'].create({
                'employee_id': employee[0],
                'name': employee[1] or 'Spouse',
                'relation_id': relation_spouse
            })
        if employee[4] or employee[5]:
            env['hr.employee.relative'].create({
                'employee_id': employee[0],
                'name': employee[4] or 'Father',
                'date_of_birth': employee[5] or False,
                'relation_id': relation_parent
            })
        if employee[6] or employee[7]:
            env['hr.employee.relative'].create({
                'employee_id': employee[0],
                'name': employee[6] or 'Mother',
                'date_of_birth': employee[7] or False,
                'relation_id': relation_parent
            })
    cr.execute(
        'SELECT name, date_of_birth, employee_id, gender'
        ' FROM hr_employee_children'
    )
    for children in cr.fetchall():
        env['hr.employee.relative'].create({
            'name': children[0] or 'Child',
            'date_of_birth': children[1] or False,
            'employee_id': children[2],
            'gender': children[3] or False,
            'relation_id': relation_child
        })
```
---

- **commit:** `0179d4d84987da76c517de4e01100f0e1d2049ea`
- **old_file:** `tests/unit/modules/pacman_test.py`
- **new_file:** `tests/unit/modules/pacman_test.py`
- **subject:** Add unit tests for pacman list packages
- **message:** Add unit tests for pacman list packages
- **lang:** Python
- **license:** apache-2.0
- **old_contents:** *(empty)*
- **repos:** saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
- **new_contents:**

```python
# -*- coding: utf-8 -*-
'''
    :codeauthor: :email:`Eric Vz <eric@base10.org>`
'''

# Import Python Libs
from __future__ import absolute_import

# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.mock import (
    MagicMock,
    patch,
    NO_MOCK,
    NO_MOCK_REASON
)
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')

# Import Salt Libs
from salt.modules import pacman
from salt.exceptions import CommandExecutionError


@skipIf(NO_MOCK, NO_MOCK_REASON)
class PacmanTestCase(TestCase):
    '''
    Test cases for salt.modules.pacman
    '''
    def setUp(self):
        pacman.__salt__ = {}
        pacman.__context__ = {}

    def test_list_pkgs(self):
        '''
        Test if it list the packages currently installed in a dict
        '''
        cmdmock = MagicMock(return_value='A 1.0\nB 2.0')
        sortmock = MagicMock()
        stringifymock = MagicMock()
        with patch.dict(pacman.__salt__, {'cmd.run': cmdmock,
                                          'pkg_resource.add_pkg': self._add_pkg,
                                          'pkg_resource.sort_pkglist': sortmock,
                                          'pkg_resource.stringify': stringifymock}):
            self.assertDictEqual(pacman.list_pkgs(), {'A': ['1.0'], 'B': ['2.0']})
            sortmock.assert_called_once()
            stringifymock.assert_called_once()

    def test_list_pkgs_as_list(self):
        '''
        Test if it list the packages currently installed in a dict
        '''
        cmdmock = MagicMock(return_value='A 1.0\nB 2.0')
        sortmock = MagicMock()
        stringifymock = MagicMock()
        with patch.dict(pacman.__salt__, {'cmd.run': cmdmock,
                                          'pkg_resource.add_pkg': self._add_pkg,
                                          'pkg_resource.sort_pkglist': sortmock,
                                          'pkg_resource.stringify': stringifymock}):
            self.assertDictEqual(pacman.list_pkgs(True), {'A': ['1.0'], 'B': ['2.0']})
            sortmock.assert_called_once()
            stringifymock.assert_not_called()

    '''
    Helper methods for test cases
    '''
    def _add_pkg(self, pkgs, name, version):
        pkgs.setdefault(name, []).append(version)


if __name__ == '__main__':
    from integration import run_tests
    run_tests(PacmanTestCase, needs_daemon=False)
```
Add unit tests for pacman list packages
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Eric Vz <eric@base10.org>`
'''
# Import Python Libs
from __future__ import absolute_import
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import Salt Libs
from salt.modules import pacman
from salt.exceptions import CommandExecutionError
@skipIf(NO_MOCK, NO_MOCK_REASON)
class PacmanTestCase(TestCase):
'''
Test cases for salt.modules.pacman
'''
def setUp(self):
pacman.__salt__ = {}
pacman.__context__ = {}
def test_list_pkgs(self):
'''
Test if it list the packages currently installed in a dict
'''
cmdmock = MagicMock(return_value='A 1.0\nB 2.0')
sortmock = MagicMock()
stringifymock = MagicMock()
with patch.dict(pacman.__salt__, {'cmd.run': cmdmock, 'pkg_resource.add_pkg': self._add_pkg, 'pkg_resource.sort_pkglist': sortmock, 'pkg_resource.stringify': stringifymock}):
self.assertDictEqual(pacman.list_pkgs(), {'A': ['1.0'], 'B': ['2.0']})
sortmock.assert_called_once()
stringifymock.assert_called_once()
def test_list_pkgs_as_list(self):
'''
Test if it list the packages currently installed in a dict
'''
cmdmock = MagicMock(return_value='A 1.0\nB 2.0')
sortmock = MagicMock()
stringifymock = MagicMock()
with patch.dict(pacman.__salt__, {'cmd.run': cmdmock, 'pkg_resource.add_pkg': self._add_pkg, 'pkg_resource.sort_pkglist': sortmock, 'pkg_resource.stringify': stringifymock}):
self.assertDictEqual(pacman.list_pkgs(True), {'A': ['1.0'], 'B': ['2.0']})
sortmock.assert_called_once()
stringifymock.assert_not_called()
'''
Helper methods for test cases
'''
def _add_pkg(self, pkgs, name, version):
pkgs.setdefault(name, []).append(version)
if __name__ == '__main__':
from integration import run_tests
run_tests(PacmanTestCase, needs_daemon=False)
|
<commit_before><commit_msg>Add unit tests for pacman list packages<commit_after>
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Eric Vz <eric@base10.org>`
'''
# Import Python Libs
from __future__ import absolute_import
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import Salt Libs
from salt.modules import pacman
from salt.exceptions import CommandExecutionError
@skipIf(NO_MOCK, NO_MOCK_REASON)
class PacmanTestCase(TestCase):
'''
Test cases for salt.modules.pacman
'''
def setUp(self):
pacman.__salt__ = {}
pacman.__context__ = {}
def test_list_pkgs(self):
'''
Test if it list the packages currently installed in a dict
'''
cmdmock = MagicMock(return_value='A 1.0\nB 2.0')
sortmock = MagicMock()
stringifymock = MagicMock()
with patch.dict(pacman.__salt__, {'cmd.run': cmdmock, 'pkg_resource.add_pkg': self._add_pkg, 'pkg_resource.sort_pkglist': sortmock, 'pkg_resource.stringify': stringifymock}):
self.assertDictEqual(pacman.list_pkgs(), {'A': ['1.0'], 'B': ['2.0']})
sortmock.assert_called_once()
stringifymock.assert_called_once()
def test_list_pkgs_as_list(self):
'''
Test if it list the packages currently installed in a dict
'''
cmdmock = MagicMock(return_value='A 1.0\nB 2.0')
sortmock = MagicMock()
stringifymock = MagicMock()
with patch.dict(pacman.__salt__, {'cmd.run': cmdmock, 'pkg_resource.add_pkg': self._add_pkg, 'pkg_resource.sort_pkglist': sortmock, 'pkg_resource.stringify': stringifymock}):
self.assertDictEqual(pacman.list_pkgs(True), {'A': ['1.0'], 'B': ['2.0']})
sortmock.assert_called_once()
stringifymock.assert_not_called()
'''
Helper methods for test cases
'''
def _add_pkg(self, pkgs, name, version):
pkgs.setdefault(name, []).append(version)
if __name__ == '__main__':
from integration import run_tests
run_tests(PacmanTestCase, needs_daemon=False)
|
Add unit tests for pacman list packages# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Eric Vz <eric@base10.org>`
'''
# Import Python Libs
from __future__ import absolute_import
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import Salt Libs
from salt.modules import pacman
from salt.exceptions import CommandExecutionError
@skipIf(NO_MOCK, NO_MOCK_REASON)
class PacmanTestCase(TestCase):
'''
Test cases for salt.modules.pacman
'''
def setUp(self):
pacman.__salt__ = {}
pacman.__context__ = {}
def test_list_pkgs(self):
'''
        Test if it lists the packages currently installed in a dict
'''
cmdmock = MagicMock(return_value='A 1.0\nB 2.0')
sortmock = MagicMock()
stringifymock = MagicMock()
with patch.dict(pacman.__salt__, {'cmd.run': cmdmock, 'pkg_resource.add_pkg': self._add_pkg, 'pkg_resource.sort_pkglist': sortmock, 'pkg_resource.stringify': stringifymock}):
self.assertDictEqual(pacman.list_pkgs(), {'A': ['1.0'], 'B': ['2.0']})
sortmock.assert_called_once()
stringifymock.assert_called_once()
def test_list_pkgs_as_list(self):
'''
        Test if it lists the packages currently installed in a dict
'''
cmdmock = MagicMock(return_value='A 1.0\nB 2.0')
sortmock = MagicMock()
stringifymock = MagicMock()
with patch.dict(pacman.__salt__, {'cmd.run': cmdmock, 'pkg_resource.add_pkg': self._add_pkg, 'pkg_resource.sort_pkglist': sortmock, 'pkg_resource.stringify': stringifymock}):
self.assertDictEqual(pacman.list_pkgs(True), {'A': ['1.0'], 'B': ['2.0']})
sortmock.assert_called_once()
stringifymock.assert_not_called()
'''
Helper methods for test cases
'''
def _add_pkg(self, pkgs, name, version):
pkgs.setdefault(name, []).append(version)
if __name__ == '__main__':
from integration import run_tests
run_tests(PacmanTestCase, needs_daemon=False)
|
<commit_before><commit_msg>Add unit tests for pacman list packages<commit_after># -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Eric Vz <eric@base10.org>`
'''
# Import Python Libs
from __future__ import absolute_import
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import Salt Libs
from salt.modules import pacman
from salt.exceptions import CommandExecutionError
@skipIf(NO_MOCK, NO_MOCK_REASON)
class PacmanTestCase(TestCase):
'''
Test cases for salt.modules.pacman
'''
def setUp(self):
pacman.__salt__ = {}
pacman.__context__ = {}
def test_list_pkgs(self):
'''
        Test if it lists the packages currently installed in a dict
'''
cmdmock = MagicMock(return_value='A 1.0\nB 2.0')
sortmock = MagicMock()
stringifymock = MagicMock()
with patch.dict(pacman.__salt__, {'cmd.run': cmdmock, 'pkg_resource.add_pkg': self._add_pkg, 'pkg_resource.sort_pkglist': sortmock, 'pkg_resource.stringify': stringifymock}):
self.assertDictEqual(pacman.list_pkgs(), {'A': ['1.0'], 'B': ['2.0']})
sortmock.assert_called_once()
stringifymock.assert_called_once()
def test_list_pkgs_as_list(self):
'''
        Test if it lists the packages currently installed in a dict
'''
cmdmock = MagicMock(return_value='A 1.0\nB 2.0')
sortmock = MagicMock()
stringifymock = MagicMock()
with patch.dict(pacman.__salt__, {'cmd.run': cmdmock, 'pkg_resource.add_pkg': self._add_pkg, 'pkg_resource.sort_pkglist': sortmock, 'pkg_resource.stringify': stringifymock}):
self.assertDictEqual(pacman.list_pkgs(True), {'A': ['1.0'], 'B': ['2.0']})
sortmock.assert_called_once()
stringifymock.assert_not_called()
'''
Helper methods for test cases
'''
def _add_pkg(self, pkgs, name, version):
pkgs.setdefault(name, []).append(version)
if __name__ == '__main__':
from integration import run_tests
run_tests(PacmanTestCase, needs_daemon=False)
|
|
f2d4ddba7c594ec93f0ede0be1fc515b0c7c2d7b
|
HJoystick.py
|
HJoystick.py
|
#from direct.showbase import DirectObject
import pygame #pygame must be in the Main.py directory
#THIS FILE MUST BE IN THE MAIN.PY DIRECTORY BECAUSE OF PATH ISSUES
class HJoystickSensor():
def __init__(self,joystickId=0):
#print os.getcwd()
pygame.init()
pygame.joystick.init()
c=pygame.joystick.get_count()
if c>0:
self.id=joystickId
self.object=pygame.joystick.Joystick(self.id)
self.numButtons=self.object.get_numbuttons()
self.numAxes=self.object.get_numaxes()
            base.taskMgr.add(self._task,"taskForJoystick_"+str(self.id))
else:
print "No Joystick"
def _task(self,t):
pygame.event.pump()
for b in range(self.numButtons):
if self.object.get_button(b):
messenger.send("Joystick_Button_"+str(b))
for a in range(self.numAxes):
axis=self.object.get_axis(a)
if axis!=0:
messenger.send("Joystick_Axis_"+str(a),sentArgs[a])
return t.cont
    ##Hats and other things I'm not using right now
if __name__=="__main__":
a=HJoystickSensor()
|
Remove HInput and isolate joystick-related code because of path issues with pygame
|
Remove HInput and isolate joystick-related code because of path issues with pygame
|
Python
|
bsd-2-clause
|
hikaruAi/HPanda
|
Remove HInput and isolate joystick-related code because of path issues with pygame
|
#from direct.showbase import DirectObject
import pygame #pygame must be in the Main.py directory
#THIS FILE MUST BE IN THE MAIN.PY DIRECTORY BECAUSE OF PATH ISSUES
class HJoystickSensor():
def __init__(self,joystickId=0):
#print os.getcwd()
pygame.init()
pygame.joystick.init()
c=pygame.joystick.get_count()
if c>0:
self.id=joystickId
self.object=pygame.joystick.Joystick(self.id)
self.numButtons=self.object.get_numbuttons()
self.numAxes=self.object.get_numaxes()
            base.taskMgr.add(self._task,"taskForJoystick_"+str(self.id))
else:
print "No Joystick"
def _task(self,t):
pygame.event.pump()
for b in range(self.numButtons):
if self.object.get_button(b):
messenger.send("Joystick_Button_"+str(b))
for a in range(self.numAxes):
axis=self.object.get_axis(a)
if axis!=0:
messenger.send("Joystick_Axis_"+str(a),sentArgs[a])
return t.cont
    ##Hats and other things I'm not using right now
if __name__=="__main__":
a=HJoystickSensor()
|
<commit_before><commit_msg>Remove HInput and isolate joystick-related code because of path issues with pygame<commit_after>
|
#from direct.showbase import DirectObject
import pygame #pygame must be in the Main.py directory
#THIS FILE MUST BE IN THE MAIN.PY DIRECTORY BECAUSE OF PATH ISSUES
class HJoystickSensor():
def __init__(self,joystickId=0):
#print os.getcwd()
pygame.init()
pygame.joystick.init()
c=pygame.joystick.get_count()
if c>0:
self.id=joystickId
self.object=pygame.joystick.Joystick(self.id)
self.numButtons=self.object.get_numbuttons()
self.numAxes=self.object.get_numaxes()
            base.taskMgr.add(self._task,"taskForJoystick_"+str(self.id))
else:
print "No Joystick"
def _task(self,t):
pygame.event.pump()
for b in range(self.numButtons):
if self.object.get_button(b):
messenger.send("Joystick_Button_"+str(b))
for a in range(self.numAxes):
axis=self.object.get_axis(a)
if axis!=0:
messenger.send("Joystick_Axis_"+str(a),sentArgs[a])
return t.cont
    ##Hats and other things I'm not using right now
if __name__=="__main__":
a=HJoystickSensor()
|
Remove HInput and isolate joystick-related code because of path issues with pygame#from direct.showbase import DirectObject
import pygame #pygame must be in the Main.py directory
#THIS FILE MUST BE IN THE MAIN.PY DIRECTORY BECAUSE OF PATH ISSUES
class HJoystickSensor():
def __init__(self,joystickId=0):
#print os.getcwd()
pygame.init()
pygame.joystick.init()
c=pygame.joystick.get_count()
if c>0:
self.id=joystickId
self.object=pygame.joystick.Joystick(self.id)
self.numButtons=self.object.get_numbuttons()
self.numAxes=self.object.get_numaxes()
            base.taskMgr.add(self._task,"taskForJoystick_"+str(self.id))
else:
print "No Joystick"
def _task(self,t):
pygame.event.pump()
for b in range(self.numButtons):
if self.object.get_button(b):
messenger.send("Joystick_Button_"+str(b))
for a in range(self.numAxes):
axis=self.object.get_axis(a)
if axis!=0:
messenger.send("Joystick_Axis_"+str(a),sentArgs[a])
return t.cont
    ##Hats and other things I'm not using right now
if __name__=="__main__":
a=HJoystickSensor()
|
<commit_before><commit_msg>Remove HInput and isolate joystick-related code because of path issues with pygame<commit_after>#from direct.showbase import DirectObject
import pygame #pygame must be in the Main.py directory
#THIS FILE MUST BE IN THE MAIN.PY DIRECTORY BECAUSE OF PATH ISSUES
class HJoystickSensor():
def __init__(self,joystickId=0):
#print os.getcwd()
pygame.init()
pygame.joystick.init()
c=pygame.joystick.get_count()
if c>0:
self.id=joystickId
self.object=pygame.joystick.Joystick(self.id)
self.numButtons=self.object.get_numbuttons()
self.numAxes=self.object.get_numaxes()
            base.taskMgr.add(self._task,"taskForJoystick_"+str(self.id))
else:
print "No Joystick"
def _task(self,t):
pygame.event.pump()
for b in range(self.numButtons):
if self.object.get_button(b):
messenger.send("Joystick_Button_"+str(b))
for a in range(self.numAxes):
axis=self.object.get_axis(a)
if axis!=0:
messenger.send("Joystick_Axis_"+str(a),sentArgs[a])
return t.cont
    ##Hats and other things I'm not using right now
if __name__=="__main__":
a=HJoystickSensor()
|
|
fe63d6e1e822f7cb60d1c0bdaa08eb53d3849783
|
benchmark/datasets/musicbrainz/extract-from-dbdump.py
|
benchmark/datasets/musicbrainz/extract-from-dbdump.py
|
#!/usr/bin/env python
"""
Script to extract the artist names from a MusicBrainz database dump.
Usage:
./extract-from-dbdump.py <dump_dir>/artist <outfile>
"""
import pandas as pd
import sys
__author__ = "Uwe L. Korn"
__license__ = "MIT"
input_file = sys.argv[1]
output_file = sys.argv[2]
df = pd.read_csv(input_file, sep='\t', header=None)
df.iloc[:, 2].to_csv(output_file, index=False)
|
Add script to extract artist names from MusicBrainz database
|
Add script to extract artist names from MusicBrainz database
|
Python
|
mit
|
xhochy/libfuzzymatch,xhochy/libfuzzymatch
|
Add script to extract artist names from MusicBrainz database
|
#!/usr/bin/env python
"""
Script to extract the artist names from a MusicBrainz database dump.
Usage:
./extract-from-dbdump.py <dump_dir>/artist <outfile>
"""
import pandas as pd
import sys
__author__ = "Uwe L. Korn"
__license__ = "MIT"
input_file = sys.argv[1]
output_file = sys.argv[2]
df = pd.read_csv(input_file, sep='\t', header=None)
df.iloc[:, 2].to_csv(output_file, index=False)
|
<commit_before><commit_msg>Add script to extract artist names from MusicBrainz database<commit_after>
|
#!/usr/bin/env python
"""
Script to extract the artist names from a MusicBrainz database dump.
Usage:
./extract-from-dbdump.py <dump_dir>/artist <outfile>
"""
import pandas as pd
import sys
__author__ = "Uwe L. Korn"
__license__ = "MIT"
input_file = sys.argv[1]
output_file = sys.argv[2]
df = pd.read_csv(input_file, sep='\t', header=None)
df.iloc[:, 2].to_csv(output_file, index=False)
|
Add script to extract artist names from MusicBrainz database#!/usr/bin/env python
"""
Script to extract the artist names from a MusicBrainz database dump.
Usage:
./extract-from-dbdump.py <dump_dir>/artist <outfile>
"""
import pandas as pd
import sys
__author__ = "Uwe L. Korn"
__license__ = "MIT"
input_file = sys.argv[1]
output_file = sys.argv[2]
df = pd.read_csv(input_file, sep='\t', header=None)
df.iloc[:, 2].to_csv(output_file, index=False)
|
<commit_before><commit_msg>Add script to extract artist names from MusicBrainz database<commit_after>#!/usr/bin/env python
"""
Script to extract the artist names from a MusicBrainz database dump.
Usage:
./extract-from-dbdump.py <dump_dir>/artist <outfile>
"""
import pandas as pd
import sys
__author__ = "Uwe L. Korn"
__license__ = "MIT"
input_file = sys.argv[1]
output_file = sys.argv[2]
df = pd.read_csv(input_file, sep='\t', header=None)
df.iloc[:, 2].to_csv(output_file, index=False)
|
|
3a235e25ac3f5d76eb4030e01afbe7b716ec6d91
|
py/verify-preorder-serialization-of-a-binary-tree.py
|
py/verify-preorder-serialization-of-a-binary-tree.py
|
class Solution(object):
def isValidSerialization(self, preorder):
"""
:type preorder: str
:rtype: bool
"""
def get_tree(nodes, offset):
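            # consume the subtree rooted at offset; return the index just past it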
if nodes[offset] == '#':
return offset + 1
else:
left = get_tree(nodes, offset + 1)
right = get_tree(nodes, left)
return right
nodes = preorder.split(',')
try:
ret = get_tree(nodes, 0)
return ret == len(nodes)
except IndexError:
return False
|
Add py solution for 331. Verify Preorder Serialization of a Binary Tree
|
Add py solution for 331. Verify Preorder Serialization of a Binary Tree
331. Verify Preorder Serialization of a Binary Tree: https://leetcode.com/problems/verify-preorder-serialization-of-a-binary-tree/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 331. Verify Preorder Serialization of a Binary Tree
331. Verify Preorder Serialization of a Binary Tree: https://leetcode.com/problems/verify-preorder-serialization-of-a-binary-tree/
|
class Solution(object):
def isValidSerialization(self, preorder):
"""
:type preorder: str
:rtype: bool
"""
def get_tree(nodes, offset):
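            # consume the subtree rooted at offset; return the index just past it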
if nodes[offset] == '#':
return offset + 1
else:
left = get_tree(nodes, offset + 1)
right = get_tree(nodes, left)
return right
nodes = preorder.split(',')
try:
ret = get_tree(nodes, 0)
return ret == len(nodes)
except IndexError:
return False
|
<commit_before><commit_msg>Add py solution for 331. Verify Preorder Serialization of a Binary Tree
331. Verify Preorder Serialization of a Binary Tree: https://leetcode.com/problems/verify-preorder-serialization-of-a-binary-tree/<commit_after>
|
class Solution(object):
def isValidSerialization(self, preorder):
"""
:type preorder: str
:rtype: bool
"""
def get_tree(nodes, offset):
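            # consume the subtree rooted at offset; return the index just past it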
if nodes[offset] == '#':
return offset + 1
else:
left = get_tree(nodes, offset + 1)
right = get_tree(nodes, left)
return right
nodes = preorder.split(',')
try:
ret = get_tree(nodes, 0)
return ret == len(nodes)
except IndexError:
return False
|
Add py solution for 331. Verify Preorder Serialization of a Binary Tree
331. Verify Preorder Serialization of a Binary Tree: https://leetcode.com/problems/verify-preorder-serialization-of-a-binary-tree/class Solution(object):
def isValidSerialization(self, preorder):
"""
:type preorder: str
:rtype: bool
"""
def get_tree(nodes, offset):
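            # consume the subtree rooted at offset; return the index just past it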
if nodes[offset] == '#':
return offset + 1
else:
left = get_tree(nodes, offset + 1)
right = get_tree(nodes, left)
return right
nodes = preorder.split(',')
try:
ret = get_tree(nodes, 0)
return ret == len(nodes)
except IndexError:
return False
|
<commit_before><commit_msg>Add py solution for 331. Verify Preorder Serialization of a Binary Tree
331. Verify Preorder Serialization of a Binary Tree: https://leetcode.com/problems/verify-preorder-serialization-of-a-binary-tree/<commit_after>class Solution(object):
def isValidSerialization(self, preorder):
"""
:type preorder: str
:rtype: bool
"""
def get_tree(nodes, offset):
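            # consume the subtree rooted at offset; return the index just past it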
if nodes[offset] == '#':
return offset + 1
else:
left = get_tree(nodes, offset + 1)
right = get_tree(nodes, left)
return right
nodes = preorder.split(',')
try:
ret = get_tree(nodes, 0)
return ret == len(nodes)
except IndexError:
return False
|
|
bcee6173027c48bfb25a65d3e97660f2e2a0852b
|
gentest.py
|
gentest.py
|
from itertools import product
import json
import numpy
cube = numpy.array(range(1, 9)).reshape(2, 2, 2)
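# eight operand shapes, from a scalar up to the full 2x2x2 cube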
pcube = [
cube[0 ,0 ,0 ],
cube[0 ,0 ,0:2],
cube[0 ,0:2,0:1],
cube[0 ,0:2,0:2],
cube[0:2,0:1,0:1],
cube[0:2,0:1,0:2],
cube[0:2,0:2,0:1],
cube[0:2,0:2,0:2],
]
for (i, (a, b)) in enumerate(product(pcube, repeat=2), start=1):
print 'public function testBsxfun{0:0>2d}()'.format(i)
print '{'
print '$a = {0};'.format(json.dumps(a.tolist()))
print '$b = {0};'.format(json.dumps(b.tolist()))
print '$expected = {0};'.format(json.dumps((a * b).tolist()))
print '$actual = Bsxfun::bsxfun($this->times, $a, $b);'
print '$this->assertEquals($expected, $actual);'
print '}'
print
|
Add a python script to generate test methods
|
Add a python script to generate test methods
|
Python
|
mit
|
y-uti/php-bsxfun,y-uti/php-bsxfun
|
Add a python script to generate test methods
|
from itertools import product
import json
import numpy
cube = numpy.array(range(1, 9)).reshape(2, 2, 2)
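# eight operand shapes, from a scalar up to the full 2x2x2 cube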
pcube = [
cube[0 ,0 ,0 ],
cube[0 ,0 ,0:2],
cube[0 ,0:2,0:1],
cube[0 ,0:2,0:2],
cube[0:2,0:1,0:1],
cube[0:2,0:1,0:2],
cube[0:2,0:2,0:1],
cube[0:2,0:2,0:2],
]
for (i, (a, b)) in enumerate(product(pcube, repeat=2), start=1):
print 'public function testBsxfun{0:0>2d}()'.format(i)
print '{'
print '$a = {0};'.format(json.dumps(a.tolist()))
print '$b = {0};'.format(json.dumps(b.tolist()))
print '$expected = {0};'.format(json.dumps((a * b).tolist()))
print '$actual = Bsxfun::bsxfun($this->times, $a, $b);'
print '$this->assertEquals($expected, $actual);'
print '}'
print
|
<commit_before><commit_msg>Add a python script to generate test methods<commit_after>
|
from itertools import product
import json
import numpy
cube = numpy.array(range(1, 9)).reshape(2, 2, 2)
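# eight operand shapes, from a scalar up to the full 2x2x2 cube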
pcube = [
cube[0 ,0 ,0 ],
cube[0 ,0 ,0:2],
cube[0 ,0:2,0:1],
cube[0 ,0:2,0:2],
cube[0:2,0:1,0:1],
cube[0:2,0:1,0:2],
cube[0:2,0:2,0:1],
cube[0:2,0:2,0:2],
]
for (i, (a, b)) in enumerate(product(pcube, repeat=2), start=1):
print 'public function testBsxfun{0:0>2d}()'.format(i)
print '{'
print '$a = {0};'.format(json.dumps(a.tolist()))
print '$b = {0};'.format(json.dumps(b.tolist()))
print '$expected = {0};'.format(json.dumps((a * b).tolist()))
print '$actual = Bsxfun::bsxfun($this->times, $a, $b);'
print '$this->assertEquals($expected, $actual);'
print '}'
print
|
Add a python script to generate test methodsfrom itertools import product
import json
import numpy
cube = numpy.array(range(1, 9)).reshape(2, 2, 2)
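# eight operand shapes, from a scalar up to the full 2x2x2 cube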
pcube = [
cube[0 ,0 ,0 ],
cube[0 ,0 ,0:2],
cube[0 ,0:2,0:1],
cube[0 ,0:2,0:2],
cube[0:2,0:1,0:1],
cube[0:2,0:1,0:2],
cube[0:2,0:2,0:1],
cube[0:2,0:2,0:2],
]
for (i, (a, b)) in enumerate(product(pcube, repeat=2), start=1):
print 'public function testBsxfun{0:0>2d}()'.format(i)
print '{'
print '$a = {0};'.format(json.dumps(a.tolist()))
print '$b = {0};'.format(json.dumps(b.tolist()))
print '$expected = {0};'.format(json.dumps((a * b).tolist()))
print '$actual = Bsxfun::bsxfun($this->times, $a, $b);'
print '$this->assertEquals($expected, $actual);'
print '}'
print
|
<commit_before><commit_msg>Add a python script to generate test methods<commit_after>from itertools import product
import json
import numpy
cube = numpy.array(range(1, 9)).reshape(2, 2, 2)
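# eight operand shapes, from a scalar up to the full 2x2x2 cube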
pcube = [
cube[0 ,0 ,0 ],
cube[0 ,0 ,0:2],
cube[0 ,0:2,0:1],
cube[0 ,0:2,0:2],
cube[0:2,0:1,0:1],
cube[0:2,0:1,0:2],
cube[0:2,0:2,0:1],
cube[0:2,0:2,0:2],
]
for (i, (a, b)) in enumerate(product(pcube, repeat=2), start=1):
print 'public function testBsxfun{0:0>2d}()'.format(i)
print '{'
print '$a = {0};'.format(json.dumps(a.tolist()))
print '$b = {0};'.format(json.dumps(b.tolist()))
print '$expected = {0};'.format(json.dumps((a * b).tolist()))
print '$actual = Bsxfun::bsxfun($this->times, $a, $b);'
print '$this->assertEquals($expected, $actual);'
print '}'
print
|
|
052392da7980c4f4e2e86cd8eb65da5b91d3547b
|
CodeFights/differentSymbolsNaive.py
|
CodeFights/differentSymbolsNaive.py
|
#!/usr/local/bin/python
# Code Fights Different Symbols Naive Problem
from collections import Counter
def differentSymbolsNaive(s):
return len(Counter(s))
def main():
tests = [
["cabca", 3],
["aba", 2]
]
for t in tests:
res = differentSymbolsNaive(t[0])
ans = t[1]
if ans == res:
print("PASSED: differentSymbolsNaive({}) returned {}"
.format(t[0], res))
else:
print(("FAILED: differentSymbolsNaive({}) returned {},"
"answer: {}").format(t[0], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights different symbols naive problem
|
Solve Code Fights different symbols naive problem
|
Python
|
mit
|
HKuz/Test_Code
|
Solve Code Fights different symbols naive problem
|
#!/usr/local/bin/python
# Code Fights Different Symbols Naive Problem
from collections import Counter
def differentSymbolsNaive(s):
return len(Counter(s))
def main():
tests = [
["cabca", 3],
["aba", 2]
]
for t in tests:
res = differentSymbolsNaive(t[0])
ans = t[1]
if ans == res:
print("PASSED: differentSymbolsNaive({}) returned {}"
.format(t[0], res))
else:
print(("FAILED: differentSymbolsNaive({}) returned {},"
"answer: {}").format(t[0], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights different symbols naive problem<commit_after>
|
#!/usr/local/bin/python
# Code Fights Different Symbols Naive Problem
from collections import Counter
def differentSymbolsNaive(s):
return len(Counter(s))
def main():
tests = [
["cabca", 3],
["aba", 2]
]
for t in tests:
res = differentSymbolsNaive(t[0])
ans = t[1]
if ans == res:
print("PASSED: differentSymbolsNaive({}) returned {}"
.format(t[0], res))
else:
print(("FAILED: differentSymbolsNaive({}) returned {},"
"answer: {}").format(t[0], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights different symbols naive problem#!/usr/local/bin/python
# Code Fights Different Symbols Naive Problem
from collections import Counter
def differentSymbolsNaive(s):
return len(Counter(s))
def main():
tests = [
["cabca", 3],
["aba", 2]
]
for t in tests:
res = differentSymbolsNaive(t[0])
ans = t[1]
if ans == res:
print("PASSED: differentSymbolsNaive({}) returned {}"
.format(t[0], res))
else:
print(("FAILED: differentSymbolsNaive({}) returned {},"
"answer: {}").format(t[0], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights different symbols naive problem<commit_after>#!/usr/local/bin/python
# Code Fights Different Symbols Naive Problem
from collections import Counter
def differentSymbolsNaive(s):
return len(Counter(s))
def main():
tests = [
["cabca", 3],
["aba", 2]
]
for t in tests:
res = differentSymbolsNaive(t[0])
ans = t[1]
if ans == res:
print("PASSED: differentSymbolsNaive({}) returned {}"
.format(t[0], res))
else:
print(("FAILED: differentSymbolsNaive({}) returned {},"
"answer: {}").format(t[0], res, ans))
if __name__ == '__main__':
main()
|
|
d159b32d51339915ef633f3c6d33ce5eeafa78d6
|
py/rotate-function.py
|
py/rotate-function.py
|
class Solution(object):
def maxRotateFunction(self, A):
"""
:type A: List[int]
:rtype: int
"""
lA = len(A)
if not lA:
return 0
subsum = 0
F = 0
for i in xrange(1, lA):
subsum += A[-i]
F += subsum
subsum += A[0]
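        # each rotation: F(i) = F(i-1) + sum(A) - lA * A[lA-i]; keep the running max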
m = F
for i in xrange(1, lA):
F += subsum
F -= lA * A[-i]
m = max(m, F)
return m
|
Add py solution for 396. Rotate Function
|
Add py solution for 396. Rotate Function
396. Rotate Function: https://leetcode.com/problems/rotate-function/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 396. Rotate Function
396. Rotate Function: https://leetcode.com/problems/rotate-function/
|
class Solution(object):
def maxRotateFunction(self, A):
"""
:type A: List[int]
:rtype: int
"""
lA = len(A)
if not lA:
return 0
subsum = 0
F = 0
for i in xrange(1, lA):
subsum += A[-i]
F += subsum
subsum += A[0]
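        # each rotation: F(i) = F(i-1) + sum(A) - lA * A[lA-i]; keep the running max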
m = F
for i in xrange(1, lA):
F += subsum
F -= lA * A[-i]
m = max(m, F)
return m
|
<commit_before><commit_msg>Add py solution for 396. Rotate Function
396. Rotate Function: https://leetcode.com/problems/rotate-function/<commit_after>
|
class Solution(object):
def maxRotateFunction(self, A):
"""
:type A: List[int]
:rtype: int
"""
lA = len(A)
if not lA:
return 0
subsum = 0
F = 0
for i in xrange(1, lA):
subsum += A[-i]
F += subsum
subsum += A[0]
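        # each rotation: F(i) = F(i-1) + sum(A) - lA * A[lA-i]; keep the running max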
m = F
for i in xrange(1, lA):
F += subsum
F -= lA * A[-i]
m = max(m, F)
return m
|
Add py solution for 396. Rotate Function
396. Rotate Function: https://leetcode.com/problems/rotate-function/class Solution(object):
def maxRotateFunction(self, A):
"""
:type A: List[int]
:rtype: int
"""
lA = len(A)
if not lA:
return 0
subsum = 0
F = 0
for i in xrange(1, lA):
subsum += A[-i]
F += subsum
subsum += A[0]
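        # each rotation: F(i) = F(i-1) + sum(A) - lA * A[lA-i]; keep the running max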
m = F
for i in xrange(1, lA):
F += subsum
F -= lA * A[-i]
m = max(m, F)
return m
|
<commit_before><commit_msg>Add py solution for 396. Rotate Function
396. Rotate Function: https://leetcode.com/problems/rotate-function/<commit_after>class Solution(object):
def maxRotateFunction(self, A):
"""
:type A: List[int]
:rtype: int
"""
lA = len(A)
if not lA:
return 0
subsum = 0
F = 0
for i in xrange(1, lA):
subsum += A[-i]
F += subsum
subsum += A[0]
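        # each rotation: F(i) = F(i-1) + sum(A) - lA * A[lA-i]; keep the running max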
m = F
for i in xrange(1, lA):
F += subsum
F -= lA * A[-i]
m = max(m, F)
return m
|
|
ee7c257b62bff832b899f54fd7bf39ae47db05b7
|
get_new_url.py
|
get_new_url.py
|
import sys
import polycules
if len(sys.argv) != 2:
    print('Expected ID, got too little or too much')
    sys.exit(1)
old_id = sys.argv[1]
db = polycules.connect_db()
result = db.execute('select hash from polycules where id = ?', [
old_id,
]).fetchone()
if result is None:
    print("Couldn't find the polycule with that ID")
    sys.exit(1)
print('New url: https://polycul.es/{}'.format(result[0][:7]))
|
Add tool to get new url
|
Add tool to get new url
|
Python
|
mit
|
makyo/polycul.es,makyo/polycul.es,makyo/polycul.es
|
Add tool to get new url
|
import sys
import polycules
if len(sys.argv) != 2:
    print('Expected ID, got too little or too much')
    sys.exit(1)
old_id = sys.argv[1]
db = polycules.connect_db()
result = db.execute('select hash from polycules where id = ?', [
old_id,
]).fetchone()
if result is None:
    print("Couldn't find the polycule with that ID")
    sys.exit(1)
print('New url: https://polycul.es/{}'.format(result[0][:7]))
|
<commit_before><commit_msg>Add tool to get new url<commit_after>
|
import sys
import polycules
if len(sys.argv) != 2:
    print('Expected ID, got too little or too much')
    sys.exit(1)
old_id = sys.argv[1]
db = polycules.connect_db()
result = db.execute('select hash from polycules where id = ?', [
old_id,
]).fetchone()
if result is None:
    print("Couldn't find the polycule with that ID")
    sys.exit(1)
print('New url: https://polycul.es/{}'.format(result[0][:7]))
|
Add tool to get new urlimport sys
import polycules
if len(sys.argv) != 2:
    print('Expected ID, got too little or too much')
    sys.exit(1)
old_id = sys.argv[1]
db = polycules.connect_db()
result = db.execute('select hash from polycules where id = ?', [
old_id,
]).fetchone()
if result is None:
    print("Couldn't find the polycule with that ID")
    sys.exit(1)
print('New url: https://polycul.es/{}'.format(result[0][:7]))
|
<commit_before><commit_msg>Add tool to get new url<commit_after>import sys
import polycules
if len(sys.argv) != 2:
    print('Expected ID, got too little or too much')
    sys.exit(1)
old_id = sys.argv[1]
db = polycules.connect_db()
result = db.execute('select hash from polycules where id = ?', [
old_id,
]).fetchone()
if result is None:
    print("Couldn't find the polycule with that ID")
    sys.exit(1)
print('New url: https://polycul.es/{}'.format(result[0][:7]))
|
|
7383343f7fb77c74455a50490ad2886fcf36bbd5
|
dlstats/fetchers/test_ecb.py
|
dlstats/fetchers/test_ecb.py
|
import unittest
import mongomock
import dlstats
from dlstats.fetchers._skeleton import (Skeleton, Category, Series, BulkSeries,
Dataset, Provider)
import datetime
from bson import ObjectId
#class CategoriesTestCase(unittest.TestCase):
#if __name__ == '__main__':
# unittest.main()
|
Comment test for the moment
|
Comment test for the moment
|
Python
|
agpl-3.0
|
Widukind/dlstats,mmalter/dlstats,mmalter/dlstats,MichelJuillard/dlstats,MichelJuillard/dlstats,MichelJuillard/dlstats,mmalter/dlstats,Widukind/dlstats
|
Comment test for the moment
|
import unittest
import mongomock
import dlstats
from dlstats.fetchers._skeleton import (Skeleton, Category, Series, BulkSeries,
Dataset, Provider)
import datetime
from bson import ObjectId
#class CategoriesTestCase(unittest.TestCase):
#if __name__ == '__main__':
# unittest.main()
|
<commit_before><commit_msg>Comment test for the moment<commit_after>
|
import unittest
import mongomock
import dlstats
from dlstats.fetchers._skeleton import (Skeleton, Category, Series, BulkSeries,
Dataset, Provider)
import datetime
from bson import ObjectId
#class CategoriesTestCase(unittest.TestCase):
#if __name__ == '__main__':
# unittest.main()
|
Comment test for the momentimport unittest
import mongomock
import dlstats
from dlstats.fetchers._skeleton import (Skeleton, Category, Series, BulkSeries,
Dataset, Provider)
import datetime
from bson import ObjectId
#class CategoriesTestCase(unittest.TestCase):
#if __name__ == '__main__':
# unittest.main()
|
<commit_before><commit_msg>Comment test for the moment<commit_after>import unittest
import mongomock
import dlstats
from dlstats.fetchers._skeleton import (Skeleton, Category, Series, BulkSeries,
Dataset, Provider)
import datetime
from bson import ObjectId
#class CategoriesTestCase(unittest.TestCase):
#if __name__ == '__main__':
# unittest.main()
|
|
9167643047c61bae50a7c73775631c7bfe434cc9
|
spam/ansiInventory.py
|
spam/ansiInventory.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
AnsibleInventory:
INTRO:
USAGE:
"""
import os
import ansible.inventory
class AnsibleInventory(object):
'''
Ansible Inventory wrapper class.
'''
def __init__(self, inventory_filename):
'''
Initialize Inventory
'''
if not os.path.exists(inventory_filename):
print "Provide a valid inventory filename"
return
self.inventory = ansible.inventory.InventoryParser(inventory_filename)
def get_hosts(self, group=None):
'''
Get the hosts
'''
if not group:
return self.inventory.hosts.keys()
groupobj = self.inventory.groups.get(group, None)
if not groupobj:
return None
hostobjs = groupobj.get_hosts()
hostlist = []
for host in hostobjs:
hostlist.append(host.name)
return hostlist
|
Add a new wrapper class for managing ansible static inventory.
|
Add a new wrapper class for managing ansible static inventory.
|
Python
|
apache-2.0
|
bdastur/spam,bdastur/spam
|
Add a new wrapper class for managing ansible static inventory.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
AnsibleInventory:
INTRO:
USAGE:
"""
import os
import ansible.inventory
class AnsibleInventory(object):
'''
Ansible Inventory wrapper class.
'''
def __init__(self, inventory_filename):
'''
Initialize Inventory
'''
if not os.path.exists(inventory_filename):
print "Provide a valid inventory filename"
return
self.inventory = ansible.inventory.InventoryParser(inventory_filename)
def get_hosts(self, group=None):
'''
Get the hosts
'''
if not group:
return self.inventory.hosts.keys()
groupobj = self.inventory.groups.get(group, None)
if not groupobj:
return None
hostobjs = groupobj.get_hosts()
hostlist = []
for host in hostobjs:
hostlist.append(host.name)
return hostlist
|
<commit_before><commit_msg>Add a new wrapper class for managing ansible static inventory.<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
AnsibleInventory:
INTRO:
USAGE:
"""
import os
import ansible.inventory
class AnsibleInventory(object):
'''
Ansible Inventory wrapper class.
'''
def __init__(self, inventory_filename):
'''
Initialize Inventory
'''
if not os.path.exists(inventory_filename):
print "Provide a valid inventory filename"
return
self.inventory = ansible.inventory.InventoryParser(inventory_filename)
def get_hosts(self, group=None):
'''
Get the hosts
'''
if not group:
return self.inventory.hosts.keys()
groupobj = self.inventory.groups.get(group, None)
if not groupobj:
return None
hostobjs = groupobj.get_hosts()
hostlist = []
for host in hostobjs:
hostlist.append(host.name)
return hostlist
|
Add a new wrapper class for managing ansible static inventory.#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
AnsibleInventory:
INTRO:
USAGE:
"""
import os
import ansible.inventory
class AnsibleInventory(object):
'''
Ansible Inventory wrapper class.
'''
def __init__(self, inventory_filename):
'''
Initialize Inventory
'''
if not os.path.exists(inventory_filename):
print "Provide a valid inventory filename"
return
self.inventory = ansible.inventory.InventoryParser(inventory_filename)
def get_hosts(self, group=None):
'''
Get the hosts
'''
if not group:
return self.inventory.hosts.keys()
groupobj = self.inventory.groups.get(group, None)
if not groupobj:
return None
hostobjs = groupobj.get_hosts()
hostlist = []
for host in hostobjs:
hostlist.append(host.name)
return hostlist
|
<commit_before><commit_msg>Add a new wrapper class for managing ansible static inventory.<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
AnsibleInventory:
INTRO:
USAGE:
"""
import os
import ansible.inventory
class AnsibleInventory(object):
'''
Ansible Inventory wrapper class.
'''
def __init__(self, inventory_filename):
'''
Initialize Inventory
'''
if not os.path.exists(inventory_filename):
print "Provide a valid inventory filename"
return
self.inventory = ansible.inventory.InventoryParser(inventory_filename)
def get_hosts(self, group=None):
'''
Get the hosts
'''
if not group:
return self.inventory.hosts.keys()
groupobj = self.inventory.groups.get(group, None)
if not groupobj:
return None
hostobjs = groupobj.get_hosts()
hostlist = []
for host in hostobjs:
hostlist.append(host.name)
return hostlist
|
|
a67a4e15ce25e9e9a795534b4e629d6680fb491b
|
ludo/playermoverandom.py
|
ludo/playermoverandom.py
|
# Player
from playerbase import PlayerBase, Players
from random import randint
class PlayerMoveRandom(PlayerBase):
def get_desc(self):
""""Return description string"""""
return "Chooses a random pawn to move"
def _choose_move_impl(self, moves):
if not moves:
return None
return moves[randint(0, len(moves)-1)]
|
Implement player choosing a random pawn to move
|
Implement player choosing a random pawn to move
|
Python
|
mit
|
risteon/ludo_python
|
Implement player choosing a random pawn to move
|
# Player
from playerbase import PlayerBase, Players
from random import randint
class PlayerMoveRandom(PlayerBase):
def get_desc(self):
""""Return description string"""""
return "Chooses a random pawn to move"
def _choose_move_impl(self, moves):
if not moves:
return None
return moves[randint(0, len(moves)-1)]
|
<commit_before><commit_msg>Implement player choosing a random pawn to move<commit_after>
|
# Player
from playerbase import PlayerBase, Players
from random import randint
class PlayerMoveRandom(PlayerBase):
def get_desc(self):
""""Return description string"""""
return "Chooses a random pawn to move"
def _choose_move_impl(self, moves):
if not moves:
return None
return moves[randint(0, len(moves)-1)]
|
Implement player choosing a random pawn to move# Player
from playerbase import PlayerBase, Players
from random import randint
class PlayerMoveRandom(PlayerBase):
def get_desc(self):
""""Return description string"""""
return "Chooses a random pawn to move"
def _choose_move_impl(self, moves):
if not moves:
return None
return moves[randint(0, len(moves)-1)]
|
<commit_before><commit_msg>Implement player choosing a random pawn to move<commit_after># Player
from playerbase import PlayerBase, Players
from random import randint
class PlayerMoveRandom(PlayerBase):
def get_desc(self):
""""Return description string"""""
return "Chooses a random pawn to move"
def _choose_move_impl(self, moves):
if not moves:
return None
return moves[randint(0, len(moves)-1)]
|
|
c7e7430d76337ef5cfd6779d9a32c2c9d948eb86
|
carbon/guess-encoding.py
|
carbon/guess-encoding.py
|
"""
awk 'NR % 4 == 0' your.fastq | python %prog [options]
guess the encoding of a stream of qual lines.
"""
import sys
import optparse
RANGES = {
'Sanger': (33, 93),
'Solexa': (59, 104),
'Illumina-1.3': (64, 104),
'Illumina-1.5': (67, 104)
}
def get_qual_range(qual_str):
"""
>>> get_qual_range("DLXYXXRXWYYTPMLUUQWTXTRSXSWMDMTRNDNSMJFJFFRMV")
(68, 89)
"""
vals = [ord(c) for c in qual_str]
return min(vals), max(vals)
def get_encodings_in_range(rmin, rmax, ranges=RANGES):
valid_encodings = []
for encoding, (emin, emax) in ranges.items():
if rmin >= emin and rmax <= emax:
valid_encodings.append(encoding)
return valid_encodings
def main():
p = optparse.OptionParser(__doc__)
p.add_option("-n", dest="n", help="number of qual lines to test default:-1"
" means test until end of file or until it it possible to "
" determine a single file-type",
type='int', default=-1)
opts, args = p.parse_args()
print >>sys.stderr, "# reading qualities from stdin"
gmin, gmax = 99, 0
valid = []
for i, line in enumerate(sys.stdin):
lmin, lmax = get_qual_range(line.rstrip())
if lmin < gmin or lmax > gmax:
gmin, gmax = min(lmin, gmin), max(lmax, gmax)
valid = get_encodings_in_range(gmin, gmax)
if len(valid) == 0:
print >>sys.stderr, "no encodings for range: %s" % str((gmin, gmax))
sys.exit()
if len(valid) == 1 and opts.n == -1:
print "\t".join(valid) + "\t" + str((gmin, gmax))
sys.exit()
if opts.n > 0 and i > opts.n:
print "\t".join(valid) + "\t" + str((gmin, gmax))
sys.exit()
print "\t".join(valid) + "\t" + str((gmin, gmax))
if __name__ == "__main__":
import doctest
if doctest.testmod(optionflags=doctest.ELLIPSIS |\
doctest.NORMALIZE_WHITESPACE).failed == 0:
main()
|
Add guess phred encoding script
|
Add guess phred encoding script
|
Python
|
apache-2.0
|
jason-weirather/Au-public,jason-weirather/Au-public,jason-weirather/Au-public,jason-weirather/Au-public
|
Add guess phred encoding script
|
"""
awk 'NR % 4 == 0' your.fastq | python %prog [options]
guess the encoding of a stream of qual lines.
"""
import sys
import optparse
RANGES = {
'Sanger': (33, 93),
'Solexa': (59, 104),
'Illumina-1.3': (64, 104),
'Illumina-1.5': (67, 104)
}
def get_qual_range(qual_str):
"""
>>> get_qual_range("DLXYXXRXWYYTPMLUUQWTXTRSXSWMDMTRNDNSMJFJFFRMV")
(68, 89)
"""
vals = [ord(c) for c in qual_str]
return min(vals), max(vals)
def get_encodings_in_range(rmin, rmax, ranges=RANGES):
valid_encodings = []
for encoding, (emin, emax) in ranges.items():
if rmin >= emin and rmax <= emax:
valid_encodings.append(encoding)
return valid_encodings
def main():
p = optparse.OptionParser(__doc__)
p.add_option("-n", dest="n", help="number of qual lines to test default:-1"
" means test until end of file or until it it possible to "
" determine a single file-type",
type='int', default=-1)
opts, args = p.parse_args()
print >>sys.stderr, "# reading qualities from stdin"
gmin, gmax = 99, 0
valid = []
for i, line in enumerate(sys.stdin):
lmin, lmax = get_qual_range(line.rstrip())
if lmin < gmin or lmax > gmax:
gmin, gmax = min(lmin, gmin), max(lmax, gmax)
valid = get_encodings_in_range(gmin, gmax)
if len(valid) == 0:
print >>sys.stderr, "no encodings for range: %s" % str((gmin, gmax))
sys.exit()
if len(valid) == 1 and opts.n == -1:
print "\t".join(valid) + "\t" + str((gmin, gmax))
sys.exit()
if opts.n > 0 and i > opts.n:
print "\t".join(valid) + "\t" + str((gmin, gmax))
sys.exit()
print "\t".join(valid) + "\t" + str((gmin, gmax))
if __name__ == "__main__":
import doctest
if doctest.testmod(optionflags=doctest.ELLIPSIS |\
doctest.NORMALIZE_WHITESPACE).failed == 0:
main()
|
<commit_before><commit_msg>Add guess phred encoding script<commit_after>
|
"""
awk 'NR % 4 == 0' your.fastq | python %prog [options]
guess the encoding of a stream of qual lines.
"""
import sys
import optparse
RANGES = {
'Sanger': (33, 93),
'Solexa': (59, 104),
'Illumina-1.3': (64, 104),
'Illumina-1.5': (67, 104)
}
def get_qual_range(qual_str):
"""
>>> get_qual_range("DLXYXXRXWYYTPMLUUQWTXTRSXSWMDMTRNDNSMJFJFFRMV")
(68, 89)
"""
vals = [ord(c) for c in qual_str]
return min(vals), max(vals)
def get_encodings_in_range(rmin, rmax, ranges=RANGES):
valid_encodings = []
for encoding, (emin, emax) in ranges.items():
if rmin >= emin and rmax <= emax:
valid_encodings.append(encoding)
return valid_encodings
def main():
p = optparse.OptionParser(__doc__)
p.add_option("-n", dest="n", help="number of qual lines to test default:-1"
" means test until end of file or until it it possible to "
" determine a single file-type",
type='int', default=-1)
opts, args = p.parse_args()
print >>sys.stderr, "# reading qualities from stdin"
gmin, gmax = 99, 0
valid = []
for i, line in enumerate(sys.stdin):
lmin, lmax = get_qual_range(line.rstrip())
if lmin < gmin or lmax > gmax:
gmin, gmax = min(lmin, gmin), max(lmax, gmax)
valid = get_encodings_in_range(gmin, gmax)
if len(valid) == 0:
print >>sys.stderr, "no encodings for range: %s" % str((gmin, gmax))
sys.exit()
if len(valid) == 1 and opts.n == -1:
print "\t".join(valid) + "\t" + str((gmin, gmax))
sys.exit()
if opts.n > 0 and i > opts.n:
print "\t".join(valid) + "\t" + str((gmin, gmax))
sys.exit()
print "\t".join(valid) + "\t" + str((gmin, gmax))
if __name__ == "__main__":
import doctest
if doctest.testmod(optionflags=doctest.ELLIPSIS |\
doctest.NORMALIZE_WHITESPACE).failed == 0:
main()
|
Add guess phred encoding script"""
awk 'NR % 4 == 0' your.fastq | python %prog [options]
guess the encoding of a stream of qual lines.
"""
import sys
import optparse
RANGES = {
'Sanger': (33, 93),
'Solexa': (59, 104),
'Illumina-1.3': (64, 104),
'Illumina-1.5': (67, 104)
}
def get_qual_range(qual_str):
"""
>>> get_qual_range("DLXYXXRXWYYTPMLUUQWTXTRSXSWMDMTRNDNSMJFJFFRMV")
(68, 89)
"""
vals = [ord(c) for c in qual_str]
return min(vals), max(vals)
def get_encodings_in_range(rmin, rmax, ranges=RANGES):
valid_encodings = []
for encoding, (emin, emax) in ranges.items():
if rmin >= emin and rmax <= emax:
valid_encodings.append(encoding)
return valid_encodings
def main():
p = optparse.OptionParser(__doc__)
p.add_option("-n", dest="n", help="number of qual lines to test default:-1"
" means test until end of file or until it it possible to "
" determine a single file-type",
type='int', default=-1)
opts, args = p.parse_args()
print >>sys.stderr, "# reading qualities from stdin"
gmin, gmax = 99, 0
valid = []
for i, line in enumerate(sys.stdin):
lmin, lmax = get_qual_range(line.rstrip())
if lmin < gmin or lmax > gmax:
gmin, gmax = min(lmin, gmin), max(lmax, gmax)
valid = get_encodings_in_range(gmin, gmax)
if len(valid) == 0:
print >>sys.stderr, "no encodings for range: %s" % str((gmin, gmax))
sys.exit()
if len(valid) == 1 and opts.n == -1:
print "\t".join(valid) + "\t" + str((gmin, gmax))
sys.exit()
if opts.n > 0 and i > opts.n:
print "\t".join(valid) + "\t" + str((gmin, gmax))
sys.exit()
print "\t".join(valid) + "\t" + str((gmin, gmax))
if __name__ == "__main__":
import doctest
if doctest.testmod(optionflags=doctest.ELLIPSIS |\
doctest.NORMALIZE_WHITESPACE).failed == 0:
main()
|
<commit_before><commit_msg>Add guess phred encoding script<commit_after>"""
awk 'NR % 4 == 0' your.fastq | python %prog [options]
guess the encoding of a stream of qual lines.
"""
import sys
import optparse
RANGES = {
'Sanger': (33, 93),
'Solexa': (59, 104),
'Illumina-1.3': (64, 104),
'Illumina-1.5': (67, 104)
}
def get_qual_range(qual_str):
"""
>>> get_qual_range("DLXYXXRXWYYTPMLUUQWTXTRSXSWMDMTRNDNSMJFJFFRMV")
(68, 89)
"""
vals = [ord(c) for c in qual_str]
return min(vals), max(vals)
def get_encodings_in_range(rmin, rmax, ranges=RANGES):
valid_encodings = []
for encoding, (emin, emax) in ranges.items():
if rmin >= emin and rmax <= emax:
valid_encodings.append(encoding)
return valid_encodings
def main():
p = optparse.OptionParser(__doc__)
p.add_option("-n", dest="n", help="number of qual lines to test default:-1"
" means test until end of file or until it it possible to "
" determine a single file-type",
type='int', default=-1)
opts, args = p.parse_args()
print >>sys.stderr, "# reading qualities from stdin"
gmin, gmax = 99, 0
valid = []
for i, line in enumerate(sys.stdin):
lmin, lmax = get_qual_range(line.rstrip())
if lmin < gmin or lmax > gmax:
gmin, gmax = min(lmin, gmin), max(lmax, gmax)
valid = get_encodings_in_range(gmin, gmax)
if len(valid) == 0:
print >>sys.stderr, "no encodings for range: %s" % str((gmin, gmax))
sys.exit()
if len(valid) == 1 and opts.n == -1:
print "\t".join(valid) + "\t" + str((gmin, gmax))
sys.exit()
if opts.n > 0 and i > opts.n:
print "\t".join(valid) + "\t" + str((gmin, gmax))
sys.exit()
print "\t".join(valid) + "\t" + str((gmin, gmax))
if __name__ == "__main__":
import doctest
if doctest.testmod(optionflags=doctest.ELLIPSIS |\
doctest.NORMALIZE_WHITESPACE).failed == 0:
main()
|
|
61e0c6e325a91564250a937c0b1769992f65a7f5
|
tests/unit/modules/test_swarm.py
|
tests/unit/modules/test_swarm.py
|
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt Libs
import salt.modules.swarm
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import patch
from tests.support.unit import TestCase
class SwarmTestCase(TestCase, LoaderModuleMockMixin):
"""
Test cases for salt.modules.swarm
"""
def setup_loader_modules(self):
return {salt.modules.swarm: {}}
def test___virtual___valid_docker_module(self):
"""
        Test that __virtual__ requires a properly loaded docker library
"""
class ValidDockerModule(object):
class APIClient:
pass
def from_env(self):
pass
with patch(
"salt.modules.swarm.HAS_DOCKER",
salt.modules.swarm._is_docker_module(ValidDockerModule()),
):
self.assertEqual(
salt.modules.swarm.__virtual__(), salt.modules.swarm.__virtualname__
)
def test___virtual___not_valid_docker_module(self):
class NotValidDockerModule(object):
pass
with patch(
"salt.modules.swarm.HAS_DOCKER",
salt.modules.swarm._is_docker_module(NotValidDockerModule()),
):
ret = salt.modules.swarm.__virtual__()
self.assertEqual(len(ret), 2)
self.assertFalse(ret[0])
|
Add initial unit tests for swarm module
|
Add initial unit tests for swarm module
|
Python
|
apache-2.0
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
Add initial unit tests for swarm module
|
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt Libs
import salt.modules.swarm
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import patch
from tests.support.unit import TestCase
class SwarmTestCase(TestCase, LoaderModuleMockMixin):
"""
Test cases for salt.modules.swarm
"""
def setup_loader_modules(self):
return {salt.modules.swarm: {}}
def test___virtual___valid_docker_module(self):
"""
        Test that __virtual__ requires a properly loaded docker library
"""
class ValidDockerModule(object):
class APIClient:
pass
def from_env(self):
pass
with patch(
"salt.modules.swarm.HAS_DOCKER",
salt.modules.swarm._is_docker_module(ValidDockerModule()),
):
self.assertEqual(
salt.modules.swarm.__virtual__(), salt.modules.swarm.__virtualname__
)
def test___virtual___not_valid_docker_module(self):
class NotValidDockerModule(object):
pass
with patch(
"salt.modules.swarm.HAS_DOCKER",
salt.modules.swarm._is_docker_module(NotValidDockerModule()),
):
ret = salt.modules.swarm.__virtual__()
self.assertEqual(len(ret), 2)
self.assertFalse(ret[0])
|
<commit_before><commit_msg>Add initial unit tests for swarm module<commit_after>
|
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt Libs
import salt.modules.swarm
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import patch
from tests.support.unit import TestCase
class SwarmTestCase(TestCase, LoaderModuleMockMixin):
"""
Test cases for salt.modules.swarm
"""
def setup_loader_modules(self):
return {salt.modules.swarm: {}}
def test___virtual___valid_docker_module(self):
"""
        Test that __virtual__ requires a properly loaded docker library
"""
class ValidDockerModule(object):
class APIClient:
pass
def from_env(self):
pass
with patch(
"salt.modules.swarm.HAS_DOCKER",
salt.modules.swarm._is_docker_module(ValidDockerModule()),
):
self.assertEqual(
salt.modules.swarm.__virtual__(), salt.modules.swarm.__virtualname__
)
def test___virtual___not_valid_docker_module(self):
class NotValidDockerModule(object):
pass
with patch(
"salt.modules.swarm.HAS_DOCKER",
salt.modules.swarm._is_docker_module(NotValidDockerModule()),
):
ret = salt.modules.swarm.__virtual__()
self.assertEqual(len(ret), 2)
self.assertFalse(ret[0])
|
Add initial unit tests for swarm module# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt Libs
import salt.modules.swarm
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import patch
from tests.support.unit import TestCase
class SwarmTestCase(TestCase, LoaderModuleMockMixin):
"""
Test cases for salt.modules.swarm
"""
def setup_loader_modules(self):
return {salt.modules.swarm: {}}
def test___virtual___valid_docker_module(self):
"""
        Test that __virtual__ requires a properly loaded docker library
"""
class ValidDockerModule(object):
class APIClient:
pass
def from_env(self):
pass
with patch(
"salt.modules.swarm.HAS_DOCKER",
salt.modules.swarm._is_docker_module(ValidDockerModule()),
):
self.assertEqual(
salt.modules.swarm.__virtual__(), salt.modules.swarm.__virtualname__
)
def test___virtual___not_valid_docker_module(self):
class NotValidDockerModule(object):
pass
with patch(
"salt.modules.swarm.HAS_DOCKER",
salt.modules.swarm._is_docker_module(NotValidDockerModule()),
):
ret = salt.modules.swarm.__virtual__()
self.assertEqual(len(ret), 2)
self.assertFalse(ret[0])
|
<commit_before><commit_msg>Add initial unit tests for swarm module<commit_after># -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt Libs
import salt.modules.swarm
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import patch
from tests.support.unit import TestCase
class SwarmTestCase(TestCase, LoaderModuleMockMixin):
"""
Test cases for salt.modules.swarm
"""
def setup_loader_modules(self):
return {salt.modules.swarm: {}}
def test___virtual___valid_docker_module(self):
"""
        Test that __virtual__ requires a properly loaded docker library
"""
class ValidDockerModule(object):
class APIClient:
pass
def from_env(self):
pass
with patch(
"salt.modules.swarm.HAS_DOCKER",
salt.modules.swarm._is_docker_module(ValidDockerModule()),
):
self.assertEqual(
salt.modules.swarm.__virtual__(), salt.modules.swarm.__virtualname__
)
def test___virtual___not_valid_docker_module(self):
class NotValidDockerModule(object):
pass
with patch(
"salt.modules.swarm.HAS_DOCKER",
salt.modules.swarm._is_docker_module(NotValidDockerModule()),
):
ret = salt.modules.swarm.__virtual__()
self.assertEqual(len(ret), 2)
self.assertFalse(ret[0])
|
|
a4d5e88973a25464be26488d17ecc663cce776d7
|
altair/examples/world_map.py
|
altair/examples/world_map.py
|
"""
World Map
---------
This example shows how to create a world map using data generators for
different background layers.
"""
# category: maps
import altair as alt
from vega_datasets import data
# Data generators for the background
sphere = alt.sphere()
graticule = alt.graticule()
# Source of land data
source = alt.topo_feature(data.world_110m.url, 'countries')
# Layering and configuring the components
alt.layer(
alt.Chart(sphere).mark_geoshape(fill='lightblue'),
alt.Chart(graticule).mark_geoshape(stroke='white', strokeWidth=0.5),
alt.Chart(source).mark_geoshape(fill='ForestGreen', stroke='black')
).project(
'naturalEarth1'
).properties(width=600, height=400).configure_view(stroke=None)
|
Add map example with data generators
|
DOC: Add map example with data generators
|
Python
|
bsd-3-clause
|
jakevdp/altair,altair-viz/altair
|
DOC: Add map example with data generators
|
"""
World Map
---------
This example shows how to create a world map using data generators for
different background layers.
"""
# category: maps
import altair as alt
from vega_datasets import data
# Data generators for the background
sphere = alt.sphere()
graticule = alt.graticule()
# Source of land data
source = alt.topo_feature(data.world_110m.url, 'countries')
# Layering and configuring the components
alt.layer(
alt.Chart(sphere).mark_geoshape(fill='lightblue'),
alt.Chart(graticule).mark_geoshape(stroke='white', strokeWidth=0.5),
alt.Chart(source).mark_geoshape(fill='ForestGreen', stroke='black')
).project(
'naturalEarth1'
).properties(width=600, height=400).configure_view(stroke=None)
|
<commit_before><commit_msg>DOC: Add map example with data generators<commit_after>
|
"""
World Map
---------
This example shows how to create a world map using data generators for
different background layers.
"""
# category: maps
import altair as alt
from vega_datasets import data
# Data generators for the background
sphere = alt.sphere()
graticule = alt.graticule()
# Source of land data
source = alt.topo_feature(data.world_110m.url, 'countries')
# Layering and configuring the components
alt.layer(
alt.Chart(sphere).mark_geoshape(fill='lightblue'),
alt.Chart(graticule).mark_geoshape(stroke='white', strokeWidth=0.5),
alt.Chart(source).mark_geoshape(fill='ForestGreen', stroke='black')
).project(
'naturalEarth1'
).properties(width=600, height=400).configure_view(stroke=None)
|
DOC: Add map example with data generators"""
World Map
---------
This example shows how to create a world map using data generators for
different background layers.
"""
# category: maps
import altair as alt
from vega_datasets import data
# Data generators for the background
sphere = alt.sphere()
graticule = alt.graticule()
# Source of land data
source = alt.topo_feature(data.world_110m.url, 'countries')
# Layering and configuring the components
alt.layer(
alt.Chart(sphere).mark_geoshape(fill='lightblue'),
alt.Chart(graticule).mark_geoshape(stroke='white', strokeWidth=0.5),
alt.Chart(source).mark_geoshape(fill='ForestGreen', stroke='black')
).project(
'naturalEarth1'
).properties(width=600, height=400).configure_view(stroke=None)
|
<commit_before><commit_msg>DOC: Add map example with data generators<commit_after>"""
World Map
---------
This example shows how to create a world map using data generators for
different background layers.
"""
# category: maps
import altair as alt
from vega_datasets import data
# Data generators for the background
sphere = alt.sphere()
graticule = alt.graticule()
# Source of land data
source = alt.topo_feature(data.world_110m.url, 'countries')
# Layering and configuring the components
alt.layer(
alt.Chart(sphere).mark_geoshape(fill='lightblue'),
alt.Chart(graticule).mark_geoshape(stroke='white', strokeWidth=0.5),
alt.Chart(source).mark_geoshape(fill='ForestGreen', stroke='black')
).project(
'naturalEarth1'
).properties(width=600, height=400).configure_view(stroke=None)
|
|
2d6ecb3b5b67539c6ad0f211d7b059ac44df2731
|
python/bending_examples.py
|
python/bending_examples.py
|
# Make a gallery of images showing the RGZ consensus double sources, sorted by bending angle.
from astropy.io import ascii
path = '/Users/willettk/Astronomy/Research/GalaxyZoo'
data = ascii.read('{:}/rgz-analysis/csv/static_catalog3.csv'.format(path),delimiter=' ')
import bending_angles as ba
import numpy as np
pathdict = ba.make_pathdict()
def bending_examples():
for a in np.linspace(0,80,9):
bdata = data[(data['bending_angle'] >= a) & (data['bending_angle'] < a+10.)]
count,errcount = 0,0
if len(bdata) > 0:
for b in bdata:
zid = b['zooniverse_id']
try:
if b['angle_type'] == 'multipeaked_singles':
angle_type = 'mps'
else:
angle_type = 'radio'
ba.plot_one_double(zid,pathdict,save_fig=True,anglepath='{0:.0f}_{1:.0f}/'.format(a,a+10),dbltype=angle_type)
count += 1
except ValueError as inst:
print "ValueError,",inst.args,zid
errcount += 1
print '{:d} galaxies with bending angle, {:d} with errors for angles between {:.0f} and {:.0f}'.format(count,errcount,a,a+10)
|
Make gallery of examples for various bending angles up to 90 degrees
|
Make gallery of examples for various bending angles up to 90 degrees
|
Python
|
mit
|
willettk/rgz-analysis,willettk/rgz-analysis,afgaron/rgz-analysis,willettk/rgz-analysis,afgaron/rgz-analysis,afgaron/rgz-analysis
|
Make gallery of examples for various bending angles up to 90 degrees
|
# Make a gallery of images showing the RGZ consensus double sources, sorted by bending angle.
from astropy.io import ascii
path = '/Users/willettk/Astronomy/Research/GalaxyZoo'
data = ascii.read('{:}/rgz-analysis/csv/static_catalog3.csv'.format(path),delimiter=' ')
import bending_angles as ba
import numpy as np
pathdict = ba.make_pathdict()
def bending_examples():
for a in np.linspace(0,80,9):
bdata = data[(data['bending_angle'] >= a) & (data['bending_angle'] < a+10.)]
count,errcount = 0,0
if len(bdata) > 0:
for b in bdata:
zid = b['zooniverse_id']
try:
if b['angle_type'] == 'multipeaked_singles':
angle_type = 'mps'
else:
angle_type = 'radio'
ba.plot_one_double(zid,pathdict,save_fig=True,anglepath='{0:.0f}_{1:.0f}/'.format(a,a+10),dbltype=angle_type)
count += 1
except ValueError as inst:
print "ValueError,",inst.args,zid
errcount += 1
print '{:d} galaxies with bending angle, {:d} with errors for angles between {:.0f} and {:.0f}'.format(count,errcount,a,a+10)
|
<commit_before><commit_msg>Make gallery of examples for various bending angles up to 90 degrees<commit_after>
|
# Make a gallery of images showing the RGZ consensus double sources, sorted by bending angle.
from astropy.io import ascii
path = '/Users/willettk/Astronomy/Research/GalaxyZoo'
data = ascii.read('{:}/rgz-analysis/csv/static_catalog3.csv'.format(path),delimiter=' ')
import bending_angles as ba
import numpy as np
pathdict = ba.make_pathdict()
def bending_examples():
for a in np.linspace(0,80,9):
bdata = data[(data['bending_angle'] >= a) & (data['bending_angle'] < a+10.)]
count,errcount = 0,0
if len(bdata) > 0:
for b in bdata:
zid = b['zooniverse_id']
try:
if b['angle_type'] == 'multipeaked_singles':
angle_type = 'mps'
else:
angle_type = 'radio'
ba.plot_one_double(zid,pathdict,save_fig=True,anglepath='{0:.0f}_{1:.0f}/'.format(a,a+10),dbltype=angle_type)
count += 1
except ValueError as inst:
print "ValueError,",inst.args,zid
errcount += 1
print '{:d} galaxies with bending angle, {:d} with errors for angles between {:.0f} and {:.0f}'.format(count,errcount,a,a+10)
|
Make gallery of examples for various bending angles up to 90 degrees# Make a gallery of images showing the RGZ consensus double sources, sorted by bending angle.
from astropy.io import ascii
path = '/Users/willettk/Astronomy/Research/GalaxyZoo'
data = ascii.read('{:}/rgz-analysis/csv/static_catalog3.csv'.format(path),delimiter=' ')
import bending_angles as ba
import numpy as np
pathdict = ba.make_pathdict()
def bending_examples():
for a in np.linspace(0,80,9):
bdata = data[(data['bending_angle'] >= a) & (data['bending_angle'] < a+10.)]
count,errcount = 0,0
if len(bdata) > 0:
for b in bdata:
zid = b['zooniverse_id']
try:
if b['angle_type'] == 'multipeaked_singles':
angle_type = 'mps'
else:
angle_type = 'radio'
ba.plot_one_double(zid,pathdict,save_fig=True,anglepath='{0:.0f}_{1:.0f}/'.format(a,a+10),dbltype=angle_type)
count += 1
except ValueError as inst:
print "ValueError,",inst.args,zid
errcount += 1
print '{:d} galaxies with bending angle, {:d} with errors for angles between {:.0f} and {:.0f}'.format(count,errcount,a,a+10)
|
<commit_before><commit_msg>Make gallery of examples for various bending angles up to 90 degrees<commit_after># Make a gallery of images showing the RGZ consensus double sources, sorted by bending angle.
from astropy.io import ascii
path = '/Users/willettk/Astronomy/Research/GalaxyZoo'
data = ascii.read('{:}/rgz-analysis/csv/static_catalog3.csv'.format(path),delimiter=' ')
import bending_angles as ba
import numpy as np
pathdict = ba.make_pathdict()
def bending_examples():
for a in np.linspace(0,80,9):
bdata = data[(data['bending_angle'] >= a) & (data['bending_angle'] < a+10.)]
count,errcount = 0,0
if len(bdata) > 0:
for b in bdata:
zid = b['zooniverse_id']
try:
if b['angle_type'] == 'multipeaked_singles':
angle_type = 'mps'
else:
angle_type = 'radio'
ba.plot_one_double(zid,pathdict,save_fig=True,anglepath='{0:.0f}_{1:.0f}/'.format(a,a+10),dbltype=angle_type)
count += 1
except ValueError as inst:
print "ValueError,",inst.args,zid
errcount += 1
print '{:d} galaxies with bending angle, {:d} with errors for angles between {:.0f} and {:.0f}'.format(count,errcount,a,a+10)
|
|
56f9ea1ba0026bc21eeb904afaf25606a6186125
|
test/test_regles.py
|
test/test_regles.py
|
import unittest
import mock
import settings
from soa.tiquets import GestioTiquets
from soa.identitat import GestioIdentitat
from filtres.nou import FiltreNou
from mailticket import MailTicket
from testhelper import llegir_mail
class TestRegles(unittest.TestCase):
def setUp(self):
self.tickets = mock.create_autospec(GestioTiquets)
self.identitat = mock.create_autospec(GestioIdentitat)
settings.init()
def test_regla_amb_cc_comprova_primer_valor (self):
settings.set("valors_defecte",
[
{"order":["Cc"],
"match":"mail.qualsevol2@mail.com",
"defaults":{"equipResolutor":"666"}
}
]
)
msg = llegir_mail("cc.txt")
f = FiltreNou(msg, self.tickets, self.identitat)
defaults= f.obtenir_parametres_addicionals()
self.assertEqual(defaults["equipResolutor"], "666")
def test_regla_amb_cc_comprova_segon_valor (self):
settings.set("valors_defecte",
[
{"order":["Cc"],
"match":"mail.concret2@mail.com",
"defaults":{"equipResolutor":"666"}
}
]
)
msg = llegir_mail("cc.txt")
f = FiltreNou(msg, self.tickets, self.identitat)
defaults= f.obtenir_parametres_addicionals()
self.assertEqual(defaults["equipResolutor"], "666")
if __name__ == '__main__':
unittest.main()
|
Test to verify that we do not allow multi-valued headers
|
Test to verify that we do not allow multi-valued headers
|
Python
|
agpl-3.0
|
UPC/mailtoticket,UPC/mailtoticket
|
Test to verify that we do not allow multi-valued headers
|
import unittest
import mock
import settings
from soa.tiquets import GestioTiquets
from soa.identitat import GestioIdentitat
from filtres.nou import FiltreNou
from mailticket import MailTicket
from testhelper import llegir_mail
class TestRegles(unittest.TestCase):
def setUp(self):
self.tickets = mock.create_autospec(GestioTiquets)
self.identitat = mock.create_autospec(GestioIdentitat)
settings.init()
def test_regla_amb_cc_comprova_primer_valor (self):
settings.set("valors_defecte",
[
{"order":["Cc"],
"match":"mail.qualsevol2@mail.com",
"defaults":{"equipResolutor":"666"}
}
]
)
msg = llegir_mail("cc.txt")
f = FiltreNou(msg, self.tickets, self.identitat)
defaults= f.obtenir_parametres_addicionals()
self.assertEqual(defaults["equipResolutor"], "666")
def test_regla_amb_cc_comprova_segon_valor (self):
settings.set("valors_defecte",
[
{"order":["Cc"],
"match":"mail.concret2@mail.com",
"defaults":{"equipResolutor":"666"}
}
]
)
msg = llegir_mail("cc.txt")
f = FiltreNou(msg, self.tickets, self.identitat)
defaults= f.obtenir_parametres_addicionals()
self.assertEqual(defaults["equipResolutor"], "666")
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Test to verify that we do not allow multi-valued headers<commit_after>
|
import unittest
import mock
import settings
from soa.tiquets import GestioTiquets
from soa.identitat import GestioIdentitat
from filtres.nou import FiltreNou
from mailticket import MailTicket
from testhelper import llegir_mail
class TestRegles(unittest.TestCase):
def setUp(self):
self.tickets = mock.create_autospec(GestioTiquets)
self.identitat = mock.create_autospec(GestioIdentitat)
settings.init()
def test_regla_amb_cc_comprova_primer_valor (self):
settings.set("valors_defecte",
[
{"order":["Cc"],
"match":"mail.qualsevol2@mail.com",
"defaults":{"equipResolutor":"666"}
}
]
)
msg = llegir_mail("cc.txt")
f = FiltreNou(msg, self.tickets, self.identitat)
defaults= f.obtenir_parametres_addicionals()
self.assertEqual(defaults["equipResolutor"], "666")
def test_regla_amb_cc_comprova_segon_valor (self):
settings.set("valors_defecte",
[
{"order":["Cc"],
"match":"mail.concret2@mail.com",
"defaults":{"equipResolutor":"666"}
}
]
)
msg = llegir_mail("cc.txt")
f = FiltreNou(msg, self.tickets, self.identitat)
defaults= f.obtenir_parametres_addicionals()
self.assertEqual(defaults["equipResolutor"], "666")
if __name__ == '__main__':
unittest.main()
|
Test to verify that we do not allow multi-valued headersimport unittest
import mock
import settings
from soa.tiquets import GestioTiquets
from soa.identitat import GestioIdentitat
from filtres.nou import FiltreNou
from mailticket import MailTicket
from testhelper import llegir_mail
class TestRegles(unittest.TestCase):
def setUp(self):
self.tickets = mock.create_autospec(GestioTiquets)
self.identitat = mock.create_autospec(GestioIdentitat)
settings.init()
def test_regla_amb_cc_comprova_primer_valor (self):
settings.set("valors_defecte",
[
{"order":["Cc"],
"match":"mail.qualsevol2@mail.com",
"defaults":{"equipResolutor":"666"}
}
]
)
msg = llegir_mail("cc.txt")
f = FiltreNou(msg, self.tickets, self.identitat)
defaults= f.obtenir_parametres_addicionals()
self.assertEqual(defaults["equipResolutor"], "666")
def test_regla_amb_cc_comprova_segon_valor (self):
settings.set("valors_defecte",
[
{"order":["Cc"],
"match":"mail.concret2@mail.com",
"defaults":{"equipResolutor":"666"}
}
]
)
msg = llegir_mail("cc.txt")
f = FiltreNou(msg, self.tickets, self.identitat)
defaults= f.obtenir_parametres_addicionals()
self.assertEqual(defaults["equipResolutor"], "666")
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Test to verify that we do not allow multi-valued headers<commit_after>import unittest
import mock
import settings
from soa.tiquets import GestioTiquets
from soa.identitat import GestioIdentitat
from filtres.nou import FiltreNou
from mailticket import MailTicket
from testhelper import llegir_mail
class TestRegles(unittest.TestCase):
def setUp(self):
self.tickets = mock.create_autospec(GestioTiquets)
self.identitat = mock.create_autospec(GestioIdentitat)
settings.init()
def test_regla_amb_cc_comprova_primer_valor (self):
settings.set("valors_defecte",
[
{"order":["Cc"],
"match":"mail.qualsevol2@mail.com",
"defaults":{"equipResolutor":"666"}
}
]
)
msg = llegir_mail("cc.txt")
f = FiltreNou(msg, self.tickets, self.identitat)
defaults= f.obtenir_parametres_addicionals()
self.assertEqual(defaults["equipResolutor"], "666")
def test_regla_amb_cc_comprova_segon_valor (self):
settings.set("valors_defecte",
[
{"order":["Cc"],
"match":"mail.concret2@mail.com",
"defaults":{"equipResolutor":"666"}
}
]
)
msg = llegir_mail("cc.txt")
f = FiltreNou(msg, self.tickets, self.identitat)
defaults= f.obtenir_parametres_addicionals()
self.assertEqual(defaults["equipResolutor"], "666")
if __name__ == '__main__':
unittest.main()
|
|
ae7f22b5fc606a8415e286ffabd43d3fbb71977c
|
tests/test_euler.py
|
tests/test_euler.py
|
import unittest
from QGL import *
from QGL.Euler import *
from QGL.Cliffords import C1
import QGL.config
try:
from helpers import setup_test_lib
except:
from .helpers import setup_test_lib
class EulerDecompositions(unittest.TestCase):
N_test = 1000
def setUp(self):
pass
#setup_test_lib()
#self.q1 = QubitFactory('q1')
def test_zyz_decomp(self):
for j in range(self.N_test):
Uh = haar_unitary(2)
Ux = zyz_unitary(*zyz_angles(Uh))
assert is_close(Uh, Ux)
def test_xyx_decomp(self):
for j in range(self.N_test):
Uh = haar_unitary(2)
Ux = xyx_unitary(*xyx_angles(Uh))
assert is_close(Uh, Ux)
def test_xyx_cliffords(self):
for j in range(24):
Uxyx = xyx_unitary(*xyx_angles(C1[j]))
assert is_close(Uxyx, C1[j]), f"{j}"
if __name__ == "__main__":
unittest.main()
|
Add Euler angle conversion tests.
|
Add Euler angle conversion tests.
|
Python
|
apache-2.0
|
BBN-Q/QGL,BBN-Q/QGL
|
Add Euler angle conversion tests.
|
import unittest
from QGL import *
from QGL.Euler import *
from QGL.Cliffords import C1
import QGL.config
try:
from helpers import setup_test_lib
except:
from .helpers import setup_test_lib
class EulerDecompositions(unittest.TestCase):
N_test = 1000
def setUp(self):
pass
#setup_test_lib()
#self.q1 = QubitFactory('q1')
def test_zyz_decomp(self):
for j in range(self.N_test):
Uh = haar_unitary(2)
Ux = zyz_unitary(*zyz_angles(Uh))
assert is_close(Uh, Ux)
def test_xyx_decomp(self):
for j in range(self.N_test):
Uh = haar_unitary(2)
Ux = xyx_unitary(*xyx_angles(Uh))
assert is_close(Uh, Ux)
def test_xyx_cliffords(self):
for j in range(24):
Uxyx = xyx_unitary(*xyx_angles(C1[j]))
assert is_close(Uxyx, C1[j]), f"{j}"
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add Euler angle conversion tests.<commit_after>
|
import unittest
from QGL import *
from QGL.Euler import *
from QGL.Cliffords import C1
import QGL.config
try:
from helpers import setup_test_lib
except:
from .helpers import setup_test_lib
class EulerDecompositions(unittest.TestCase):
N_test = 1000
def setUp(self):
pass
#setup_test_lib()
#self.q1 = QubitFactory('q1')
def test_zyz_decomp(self):
for j in range(self.N_test):
Uh = haar_unitary(2)
Ux = zyz_unitary(*zyz_angles(Uh))
assert is_close(Uh, Ux)
def test_xyx_decomp(self):
for j in range(self.N_test):
Uh = haar_unitary(2)
Ux = xyx_unitary(*xyx_angles(Uh))
assert is_close(Uh, Ux)
def test_xyx_cliffords(self):
for j in range(24):
Uxyx = xyx_unitary(*xyx_angles(C1[j]))
assert is_close(Uxyx, C1[j]), f"{j}"
if __name__ == "__main__":
unittest.main()
|
Add Euler angle conversion tests.import unittest
from QGL import *
from QGL.Euler import *
from QGL.Cliffords import C1
import QGL.config
try:
from helpers import setup_test_lib
except:
from .helpers import setup_test_lib
class EulerDecompositions(unittest.TestCase):
N_test = 1000
def setUp(self):
pass
#setup_test_lib()
#self.q1 = QubitFactory('q1')
def test_zyz_decomp(self):
for j in range(self.N_test):
Uh = haar_unitary(2)
Ux = zyz_unitary(*zyz_angles(Uh))
assert is_close(Uh, Ux)
def test_xyx_decomp(self):
for j in range(self.N_test):
Uh = haar_unitary(2)
Ux = xyx_unitary(*xyx_angles(Uh))
assert is_close(Uh, Ux)
def test_xyx_cliffords(self):
for j in range(24):
Uxyx = xyx_unitary(*xyx_angles(C1[j]))
assert is_close(Uxyx, C1[j]), f"{j}"
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add Euler angle conversion tests.<commit_after>import unittest
from QGL import *
from QGL.Euler import *
from QGL.Cliffords import C1
import QGL.config
try:
from helpers import setup_test_lib
except:
from .helpers import setup_test_lib
class EulerDecompositions(unittest.TestCase):
N_test = 1000
def setUp(self):
pass
#setup_test_lib()
#self.q1 = QubitFactory('q1')
def test_zyz_decomp(self):
for j in range(self.N_test):
Uh = haar_unitary(2)
Ux = zyz_unitary(*zyz_angles(Uh))
assert is_close(Uh, Ux)
def test_xyx_decomp(self):
for j in range(self.N_test):
Uh = haar_unitary(2)
Ux = xyx_unitary(*xyx_angles(Uh))
assert is_close(Uh, Ux)
def test_xyx_cliffords(self):
for j in range(24):
Uxyx = xyx_unitary(*xyx_angles(C1[j]))
assert is_close(Uxyx, C1[j]), f"{j}"
if __name__ == "__main__":
unittest.main()
|
|
7b6b1426015a83b96395f0c7c112dc53d373647f
|
fairness_indicators/remediation/__init__.py
|
fairness_indicators/remediation/__init__.py
|
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
Add init file for remediation module.
|
Add init file for remediation module.
PiperOrigin-RevId: 285058505
|
Python
|
apache-2.0
|
tensorflow/fairness-indicators,tensorflow/fairness-indicators,tensorflow/fairness-indicators
|
Add init file for remediation module.
PiperOrigin-RevId: 285058505
|
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
<commit_before><commit_msg>Add init file for remediation module.
PiperOrigin-RevId: 285058505<commit_after>
|
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
Add init file for remediation module.
PiperOrigin-RevId: 285058505# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
<commit_before><commit_msg>Add init file for remediation module.
PiperOrigin-RevId: 285058505<commit_after># Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
|
205362c2f068ca22fe40cb6399b071849727ee55
|
tests/test_parse.py
|
tests/test_parse.py
|
import pytest
from rinoh.dimension import DimensionBase, PT, PICA, INCH, MM, CM, PERCENT
from rinoh.style import OptionSet, Bool, Integer
def test_optionset_from_string():
ONE = 'one'
TWO = 'two'
THREE = 'three'
class TestSet1(OptionSet):
values = ONE, TWO, THREE
assert TestSet1.from_string('one') == ONE
assert TestSet1.from_string('TWO') == TWO
assert TestSet1.from_string('tHRee') == THREE
with pytest.raises(ValueError):
TestSet1.from_string('four')
with pytest.raises(ValueError):
TestSet1.from_string('none')
class TestSet2(OptionSet):
values = None, TWO
assert TestSet2.from_string('none') == None
assert TestSet2.from_string('nONe') == None
assert TestSet2.from_string('two') == TWO
with pytest.raises(ValueError):
TestSet2.from_string('one')
with pytest.raises(ValueError):
TestSet2.from_string('False')
def test_bool_from_string():
assert Bool.from_string('true') == True
assert Bool.from_string('false') == False
assert Bool.from_string('TRUE') == True
assert Bool.from_string('FALSE') == False
assert Bool.from_string('True') == True
assert Bool.from_string('FaLSE') == False
with pytest.raises(ValueError):
Bool.from_string('1')
with pytest.raises(ValueError):
Bool.from_string('0')
with pytest.raises(ValueError):
Bool.from_string('T')
with pytest.raises(ValueError):
Bool.from_string('f')
def test_integer_from_string():
assert Integer.from_string('1') == 1
assert Integer.from_string('001') == 1
assert Integer.from_string('873654354') == 873654354
assert Integer.from_string('-9') == -9
with pytest.raises(ValueError):
assert Integer.from_string('1e5')
with pytest.raises(ValueError):
assert Integer.from_string('0.5')
def test_dimensionbase_from_string():
assert DimensionBase.from_string('0') == 0
assert DimensionBase.from_string('1pt') == 1*PT
assert DimensionBase.from_string('10 pt') == 10*PT
assert DimensionBase.from_string('25pc') == 25*PICA
assert DimensionBase.from_string('1.5 in') == 1.5*INCH
assert DimensionBase.from_string('99999mm') == 99999*MM
assert DimensionBase.from_string('-2.1 cm') == -2.1*CM
assert DimensionBase.from_string('21%') == 21.00*PERCENT
assert DimensionBase.from_string('-16.12%') == -16.12*PERCENT
with pytest.raises(ValueError):
assert DimensionBase.from_string('20inch')
|
Test cases for style attribute parsing
|
Test cases for style attribute parsing
OptionSet, Bool, Integer, DimensionBase
|
Python
|
agpl-3.0
|
brechtm/rinohtype,brechtm/rinohtype,brechtm/rinohtype
|
Test cases for style attribute parsing
OptionSet, Bool, Integer, DimensionBase
|
import pytest
from rinoh.dimension import DimensionBase, PT, PICA, INCH, MM, CM, PERCENT
from rinoh.style import OptionSet, Bool, Integer
def test_optionset_from_string():
ONE = 'one'
TWO = 'two'
THREE = 'three'
class TestSet1(OptionSet):
values = ONE, TWO, THREE
assert TestSet1.from_string('one') == ONE
assert TestSet1.from_string('TWO') == TWO
assert TestSet1.from_string('tHRee') == THREE
with pytest.raises(ValueError):
TestSet1.from_string('four')
with pytest.raises(ValueError):
TestSet1.from_string('none')
class TestSet2(OptionSet):
values = None, TWO
assert TestSet2.from_string('none') == None
assert TestSet2.from_string('nONe') == None
assert TestSet2.from_string('two') == TWO
with pytest.raises(ValueError):
TestSet2.from_string('one')
with pytest.raises(ValueError):
TestSet2.from_string('False')
def test_bool_from_string():
assert Bool.from_string('true') == True
assert Bool.from_string('false') == False
assert Bool.from_string('TRUE') == True
assert Bool.from_string('FALSE') == False
assert Bool.from_string('True') == True
assert Bool.from_string('FaLSE') == False
with pytest.raises(ValueError):
Bool.from_string('1')
with pytest.raises(ValueError):
Bool.from_string('0')
with pytest.raises(ValueError):
Bool.from_string('T')
with pytest.raises(ValueError):
Bool.from_string('f')
def test_integer_from_string():
assert Integer.from_string('1') == 1
assert Integer.from_string('001') == 1
assert Integer.from_string('873654354') == 873654354
assert Integer.from_string('-9') == -9
with pytest.raises(ValueError):
assert Integer.from_string('1e5')
with pytest.raises(ValueError):
assert Integer.from_string('0.5')
def test_dimensionbase_from_string():
assert DimensionBase.from_string('0') == 0
assert DimensionBase.from_string('1pt') == 1*PT
assert DimensionBase.from_string('10 pt') == 10*PT
assert DimensionBase.from_string('25pc') == 25*PICA
assert DimensionBase.from_string('1.5 in') == 1.5*INCH
assert DimensionBase.from_string('99999mm') == 99999*MM
assert DimensionBase.from_string('-2.1 cm') == -2.1*CM
assert DimensionBase.from_string('21%') == 21.00*PERCENT
assert DimensionBase.from_string('-16.12%') == -16.12*PERCENT
with pytest.raises(ValueError):
assert DimensionBase.from_string('20inch')
|
<commit_before><commit_msg>Test cases for style attribute parsing
OptionSet, Bool, Integer, DimensionBase<commit_after>
|
import pytest
from rinoh.dimension import DimensionBase, PT, PICA, INCH, MM, CM, PERCENT
from rinoh.style import OptionSet, Bool, Integer
def test_optionset_from_string():
ONE = 'one'
TWO = 'two'
THREE = 'three'
class TestSet1(OptionSet):
values = ONE, TWO, THREE
assert TestSet1.from_string('one') == ONE
assert TestSet1.from_string('TWO') == TWO
assert TestSet1.from_string('tHRee') == THREE
with pytest.raises(ValueError):
TestSet1.from_string('four')
with pytest.raises(ValueError):
TestSet1.from_string('none')
class TestSet2(OptionSet):
values = None, TWO
assert TestSet2.from_string('none') == None
assert TestSet2.from_string('nONe') == None
assert TestSet2.from_string('two') == TWO
with pytest.raises(ValueError):
TestSet2.from_string('one')
with pytest.raises(ValueError):
TestSet2.from_string('False')
def test_bool_from_string():
assert Bool.from_string('true') == True
assert Bool.from_string('false') == False
assert Bool.from_string('TRUE') == True
assert Bool.from_string('FALSE') == False
assert Bool.from_string('True') == True
assert Bool.from_string('FaLSE') == False
with pytest.raises(ValueError):
Bool.from_string('1')
with pytest.raises(ValueError):
Bool.from_string('0')
with pytest.raises(ValueError):
Bool.from_string('T')
with pytest.raises(ValueError):
Bool.from_string('f')
def test_integer_from_string():
assert Integer.from_string('1') == 1
assert Integer.from_string('001') == 1
assert Integer.from_string('873654354') == 873654354
assert Integer.from_string('-9') == -9
with pytest.raises(ValueError):
assert Integer.from_string('1e5')
with pytest.raises(ValueError):
assert Integer.from_string('0.5')
def test_dimensionbase_from_string():
assert DimensionBase.from_string('0') == 0
assert DimensionBase.from_string('1pt') == 1*PT
assert DimensionBase.from_string('10 pt') == 10*PT
assert DimensionBase.from_string('25pc') == 25*PICA
assert DimensionBase.from_string('1.5 in') == 1.5*INCH
assert DimensionBase.from_string('99999mm') == 99999*MM
assert DimensionBase.from_string('-2.1 cm') == -2.1*CM
assert DimensionBase.from_string('21%') == 21.00*PERCENT
assert DimensionBase.from_string('-16.12%') == -16.12*PERCENT
with pytest.raises(ValueError):
assert DimensionBase.from_string('20inch')
|
Test cases for style attribute parsing
OptionSet, Bool, Integer, DimensionBase
import pytest
from rinoh.dimension import DimensionBase, PT, PICA, INCH, MM, CM, PERCENT
from rinoh.style import OptionSet, Bool, Integer
def test_optionset_from_string():
ONE = 'one'
TWO = 'two'
THREE = 'three'
class TestSet1(OptionSet):
values = ONE, TWO, THREE
assert TestSet1.from_string('one') == ONE
assert TestSet1.from_string('TWO') == TWO
assert TestSet1.from_string('tHRee') == THREE
with pytest.raises(ValueError):
TestSet1.from_string('four')
with pytest.raises(ValueError):
TestSet1.from_string('none')
class TestSet2(OptionSet):
values = None, TWO
assert TestSet2.from_string('none') == None
assert TestSet2.from_string('nONe') == None
assert TestSet2.from_string('two') == TWO
with pytest.raises(ValueError):
TestSet2.from_string('one')
with pytest.raises(ValueError):
TestSet2.from_string('False')
def test_bool_from_string():
assert Bool.from_string('true') == True
assert Bool.from_string('false') == False
assert Bool.from_string('TRUE') == True
assert Bool.from_string('FALSE') == False
assert Bool.from_string('True') == True
assert Bool.from_string('FaLSE') == False
with pytest.raises(ValueError):
Bool.from_string('1')
with pytest.raises(ValueError):
Bool.from_string('0')
with pytest.raises(ValueError):
Bool.from_string('T')
with pytest.raises(ValueError):
Bool.from_string('f')
def test_integer_from_string():
assert Integer.from_string('1') == 1
assert Integer.from_string('001') == 1
assert Integer.from_string('873654354') == 873654354
assert Integer.from_string('-9') == -9
with pytest.raises(ValueError):
assert Integer.from_string('1e5')
with pytest.raises(ValueError):
assert Integer.from_string('0.5')
def test_dimensionbase_from_string():
assert DimensionBase.from_string('0') == 0
assert DimensionBase.from_string('1pt') == 1*PT
assert DimensionBase.from_string('10 pt') == 10*PT
assert DimensionBase.from_string('25pc') == 25*PICA
assert DimensionBase.from_string('1.5 in') == 1.5*INCH
assert DimensionBase.from_string('99999mm') == 99999*MM
assert DimensionBase.from_string('-2.1 cm') == -2.1*CM
assert DimensionBase.from_string('21%') == 21.00*PERCENT
assert DimensionBase.from_string('-16.12%') == -16.12*PERCENT
with pytest.raises(ValueError):
assert DimensionBase.from_string('20inch')
|
<commit_before><commit_msg>Test cases for style attribute parsing
OptionSet, Bool, Integer, DimensionBase<commit_after>
import pytest
from rinoh.dimension import DimensionBase, PT, PICA, INCH, MM, CM, PERCENT
from rinoh.style import OptionSet, Bool, Integer
def test_optionset_from_string():
ONE = 'one'
TWO = 'two'
THREE = 'three'
class TestSet1(OptionSet):
values = ONE, TWO, THREE
assert TestSet1.from_string('one') == ONE
assert TestSet1.from_string('TWO') == TWO
assert TestSet1.from_string('tHRee') == THREE
with pytest.raises(ValueError):
TestSet1.from_string('four')
with pytest.raises(ValueError):
TestSet1.from_string('none')
class TestSet2(OptionSet):
values = None, TWO
assert TestSet2.from_string('none') == None
assert TestSet2.from_string('nONe') == None
assert TestSet2.from_string('two') == TWO
with pytest.raises(ValueError):
TestSet2.from_string('one')
with pytest.raises(ValueError):
TestSet2.from_string('False')
def test_bool_from_string():
assert Bool.from_string('true') == True
assert Bool.from_string('false') == False
assert Bool.from_string('TRUE') == True
assert Bool.from_string('FALSE') == False
assert Bool.from_string('True') == True
assert Bool.from_string('FaLSE') == False
with pytest.raises(ValueError):
Bool.from_string('1')
with pytest.raises(ValueError):
Bool.from_string('0')
with pytest.raises(ValueError):
Bool.from_string('T')
with pytest.raises(ValueError):
Bool.from_string('f')
def test_integer_from_string():
assert Integer.from_string('1') == 1
assert Integer.from_string('001') == 1
assert Integer.from_string('873654354') == 873654354
assert Integer.from_string('-9') == -9
with pytest.raises(ValueError):
assert Integer.from_string('1e5')
with pytest.raises(ValueError):
assert Integer.from_string('0.5')
def test_dimensionbase_from_string():
assert DimensionBase.from_string('0') == 0
assert DimensionBase.from_string('1pt') == 1*PT
assert DimensionBase.from_string('10 pt') == 10*PT
assert DimensionBase.from_string('25pc') == 25*PICA
assert DimensionBase.from_string('1.5 in') == 1.5*INCH
assert DimensionBase.from_string('99999mm') == 99999*MM
assert DimensionBase.from_string('-2.1 cm') == -2.1*CM
assert DimensionBase.from_string('21%') == 21.00*PERCENT
assert DimensionBase.from_string('-16.12%') == -16.12*PERCENT
with pytest.raises(ValueError):
assert DimensionBase.from_string('20inch')
|
|
4628adc38789f52e8e2ef0cdf600b9fbed7b30ab
|
test/test_events.py
|
test/test_events.py
|
import pytest
from h11 import Request
from wsproto.events import (
ConnectionClosed,
ConnectionEstablished,
ConnectionRequested,
)
from wsproto.frame_protocol import CloseReason
def test_connection_requested_repr_no_subprotocol():
method = b'GET'
target = b'/foo'
headers = {
b'host': b'localhost',
b'sec-websocket-version': b'13',
}
http_version = b'1.1'
req = Request(method=method, target=target, headers=list(headers.items()),
http_version=http_version)
event = ConnectionRequested([], req)
r = repr(event)
assert 'ConnectionRequested' in r
assert target.decode('ascii') in r
def test_connection_requested_repr_with_subprotocol():
method = b'GET'
target = b'/foo'
headers = {
b'host': b'localhost',
b'sec-websocket-version': b'13',
b'sec-websocket-protocol': b'fnord',
}
http_version = b'1.1'
req = Request(method=method, target=target, headers=list(headers.items()),
http_version=http_version)
event = ConnectionRequested([], req)
r = repr(event)
assert 'ConnectionRequested' in r
assert target.decode('ascii') in r
assert headers[b'sec-websocket-protocol'].decode('ascii') in r
@pytest.mark.parametrize('subprotocol,extensions', [
('sproto', None),
(None, ['fake']),
('sprout', ['pretend']),
])
def test_connection_established_repr(subprotocol, extensions):
event = ConnectionEstablished(subprotocol, extensions)
r = repr(event)
if subprotocol:
assert subprotocol in r
if extensions:
for extension in extensions:
assert extension in r
@pytest.mark.parametrize('code,reason', [
(CloseReason.NORMAL_CLOSURE, None),
(CloseReason.NORMAL_CLOSURE, 'because i felt like it'),
(CloseReason.INVALID_FRAME_PAYLOAD_DATA, 'GOOD GOD WHAT DID YOU DO'),
])
def test_connection_closed_repr(code, reason):
event = ConnectionClosed(code, reason)
r = repr(event)
assert repr(code) in r
if reason:
assert reason in r
|
Test events (really event __repr__)
|
Test events (really event __repr__)
|
Python
|
mit
|
python-hyper/wsproto
|
Test events (really event __repr__)
|
import pytest
from h11 import Request
from wsproto.events import (
ConnectionClosed,
ConnectionEstablished,
ConnectionRequested,
)
from wsproto.frame_protocol import CloseReason
def test_connection_requested_repr_no_subprotocol():
method = b'GET'
target = b'/foo'
headers = {
b'host': b'localhost',
b'sec-websocket-version': b'13',
}
http_version = b'1.1'
req = Request(method=method, target=target, headers=list(headers.items()),
http_version=http_version)
event = ConnectionRequested([], req)
r = repr(event)
assert 'ConnectionRequested' in r
assert target.decode('ascii') in r
def test_connection_requested_repr_with_subprotocol():
method = b'GET'
target = b'/foo'
headers = {
b'host': b'localhost',
b'sec-websocket-version': b'13',
b'sec-websocket-protocol': b'fnord',
}
http_version = b'1.1'
req = Request(method=method, target=target, headers=list(headers.items()),
http_version=http_version)
event = ConnectionRequested([], req)
r = repr(event)
assert 'ConnectionRequested' in r
assert target.decode('ascii') in r
assert headers[b'sec-websocket-protocol'].decode('ascii') in r
@pytest.mark.parametrize('subprotocol,extensions', [
('sproto', None),
(None, ['fake']),
('sprout', ['pretend']),
])
def test_connection_established_repr(subprotocol, extensions):
event = ConnectionEstablished(subprotocol, extensions)
r = repr(event)
if subprotocol:
assert subprotocol in r
if extensions:
for extension in extensions:
assert extension in r
@pytest.mark.parametrize('code,reason', [
(CloseReason.NORMAL_CLOSURE, None),
(CloseReason.NORMAL_CLOSURE, 'because i felt like it'),
(CloseReason.INVALID_FRAME_PAYLOAD_DATA, 'GOOD GOD WHAT DID YOU DO'),
])
def test_connection_closed_repr(code, reason):
event = ConnectionClosed(code, reason)
r = repr(event)
assert repr(code) in r
if reason:
assert reason in r
|
<commit_before><commit_msg>Test events (really event __repr__)<commit_after>
|
import pytest
from h11 import Request
from wsproto.events import (
ConnectionClosed,
ConnectionEstablished,
ConnectionRequested,
)
from wsproto.frame_protocol import CloseReason
def test_connection_requested_repr_no_subprotocol():
method = b'GET'
target = b'/foo'
headers = {
b'host': b'localhost',
b'sec-websocket-version': b'13',
}
http_version = b'1.1'
req = Request(method=method, target=target, headers=list(headers.items()),
http_version=http_version)
event = ConnectionRequested([], req)
r = repr(event)
assert 'ConnectionRequested' in r
assert target.decode('ascii') in r
def test_connection_requested_repr_with_subprotocol():
method = b'GET'
target = b'/foo'
headers = {
b'host': b'localhost',
b'sec-websocket-version': b'13',
b'sec-websocket-protocol': b'fnord',
}
http_version = b'1.1'
req = Request(method=method, target=target, headers=list(headers.items()),
http_version=http_version)
event = ConnectionRequested([], req)
r = repr(event)
assert 'ConnectionRequested' in r
assert target.decode('ascii') in r
assert headers[b'sec-websocket-protocol'].decode('ascii') in r
@pytest.mark.parametrize('subprotocol,extensions', [
('sproto', None),
(None, ['fake']),
('sprout', ['pretend']),
])
def test_connection_established_repr(subprotocol, extensions):
event = ConnectionEstablished(subprotocol, extensions)
r = repr(event)
if subprotocol:
assert subprotocol in r
if extensions:
for extension in extensions:
assert extension in r
@pytest.mark.parametrize('code,reason', [
(CloseReason.NORMAL_CLOSURE, None),
(CloseReason.NORMAL_CLOSURE, 'because i felt like it'),
(CloseReason.INVALID_FRAME_PAYLOAD_DATA, 'GOOD GOD WHAT DID YOU DO'),
])
def test_connection_closed_repr(code, reason):
event = ConnectionClosed(code, reason)
r = repr(event)
assert repr(code) in r
if reason:
assert reason in r
|
Test events (really event __repr__)import pytest
from h11 import Request
from wsproto.events import (
ConnectionClosed,
ConnectionEstablished,
ConnectionRequested,
)
from wsproto.frame_protocol import CloseReason
def test_connection_requested_repr_no_subprotocol():
method = b'GET'
target = b'/foo'
headers = {
b'host': b'localhost',
b'sec-websocket-version': b'13',
}
http_version = b'1.1'
req = Request(method=method, target=target, headers=list(headers.items()),
http_version=http_version)
event = ConnectionRequested([], req)
r = repr(event)
assert 'ConnectionRequested' in r
assert target.decode('ascii') in r
def test_connection_requested_repr_with_subprotocol():
method = b'GET'
target = b'/foo'
headers = {
b'host': b'localhost',
b'sec-websocket-version': b'13',
b'sec-websocket-protocol': b'fnord',
}
http_version = b'1.1'
req = Request(method=method, target=target, headers=list(headers.items()),
http_version=http_version)
event = ConnectionRequested([], req)
r = repr(event)
assert 'ConnectionRequested' in r
assert target.decode('ascii') in r
assert headers[b'sec-websocket-protocol'].decode('ascii') in r
@pytest.mark.parametrize('subprotocol,extensions', [
('sproto', None),
(None, ['fake']),
('sprout', ['pretend']),
])
def test_connection_established_repr(subprotocol, extensions):
event = ConnectionEstablished(subprotocol, extensions)
r = repr(event)
if subprotocol:
assert subprotocol in r
if extensions:
for extension in extensions:
assert extension in r
@pytest.mark.parametrize('code,reason', [
(CloseReason.NORMAL_CLOSURE, None),
(CloseReason.NORMAL_CLOSURE, 'because i felt like it'),
(CloseReason.INVALID_FRAME_PAYLOAD_DATA, 'GOOD GOD WHAT DID YOU DO'),
])
def test_connection_closed_repr(code, reason):
event = ConnectionClosed(code, reason)
r = repr(event)
assert repr(code) in r
if reason:
assert reason in r
|
<commit_before><commit_msg>Test events (really event __repr__)<commit_after>import pytest
from h11 import Request
from wsproto.events import (
ConnectionClosed,
ConnectionEstablished,
ConnectionRequested,
)
from wsproto.frame_protocol import CloseReason
def test_connection_requested_repr_no_subprotocol():
method = b'GET'
target = b'/foo'
headers = {
b'host': b'localhost',
b'sec-websocket-version': b'13',
}
http_version = b'1.1'
req = Request(method=method, target=target, headers=list(headers.items()),
http_version=http_version)
event = ConnectionRequested([], req)
r = repr(event)
assert 'ConnectionRequested' in r
assert target.decode('ascii') in r
def test_connection_requested_repr_with_subprotocol():
method = b'GET'
target = b'/foo'
headers = {
b'host': b'localhost',
b'sec-websocket-version': b'13',
b'sec-websocket-protocol': b'fnord',
}
http_version = b'1.1'
req = Request(method=method, target=target, headers=list(headers.items()),
http_version=http_version)
event = ConnectionRequested([], req)
r = repr(event)
assert 'ConnectionRequested' in r
assert target.decode('ascii') in r
assert headers[b'sec-websocket-protocol'].decode('ascii') in r
@pytest.mark.parametrize('subprotocol,extensions', [
('sproto', None),
(None, ['fake']),
('sprout', ['pretend']),
])
def test_connection_established_repr(subprotocol, extensions):
event = ConnectionEstablished(subprotocol, extensions)
r = repr(event)
if subprotocol:
assert subprotocol in r
if extensions:
for extension in extensions:
assert extension in r
@pytest.mark.parametrize('code,reason', [
(CloseReason.NORMAL_CLOSURE, None),
(CloseReason.NORMAL_CLOSURE, 'because i felt like it'),
(CloseReason.INVALID_FRAME_PAYLOAD_DATA, 'GOOD GOD WHAT DID YOU DO'),
])
def test_connection_closed_repr(code, reason):
event = ConnectionClosed(code, reason)
r = repr(event)
assert repr(code) in r
if reason:
assert reason in r
|
|
d67b685340cf2db7cd31b50a4484c29625b8fea5
|
content/test/gpu/gpu_tests/pixel_expectations.py
|
content/test/gpu/gpu_tests/pixel_expectations.py
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import test_expectations
# Valid expectation conditions are:
#
# Operating systems:
# win, xp, vista, win7, mac, leopard, snowleopard, lion, mountainlion,
# linux, chromeos, android
#
# GPU vendors:
# amd, arm, broadcom, hisilicon, intel, imagination, nvidia, qualcomm,
# vivante
#
# Specific GPUs can be listed as a tuple with vendor name and device ID.
# Examples: ('nvidia', 0x1234), ('arm', 'Mali-T604')
# Device IDs must be paired with a GPU vendor.
class PixelExpectations(test_expectations.TestExpectations):
def SetExpectations(self):
# Sample Usage:
# self.Fail('Pixel.Canvas2DRedBox',
# ['mac', 'amd', ('nvidia', 0x1234)], bug=123)
self.Fail('Pixel.Canvas2DRedBox', bug=384551)
self.Fail('Pixel.CSS3DBlueBox', bug=384551)
self.Fail('Pixel.WebGLGreenTriangle', bug=384551)
pass
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import test_expectations
# Valid expectation conditions are:
#
# Operating systems:
# win, xp, vista, win7, mac, leopard, snowleopard, lion, mountainlion,
# linux, chromeos, android
#
# GPU vendors:
# amd, arm, broadcom, hisilicon, intel, imagination, nvidia, qualcomm,
# vivante
#
# Specific GPUs can be listed as a tuple with vendor name and device ID.
# Examples: ('nvidia', 0x1234), ('arm', 'Mali-T604')
# Device IDs must be paired with a GPU vendor.
class PixelExpectations(test_expectations.TestExpectations):
def SetExpectations(self):
# Sample Usage:
# self.Fail('Pixel.Canvas2DRedBox',
# ['mac', 'amd', ('nvidia', 0x1234)], bug=123)
pass
|
Remove pixel test fail expectation
|
Remove pixel test fail expectation
This patch undoes the failure expectation in
https://codereview.chromium.org/340603002/ and completes the rebaseline of the
pixel tests.
BUG=384551
Review URL: https://codereview.chromium.org/348853003
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@278961 0039d316-1c4b-4281-b951-d872f2087c98
|
Python
|
bsd-3-clause
|
jaruba/chromium.src,Pluto-tv/chromium-crosswalk,ondra-novak/chromium.src,krieger-od/nwjs_chromium.src,hgl888/chromium-crosswalk-efl,Just-D/chromium-1,TheTypoMaster/chromium-crosswalk,hgl888/chromium-crosswalk,Chilledheart/chromium,Chilledheart/chromium,crosswalk-project/chromium-crosswalk-efl,jaruba/chromium.src,PeterWangIntel/chromium-crosswalk,Jonekee/chromium.src,dednal/chromium.src,Fireblend/chromium-crosswalk,chuan9/chromium-crosswalk,axinging/chromium-crosswalk,hgl888/chromium-crosswalk,markYoungH/chromium.src,Fireblend/chromium-crosswalk,chuan9/chromium-crosswalk,axinging/chromium-crosswalk,fujunwei/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,chuan9/chromium-crosswalk,axinging/chromium-crosswalk,Fireblend/chromium-crosswalk,M4sse/chromium.src,littlstar/chromium.src,crosswalk-project/chromium-crosswalk-efl,hgl888/chromium-crosswalk,axinging/chromium-crosswalk,dednal/chromium.src,chuan9/chromium-crosswalk,ltilve/chromium,M4sse/chromium.src,PeterWangIntel/chromium-crosswalk,krieger-od/nwjs_chromium.src,ondra-novak/chromium.src,Jonekee/chromium.src,hgl888/chromium-crosswalk,dednal/chromium.src,Pluto-tv/chromium-crosswalk,littlstar/chromium.src,dednal/chromium.src,TheTypoMaster/chromium-crosswalk,Jonekee/chromium.src,chuan9/chromium-crosswalk,fujunwei/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,dednal/chromium.src,hgl888/chromium-crosswalk-efl,Just-D/chromium-1,Fireblend/chromium-crosswalk,dushu1203/chromium.src,markYoungH/chromium.src,hgl888/chromium-crosswalk-efl,hgl888/chromium-crosswalk-efl,Fireblend/chromium-crosswalk,axinging/chromium-crosswalk,hgl888/chromium-crosswalk,ltilve/chromium,jaruba/chromium.src,hgl888/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,ltilve/chromium,Pluto-tv/chromium-crosswalk,littlstar/chromium.src,krieger-od/nwjs_chromium.src,ltilve/chromium,PeterWangIntel/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,dednal/chromium.src,jaruba/chromium.src,M4sse/chromium.src,markYoungH/chromium.src,dednal/chromium.src,M4sse/chromium.src,dushu1203/chromium.src,ondra-novak/chromium.src,mohamed--abdel-maksoud/chromium.src,TheTypoMaster/chromium-crosswalk,Pluto-tv/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,Chilledheart/chromium,littlstar/chromium.src,ltilve/chromium,hgl888/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,Chilledheart/chromium,jaruba/chromium.src,dednal/chromium.src,dednal/chromium.src,bright-sparks/chromium-spacewalk,Fireblend/chromium-crosswalk,dushu1203/chromium.src,M4sse/chromium.src,axinging/chromium-crosswalk,axinging/chromium-crosswalk,littlstar/chromium.src,jaruba/chromium.src,Chilledheart/chromium,krieger-od/nwjs_chromium.src,dushu1203/chromium.src,mohamed--abdel-maksoud/chromium.src,Jonekee/chromium.src,bright-sparks/chromium-spacewalk,crosswalk-project/chromium-crosswalk-efl,TheTypoMaster/chromium-crosswalk,ltilve/chromium,jaruba/chromium.src,fujunwei/chromium-crosswalk,krieger-od/nwjs_chromium.src,bright-sparks/chromium-spacewalk,mohamed--abdel-maksoud/chromium.src,Pluto-tv/chromium-crosswalk,Jonekee/chromium.src,axinging/chromium-crosswalk,markYoungH/chromium.src,Jonekee/chromium.src,mohamed--abdel-maksoud/chromium.src,Chilledheart/chromium,hgl888/chromium-crosswalk-efl,markYoungH/chromium.src,fujunwei/chromium-crosswalk,Pluto-tv/chromium-crosswalk,dednal/chromium.src,markYoungH/chromium.src,axinging/chromium-crosswalk,markYoungH/chromium.src,TheTypoMaster/chromium-crosswalk,dushu1203/chromium.src,dushu1203/chromium.src,TheTypoMaster/chromium-crosswa
lk,TheTypoMaster/chromium-crosswalk,hgl888/chromium-crosswalk,Jonekee/chromium.src,M4sse/chromium.src,fujunwei/chromium-crosswalk,M4sse/chromium.src,markYoungH/chromium.src,crosswalk-project/chromium-crosswalk-efl,crosswalk-project/chromium-crosswalk-efl,chuan9/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,dushu1203/chromium.src,hgl888/chromium-crosswalk-efl,TheTypoMaster/chromium-crosswalk,krieger-od/nwjs_chromium.src,jaruba/chromium.src,Just-D/chromium-1,M4sse/chromium.src,Just-D/chromium-1,dushu1203/chromium.src,Just-D/chromium-1,ondra-novak/chromium.src,bright-sparks/chromium-spacewalk,hgl888/chromium-crosswalk-efl,ltilve/chromium,Chilledheart/chromium,PeterWangIntel/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,bright-sparks/chromium-spacewalk,krieger-od/nwjs_chromium.src,hgl888/chromium-crosswalk-efl,ondra-novak/chromium.src,chuan9/chromium-crosswalk,Jonekee/chromium.src,mohamed--abdel-maksoud/chromium.src,littlstar/chromium.src,fujunwei/chromium-crosswalk,markYoungH/chromium.src,Just-D/chromium-1,mohamed--abdel-maksoud/chromium.src,PeterWangIntel/chromium-crosswalk,ondra-novak/chromium.src,dushu1203/chromium.src,dushu1203/chromium.src,hgl888/chromium-crosswalk-efl,ondra-novak/chromium.src,Chilledheart/chromium,Fireblend/chromium-crosswalk,hgl888/chromium-crosswalk-efl,dednal/chromium.src,krieger-od/nwjs_chromium.src,littlstar/chromium.src,bright-sparks/chromium-spacewalk,crosswalk-project/chromium-crosswalk-efl,jaruba/chromium.src,PeterWangIntel/chromium-crosswalk,jaruba/chromium.src,axinging/chromium-crosswalk,M4sse/chromium.src,krieger-od/nwjs_chromium.src,fujunwei/chromium-crosswalk,Jonekee/chromium.src,chuan9/chromium-crosswalk,Pluto-tv/chromium-crosswalk,markYoungH/chromium.src,mohamed--abdel-maksoud/chromium.src,Just-D/chromium-1,Fireblend/chromium-crosswalk,bright-sparks/chromium-spacewalk,Jonekee/chromium.src,ltilve/chromium,Chilledheart/chromium,TheTypoMaster/chromium-crosswalk,krieger-od/nwjs_chromium.src,ondra-novak/chromium.src,Pluto-tv/chromium-crosswalk,jaruba/chromium.src,Just-D/chromium-1,Pluto-tv/chromium-crosswalk,markYoungH/chromium.src,chuan9/chromium-crosswalk,hgl888/chromium-crosswalk,ltilve/chromium,fujunwei/chromium-crosswalk,ondra-novak/chromium.src,axinging/chromium-crosswalk,littlstar/chromium.src,krieger-od/nwjs_chromium.src,Just-D/chromium-1,mohamed--abdel-maksoud/chromium.src,M4sse/chromium.src,bright-sparks/chromium-spacewalk,Jonekee/chromium.src,M4sse/chromium.src,bright-sparks/chromium-spacewalk,fujunwei/chromium-crosswalk,dushu1203/chromium.src,Fireblend/chromium-crosswalk
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import test_expectations
# Valid expectation conditions are:
#
# Operating systems:
# win, xp, vista, win7, mac, leopard, snowleopard, lion, mountainlion,
# linux, chromeos, android
#
# GPU vendors:
# amd, arm, broadcom, hisilicon, intel, imagination, nvidia, qualcomm,
# vivante
#
# Specific GPUs can be listed as a tuple with vendor name and device ID.
# Examples: ('nvidia', 0x1234), ('arm', 'Mali-T604')
# Device IDs must be paired with a GPU vendor.
class PixelExpectations(test_expectations.TestExpectations):
def SetExpectations(self):
# Sample Usage:
# self.Fail('Pixel.Canvas2DRedBox',
# ['mac', 'amd', ('nvidia', 0x1234)], bug=123)
self.Fail('Pixel.Canvas2DRedBox', bug=384551)
self.Fail('Pixel.CSS3DBlueBox', bug=384551)
self.Fail('Pixel.WebGLGreenTriangle', bug=384551)
pass
Remove pixel test fail expectation
This patch undoes the failure expectation in
https://codereview.chromium.org/340603002/ and completes the rebaseline of the
pixel tests.
BUG=384551
Review URL: https://codereview.chromium.org/348853003
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@278961 0039d316-1c4b-4281-b951-d872f2087c98
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import test_expectations
# Valid expectation conditions are:
#
# Operating systems:
# win, xp, vista, win7, mac, leopard, snowleopard, lion, mountainlion,
# linux, chromeos, android
#
# GPU vendors:
# amd, arm, broadcom, hisilicon, intel, imagination, nvidia, qualcomm,
# vivante
#
# Specific GPUs can be listed as a tuple with vendor name and device ID.
# Examples: ('nvidia', 0x1234), ('arm', 'Mali-T604')
# Device IDs must be paired with a GPU vendor.
class PixelExpectations(test_expectations.TestExpectations):
def SetExpectations(self):
# Sample Usage:
# self.Fail('Pixel.Canvas2DRedBox',
# ['mac', 'amd', ('nvidia', 0x1234)], bug=123)
pass
|
<commit_before># Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import test_expectations
# Valid expectation conditions are:
#
# Operating systems:
# win, xp, vista, win7, mac, leopard, snowleopard, lion, mountainlion,
# linux, chromeos, android
#
# GPU vendors:
# amd, arm, broadcom, hisilicon, intel, imagination, nvidia, qualcomm,
# vivante
#
# Specific GPUs can be listed as a tuple with vendor name and device ID.
# Examples: ('nvidia', 0x1234), ('arm', 'Mali-T604')
# Device IDs must be paired with a GPU vendor.
class PixelExpectations(test_expectations.TestExpectations):
def SetExpectations(self):
# Sample Usage:
# self.Fail('Pixel.Canvas2DRedBox',
# ['mac', 'amd', ('nvidia', 0x1234)], bug=123)
self.Fail('Pixel.Canvas2DRedBox', bug=384551)
self.Fail('Pixel.CSS3DBlueBox', bug=384551)
self.Fail('Pixel.WebGLGreenTriangle', bug=384551)
pass
<commit_msg>Remove pixel test fail expectation
This patch undoes the failure expectation in
https://codereview.chromium.org/340603002/ and completes the rebaseline of the
pixel tests.
BUG=384551
Review URL: https://codereview.chromium.org/348853003
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@278961 0039d316-1c4b-4281-b951-d872f2087c98<commit_after>
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import test_expectations
# Valid expectation conditions are:
#
# Operating systems:
# win, xp, vista, win7, mac, leopard, snowleopard, lion, mountainlion,
# linux, chromeos, android
#
# GPU vendors:
# amd, arm, broadcom, hisilicon, intel, imagination, nvidia, qualcomm,
# vivante
#
# Specific GPUs can be listed as a tuple with vendor name and device ID.
# Examples: ('nvidia', 0x1234), ('arm', 'Mali-T604')
# Device IDs must be paired with a GPU vendor.
class PixelExpectations(test_expectations.TestExpectations):
def SetExpectations(self):
# Sample Usage:
# self.Fail('Pixel.Canvas2DRedBox',
# ['mac', 'amd', ('nvidia', 0x1234)], bug=123)
pass
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import test_expectations
# Valid expectation conditions are:
#
# Operating systems:
# win, xp, vista, win7, mac, leopard, snowleopard, lion, mountainlion,
# linux, chromeos, android
#
# GPU vendors:
# amd, arm, broadcom, hisilicon, intel, imagination, nvidia, qualcomm,
# vivante
#
# Specific GPUs can be listed as a tuple with vendor name and device ID.
# Examples: ('nvidia', 0x1234), ('arm', 'Mali-T604')
# Device IDs must be paired with a GPU vendor.
class PixelExpectations(test_expectations.TestExpectations):
def SetExpectations(self):
# Sample Usage:
# self.Fail('Pixel.Canvas2DRedBox',
# ['mac', 'amd', ('nvidia', 0x1234)], bug=123)
self.Fail('Pixel.Canvas2DRedBox', bug=384551)
self.Fail('Pixel.CSS3DBlueBox', bug=384551)
self.Fail('Pixel.WebGLGreenTriangle', bug=384551)
pass
Remove pixel test fail expectation
This patch undoes the failure expectation in
https://codereview.chromium.org/340603002/ and completes the rebaseline of the
pixel tests.
BUG=384551
Review URL: https://codereview.chromium.org/348853003
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@278961 0039d316-1c4b-4281-b951-d872f2087c98# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import test_expectations
# Valid expectation conditions are:
#
# Operating systems:
# win, xp, vista, win7, mac, leopard, snowleopard, lion, mountainlion,
# linux, chromeos, android
#
# GPU vendors:
# amd, arm, broadcom, hisilicon, intel, imagination, nvidia, qualcomm,
# vivante
#
# Specific GPUs can be listed as a tuple with vendor name and device ID.
# Examples: ('nvidia', 0x1234), ('arm', 'Mali-T604')
# Device IDs must be paired with a GPU vendor.
class PixelExpectations(test_expectations.TestExpectations):
def SetExpectations(self):
# Sample Usage:
# self.Fail('Pixel.Canvas2DRedBox',
# ['mac', 'amd', ('nvidia', 0x1234)], bug=123)
pass
|
<commit_before># Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import test_expectations
# Valid expectation conditions are:
#
# Operating systems:
# win, xp, vista, win7, mac, leopard, snowleopard, lion, mountainlion,
# linux, chromeos, android
#
# GPU vendors:
# amd, arm, broadcom, hisilicon, intel, imagination, nvidia, qualcomm,
# vivante
#
# Specific GPUs can be listed as a tuple with vendor name and device ID.
# Examples: ('nvidia', 0x1234), ('arm', 'Mali-T604')
# Device IDs must be paired with a GPU vendor.
class PixelExpectations(test_expectations.TestExpectations):
def SetExpectations(self):
# Sample Usage:
# self.Fail('Pixel.Canvas2DRedBox',
# ['mac', 'amd', ('nvidia', 0x1234)], bug=123)
self.Fail('Pixel.Canvas2DRedBox', bug=384551)
self.Fail('Pixel.CSS3DBlueBox', bug=384551)
self.Fail('Pixel.WebGLGreenTriangle', bug=384551)
pass
<commit_msg>Remove pixel test fail expectation
This patch undoes the failure expectation in
https://codereview.chromium.org/340603002/ and completes the rebaseline of the
pixel tests.
BUG=384551
Review URL: https://codereview.chromium.org/348853003
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@278961 0039d316-1c4b-4281-b951-d872f2087c98<commit_after># Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import test_expectations
# Valid expectation conditions are:
#
# Operating systems:
# win, xp, vista, win7, mac, leopard, snowleopard, lion, mountainlion,
# linux, chromeos, android
#
# GPU vendors:
# amd, arm, broadcom, hisilicon, intel, imagination, nvidia, qualcomm,
# vivante
#
# Specific GPUs can be listed as a tuple with vendor name and device ID.
# Examples: ('nvidia', 0x1234), ('arm', 'Mali-T604')
# Device IDs must be paired with a GPU vendor.
class PixelExpectations(test_expectations.TestExpectations):
def SetExpectations(self):
# Sample Usage:
# self.Fail('Pixel.Canvas2DRedBox',
# ['mac', 'amd', ('nvidia', 0x1234)], bug=123)
pass
|
90b92a1977c32dd660533567c0d5034b93d5c9c7
|
pombola/core/management/commands/core_create_places_from_mapit_entries.py
|
pombola/core/management/commands/core_create_places_from_mapit_entries.py
|
# This script will copy areas from mapit to core.places, including creating the
# place kind if required.
# import re
# import sys
from django.core.management.base import LabelCommand
from mapit.models import Type
from pombola.core.models import Place, PlaceKind
from django.template.defaultfilters import slugify
class Command(LabelCommand):
help = 'Copy mapit.areas to core.places'
args = '<mapit.type.code>'
def handle_label(self, mapit_type_code, **options):
# load the mapit type
mapit_type = Type.objects.get(code=mapit_type_code)
# if needed create the core placetype
placekind, created = PlaceKind.objects.get_or_create(
name=mapit_type.description,
defaults={
'slug': slugify(mapit_type.description)
}
)
# create all the places as needed for all mapit areas of that type
for area in mapit_type.areas.all():
print area.name
place, created = Place.objects.get_or_create(
name=area.name,
kind=placekind,
defaults={
'slug': slugify(area.name),
}
)
place.mapit_area = area
place.save()
|
# This script will copy areas from mapit to core.places, including creating the
# place kind if required.
# import re
# import sys
from django.core.management.base import LabelCommand
from mapit.models import Type
from pombola.core.models import Place, PlaceKind
from django.template.defaultfilters import slugify
class Command(LabelCommand):
help = 'Copy mapit.areas to core.places'
args = '<mapit.type.code>'
def handle_label(self, mapit_type_code, **options):
# load the mapit type
mapit_type = Type.objects.get(code=mapit_type_code)
# if needed create the core placetype
placekind, created = PlaceKind.objects.get_or_create(
name=mapit_type.description,
defaults={
'slug': slugify(mapit_type.description)
}
)
# create all the places as needed for all mapit areas of that type
for area in mapit_type.areas.all():
# There may be a slug clash as several areas have the same name but
# are different placekinds. Create the slug and then check to see
# if the slug is already in use for a placekind other than ours. If
# it is append the placekind to the slug.
slug = slugify(area.name)
if Place.objects.filter(slug=slug).exclude(kind=placekind).exists():
slug = slug + '-' + placekind.slug
print "'%s' (%s)" % (area.name, slug)
place, created = Place.objects.get_or_create(
name=area.name,
kind=placekind,
defaults={
'slug': slug,
}
)
place.mapit_area = area
place.save()
|
Add smarts to cope with slug clashes with other places with the same names.
|
Add smarts to cope with slug clashes with other places with the same names.
|
Python
|
agpl-3.0
|
patricmutwiri/pombola,hzj123/56th,geoffkilpin/pombola,patricmutwiri/pombola,hzj123/56th,ken-muturi/pombola,hzj123/56th,hzj123/56th,mysociety/pombola,mysociety/pombola,hzj123/56th,geoffkilpin/pombola,mysociety/pombola,ken-muturi/pombola,hzj123/56th,mysociety/pombola,ken-muturi/pombola,mysociety/pombola,patricmutwiri/pombola,geoffkilpin/pombola,patricmutwiri/pombola,patricmutwiri/pombola,ken-muturi/pombola,geoffkilpin/pombola,mysociety/pombola,ken-muturi/pombola,geoffkilpin/pombola,ken-muturi/pombola,geoffkilpin/pombola,patricmutwiri/pombola
|
# This script will copy areas from mapit to core.places, including creating the
# place kind if required.
# import re
# import sys
from django.core.management.base import LabelCommand
from mapit.models import Type
from pombola.core.models import Place, PlaceKind
from django.template.defaultfilters import slugify
class Command(LabelCommand):
help = 'Copy mapit.areas to core.places'
args = '<mapit.type.code>'
def handle_label(self, mapit_type_code, **options):
# load the mapit type
mapit_type = Type.objects.get(code=mapit_type_code)
# if needed create the core placetype
placekind, created = PlaceKind.objects.get_or_create(
name=mapit_type.description,
defaults={
'slug': slugify(mapit_type.description)
}
)
# create all the places as needed for all mapit areas of that type
for area in mapit_type.areas.all():
print area.name
place, created = Place.objects.get_or_create(
name=area.name,
kind=placekind,
defaults={
'slug': slugify(area.name),
}
)
place.mapit_area = area
place.save()
Add smarts to cope with slug clashes with other places with the same names.
|
# This script will copy areas from mapit to core.places, including creating the
# place kind if required.
# import re
# import sys
from django.core.management.base import LabelCommand
from mapit.models import Type
from pombola.core.models import Place, PlaceKind
from django.template.defaultfilters import slugify
class Command(LabelCommand):
help = 'Copy mapit.areas to core.places'
args = '<mapit.type.code>'
def handle_label(self, mapit_type_code, **options):
# load the mapit type
mapit_type = Type.objects.get(code=mapit_type_code)
# if needed create the core placetype
placekind, created = PlaceKind.objects.get_or_create(
name=mapit_type.description,
defaults={
'slug': slugify(mapit_type.description)
}
)
# create all the places as needed for all mapit areas of that type
for area in mapit_type.areas.all():
# There may be a slug clash as several areas have the same name but
# are different placekinds. Create the slug and then check to see
# if the slug is already in use for a placekind other than ours. If
# it is append the placekind to the slug.
slug = slugify(area.name)
if Place.objects.filter(slug=slug).exclude(kind=placekind).exists():
slug = slug + '-' + placekind.slug
print "'%s' (%s)" % (area.name, slug)
place, created = Place.objects.get_or_create(
name=area.name,
kind=placekind,
defaults={
'slug': slug,
}
)
place.mapit_area = area
place.save()
|
<commit_before># This script will copy areas from mapit to core.places, including creating the
# place kind if required.
# import re
# import sys
from django.core.management.base import LabelCommand
from mapit.models import Type
from pombola.core.models import Place, PlaceKind
from django.template.defaultfilters import slugify
class Command(LabelCommand):
help = 'Copy mapit.areas to core.places'
args = '<mapit.type.code>'
def handle_label(self, mapit_type_code, **options):
# load the mapit type
mapit_type = Type.objects.get(code=mapit_type_code)
# if needed create the core placetype
placekind, created = PlaceKind.objects.get_or_create(
name=mapit_type.description,
defaults={
'slug': slugify(mapit_type.description)
}
)
# create all the places as needed for all mapit areas of that type
for area in mapit_type.areas.all():
print area.name
place, created = Place.objects.get_or_create(
name=area.name,
kind=placekind,
defaults={
'slug': slugify(area.name),
}
)
place.mapit_area = area
place.save()
<commit_msg>Add smarts to cope with slug clashes with other places with the same names.<commit_after>
|
# This script will copy areas from mapit to core.places, including creating the
# place kind if required.
# import re
# import sys
from django.core.management.base import LabelCommand
from mapit.models import Type
from pombola.core.models import Place, PlaceKind
from django.template.defaultfilters import slugify
class Command(LabelCommand):
help = 'Copy mapit.areas to core.places'
args = '<mapit.type.code>'
def handle_label(self, mapit_type_code, **options):
# load the mapit type
mapit_type = Type.objects.get(code=mapit_type_code)
# if needed create the core placetype
placekind, created = PlaceKind.objects.get_or_create(
name=mapit_type.description,
defaults={
'slug': slugify(mapit_type.description)
}
)
# create all the places as needed for all mapit areas of that type
for area in mapit_type.areas.all():
# There may be a slug clash as several areas have the same name but
# are different placekinds. Create the slug and then check to see
# if the slug is already in use for a placekind other than ours. If
# it is append the placekind to the slug.
slug = slugify(area.name)
if Place.objects.filter(slug=slug).exclude(kind=placekind).exists():
slug = slug + '-' + placekind.slug
print "'%s' (%s)" % (area.name, slug)
place, created = Place.objects.get_or_create(
name=area.name,
kind=placekind,
defaults={
'slug': slug,
}
)
place.mapit_area = area
place.save()
|
# This script will copy areas from mapit to core.places, including creating the
# place kind if required.
# import re
# import sys
from django.core.management.base import LabelCommand
from mapit.models import Type
from pombola.core.models import Place, PlaceKind
from django.template.defaultfilters import slugify
class Command(LabelCommand):
help = 'Copy mapit.areas to core.places'
args = '<mapit.type.code>'
def handle_label(self, mapit_type_code, **options):
# load the mapit type
mapit_type = Type.objects.get(code=mapit_type_code)
# if needed create the core placetype
placekind, created = PlaceKind.objects.get_or_create(
name=mapit_type.description,
defaults={
'slug': slugify(mapit_type.description)
}
)
# create all the places as needed for all mapit areas of that type
for area in mapit_type.areas.all():
print area.name
place, created = Place.objects.get_or_create(
name=area.name,
kind=placekind,
defaults={
'slug': slugify(area.name),
}
)
place.mapit_area = area
place.save()
Add smarts to cope with slug clashes with other places with the same names.# This script will copy areas from mapit to core.places, including creating the
# place kind if required.
# import re
# import sys
from django.core.management.base import LabelCommand
from mapit.models import Type
from pombola.core.models import Place, PlaceKind
from django.template.defaultfilters import slugify
class Command(LabelCommand):
help = 'Copy mapit.areas to core.places'
args = '<mapit.type.code>'
def handle_label(self, mapit_type_code, **options):
# load the mapit type
mapit_type = Type.objects.get(code=mapit_type_code)
# if needed create the core placetype
placekind, created = PlaceKind.objects.get_or_create(
name=mapit_type.description,
defaults={
'slug': slugify(mapit_type.description)
}
)
# create all the places as needed for all mapit areas of that type
for area in mapit_type.areas.all():
# There may be a slug clash as several areas have the same name but
# are different placekinds. Create the slug and then check to see
# if the slug is already in use for a placekind other than ours. If
# it is append the placekind to the slug.
slug = slugify(area.name)
if Place.objects.filter(slug=slug).exclude(kind=placekind).exists():
slug = slug + '-' + placekind.slug
print "'%s' (%s)" % (area.name, slug)
place, created = Place.objects.get_or_create(
name=area.name,
kind=placekind,
defaults={
'slug': slug,
}
)
place.mapit_area = area
place.save()
|
<commit_before># This script will copy areas from mapit to core.places, including creating the
# place kind if required.
# import re
# import sys
from django.core.management.base import LabelCommand
from mapit.models import Type
from pombola.core.models import Place, PlaceKind
from django.template.defaultfilters import slugify
class Command(LabelCommand):
help = 'Copy mapit.areas to core.places'
args = '<mapit.type.code>'
def handle_label(self, mapit_type_code, **options):
# load the mapit type
mapit_type = Type.objects.get(code=mapit_type_code)
# if needed create the core placetype
placekind, created = PlaceKind.objects.get_or_create(
name=mapit_type.description,
defaults={
'slug': slugify(mapit_type.description)
}
)
# create all the places as needed for all mapit areas of that type
for area in mapit_type.areas.all():
print area.name
place, created = Place.objects.get_or_create(
name=area.name,
kind=placekind,
defaults={
'slug': slugify(area.name),
}
)
place.mapit_area = area
place.save()
<commit_msg>Add smarts to cope with slug clashes with other places with the same names.<commit_after># This script will copy areas from mapit to core.places, including creating the
# place kind if required.
# import re
# import sys
from django.core.management.base import LabelCommand
from mapit.models import Type
from pombola.core.models import Place, PlaceKind
from django.template.defaultfilters import slugify
class Command(LabelCommand):
help = 'Copy mapit.areas to core.places'
args = '<mapit.type.code>'
def handle_label(self, mapit_type_code, **options):
# load the mapit type
mapit_type = Type.objects.get(code=mapit_type_code)
# if needed create the core placetype
placekind, created = PlaceKind.objects.get_or_create(
name=mapit_type.description,
defaults={
'slug': slugify(mapit_type.description)
}
)
# create all the places as needed for all mapit areas of that type
for area in mapit_type.areas.all():
# There may be a slug clash as several areas have the same name but
# are different placekinds. Create the slug and then check to see
# if the slug is already in use for a placekind other than ours. If
# it is append the placekind to the slug.
slug = slugify(area.name)
if Place.objects.filter(slug=slug).exclude(kind=placekind).exists():
slug = slug + '-' + placekind.slug
print "'%s' (%s)" % (area.name, slug)
place, created = Place.objects.get_or_create(
name=area.name,
kind=placekind,
defaults={
'slug': slug,
}
)
place.mapit_area = area
place.save()
|
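The slug-clash fix in the record above, reduced to its core idea: if a generated slug is already taken by a place of a different kind, append the kind's slug to disambiguate. Note the original is Python 2 code (print statements). In this sketch Django's ORM lookup is replaced by an in-memory dict and slugify is simplified, so it runs standalone; it is not pombola's actual implementation.

import re

def slugify(value):
    # Rough stand-in for django.template.defaultfilters.slugify.
    value = re.sub(r'[^\w\s-]', '', value).strip().lower()
    return re.sub(r'[-\s]+', '-', value)

existing = {}  # slug -> kind slug, simulating the Place table

def unique_slug(name, kind_slug):
    slug = slugify(name)
    # Same check as the patch: clash only counts if the kind differs.
    if existing.get(slug) not in (None, kind_slug):
        slug = slug + '-' + kind_slug
    existing[slug] = kind_slug
    return slug

print(unique_slug('Nairobi', 'county'))        # nairobi
print(unique_slug('Nairobi', 'constituency'))  # nairobi-constituency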
0be0d20fc667f0734b85d98f1d359130f7ed5b98
|
plotly/tests/test_core/test_graph_objs/test_graph_objs.py
|
plotly/tests/test_core/test_graph_objs/test_graph_objs.py
|
from unittest import TestCase
import plotly.graph_objs as go
import plotly.graph_reference as gr
OLD_CLASS_NAMES = ['AngularAxis', 'Annotation', 'Annotations', 'Area',
'Bar', 'Box', 'ColorBar', 'Contour', 'Contours',
'Data', 'ErrorX', 'ErrorY', 'ErrorZ', 'Figure',
'Font', 'Heatmap', 'Histogram', 'Histogram2d',
'Histogram2dContour', 'Layout', 'Legend', 'Line',
'Margin', 'Marker', 'RadialAxis', 'Scatter',
'Scatter3d', 'Scene', 'Stream', 'Surface', 'Trace',
'XAxis', 'XBins', 'YAxis', 'YBins', 'ZAxis']
class TestBackwardsCompat(TestCase):
def test_old_class_names(self):
# these were all defined at one point, we want to maintain backwards
# compat, so we basically just create a checkpoint with this test.
for class_name in OLD_CLASS_NAMES:
self.assertIn(class_name, go.__dict__.keys())
class TestGraphObjs(TestCase):
def test_traces_should_be_defined(self):
# we *always* want to create classes for traces
class_names = [gr.string_to_class_name(object_name)
for object_name in gr.TRACE_NAMES]
for class_name in class_names:
self.assertIn(class_name, go.__dict__.keys())
def test_no_new_classes(self):
# for maintenance reasons, we don't want to generate new class defs
expected_class_names = {gr.string_to_class_name(object_name)
for object_name in gr.TRACE_NAMES}
expected_class_names.update(OLD_CLASS_NAMES)
# assume that CapitalCased keys are the classes we defined
current_class_names = {key for key in go.__dict__.keys()
if key[0].isupper()}
self.assertEqual(current_class_names, expected_class_names)
|
Add failing specs for current/future class names.
|
Add failing specs for current/future class names.
|
Python
|
mit
|
plotly/python-api,plotly/python-api,plotly/plotly.py,plotly/plotly.py,plotly/plotly.py,plotly/python-api
|
Add failing specs for current/future class names.
|
from unittest import TestCase
import plotly.graph_objs as go
import plotly.graph_reference as gr
OLD_CLASS_NAMES = ['AngularAxis', 'Annotation', 'Annotations', 'Area',
'Bar', 'Box', 'ColorBar', 'Contour', 'Contours',
'Data', 'ErrorX', 'ErrorY', 'ErrorZ', 'Figure',
'Font', 'Heatmap', 'Histogram', 'Histogram2d',
'Histogram2dContour', 'Layout', 'Legend', 'Line',
'Margin', 'Marker', 'RadialAxis', 'Scatter',
'Scatter3d', 'Scene', 'Stream', 'Surface', 'Trace',
'XAxis', 'XBins', 'YAxis', 'YBins', 'ZAxis']
class TestBackwardsCompat(TestCase):
def test_old_class_names(self):
# these were all defined at one point, we want to maintain backwards
# compat, so we basically just create a checkpoint with this test.
for class_name in OLD_CLASS_NAMES:
self.assertIn(class_name, go.__dict__.keys())
class TestGraphObjs(TestCase):
def test_traces_should_be_defined(self):
# we *always* want to create classes for traces
class_names = [gr.string_to_class_name(object_name)
for object_name in gr.TRACE_NAMES]
for class_name in class_names:
self.assertIn(class_name, go.__dict__.keys())
def test_no_new_classes(self):
# for maintenance reasons, we don't want to generate new class defs
expected_class_names = {gr.string_to_class_name(object_name)
for object_name in gr.TRACE_NAMES}
expected_class_names.update(OLD_CLASS_NAMES)
# assume that CapitalCased keys are the classes we defined
current_class_names = {key for key in go.__dict__.keys()
if key[0].isupper()}
self.assertEqual(current_class_names, expected_class_names)
|
<commit_before><commit_msg>Add failing specs for current/future class names.<commit_after>
|
from unittest import TestCase
import plotly.graph_objs as go
import plotly.graph_reference as gr
OLD_CLASS_NAMES = ['AngularAxis', 'Annotation', 'Annotations', 'Area',
'Bar', 'Box', 'ColorBar', 'Contour', 'Contours',
'Data', 'ErrorX', 'ErrorY', 'ErrorZ', 'Figure',
'Font', 'Heatmap', 'Histogram', 'Histogram2d',
'Histogram2dContour', 'Layout', 'Legend', 'Line',
'Margin', 'Marker', 'RadialAxis', 'Scatter',
'Scatter3d', 'Scene', 'Stream', 'Surface', 'Trace',
'XAxis', 'XBins', 'YAxis', 'YBins', 'ZAxis']
class TestBackwardsCompat(TestCase):
def test_old_class_names(self):
# these were all defined at one point, we want to maintain backwards
# compat, so we basically just create a checkpoint with this test.
for class_name in OLD_CLASS_NAMES:
self.assertIn(class_name, go.__dict__.keys())
class TestGraphObjs(TestCase):
def test_traces_should_be_defined(self):
# we *always* want to create classes for traces
class_names = [gr.string_to_class_name(object_name)
for object_name in gr.TRACE_NAMES]
for class_name in class_names:
self.assertIn(class_name, go.__dict__.keys())
def test_no_new_classes(self):
# for maintenance reasons, we don't want to generate new class defs
expected_class_names = {gr.string_to_class_name(object_name)
for object_name in gr.TRACE_NAMES}
expected_class_names.update(OLD_CLASS_NAMES)
# assume that CapitalCased keys are the classes we defined
current_class_names = {key for key in go.__dict__.keys()
if key[0].isupper()}
self.assertEqual(current_class_names, expected_class_names)
|
Add failing specs for current/future class names.from unittest import TestCase
import plotly.graph_objs as go
import plotly.graph_reference as gr
OLD_CLASS_NAMES = ['AngularAxis', 'Annotation', 'Annotations', 'Area',
'Bar', 'Box', 'ColorBar', 'Contour', 'Contours',
'Data', 'ErrorX', 'ErrorY', 'ErrorZ', 'Figure',
'Font', 'Heatmap', 'Histogram', 'Histogram2d',
'Histogram2dContour', 'Layout', 'Legend', 'Line',
'Margin', 'Marker', 'RadialAxis', 'Scatter',
'Scatter3d', 'Scene', 'Stream', 'Surface', 'Trace',
'XAxis', 'XBins', 'YAxis', 'YBins', 'ZAxis']
class TestBackwardsCompat(TestCase):
def test_old_class_names(self):
# these were all defined at one point, we want to maintain backwards
# compat, so we basically just create a checkpoint with this test.
for class_name in OLD_CLASS_NAMES:
self.assertIn(class_name, go.__dict__.keys())
class TestGraphObjs(TestCase):
def test_traces_should_be_defined(self):
# we *always* want to create classes for traces
class_names = [gr.string_to_class_name(object_name)
for object_name in gr.TRACE_NAMES]
for class_name in class_names:
self.assertIn(class_name, go.__dict__.keys())
def test_no_new_classes(self):
# for maintenance reasons, we don't want to generate new class defs
expected_class_names = {gr.string_to_class_name(object_name)
for object_name in gr.TRACE_NAMES}
expected_class_names.update(OLD_CLASS_NAMES)
# assume that CapitalCased keys are the classes we defined
current_class_names = {key for key in go.__dict__.keys()
if key[0].isupper()}
self.assertEqual(current_class_names, expected_class_names)
|
<commit_before><commit_msg>Add failing specs for current/future class names.<commit_after>from unittest import TestCase
import plotly.graph_objs as go
import plotly.graph_reference as gr
OLD_CLASS_NAMES = ['AngularAxis', 'Annotation', 'Annotations', 'Area',
'Bar', 'Box', 'ColorBar', 'Contour', 'Contours',
'Data', 'ErrorX', 'ErrorY', 'ErrorZ', 'Figure',
'Font', 'Heatmap', 'Histogram', 'Histogram2d',
'Histogram2dContour', 'Layout', 'Legend', 'Line',
'Margin', 'Marker', 'RadialAxis', 'Scatter',
'Scatter3d', 'Scene', 'Stream', 'Surface', 'Trace',
'XAxis', 'XBins', 'YAxis', 'YBins', 'ZAxis']
class TestBackwardsCompat(TestCase):
def test_old_class_names(self):
# these were all defined at one point, we want to maintain backwards
# compat, so we basically just create a checkpoint with this test.
for class_name in OLD_CLASS_NAMES:
self.assertIn(class_name, go.__dict__.keys())
class TestGraphObjs(TestCase):
def test_traces_should_be_defined(self):
# we *always* want to create classes for traces
class_names = [gr.string_to_class_name(object_name)
for object_name in gr.TRACE_NAMES]
for class_name in class_names:
self.assertIn(class_name, go.__dict__.keys())
def test_no_new_classes(self):
# for maintenance reasons, we don't want to generate new class defs
expected_class_names = {gr.string_to_class_name(object_name)
for object_name in gr.TRACE_NAMES}
expected_class_names.update(OLD_CLASS_NAMES)
# assume that CapitalCased keys are the classes we defined
current_class_names = {key for key in go.__dict__.keys()
if key[0].isupper()}
self.assertEqual(current_class_names, expected_class_names)
|
|
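The tests in the record above use a "checkpoint" pattern: pin a module's public class names so accidental additions or removals fail CI. A self-contained sketch of the same idea, with a throwaway module standing in for plotly.graph_objs (the module and names here are invented for illustration):

import types

module = types.ModuleType('fake_graph_objs')
module.Scatter = type('Scatter', (), {})
module.Bar = type('Bar', (), {})

EXPECTED = {'Scatter', 'Bar'}

# Same heuristic as test_no_new_classes: CapitalCased names are classes.
current = {name for name in vars(module) if name[0].isupper()}
assert current == EXPECTED, current ^ EXPECTED
print("checkpoint holds:", sorted(current))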
22b2446546ce59b99980e98e81b3571d81085304
|
tests/test_westminster_daily.py
|
tests/test_westminster_daily.py
|
import datetime as dt
from flask_application import app
def test_daily_westminster_pages_exist():
start_date = dt.date(2015, 01, 01)
with app.test_client() as c:
for days in range(365):
date = start_date + dt.timedelta(days=days)
month, day = date.month, date.day
response = c.get('/{month:02d}/{day:02d}/'.format(month=month, day=day))
assert response.status_code == 200
def test_daily_westminster_bad_days():
with app.test_client() as c:
response = c.get('/01/32/')
assert response.status_code == 404
response = c.get('/02/30/')
assert response.status_code == 404
response = c.get('/04/31/')
assert response.status_code == 404
def test_daily_leap_day():
with app.test_client() as c:
response = c.get('/02/29/')
assert response.status_code == 200
|
Test that daily westminster pages load
|
Test that daily westminster pages load
|
Python
|
bsd-3-clause
|
tdhopper/westminster-daily,olneyhymn/westminster-daily,olneyhymn/westminster-daily,olneyhymn/westminster-daily,olneyhymn/westminster-daily,tdhopper/westminster-daily,tdhopper/westminster-daily
|
Test that daily westminster pages load
|
import datetime as dt
from flask_application import app
def test_daily_westminster_pages_exist():
start_date = dt.date(2015, 01, 01)
with app.test_client() as c:
for days in range(365):
date = start_date + dt.timedelta(days=days)
month, day = date.month, date.day
response = c.get('/{month:02d}/{day:02d}/'.format(month=month, day=day))
assert response.status_code == 200
def test_daily_westminster_bad_days():
with app.test_client() as c:
response = c.get('/01/32/')
assert response.status_code == 404
response = c.get('/02/30/')
assert response.status_code == 404
response = c.get('/04/31/')
assert response.status_code == 404
def test_daily_leap_day():
with app.test_client() as c:
response = c.get('/02/29/')
assert response.status_code == 200
|
<commit_before><commit_msg>Test that daily westminster pages load<commit_after>
|
import datetime as dt
from flask_application import app
def test_daily_westminster_pages_exist():
start_date = dt.date(2015, 01, 01)
with app.test_client() as c:
for days in range(365):
date = start_date + dt.timedelta(days=days)
month, day = date.month, date.day
response = c.get('/{month:02d}/{day:02d}/'.format(month=month, day=day))
assert response.status_code == 200
def test_daily_westminster_bad_days():
with app.test_client() as c:
response = c.get('/01/32/')
assert response.status_code == 404
response = c.get('/02/30/')
assert response.status_code == 404
response = c.get('/04/31/')
assert response.status_code == 404
def test_daily_leap_day():
with app.test_client() as c:
response = c.get('/02/29/')
assert response.status_code == 200
|
Test that daily westminster pages loadimport datetime as dt
from flask_application import app
def test_daily_westminster_pages_exist():
start_date = dt.date(2015, 01, 01)
with app.test_client() as c:
for days in range(365):
date = start_date + dt.timedelta(days=days)
month, day = date.month, date.day
response = c.get('/{month:02d}/{day:02d}/'.format(month=month, day=day))
assert response.status_code == 200
def test_daily_westminster_bad_days():
with app.test_client() as c:
response = c.get('/01/32/')
assert response.status_code == 404
response = c.get('/02/30/')
assert response.status_code == 404
response = c.get('/04/31/')
assert response.status_code == 404
def test_daily_leap_day():
with app.test_client() as c:
response = c.get('/02/29/')
assert response.status_code == 200
|
<commit_before><commit_msg>Test that daily westminster pages load<commit_after>import datetime as dt
from flask_application import app
def test_daily_westminster_pages_exist():
start_date = dt.date(2015, 01, 01)
with app.test_client() as c:
for days in range(365):
date = start_date + dt.timedelta(days=days)
month, day = date.month, date.day
response = c.get('/{month:02d}/{day:02d}/'.format(month=month, day=day))
assert response.status_code == 200
def test_daily_westminster_bad_days():
with app.test_client() as c:
response = c.get('/01/32/')
assert response.status_code == 404
response = c.get('/02/30/')
assert response.status_code == 404
response = c.get('/04/31/')
assert response.status_code == 404
def test_daily_leap_day():
with app.test_client() as c:
response = c.get('/02/29/')
assert response.status_code == 200
|
|
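The tests in the record above sweep a year of (month, day) URLs, including the leap day. The same date-sweeping idea without the Flask app, as a runnable sketch; note that dt.date(2015, 01, 01) in the original is Python 2 syntax, since Python 3 rejects leading-zero integer literals.

import datetime as dt

def daily_paths(year=2016):  # 2016 is a leap year, so 02/29 is included
    start = dt.date(year, 1, 1)
    days = (dt.date(year, 12, 31) - start).days + 1
    for offset in range(days):
        date = start + dt.timedelta(days=offset)
        yield '/{:02d}/{:02d}/'.format(date.month, date.day)

paths = list(daily_paths())
assert len(paths) == 366
assert '/02/29/' in paths
print(paths[:3], '...', paths[-1])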
eb828764ddbe3988f71b98082e1560e594c3f65d
|
ci/teamcity/comment_on_pr.py
|
ci/teamcity/comment_on_pr.py
|
"""
Post the comment like the following to the PR:
```
:robot: TeamCity test results bot :robot:
<Logs from pytest>
```
"""
from github import Github
import os
import sys
# Check if this is a pull request or not based on the environment variable
try:
pr_id = int(os.environ["GITHUB_PR_NUMBER"].split("/")[-1])
except Exception:
sys.exit(0)
header = """
<h2 align="center">:robot: TeamCity test results bot :robot:</h2>
"""
pytest_outputs = ["ray_tests.log", "dask_tests.log", "python_tests.log"]
full_comment = header
for out in pytest_outputs:
full_comment += "<details><summary>{} Tests</summary>\n".format(
out.split("_")[0].title()
)
full_comment += "\n\n```\n"
full_comment += open(out, "r").read()
full_comment += "\n```\n\n</details>\n"
token = os.environ["GITHUB_TOKEN"]
g = Github(token)
repo = g.get_repo("modin-project/modin")
pr = repo.get_pull(pr_id)
if any(i.user.login == "modin-bot" for i in pr.get_issue_comments()):
pr_comment_list = [
i for i in list(pr.get_issue_comments()) if i.user.login == "modin-bot"
]
assert len(pr_comment_list) == 1, "Too many comments from modin-bot already"
pr_comment_list[0].edit(full_comment)
else:
pr.create_issue_comment(full_comment)
|
Add a bot message to display TeamCity test results
|
Add a bot message to display TeamCity test results
* Make modin-bot update the comment instead of creating a new one
* Also only run the comment bot when a PR is being evaluated
Signed-off-by: Devin Petersohn <9526cbaf745870a4c947dfa2bd5208403b463ac3@gmail.com>
|
Python
|
apache-2.0
|
modin-project/modin,modin-project/modin
|
Add a bot message to display TeamCity test results
* Make modin-bot update the comment instead of creating a new one
* Also only run the comment bot when a PR is being evaluated
Signed-off-by: Devin Petersohn <9526cbaf745870a4c947dfa2bd5208403b463ac3@gmail.com>
|
"""
Post the comment like the following to the PR:
```
:robot: TeamCity test results bot :robot:
<Logs from pytest>
```
"""
from github import Github
import os
import sys
# Check if this is a pull request or not based on the environment variable
try:
pr_id = int(os.environ["GITHUB_PR_NUMBER"].split("/")[-1])
except Exception:
sys.exit(0)
header = """
<h2 align="center">:robot: TeamCity test results bot :robot:</h2>
"""
pytest_outputs = ["ray_tests.log", "dask_tests.log", "python_tests.log"]
full_comment = header
for out in pytest_outputs:
full_comment += "<details><summary>{} Tests</summary>\n".format(
out.split("_")[0].title()
)
full_comment += "\n\n```\n"
full_comment += open(out, "r").read()
full_comment += "\n```\n\n</details>\n"
token = os.environ["GITHUB_TOKEN"]
g = Github(token)
repo = g.get_repo("modin-project/modin")
pr = repo.get_pull(pr_id)
if any(i.user.login == "modin-bot" for i in pr.get_issue_comments()):
pr_comment_list = [
i for i in list(pr.get_issue_comments()) if i.user.login == "modin-bot"
]
assert len(pr_comment_list) == 1, "Too many comments from modin-bot already"
pr_comment_list[0].edit(full_comment)
else:
pr.create_issue_comment(full_comment)
|
<commit_before><commit_msg>Add a bot message to display TeamCity test results
* Make modin-bot update the comment instead of creating a new one
* Also only run the comment bot when a PR is being evaluated
Signed-off-by: Devin Petersohn <9526cbaf745870a4c947dfa2bd5208403b463ac3@gmail.com><commit_after>
|
"""
Post the comment like the following to the PR:
```
:robot: TeamCity test results bot :robot:
<Logs from pytest>
```
"""
from github import Github
import os
import sys
# Check if this is a pull request or not based on the environment variable
try:
pr_id = int(os.environ["GITHUB_PR_NUMBER"].split("/")[-1])
except Exception:
sys.exit(0)
header = """
<h2 align="center">:robot: TeamCity test results bot :robot:</h2>
"""
pytest_outputs = ["ray_tests.log", "dask_tests.log", "python_tests.log"]
full_comment = header
for out in pytest_outputs:
full_comment += "<details><summary>{} Tests</summary>\n".format(
out.split("_")[0].title()
)
full_comment += "\n\n```\n"
full_comment += open(out, "r").read()
full_comment += "\n```\n\n</details>\n"
token = os.environ["GITHUB_TOKEN"]
g = Github(token)
repo = g.get_repo("modin-project/modin")
pr = repo.get_pull(pr_id)
if any(i.user.login == "modin-bot" for i in pr.get_issue_comments()):
pr_comment_list = [
i for i in list(pr.get_issue_comments()) if i.user.login == "modin-bot"
]
assert len(pr_comment_list) == 1, "Too many comments from modin-bot already"
pr_comment_list[0].edit(full_comment)
else:
pr.create_issue_comment(full_comment)
|
Add a bot message to display TeamCity test results
* Make modin-bot update the comment instead of creating a new one
* Also only run the comment bot when a PR is being evaluated
Signed-off-by: Devin Petersohn <9526cbaf745870a4c947dfa2bd5208403b463ac3@gmail.com>"""
Post the comment like the following to the PR:
```
:robot: TeamCity test results bot :robot:
<Logs from pytest>
```
"""
from github import Github
import os
import sys
# Check if this is a pull request or not based on the environment variable
try:
pr_id = int(os.environ["GITHUB_PR_NUMBER"].split("/")[-1])
except Exception:
sys.exit(0)
header = """
<h2 align="center">:robot: TeamCity test results bot :robot:</h2>
"""
pytest_outputs = ["ray_tests.log", "dask_tests.log", "python_tests.log"]
full_comment = header
for out in pytest_outputs:
full_comment += "<details><summary>{} Tests</summary>\n".format(
out.split("_")[0].title()
)
full_comment += "\n\n```\n"
full_comment += open(out, "r").read()
full_comment += "\n```\n\n</details>\n"
token = os.environ["GITHUB_TOKEN"]
g = Github(token)
repo = g.get_repo("modin-project/modin")
pr = repo.get_pull(pr_id)
if any(i.user.login == "modin-bot" for i in pr.get_issue_comments()):
pr_comment_list = [
i for i in list(pr.get_issue_comments()) if i.user.login == "modin-bot"
]
assert len(pr_comment_list) == 1, "Too many comments from modin-bot already"
pr_comment_list[0].edit(full_comment)
else:
pr.create_issue_comment(full_comment)
|
<commit_before><commit_msg>Add a bot message to display TeamCity test results
* Make modin-bot update the comment instead of creating a new one
* Also only run the comment bot when a PR is being evaluated
Signed-off-by: Devin Petersohn <9526cbaf745870a4c947dfa2bd5208403b463ac3@gmail.com><commit_after>"""
Post the comment like the following to the PR:
```
:robot: TeamCity test results bot :robot:
<Logs from pytest>
```
"""
from github import Github
import os
import sys
# Check if this is a pull request or not based on the environment variable
try:
pr_id = int(os.environ["GITHUB_PR_NUMBER"].split("/")[-1])
except Exception:
sys.exit(0)
header = """
<h2 align="center">:robot: TeamCity test results bot :robot:</h2>
"""
pytest_outputs = ["ray_tests.log", "dask_tests.log", "python_tests.log"]
full_comment = header
for out in pytest_outputs:
full_comment += "<details><summary>{} Tests</summary>\n".format(
out.split("_")[0].title()
)
full_comment += "\n\n```\n"
full_comment += open(out, "r").read()
full_comment += "\n```\n\n</details>\n"
token = os.environ["GITHUB_TOKEN"]
g = Github(token)
repo = g.get_repo("modin-project/modin")
pr = repo.get_pull(pr_id)
if any(i.user.login == "modin-bot" for i in pr.get_issue_comments()):
pr_comment_list = [
i for i in list(pr.get_issue_comments()) if i.user.login == "modin-bot"
]
assert len(pr_comment_list) == 1, "Too many comments from modin-bot already"
pr_comment_list[0].edit(full_comment)
else:
pr.create_issue_comment(full_comment)
|
|
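The script in the record above "upserts" a bot comment: edit the single existing bot comment if one is found, otherwise create a new one. The same pattern sketched against a minimal fake PR object instead of PyGithub; FakePR and its methods are stand-ins, not a real API.

class FakePR:
    def __init__(self):
        self.comments = []  # list of [login, body] pairs

    def get_issue_comments(self):
        return self.comments

    def create_issue_comment(self, body):
        self.comments.append(['bot', body])

def upsert_bot_comment(pr, body, login='bot'):
    existing = [c for c in pr.get_issue_comments() if c[0] == login]
    if existing:
        assert len(existing) == 1, "Too many bot comments already"
        existing[0][1] = body  # "edit" the comment in place
    else:
        pr.create_issue_comment(body)

pr = FakePR()
upsert_bot_comment(pr, 'run 1 results')
upsert_bot_comment(pr, 'run 2 results')  # edits, does not duplicate
assert pr.comments == [['bot', 'run 2 results']]
print(pr.comments)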
dbdb247ad03ca6b9168f193eadaf28638d718072
|
scrubadub/filth/named_entity.py
|
scrubadub/filth/named_entity.py
|
from .base import Filth
class NamedEntityFilth(Filth):
"""
Named entity filth. Upon initialisation provide a label for named entity (e.g. name, org)
"""
type = 'named_entity'
def __init__(self, *args, label: str, **kwargs):
super(NamedEntityFilth, self).__init__(*args, **kwargs)
self.label = label.lower()
self.replacement_string = "{}_{}".format(self.type, self.label)
|
from .base import Filth
class NamedEntityFilth(Filth):
"""
Default filth type, for named entities (e.g. the ones in https://nightly.spacy.io/models/en#en_core_web_lg-labels),
except the ones represented in any other filth.
"""
type = 'named_entity'
def __init__(self, *args, label: str, **kwargs):
super(NamedEntityFilth, self).__init__(*args, **kwargs)
self.label = label.lower()
self.replacement_string = "{}_{}".format(self.type, self.label)
|
Change docstring for NamedEntity filth
|
Change docstring for NamedEntity filth
|
Python
|
mit
|
deanmalmgren/scrubadub,datascopeanalytics/scrubadub,datascopeanalytics/scrubadub,deanmalmgren/scrubadub
|
from .base import Filth
class NamedEntityFilth(Filth):
"""
Named entity filth. Upon initialisation provide a label for named entity (e.g. name, org)
"""
type = 'named_entity'
def __init__(self, *args, label: str, **kwargs):
super(NamedEntityFilth, self).__init__(*args, **kwargs)
self.label = label.lower()
self.replacement_string = "{}_{}".format(self.type, self.label)
Change docstring for NamedEntity filth
|
from .base import Filth
class NamedEntityFilth(Filth):
"""
Default filth type, for named entities (e.g. the ones in https://nightly.spacy.io/models/en#en_core_web_lg-labels),
except the ones represented in any other filth.
"""
type = 'named_entity'
def __init__(self, *args, label: str, **kwargs):
super(NamedEntityFilth, self).__init__(*args, **kwargs)
self.label = label.lower()
self.replacement_string = "{}_{}".format(self.type, self.label)
|
<commit_before>from .base import Filth
class NamedEntityFilth(Filth):
"""
Named entity filth. Upon initialisation provide a label for named entity (e.g. name, org)
"""
type = 'named_entity'
def __init__(self, *args, label: str, **kwargs):
super(NamedEntityFilth, self).__init__(*args, **kwargs)
self.label = label.lower()
self.replacement_string = "{}_{}".format(self.type, self.label)
<commit_msg>Change docstring for NamedEntity filth<commit_after>
|
from .base import Filth
class NamedEntityFilth(Filth):
"""
Default filth type, for named entities (e.g. the ones in https://nightly.spacy.io/models/en#en_core_web_lg-labels),
except the ones represented in any other filth.
"""
type = 'named_entity'
def __init__(self, *args, label: str, **kwargs):
super(NamedEntityFilth, self).__init__(*args, **kwargs)
self.label = label.lower()
self.replacement_string = "{}_{}".format(self.type, self.label)
|
from .base import Filth
class NamedEntityFilth(Filth):
"""
Named entity filth. Upon initialisation provide a label for named entity (e.g. name, org)
"""
type = 'named_entity'
def __init__(self, *args, label: str, **kwargs):
super(NamedEntityFilth, self).__init__(*args, **kwargs)
self.label = label.lower()
self.replacement_string = "{}_{}".format(self.type, self.label)
Change docstring for NamedEntity filthfrom .base import Filth
class NamedEntityFilth(Filth):
"""
Default filth type, for named entities (e.g. the ones in https://nightly.spacy.io/models/en#en_core_web_lg-labels),
except the ones represented in any other filth.
"""
type = 'named_entity'
def __init__(self, *args, label: str, **kwargs):
super(NamedEntityFilth, self).__init__(*args, **kwargs)
self.label = label.lower()
self.replacement_string = "{}_{}".format(self.type, self.label)
|
<commit_before>from .base import Filth
class NamedEntityFilth(Filth):
"""
Named entity filth. Upon initialisation provide a label for named entity (e.g. name, org)
"""
type = 'named_entity'
def __init__(self, *args, label: str, **kwargs):
super(NamedEntityFilth, self).__init__(*args, **kwargs)
self.label = label.lower()
self.replacement_string = "{}_{}".format(self.type, self.label)
<commit_msg>Change docstring for NamedEntity filth<commit_after>from .base import Filth
class NamedEntityFilth(Filth):
"""
Default filth type, for named entities (e.g. the ones in https://nightly.spacy.io/models/en#en_core_web_lg-labels),
except the ones represented in any other filth.
"""
type = 'named_entity'
def __init__(self, *args, label: str, **kwargs):
super(NamedEntityFilth, self).__init__(*args, **kwargs)
self.label = label.lower()
self.replacement_string = "{}_{}".format(self.type, self.label)
|
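NamedEntityFilth in the record above derives its replacement text from the entity label at construction time. A self-contained sketch of that mechanic, with a stub base class standing in for scrubadub's Filth (the real base class does far more):

class Filth:
    type = 'filth'
    def __init__(self, beg=0, end=0, text=''):
        self.beg, self.end, self.text = beg, end, text

class NamedEntityFilth(Filth):
    type = 'named_entity'
    def __init__(self, *args, label: str, **kwargs):
        super().__init__(*args, **kwargs)
        self.label = label.lower()
        self.replacement_string = "{}_{}".format(self.type, self.label)

f = NamedEntityFilth(0, 4, 'ACME', label='ORG')
print(f.replacement_string)  # named_entity_org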
9bfb182f92b8ac82ddb1b35c886b4a3f79708696
|
scripts/split_train_and_test.py
|
scripts/split_train_and_test.py
|
import os
import shutil
import argparse
import random
random.seed(47297)
parser = argparse.ArgumentParser(description='Split data into train and test sets.')
parser.add_argument('subjects_root_path', type=str, help='Directory containing subject sub-directories.')
args, _ = parser.parse_known_args()
def move_to_partition(patients, partition):
if not os.path.exists(os.path.join(args.subjects_root_path, partition)):
os.mkdir(os.path.join(args.subjects_root_path, partition))
for patient in patients:
src = os.path.join(args.subjects_root_path, patient)
dest = os.path.join(args.subjects_root_path, partition, patient)
shutil.move(src, dest)
folders = os.listdir(args.subjects_root_path)
folders = filter(str.isdigit, folders)
random.shuffle(folders)
train_cnt = int(0.85 * len(folders))
train_patients = sorted(folders[:train_cnt])
test_patients = sorted(folders[train_cnt:])
assert len(set(train_patients) & set(test_patients)) == 0
move_to_partition(train_patients, "train")
move_to_partition(test_patients, "test")
|
Add script for train/test split
|
Add script for train/test split
|
Python
|
mit
|
YerevaNN/mimic3-benchmarks
|
Add script for train/test split
|
import os
import shutil
import argparse
import random
random.seed(47297)
parser = argparse.ArgumentParser(description='Split data into train and test sets.')
parser.add_argument('subjects_root_path', type=str, help='Directory containing subject sub-directories.')
args, _ = parser.parse_known_args()
def move_to_partition(patients, partition):
if not os.path.exists(os.path.join(args.subjects_root_path, partition)):
os.mkdir(os.path.join(args.subjects_root_path, partition))
for patient in patients:
src = os.path.join(args.subjects_root_path, patient)
dest = os.path.join(args.subjects_root_path, partition, patient)
shutil.move(src, dest)
folders = os.listdir(args.subjects_root_path)
folders = filter(str.isdigit, folders)
random.shuffle(folders)
train_cnt = int(0.85 * len(folders))
train_patients = sorted(folders[:train_cnt])
test_patients = sorted(folders[train_cnt:])
assert len(set(train_patients) & set(test_patients)) == 0
move_to_partition(train_patients, "train")
move_to_partition(test_patients, "test")
|
<commit_before><commit_msg>Add script for train/test split<commit_after>
|
import os
import shutil
import argparse
import random
random.seed(47297)
parser = argparse.ArgumentParser(description='Split data into train and test sets.')
parser.add_argument('subjects_root_path', type=str, help='Directory containing subject sub-directories.')
args, _ = parser.parse_known_args()
def move_to_partition(patients, partition):
if not os.path.exists(os.path.join(args.subjects_root_path, partition)):
os.mkdir(os.path.join(args.subjects_root_path, partition))
for patient in patients:
src = os.path.join(args.subjects_root_path, patient)
dest = os.path.join(args.subjects_root_path, partition, patient)
shutil.move(src, dest)
folders = os.listdir(args.subjects_root_path)
folders = filter(str.isdigit, folders)
random.shuffle(folders)
train_cnt = int(0.85 * len(folders))
train_patients = sorted(folders[:train_cnt])
test_patients = sorted(folders[train_cnt:])
assert len(set(train_patients) & set(test_patients)) == 0
move_to_partition(train_patients, "train")
move_to_partition(test_patients, "test")
|
Add script for train/test splitimport os
import shutil
import argparse
import random
random.seed(47297)
parser = argparse.ArgumentParser(description='Split data into train and test sets.')
parser.add_argument('subjects_root_path', type=str, help='Directory containing subject sub-directories.')
args, _ = parser.parse_known_args()
def move_to_partition(patients, partition):
if not os.path.exists(os.path.join(args.subjects_root_path, partition)):
os.mkdir(os.path.join(args.subjects_root_path, partition))
for patient in patients:
src = os.path.join(args.subjects_root_path, patient)
dest = os.path.join(args.subjects_root_path, partition, patient)
shutil.move(src, dest)
folders = os.listdir(args.subjects_root_path)
folders = filter(str.isdigit, folders)
random.shuffle(folders)
train_cnt = int(0.85 * len(folders))
train_patients = sorted(folders[:train_cnt])
test_patients = sorted(folders[train_cnt:])
assert len(set(train_patients) & set(test_patients)) == 0
move_to_partition(train_patients, "train")
move_to_partition(test_patients, "test")
|
<commit_before><commit_msg>Add script for train/test split<commit_after>import os
import shutil
import argparse
import random
random.seed(47297)
parser = argparse.ArgumentParser(description='Split data into train and test sets.')
parser.add_argument('subjects_root_path', type=str, help='Directory containing subject sub-directories.')
args, _ = parser.parse_known_args()
def move_to_partition(patients, partition):
if not os.path.exists(os.path.join(args.subjects_root_path, partition)):
os.mkdir(os.path.join(args.subjects_root_path, partition))
for patient in patients:
src = os.path.join(args.subjects_root_path, patient)
dest = os.path.join(args.subjects_root_path, partition, patient)
shutil.move(src, dest)
folders = os.listdir(args.subjects_root_path)
folders = filter(str.isdigit, folders)
random.shuffle(folders)
train_cnt = int(0.85 * len(folders))
train_patients = sorted(folders[:train_cnt])
test_patients = sorted(folders[train_cnt:])
assert len(set(train_patients) & set(test_patients)) == 0
move_to_partition(train_patients, "train")
move_to_partition(test_patients, "test")
|
|
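The split script in the record above shuffles patient folders with a fixed seed and cuts at 85%. The same split logic without the filesystem side effects, so it can be sanity-checked in isolation; note that under Python 3 the original's filter(str.isdigit, folders) would need wrapping in list() before len() and slicing.

import random

def train_test_split(ids, train_frac=0.85, seed=47297):
    ids = list(ids)
    random.Random(seed).shuffle(ids)
    cut = int(train_frac * len(ids))
    train, test = sorted(ids[:cut]), sorted(ids[cut:])
    assert not set(train) & set(test)
    return train, test

train, test = train_test_split(str(i) for i in range(100))
print(len(train), len(test))  # 85 15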
fa375d06128e493f86524e82fa93c892f4d925b7
|
corehq/apps/data_pipeline_audit/management/commands/find_sql_forms_not_in_es.py
|
corehq/apps/data_pipeline_audit/management/commands/find_sql_forms_not_in_es.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from datetime import datetime
from django.core.management.base import BaseCommand
import sys
from django.db.models import Q, F
from django.db.models.functions import Greatest
from corehq.form_processor.models import XFormInstanceSQL
from corehq.apps.es import FormES
import argparse
from dimagi.utils.chunked import chunked
DATE_FORMAT = "%Y-%m-%d"
def valid_date(s):
try:
return datetime.strptime(s, DATE_FORMAT)
except ValueError:
msg = "Not a valid date: '{0}'.".format(s)
raise argparse.ArgumentTypeError(msg)
class Command(BaseCommand):
help = "Print IDs of sql forms that are in the primary DB but not in ES."
def add_arguments(self, parser):
parser.add_argument(
'-s',
'--startdate',
dest='start',
type=valid_date,
help="The start date. Only applicable to forms on SQL domains. - format YYYY-MM-DD",
)
parser.add_argument(
'-e',
'--enddate',
dest='end',
type=valid_date,
help="The end date. Only applicable to forms on SQL domains. - format YYYY-MM-DD",
)
def handle(self, **options):
startdate = options.get('start')
enddate = options.get('end')
print("Fetching all form ids...", file=sys.stderr)
all_ids = list(iter_form_ids_by_last_modified(startdate, enddate))
print("Woo! Done fetching. Here we go", file=sys.stderr)
for doc_ids in chunked(all_ids, 100):
es_ids = (FormES()
.remove_default_filter('is_xform_instance')
.doc_id(doc_ids).values_list('_id', flat=True))
missing_ids = set(doc_ids) - set(es_ids)
for form_id in missing_ids:
print(form_id)
def iter_form_ids_by_last_modified(start_datetime, end_datetime):
from corehq.sql_db.util import run_query_across_partitioned_databases
annotate = {
'last_modified': Greatest('received_on', 'edited_on', 'deleted_on'),
}
return run_query_across_partitioned_databases(
XFormInstanceSQL,
(Q(last_modified__gt=start_datetime, last_modified__lt=end_datetime) &
Q(state=F('state').bitand(XFormInstanceSQL.DELETED) +
F('state').bitand(XFormInstanceSQL.DEPRECATED) +
F('state').bitand(XFormInstanceSQL.DUPLICATE) +
F('state').bitand(XFormInstanceSQL.ERROR) +
F('state').bitand(XFormInstanceSQL.SUBMISSION_ERROR_LOG) +
F('state'))),
annotate=annotate,
values=['form_id'],
)
|
Add script to find forms missing in ES
|
Add script to find forms missing in ES
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
|
Add script to find forms missing in ES
|
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from datetime import datetime
from django.core.management.base import BaseCommand
import sys
from django.db.models import Q, F
from django.db.models.functions import Greatest
from corehq.form_processor.models import XFormInstanceSQL
from corehq.apps.es import FormES
import argparse
from dimagi.utils.chunked import chunked
DATE_FORMAT = "%Y-%m-%d"
def valid_date(s):
try:
return datetime.strptime(s, DATE_FORMAT)
except ValueError:
msg = "Not a valid date: '{0}'.".format(s)
raise argparse.ArgumentTypeError(msg)
class Command(BaseCommand):
help = "Print IDs of sql forms that are in the primary DB but not in ES."
def add_arguments(self, parser):
parser.add_argument(
'-s',
'--startdate',
dest='start',
type=valid_date,
help="The start date. Only applicable to forms on SQL domains. - format YYYY-MM-DD",
)
parser.add_argument(
'-e',
'--enddate',
dest='end',
type=valid_date,
help="The end date. Only applicable to forms on SQL domains. - format YYYY-MM-DD",
)
def handle(self, **options):
startdate = options.get('start')
enddate = options.get('end')
print("Fetching all form ids...", file=sys.stderr)
all_ids = list(iter_form_ids_by_last_modified(startdate, enddate))
print("Woo! Done fetching. Here we go", file=sys.stderr)
for doc_ids in chunked(all_ids, 100):
es_ids = (FormES()
.remove_default_filter('is_xform_instance')
.doc_id(doc_ids).values_list('_id', flat=True))
missing_ids = set(doc_ids) - set(es_ids)
for form_id in missing_ids:
print(form_id)
def iter_form_ids_by_last_modified(start_datetime, end_datetime):
from corehq.sql_db.util import run_query_across_partitioned_databases
annotate = {
'last_modified': Greatest('received_on', 'edited_on', 'deleted_on'),
}
return run_query_across_partitioned_databases(
XFormInstanceSQL,
(Q(last_modified__gt=start_datetime, last_modified__lt=end_datetime) &
Q(state=F('state').bitand(XFormInstanceSQL.DELETED) +
F('state').bitand(XFormInstanceSQL.DEPRECATED) +
F('state').bitand(XFormInstanceSQL.DUPLICATE) +
F('state').bitand(XFormInstanceSQL.ERROR) +
F('state').bitand(XFormInstanceSQL.SUBMISSION_ERROR_LOG) +
F('state'))),
annotate=annotate,
values=['form_id'],
)
|
<commit_before><commit_msg>Add script to find forms missing in ES<commit_after>
|
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from datetime import datetime
from django.core.management.base import BaseCommand
import sys
from django.db.models import Q, F
from django.db.models.functions import Greatest
from corehq.form_processor.models import XFormInstanceSQL
from corehq.apps.es import FormES
import argparse
from dimagi.utils.chunked import chunked
DATE_FORMAT = "%Y-%m-%d"
def valid_date(s):
try:
return datetime.strptime(s, DATE_FORMAT)
except ValueError:
msg = "Not a valid date: '{0}'.".format(s)
raise argparse.ArgumentTypeError(msg)
class Command(BaseCommand):
help = "Print IDs of sql forms that are in the primary DB but not in ES."
def add_arguments(self, parser):
parser.add_argument(
'-s',
'--startdate',
dest='start',
type=valid_date,
help="The start date. Only applicable to forms on SQL domains. - format YYYY-MM-DD",
)
parser.add_argument(
'-e',
'--enddate',
dest='end',
type=valid_date,
help="The end date. Only applicable to forms on SQL domains. - format YYYY-MM-DD",
)
def handle(self, **options):
startdate = options.get('start')
enddate = options.get('end')
print("Fetching all form ids...", file=sys.stderr)
all_ids = list(iter_form_ids_by_last_modified(startdate, enddate))
print("Woo! Done fetching. Here we go", file=sys.stderr)
for doc_ids in chunked(all_ids, 100):
es_ids = (FormES()
.remove_default_filter('is_xform_instance')
.doc_id(doc_ids).values_list('_id', flat=True))
missing_ids = set(doc_ids) - set(es_ids)
for form_id in missing_ids:
print(form_id)
def iter_form_ids_by_last_modified(start_datetime, end_datetime):
from corehq.sql_db.util import run_query_across_partitioned_databases
annotate = {
'last_modified': Greatest('received_on', 'edited_on', 'deleted_on'),
}
return run_query_across_partitioned_databases(
XFormInstanceSQL,
(Q(last_modified__gt=start_datetime, last_modified__lt=end_datetime) &
Q(state=F('state').bitand(XFormInstanceSQL.DELETED) +
F('state').bitand(XFormInstanceSQL.DEPRECATED) +
F('state').bitand(XFormInstanceSQL.DUPLICATE) +
F('state').bitand(XFormInstanceSQL.ERROR) +
F('state').bitand(XFormInstanceSQL.SUBMISSION_ERROR_LOG) +
F('state'))),
annotate=annotate,
values=['form_id'],
)
|
Add script to find forms missing in ESfrom __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from datetime import datetime
from django.core.management.base import BaseCommand
import sys
from django.db.models import Q, F
from django.db.models.functions import Greatest
from corehq.form_processor.models import XFormInstanceSQL
from corehq.apps.es import FormES
import argparse
from dimagi.utils.chunked import chunked
DATE_FORMAT = "%Y-%m-%d"
def valid_date(s):
try:
return datetime.strptime(s, DATE_FORMAT)
except ValueError:
msg = "Not a valid date: '{0}'.".format(s)
raise argparse.ArgumentTypeError(msg)
class Command(BaseCommand):
help = "Print IDs of sql forms that are in the primary DB but not in ES."
def add_arguments(self, parser):
parser.add_argument(
'-s',
'--startdate',
dest='start',
type=valid_date,
help="The start date. Only applicable to forms on SQL domains. - format YYYY-MM-DD",
)
parser.add_argument(
'-e',
'--enddate',
dest='end',
type=valid_date,
help="The end date. Only applicable to forms on SQL domains. - format YYYY-MM-DD",
)
def handle(self, **options):
startdate = options.get('start')
enddate = options.get('end')
print("Fetching all form ids...", file=sys.stderr)
all_ids = list(iter_form_ids_by_last_modified(startdate, enddate))
print("Woo! Done fetching. Here we go", file=sys.stderr)
for doc_ids in chunked(all_ids, 100):
es_ids = (FormES()
.remove_default_filter('is_xform_instance')
.doc_id(doc_ids).values_list('_id', flat=True))
missing_ids = set(doc_ids) - set(es_ids)
for form_id in missing_ids:
print(form_id)
def iter_form_ids_by_last_modified(start_datetime, end_datetime):
from corehq.sql_db.util import run_query_across_partitioned_databases
annotate = {
'last_modified': Greatest('received_on', 'edited_on', 'deleted_on'),
}
return run_query_across_partitioned_databases(
XFormInstanceSQL,
(Q(last_modified__gt=start_datetime, last_modified__lt=end_datetime) &
Q(state=F('state').bitand(XFormInstanceSQL.DELETED) +
F('state').bitand(XFormInstanceSQL.DEPRECATED) +
F('state').bitand(XFormInstanceSQL.DUPLICATE) +
F('state').bitand(XFormInstanceSQL.ERROR) +
F('state').bitand(XFormInstanceSQL.SUBMISSION_ERROR_LOG) +
F('state'))),
annotate=annotate,
values=['form_id'],
)
|
<commit_before><commit_msg>Add script to find forms missing in ES<commit_after>from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from datetime import datetime
from django.core.management.base import BaseCommand
import sys
from django.db.models import Q, F
from django.db.models.functions import Greatest
from corehq.form_processor.models import XFormInstanceSQL
from corehq.apps.es import FormES
import argparse
from dimagi.utils.chunked import chunked
DATE_FORMAT = "%Y-%m-%d"
def valid_date(s):
try:
return datetime.strptime(s, DATE_FORMAT)
except ValueError:
msg = "Not a valid date: '{0}'.".format(s)
raise argparse.ArgumentTypeError(msg)
class Command(BaseCommand):
help = "Print IDs of sql forms that are in the primary DB but not in ES."
def add_arguments(self, parser):
parser.add_argument(
'-s',
'--startdate',
dest='start',
type=valid_date,
help="The start date. Only applicable to forms on SQL domains. - format YYYY-MM-DD",
)
parser.add_argument(
'-e',
'--enddate',
dest='end',
type=valid_date,
help="The end date. Only applicable to forms on SQL domains. - format YYYY-MM-DD",
)
def handle(self, **options):
startdate = options.get('start')
enddate = options.get('end')
print("Fetching all form ids...", file=sys.stderr)
all_ids = list(iter_form_ids_by_last_modified(startdate, enddate))
print("Woo! Done fetching. Here we go", file=sys.stderr)
for doc_ids in chunked(all_ids, 100):
es_ids = (FormES()
.remove_default_filter('is_xform_instance')
.doc_id(doc_ids).values_list('_id', flat=True))
missing_ids = set(doc_ids) - set(es_ids)
for form_id in missing_ids:
print(form_id)
def iter_form_ids_by_last_modified(start_datetime, end_datetime):
from corehq.sql_db.util import run_query_across_partitioned_databases
annotate = {
'last_modified': Greatest('received_on', 'edited_on', 'deleted_on'),
}
return run_query_across_partitioned_databases(
XFormInstanceSQL,
(Q(last_modified__gt=start_datetime, last_modified__lt=end_datetime) &
Q(state=F('state').bitand(XFormInstanceSQL.DELETED) +
F('state').bitand(XFormInstanceSQL.DEPRECATED) +
F('state').bitand(XFormInstanceSQL.DUPLICATE) +
F('state').bitand(XFormInstanceSQL.ERROR) +
F('state').bitand(XFormInstanceSQL.SUBMISSION_ERROR_LOG) +
F('state'))),
annotate=annotate,
values=['form_id'],
)
|
|
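The management command in the record above walks primary-store form IDs in chunks and reports the ones missing from Elasticsearch. The chunked set-difference at its core, sketched with plain containers in place of FormES and the partitioned-database query; chunked is reimplemented here so the snippet stands alone.

def chunked(iterable, size):
    chunk = []
    for item in iterable:
        chunk.append(item)
        if len(chunk) == size:
            yield chunk
            chunk = []
    if chunk:
        yield chunk

primary_ids = ['f{}'.format(i) for i in range(10)]
es_ids = set(primary_ids) - {'f3', 'f7'}  # pretend ES lost two docs

missing = []
for batch in chunked(primary_ids, 4):
    found = es_ids & set(batch)   # stands in for the FormES doc_id query
    missing.extend(set(batch) - found)

print(sorted(missing))  # ['f3', 'f7']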
d8521011d5be28812c222b58901a07e8f30e87ac
|
neuralstyle/testing-train.py
|
neuralstyle/testing-train.py
|
from __future__ import print_function
import argparse
import numpy as np
import torch
from torch.autograd import Variable
from torch.optim import Adam
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision import datasets
from transformernet import TransformerNet
from vgg16 import Vgg16
import utils
import os
def main():
parser = argparse.ArgumentParser(description="parser for fast-neural-style")
parser.add_argument("--batch-size", "-b", type=int, default=4)
parser.add_argument("--epochs", "-e", type=int, default=2)
parser.add_argument("--cuda", type=int, default=0)
parser.add_argument("--dataset", type=str, default="MSCOCO")
parser.add_argument("--image-size", type=int, default=256)
args = parser.parse_args()
if args.cuda and not torch.cuda.is_available():
print("WARNING: torch.cuda not available, using CPU.")
args.cuda = 0
if args.cuda:
kwargs = {'num_workers': 1, 'pin_memory': False}
else:
kwargs = {}
print("=====================")
print("TEST MODE")
print("using 1 worker")
print("=====================")
print("=====================")
print("TORCH VERSION:", torch.__version__)
print("BATCH SIZE:", args.batch_size)
print("EPOCHS:", args.epochs)
print("CUDA:", args.cuda)
print("DATASET:", args.dataset)
print("IMAGE SIZE:", args.image_size)
print("=====================\n")
transform = transforms.Compose([transforms.Scale(args.image_size),
transforms.CenterCrop(args.image_size),
transforms.ToTensor(),
transforms.Lambda(lambda x: x.mul(255))])
train_dataset = datasets.ImageFolder(args.dataset, transform)
train_loader = DataLoader(train_dataset, batch_size=args.batch_size, **kwargs)
for e in range(args.epochs):
batch_id = 0
for x in train_loader:
if batch_id < 10 or batch_id % 500 == 0:
print("Processing batch:", batch_id)
batch_id += 1
print("\nDone :)")
if __name__ == "__main__":
main()
|
Add testing code for memory leak.
|
Add testing code for memory leak.
|
Python
|
mit
|
abhiskk/fast-neural-style,onai/fast-neural-style,abhiskk/fast-neural-style,darkstar112358/fast-neural-style,darkstar112358/fast-neural-style
|
Add testing code for memory leak.
|
from __future__ import print_function
import argparse
import numpy as np
import torch
from torch.autograd import Variable
from torch.optim import Adam
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision import datasets
from transformernet import TransformerNet
from vgg16 import Vgg16
import utils
import os
def main():
parser = argparse.ArgumentParser(description="parser for fast-neural-style")
parser.add_argument("--batch-size", "-b", type=int, default=4)
parser.add_argument("--epochs", "-e", type=int, default=2)
parser.add_argument("--cuda", type=int, default=0)
parser.add_argument("--dataset", type=str, default="MSCOCO")
parser.add_argument("--image-size", type=int, default=256)
args = parser.parse_args()
if args.cuda and not torch.cuda.is_available():
print("WARNING: torch.cuda not available, using CPU.")
args.cuda = 0
if args.cuda:
kwargs = {'num_workers': 1, 'pin_memory': False}
else:
kwargs = {}
print("=====================")
print("TEST MODE")
print("using 1 worker")
print("=====================")
print("=====================")
print("TORCH VERSION:", torch.__version__)
print("BATCH SIZE:", args.batch_size)
print("EPOCHS:", args.epochs)
print("CUDA:", args.cuda)
print("DATASET:", args.dataset)
print("IMAGE SIZE:", args.image_size)
print("=====================\n")
transform = transforms.Compose([transforms.Scale(args.image_size),
transforms.CenterCrop(args.image_size),
transforms.ToTensor(),
transforms.Lambda(lambda x: x.mul(255))])
train_dataset = datasets.ImageFolder(args.dataset, transform)
train_loader = DataLoader(train_dataset, batch_size=args.batch_size, **kwargs)
for e in range(args.epochs):
batch_id = 0
for x in train_loader:
if batch_id < 10 or batch_id % 500 == 0:
print("Processing batch:", batch_id)
batch_id += 1
print("\nDone :)")
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add testing code for memory leak.<commit_after>
|
from __future__ import print_function
import argparse
import numpy as np
import torch
from torch.autograd import Variable
from torch.optim import Adam
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision import datasets
from transformernet import TransformerNet
from vgg16 import Vgg16
import utils
import os
def main():
parser = argparse.ArgumentParser(description="parser for fast-neural-style")
parser.add_argument("--batch-size", "-b", type=int, default=4)
parser.add_argument("--epochs", "-e", type=int, default=2)
parser.add_argument("--cuda", type=int, default=0)
parser.add_argument("--dataset", type=str, default="MSCOCO")
parser.add_argument("--image-size", type=int, default=256)
args = parser.parse_args()
if args.cuda and not torch.cuda.is_available():
print("WARNING: torch.cuda not available, using CPU.")
args.cuda = 0
if args.cuda:
kwargs = {'num_workers': 1, 'pin_memory': False}
else:
kwargs = {}
print("=====================")
print("TEST MODE")
print("using 1 worker")
print("=====================")
print("=====================")
print("TORCH VERSION:", torch.__version__)
print("BATCH SIZE:", args.batch_size)
print("EPOCHS:", args.epochs)
print("CUDA:", args.cuda)
print("DATASET:", args.dataset)
print("IMAGE SIZE:", args.image_size)
print("=====================\n")
transform = transforms.Compose([transforms.Scale(args.image_size),
transforms.CenterCrop(args.image_size),
transforms.ToTensor(),
transforms.Lambda(lambda x: x.mul(255))])
train_dataset = datasets.ImageFolder(args.dataset, transform)
train_loader = DataLoader(train_dataset, batch_size=args.batch_size, **kwargs)
for e in range(args.epochs):
batch_id = 0
for x in train_loader:
if batch_id < 10 or batch_id % 500 == 0:
print("Processing batch:", batch_id)
batch_id += 1
print("\nDone :)")
if __name__ == "__main__":
main()
|
Add testing code for memory leak.from __future__ import print_function
import argparse
import numpy as np
import torch
from torch.autograd import Variable
from torch.optim import Adam
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision import datasets
from transformernet import TransformerNet
from vgg16 import Vgg16
import utils
import os
def main():
parser = argparse.ArgumentParser(description="parser for fast-neural-style")
parser.add_argument("--batch-size", "-b", type=int, default=4)
parser.add_argument("--epochs", "-e", type=int, default=2)
parser.add_argument("--cuda", type=int, default=0)
parser.add_argument("--dataset", type=str, default="MSCOCO")
parser.add_argument("--image-size", type=int, default=256)
args = parser.parse_args()
if args.cuda and not torch.cuda.is_available():
print("WARNING: torch.cuda not available, using CPU.")
args.cuda = 0
if args.cuda:
kwargs = {'num_workers': 1, 'pin_memory': False}
else:
kwargs = {}
print("=====================")
print("TEST MODE")
print("using 1 worker")
print("=====================")
print("=====================")
print("TORCH VERSION:", torch.__version__)
print("BATCH SIZE:", args.batch_size)
print("EPOCHS:", args.epochs)
print("CUDA:", args.cuda)
print("DATASET:", args.dataset)
print("IMAGE SIZE:", args.image_size)
print("=====================\n")
transform = transforms.Compose([transforms.Scale(args.image_size),
transforms.CenterCrop(args.image_size),
transforms.ToTensor(),
transforms.Lambda(lambda x: x.mul(255))])
train_dataset = datasets.ImageFolder(args.dataset, transform)
train_loader = DataLoader(train_dataset, batch_size=args.batch_size, **kwargs)
for e in range(args.epochs):
batch_id = 0
for x in train_loader:
if batch_id < 10 or batch_id % 500 == 0:
print("Processing batch:", batch_id)
batch_id += 1
print("\nDone :)")
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add testing code for memory leak.<commit_after>from __future__ import print_function
import argparse
import numpy as np
import torch
from torch.autograd import Variable
from torch.optim import Adam
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision import datasets
from transformernet import TransformerNet
from vgg16 import Vgg16
import utils
import os
def main():
parser = argparse.ArgumentParser(description="parser for fast-neural-style")
parser.add_argument("--batch-size", "-b", type=int, default=4)
parser.add_argument("--epochs", "-e", type=int, default=2)
parser.add_argument("--cuda", type=int, default=0)
parser.add_argument("--dataset", type=str, default="MSCOCO")
parser.add_argument("--image-size", type=int, default=256)
args = parser.parse_args()
if args.cuda and not torch.cuda.is_available():
print("WARNING: torch.cuda not available, using CPU.")
args.cuda = 0
if args.cuda:
kwargs = {'num_workers': 1, 'pin_memory': False}
else:
kwargs = {}
print("=====================")
print("TEST MODE")
print("using 1 worker")
print("=====================")
print("=====================")
print("TORCH VERSION:", torch.__version__)
print("BATCH SIZE:", args.batch_size)
print("EPOCHS:", args.epochs)
print("CUDA:", args.cuda)
print("DATASET:", args.dataset)
print("IMAGE SIZE:", args.image_size)
print("=====================\n")
transform = transforms.Compose([transforms.Scale(args.image_size),
transforms.CenterCrop(args.image_size),
transforms.ToTensor(),
transforms.Lambda(lambda x: x.mul(255))])
train_dataset = datasets.ImageFolder(args.dataset, transform)
train_loader = DataLoader(train_dataset, batch_size=args.batch_size, **kwargs)
for e in range(args.epochs):
batch_id = 0
for x in train_loader:
if batch_id < 10 or batch_id % 500 == 0:
print("Processing batch:", batch_id)
batch_id += 1
print("\nDone :)")
if __name__ == "__main__":
main()
|
|
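A sketch of how the batch loop above could be instrumented to confirm a leak — psutil is assumed to be installed, and rss_mb is a hypothetical helper, not part of the commit.
# Hypothetical instrumentation for the data-loading loop above (assumes psutil).
import os
import psutil
def rss_mb():
    # Resident set size of the current process, in megabytes.
    return psutil.Process(os.getpid()).memory_info().rss / (1024 * 1024)
# Logging rss_mb() every few hundred batches and comparing values across epochs
# would show whether memory grows monotonically, i.e. whether the loader leaks.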
f3e91020f0426fedfe229e94bf1ddc69dd64a136
|
doc/examples/plot_template_alt.py
|
doc/examples/plot_template_alt.py
|
"""
=================
Template Matching
=================
In this example, we use template matching to identify the occurrence of an
image patch (in this case, a sub-image centered on a single coin). Here, we
return a single match (the exact same coin), so the maximum value in the
``match_template`` result corresponds to the coin location. The other coins
look similar, and thus have local maxima; if you expect multiple matches, you
should use a proper peak-finding function.
The ``match_template`` function uses fast, normalized cross-correlation [1]_
to find instances of the template in the image. Note that the peaks in the
output of ``match_template`` correspond to the origin (i.e. top-left corner) of
the template.
.. [1] J. P. Lewis, "Fast Normalized Cross-Correlation", Industrial Light and
Magic.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import data
from skimage.feature import match_template
image = data.coins()
coin = image[170:220, 75:130]
result = match_template(image, coin)
ij = np.unravel_index(np.argmax(result), result.shape)
x, y = ij[::-1]
fig, (ax1, ax2, ax3) = plt.subplots(ncols=3, figsize=(8, 3))
ax1.imshow(coin)
ax1.set_axis_off()
ax1.set_title('template')
ax2.imshow(image)
ax2.set_axis_off()
ax2.set_title('image')
# highlight matched region
hcoin, wcoin = coin.shape
rect = plt.Rectangle((x, y), wcoin, hcoin, edgecolor='r', facecolor='none')
ax2.add_patch(rect)
ax3.imshow(result)
ax3.set_axis_off()
ax3.set_title('`match_template`\nresult')
# highlight matched region
ax3.autoscale(False)
ax3.plot(x, y, 'o', markeredgecolor='r', markerfacecolor='none', markersize=10)
plt.show()
|
Add new example plot for `match_template`.
|
Add new example plot for `match_template`.
|
Python
|
bsd-3-clause
|
SamHames/scikit-image,warmspringwinds/scikit-image,ClinicalGraphics/scikit-image,paalge/scikit-image,emmanuelle/scikits.image,newville/scikit-image,almarklein/scikit-image,chintak/scikit-image,warmspringwinds/scikit-image,rjeli/scikit-image,chriscrosscutler/scikit-image,Britefury/scikit-image,GaZ3ll3/scikit-image,Britefury/scikit-image,vighneshbirodkar/scikit-image,emon10005/scikit-image,youprofit/scikit-image,michaelaye/scikit-image,robintw/scikit-image,jwiggins/scikit-image,youprofit/scikit-image,paalge/scikit-image,chriscrosscutler/scikit-image,almarklein/scikit-image,ofgulban/scikit-image,bennlich/scikit-image,robintw/scikit-image,ofgulban/scikit-image,SamHames/scikit-image,keflavich/scikit-image,keflavich/scikit-image,blink1073/scikit-image,bennlich/scikit-image,dpshelio/scikit-image,ajaybhat/scikit-image,almarklein/scikit-image,michaelpacer/scikit-image,almarklein/scikit-image,vighneshbirodkar/scikit-image,Hiyorimi/scikit-image,vighneshbirodkar/scikit-image,emmanuelle/scikits.image,michaelpacer/scikit-image,newville/scikit-image,ajaybhat/scikit-image,GaZ3ll3/scikit-image,michaelaye/scikit-image,rjeli/scikit-image,WarrenWeckesser/scikits-image,chintak/scikit-image,chintak/scikit-image,SamHames/scikit-image,oew1v07/scikit-image,juliusbierk/scikit-image,Hiyorimi/scikit-image,jwiggins/scikit-image,emmanuelle/scikits.image,emon10005/scikit-image,emmanuelle/scikits.image,ClinicalGraphics/scikit-image,bsipocz/scikit-image,juliusbierk/scikit-image,bsipocz/scikit-image,blink1073/scikit-image,ofgulban/scikit-image,chintak/scikit-image,rjeli/scikit-image,Midafi/scikit-image,SamHames/scikit-image,Midafi/scikit-image,WarrenWeckesser/scikits-image,pratapvardhan/scikit-image,oew1v07/scikit-image,dpshelio/scikit-image,paalge/scikit-image,pratapvardhan/scikit-image
|
Add new example plot for `match_template`.
|
"""
=================
Template Matching
=================
In this example, we use template matching to identify the occurrence of an
image patch (in this case, a sub-image centered on a single coin). Here, we
return a single match (the exact same coin), so the maximum value in the
``match_template`` result corresponds to the coin location. The other coins
look similar, and thus have local maxima; if you expect multiple matches, you
should use a proper peak-finding function.
The ``match_template`` function uses fast, normalized cross-correlation [1]_
to find instances of the template in the image. Note that the peaks in the
output of ``match_template`` correspond to the origin (i.e. top-left corner) of
the template.
.. [1] J. P. Lewis, "Fast Normalized Cross-Correlation", Industrial Light and
Magic.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import data
from skimage.feature import match_template
image = data.coins()
coin = image[170:220, 75:130]
result = match_template(image, coin)
ij = np.unravel_index(np.argmax(result), result.shape)
x, y = ij[::-1]
fig, (ax1, ax2, ax3) = plt.subplots(ncols=3, figsize=(8, 3))
ax1.imshow(coin)
ax1.set_axis_off()
ax1.set_title('template')
ax2.imshow(image)
ax2.set_axis_off()
ax2.set_title('image')
# highlight matched region
hcoin, wcoin = coin.shape
rect = plt.Rectangle((x, y), wcoin, hcoin, edgecolor='r', facecolor='none')
ax2.add_patch(rect)
ax3.imshow(result)
ax3.set_axis_off()
ax3.set_title('`match_template`\nresult')
# highlight matched region
ax3.autoscale(False)
ax3.plot(x, y, 'o', markeredgecolor='r', markerfacecolor='none', markersize=10)
plt.show()
|
<commit_before><commit_msg>Add new example plot for `match_template`.<commit_after>
|
"""
=================
Template Matching
=================
In this example, we use template matching to identify the occurrence of an
image patch (in this case, a sub-image centered on a single coin). Here, we
return a single match (the exact same coin), so the maximum value in the
``match_template`` result corresponds to the coin location. The other coins
look similar, and thus have local maxima; if you expect multiple matches, you
should use a proper peak-finding function.
The ``match_template`` function uses fast, normalized cross-correlation [1]_
to find instances of the template in the image. Note that the peaks in the
output of ``match_template`` correspond to the origin (i.e. top-left corner) of
the template.
.. [1] J. P. Lewis, "Fast Normalized Cross-Correlation", Industrial Light and
Magic.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import data
from skimage.feature import match_template
image = data.coins()
coin = image[170:220, 75:130]
result = match_template(image, coin)
ij = np.unravel_index(np.argmax(result), result.shape)
x, y = ij[::-1]
fig, (ax1, ax2, ax3) = plt.subplots(ncols=3, figsize=(8, 3))
ax1.imshow(coin)
ax1.set_axis_off()
ax1.set_title('template')
ax2.imshow(image)
ax2.set_axis_off()
ax2.set_title('image')
# highlight matched region
hcoin, wcoin = coin.shape
rect = plt.Rectangle((x, y), wcoin, hcoin, edgecolor='r', facecolor='none')
ax2.add_patch(rect)
ax3.imshow(result)
ax3.set_axis_off()
ax3.set_title('`match_template`\nresult')
# highlight matched region
ax3.autoscale(False)
ax3.plot(x, y, 'o', markeredgecolor='r', markerfacecolor='none', markersize=10)
plt.show()
|
Add new example plot for `match_template`."""
=================
Template Matching
=================
In this example, we use template matching to identify the occurrence of an
image patch (in this case, a sub-image centered on a single coin). Here, we
return a single match (the exact same coin), so the maximum value in the
``match_template`` result corresponds to the coin location. The other coins
look similar, and thus have local maxima; if you expect multiple matches, you
should use a proper peak-finding function.
The ``match_template`` function uses fast, normalized cross-correlation [1]_
to find instances of the template in the image. Note that the peaks in the
output of ``match_template`` correspond to the origin (i.e. top-left corner) of
the template.
.. [1] J. P. Lewis, "Fast Normalized Cross-Correlation", Industrial Light and
Magic.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import data
from skimage.feature import match_template
image = data.coins()
coin = image[170:220, 75:130]
result = match_template(image, coin)
ij = np.unravel_index(np.argmax(result), result.shape)
x, y = ij[::-1]
fig, (ax1, ax2, ax3) = plt.subplots(ncols=3, figsize=(8, 3))
ax1.imshow(coin)
ax1.set_axis_off()
ax1.set_title('template')
ax2.imshow(image)
ax2.set_axis_off()
ax2.set_title('image')
# highlight matched region
hcoin, wcoin = coin.shape
rect = plt.Rectangle((x, y), wcoin, hcoin, edgecolor='r', facecolor='none')
ax2.add_patch(rect)
ax3.imshow(result)
ax3.set_axis_off()
ax3.set_title('`match_template`\nresult')
# highlight matched region
ax3.autoscale(False)
ax3.plot(x, y, 'o', markeredgecolor='r', markerfacecolor='none', markersize=10)
plt.show()
|
<commit_before><commit_msg>Add new example plot for `match_template`.<commit_after>"""
=================
Template Matching
=================
In this example, we use template matching to identify the occurrence of an
image patch (in this case, a sub-image centered on a single coin). Here, we
return a single match (the exact same coin), so the maximum value in the
``match_template`` result corresponds to the coin location. The other coins
look similar, and thus have local maxima; if you expect multiple matches, you
should use a proper peak-finding function.
The ``match_template`` function uses fast, normalized cross-correlation [1]_
to find instances of the template in the image. Note that the peaks in the
output of ``match_template`` correspond to the origin (i.e. top-left corner) of
the template.
.. [1] J. P. Lewis, "Fast Normalized Cross-Correlation", Industrial Light and
Magic.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import data
from skimage.feature import match_template
image = data.coins()
coin = image[170:220, 75:130]
result = match_template(image, coin)
ij = np.unravel_index(np.argmax(result), result.shape)
x, y = ij[::-1]
fig, (ax1, ax2, ax3) = plt.subplots(ncols=3, figsize=(8, 3))
ax1.imshow(coin)
ax1.set_axis_off()
ax1.set_title('template')
ax2.imshow(image)
ax2.set_axis_off()
ax2.set_title('image')
# highlight matched region
hcoin, wcoin = coin.shape
rect = plt.Rectangle((x, y), wcoin, hcoin, edgecolor='r', facecolor='none')
ax2.add_patch(rect)
ax3.imshow(result)
ax3.set_axis_off()
ax3.set_title('`match_template`\nresult')
# highlight matched region
ax3.autoscale(False)
ax3.plot(x, y, 'o', markeredgecolor='r', markerfacecolor='none', markersize=10)
plt.show()
|
|
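The docstring above defers multiple matches to "a proper peak-finding function"; a minimal sketch of that case with skimage's peak_local_max follows (the distance and threshold values are illustrative, not tuned).
# Hypothetical multi-match extension of the example above.
from skimage import data
from skimage.feature import match_template, peak_local_max
image = data.coins()
coin = image[170:220, 75:130]
result = match_template(image, coin)
# Each sufficiently strong local maximum marks a candidate match (top-left corner).
for row, col in peak_local_max(result, min_distance=20, threshold_abs=0.5):
    print('candidate match at x=%d, y=%d' % (col, row))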
9b5f070705de9896c8c6f8347dc0f733ae748793
|
harvesting_blog_data.py
|
harvesting_blog_data.py
|
import os
import sys
import json
import feedparser
from bs4 import BeautifulSoup
FEED_URL = 'http://g1.globo.com/dynamo/rss2.xml'
def cleanHtml(html):
return BeautifulSoup(html, 'lxml').get_text()
fp = feedparser.parse(FEED_URL)
print "Fetched %s entries from '%s'" % (len(fp.entries[0].title), fp.feed.title)
blog_posts = []
for e in fp.entries:
blog_posts.append({'title': e.title,
'published': e.published,
'summary': cleanHtml(e.summary),
'link': e.link})
out_file = os.path.join('./', 'feed.json')
f = open(out_file, 'w')
f.write(json.dumps(blog_posts, indent=1))
f.close()
print 'Wrote output file to %s' % (f.name, )
|
Add harvesting blog data example
|
Add harvesting blog data example
|
Python
|
apache-2.0
|
fabriciojoc/redes-sociais-web,fabriciojoc/redes-sociais-web
|
Add harvesting blog data example
|
import os
import sys
import json
import feedparser
from bs4 import BeautifulSoup
FEED_URL = 'http://g1.globo.com/dynamo/rss2.xml'
def cleanHtml(html):
return BeautifulSoup(html, 'lxml').get_text()
fp = feedparser.parse(FEED_URL)
print "Fetched %s entries from '%s'" % (len(fp.entries[0].title), fp.feed.title)
blog_posts = []
for e in fp.entries:
blog_posts.append({'title': e.title,
'published': e.published,
'summary': cleanHtml(e.summary),
'link': e.link})
out_file = os.path.join('./', 'feed.json')
f = open(out_file, 'w')
f.write(json.dumps(blog_posts, indent=1))
f.close()
print 'Wrote output file to %s' % (f.name, )
|
<commit_before><commit_msg>Add harvesting blog data example<commit_after>
|
import os
import sys
import json
import feedparser
from bs4 import BeautifulSoup
FEED_URL = 'http://g1.globo.com/dynamo/rss2.xml'
def cleanHtml(html):
return BeautifulSoup(html, 'lxml').get_text()
fp = feedparser.parse(FEED_URL)
print "Fetched %s entries from '%s'" % (len(fp.entries[0].title), fp.feed.title)
blog_posts = []
for e in fp.entries:
blog_posts.append({'title': e.title,
'published': e.published,
'summary': cleanHtml(e.summary),
'link': e.link})
out_file = os.path.join('./', 'feed.json')
f = open(out_file, 'w')
f.write(json.dumps(blog_posts, indent=1))
f.close()
print 'Wrote output file to %s' % (f.name, )
|
Add harvesting blog data exampleimport os
import sys
import json
import feedparser
from bs4 import BeautifulSoup
FEED_URL = 'http://g1.globo.com/dynamo/rss2.xml'
def cleanHtml(html):
return BeautifulSoup(html, 'lxml').get_text()
fp = feedparser.parse(FEED_URL)
print "Fetched %s entries from '%s'" % (len(fp.entries[0].title), fp.feed.title)
blog_posts = []
for e in fp.entries:
blog_posts.append({'title': e.title,
'published': e.published,
'summary': cleanHtml(e.summary),
'link': e.link})
out_file = os.path.join('./', 'feed.json')
f = open(out_file, 'w')
f.write(json.dumps(blog_posts, indent=1))
f.close()
print 'Wrote output file to %s' % (f.name, )
|
<commit_before><commit_msg>Add harvesting blog data example<commit_after>import os
import sys
import json
import feedparser
from bs4 import BeautifulSoup
FEED_URL = 'http://g1.globo.com/dynamo/rss2.xml'
def cleanHtml(html):
return BeautifulSoup(html, 'lxml').get_text()
fp = feedparser.parse(FEED_URL)
print "Fetched %s entries from '%s'" % (len(fp.entries[0].title), fp.feed.title)
blog_posts = []
for e in fp.entries:
blog_posts.append({'title': e.title,
'published': e.published,
'summary': cleanHtml(e.summary),
'link': e.link})
out_file = os.path.join('./', 'feed.json')
f = open(out_file, 'w')
f.write(json.dumps(blog_posts, indent=1))
f.close()
print 'Wrote output file to %s' % (f.name, )
|
|
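A hedged Python 3 rendering of the harvest above, using a context manager for the output file; the feed URL and field names are taken from the record, everything else is a sketch.
# Hypothetical Python 3 equivalent of the script above.
import json
import feedparser
from bs4 import BeautifulSoup
fp = feedparser.parse('http://g1.globo.com/dynamo/rss2.xml')
print("Fetched %s entries from '%s'" % (len(fp.entries), fp.feed.title))
posts = [{'title': e.title,
          'published': e.published,
          'summary': BeautifulSoup(e.summary, 'lxml').get_text(),
          'link': e.link} for e in fp.entries]
with open('feed.json', 'w') as f:
    json.dump(posts, f, indent=1)
print('Wrote output file to %s' % f.name)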
42f66ea6e1921040d6e3055c41372b02511e6a5a
|
tests/CYK/__init__.py
|
tests/CYK/__init__.py
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 31.08.2017 14:50
:Licence GNUv3
Part of pyparsers
"""
|
Add directory for CYK tests
|
Add directory for CYK tests
|
Python
|
mit
|
PatrikValkovic/grammpy
|
Add directory for CYK tests
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 31.08.2017 14:50
:Licence GNUv3
Part of pyparsers
"""
|
<commit_before><commit_msg>Add directory for CYK tests<commit_after>
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 31.08.2017 14:50
:Licence GNUv3
Part of pyparsers
"""
|
Add directory for CYK tests#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 31.08.2017 14:50
:Licence GNUv3
Part of pyparsers
"""
|
<commit_before><commit_msg>Add directory for CYK tests<commit_after>#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 31.08.2017 14:50
:Licence GNUv3
Part of pyparsers
"""
|
|
4ae114dd1da8118cc9d2ee87e30f5e0a1f3324f2
|
tests/test_monitor.py
|
tests/test_monitor.py
|
import unittest
import Monitors.monitor
class TestMonitor(unittest.TestCase):
safe_config = {'partition': '/', 'limit': '10G'}
one_KB = 1024
one_MB = one_KB * 1024
one_GB = one_MB * 1024
one_TB = one_GB * 1024
def test_MonitorInit(self):
m = Monitors.monitor.Monitor(config_options={
'depend': 'a, b',
'urgent': 0,
'tolerance': 2,
'remote_alert': 1,
'recover_command': 'true'
})
self.assertEqual(m.name, 'unnamed', 'Monitor did not set name')
self.assertEqual(m.urgent, 0, 'Monitor did not set urgent')
self.assertEqual(m.tolerance, 2, 'Monitor did not set tolerance')
self.assertTrue(m.remote_alerting, 'Monitor did not set remote_alerting')
self.assertEqual(m.recover_command, 'true', 'Monitor did not set recover_command')
def test_MonitorSuccess(self):
m = Monitors.monitor.Monitor()
m.record_success('yay')
self.assertEqual(m.get_error_count(), 0, 'Error count is not 0')
self.assertEqual(m.get_success_count(), 1, 'Success count is not 1')
self.assertEqual(m.tests_run, 1, 'Tests run is not 1')
self.assertFalse(m.was_skipped, 'was_skipped is not false')
self.assertEqual(m.last_result, 'yay', 'Last result is not correct')
def test_MonitorFail(self):
m = Monitors.monitor.Monitor()
m.record_fail('boo')
self.assertEqual(m.get_error_count(), 1, 'Error count is not 1')
self.assertEqual(m.get_success_count(), 0, 'Success count is not 0')
self.assertEqual(m.tests_run, 1, 'Tests run is not 1')
self.assertFalse(m.was_skipped, 'was_skipped is not false')
self.assertEqual(m.last_result, 'boo', 'Last result is not correct')
def test_MonitorWindows(self):
m = Monitors.monitor.Monitor()
self.assertFalse(m.is_windows())
def test_MonitorSkip(self):
m = Monitors.monitor.Monitor()
m.record_skip('a')
self.assertEqual(m.get_success_count(), 1, 'Success count is not 1')
self.assertTrue(m.was_skipped, 'was_skipped is not true')
self.assertEqual(m.skip_dep, 'a', 'skip_dep is not correct')
self.assertTrue(m.skipped(), 'skipped() is not true')
|
Add some tests for monitor class
|
Add some tests for monitor class
|
Python
|
bsd-3-clause
|
jamesoff/simplemonitor,jamesoff/simplemonitor,jamesoff/simplemonitor,jamesoff/simplemonitor,jamesoff/simplemonitor
|
Add some tests for monitor class
|
import unittest
import Monitors.monitor
class TestMonitor(unittest.TestCase):
safe_config = {'partition': '/', 'limit': '10G'}
one_KB = 1024
one_MB = one_KB * 1024
one_GB = one_MB * 1024
one_TB = one_GB * 1024
def test_MonitorInit(self):
m = Monitors.monitor.Monitor(config_options={
'depend': 'a, b',
'urgent': 0,
'tolerance': 2,
'remote_alert': 1,
'recover_command': 'true'
})
self.assertEqual(m.name, 'unnamed', 'Monitor did not set name')
self.assertEqual(m.urgent, 0, 'Monitor did not set urgent')
self.assertEqual(m.tolerance, 2, 'Monitor did not set tolerance')
self.assertTrue(m.remote_alerting, 'Monitor did not set remote_alerting')
self.assertEqual(m.recover_command, 'true', 'Monitor did not set recover_command')
def test_MonitorSuccess(self):
m = Monitors.monitor.Monitor()
m.record_success('yay')
self.assertEqual(m.get_error_count(), 0, 'Error count is not 0')
self.assertEqual(m.get_success_count(), 1, 'Success count is not 1')
self.assertEqual(m.tests_run, 1, 'Tests run is not 1')
self.assertFalse(m.was_skipped, 'was_skipped is not false')
self.assertEqual(m.last_result, 'yay', 'Last result is not correct')
def test_MonitorFail(self):
m = Monitors.monitor.Monitor()
m.record_fail('boo')
self.assertEqual(m.get_error_count(), 1, 'Error count is not 1')
self.assertEqual(m.get_success_count(), 0, 'Success count is not 0')
self.assertEqual(m.tests_run, 1, 'Tests run is not 1')
self.assertFalse(m.was_skipped, 'was_skipped is not false')
self.assertEqual(m.last_result, 'boo', 'Last result is not correct')
def test_MonitorWindows(self):
m = Monitors.monitor.Monitor()
self.assertFalse(m.is_windows())
def test_MonitorSkip(self):
m = Monitors.monitor.Monitor()
m.record_skip('a')
self.assertEqual(m.get_success_count(), 1, 'Success count is not 1')
self.assertTrue(m.was_skipped, 'was_skipped is not true')
self.assertEqual(m.skip_dep, 'a', 'skip_dep is not correct')
self.assertTrue(m.skipped(), 'skipped() is not true')
|
<commit_before><commit_msg>Add some tests for monitor class<commit_after>
|
import unittest
import Monitors.monitor
class TestMonitor(unittest.TestCase):
safe_config = {'partition': '/', 'limit': '10G'}
one_KB = 1024
one_MB = one_KB * 1024
one_GB = one_MB * 1024
one_TB = one_GB * 1024
def test_MonitorInit(self):
m = Monitors.monitor.Monitor(config_options={
'depend': 'a, b',
'urgent': 0,
'tolerance': 2,
'remote_alert': 1,
'recover_command': 'true'
})
self.assertEqual(m.name, 'unnamed', 'Monitor did not set name')
self.assertEqual(m.urgent, 0, 'Monitor did not set urgent')
self.assertEqual(m.tolerance, 2, 'Monitor did not set tolerance')
self.assertTrue(m.remote_alerting, 'Monitor did not set remote_alerting')
self.assertEqual(m.recover_command, 'true', 'Monitor did not set recover_command')
def test_MonitorSuccess(self):
m = Monitors.monitor.Monitor()
m.record_success('yay')
self.assertEqual(m.get_error_count(), 0, 'Error count is not 0')
self.assertEqual(m.get_success_count(), 1, 'Success count is not 1')
self.assertEqual(m.tests_run, 1, 'Tests run is not 1')
self.assertFalse(m.was_skipped, 'was_skipped is not false')
self.assertEqual(m.last_result, 'yay', 'Last result is not correct')
def test_MonitorFail(self):
m = Monitors.monitor.Monitor()
m.record_fail('boo')
self.assertEqual(m.get_error_count(), 1, 'Error count is not 1')
self.assertEqual(m.get_success_count(), 0, 'Success count is not 0')
self.assertEqual(m.tests_run, 1, 'Tests run is not 1')
self.assertFalse(m.was_skipped, 'was_skipped is not false')
self.assertEqual(m.last_result, 'boo', 'Last result is not correct')
def test_MonitorWindows(self):
m = Monitors.monitor.Monitor()
self.assertFalse(m.is_windows())
def test_MonitorSkip(self):
m = Monitors.monitor.Monitor()
m.record_skip('a')
self.assertEqual(m.get_success_count(), 1, 'Success count is not 1')
self.assertTrue(m.was_skipped, 'was_skipped is not true')
self.assertEqual(m.skip_dep, 'a', 'skip_dep is not correct')
self.assertTrue(m.skipped(), 'skipped() is not true')
|
Add some tests for monitor classimport unittest
import Monitors.monitor
class TestMonitor(unittest.TestCase):
safe_config = {'partition': '/', 'limit': '10G'}
one_KB = 1024
one_MB = one_KB * 1024
one_GB = one_MB * 1024
one_TB = one_GB * 1024
def test_MonitorInit(self):
m = Monitors.monitor.Monitor(config_options={
'depend': 'a, b',
'urgent': 0,
'tolerance': 2,
'remote_alert': 1,
'recover_command': 'true'
})
self.assertEqual(m.name, 'unnamed', 'Monitor did not set name')
self.assertEqual(m.urgent, 0, 'Monitor did not set urgent')
self.assertEqual(m.tolerance, 2, 'Monitor did not set tolerance')
self.assertTrue(m.remote_alerting, 'Monitor did not set remote_alerting')
self.assertEqual(m.recover_command, 'true', 'Monitor did not set recover_command')
def test_MonitorSuccess(self):
m = Monitors.monitor.Monitor()
m.record_success('yay')
self.assertEqual(m.get_error_count(), 0, 'Error count is not 0')
self.assertEqual(m.get_success_count(), 1, 'Success count is not 1')
self.assertEqual(m.tests_run, 1, 'Tests run is not 1')
self.assertFalse(m.was_skipped, 'was_skipped is not false')
self.assertEqual(m.last_result, 'yay', 'Last result is not correct')
def test_MonitorFail(self):
m = Monitors.monitor.Monitor()
m.record_fail('boo')
self.assertEqual(m.get_error_count(), 1, 'Error count is not 1')
self.assertEqual(m.get_success_count(), 0, 'Success count is not 0')
self.assertEqual(m.tests_run, 1, 'Tests run is not 1')
self.assertFalse(m.was_skipped, 'was_skipped is not false')
self.assertEqual(m.last_result, 'boo', 'Last result is not correct')
def test_MonitorWindows(self):
m = Monitors.monitor.Monitor()
self.assertFalse(m.is_windows())
def test_MonitorSkip(self):
m = Monitors.monitor.Monitor()
m.record_skip('a')
self.assertEqual(m.get_success_count(), 1, 'Success count is not 1')
self.assertTrue(m.was_skipped, 'was_skipped is not true')
self.assertEqual(m.skip_dep, 'a', 'skip_dep is not correct')
self.assertTrue(m.skipped(), 'skipped() is not true')
|
<commit_before><commit_msg>Add some tests for monitor class<commit_after>import unittest
import Monitors.monitor
class TestMonitor(unittest.TestCase):
safe_config = {'partition': '/', 'limit': '10G'}
one_KB = 1024
one_MB = one_KB * 1024
one_GB = one_MB * 1024
one_TB = one_GB * 1024
def test_MonitorInit(self):
m = Monitors.monitor.Monitor(config_options={
'depend': 'a, b',
'urgent': 0,
'tolerance': 2,
'remote_alert': 1,
'recover_command': 'true'
})
self.assertEqual(m.name, 'unnamed', 'Monitor did not set name')
self.assertEqual(m.urgent, 0, 'Monitor did not set urgent')
self.assertEqual(m.tolerance, 2, 'Monitor did not set tolerance')
self.assertTrue(m.remote_alerting, 'Monitor did not set remote_alerting')
self.assertEqual(m.recover_command, 'true', 'Monitor did not set recover_command')
def test_MonitorSuccess(self):
m = Monitors.monitor.Monitor()
m.record_success('yay')
self.assertEqual(m.get_error_count(), 0, 'Error count is not 0')
self.assertEqual(m.get_success_count(), 1, 'Success count is not 1')
self.assertEqual(m.tests_run, 1, 'Tests run is not 1')
self.assertFalse(m.was_skipped, 'was_skipped is not false')
self.assertEqual(m.last_result, 'yay', 'Last result is not correct')
def test_MonitorFail(self):
m = Monitors.monitor.Monitor()
m.record_fail('boo')
self.assertEqual(m.get_error_count(), 1, 'Error count is not 1')
self.assertEqual(m.get_success_count(), 0, 'Success count is not 0')
self.assertEqual(m.tests_run, 1, 'Tests run is not 1')
self.assertFalse(m.was_skipped, 'was_skipped is not false')
self.assertEqual(m.last_result, 'boo', 'Last result is not correct')
def test_MonitorWindows(self):
m = Monitors.monitor.Monitor()
self.assertFalse(m.is_windows())
def test_MonitorSkip(self):
m = Monitors.monitor.Monitor()
m.record_skip('a')
self.assertEqual(m.get_success_count(), 1, 'Success count is not 1')
self.assertTrue(m.was_skipped, 'was_skipped is not true')
self.assertEqual(m.skip_dep, 'a', 'skip_dep is not correct')
self.assertTrue(m.skipped(), 'skipped() is not true')
|
|
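For completeness, a small sketch of driving the suite above programmatically — the dotted module path assumes the repository layout implied by the imports.
# Hypothetical test runner invocation (module path assumed).
import unittest
suite = unittest.defaultTestLoader.loadTestsFromName('tests.test_monitor')
unittest.TextTestRunner(verbosity=2).run(suite)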
f9b2bba394ad6ce31ffae5cf6ccf445dc280ba95
|
solutions/beecrowd/2486/2486.py
|
solutions/beecrowd/2486/2486.py
|
import sys
MIN_VITAMIN_C = 110
MAX_VITAMIN_C = 130
vitamin_c_catalogue = {
'suco de laranja': 120,
'morango fresco': 85,
'mamao': 85,
'goiaba vermelha': 70,
'manga': 56,
'laranja': 50,
'brocolis': 34,
}
for test in sys.stdin:
t = int(test)
if not t:
break
total_c_vitamin = 0
for _ in range(t):
line = input()
n, food = line.split(' ', 1)
n = int(n)
total_c_vitamin += n * vitamin_c_catalogue[food]
if total_c_vitamin < MIN_VITAMIN_C:
print(f'Mais {MIN_VITAMIN_C - total_c_vitamin} mg')
elif total_c_vitamin > MAX_VITAMIN_C:
print(f'Menos {total_c_vitamin - MAX_VITAMIN_C} mg')
else:
print(f'{total_c_vitamin} mg')
|
Solve C Mais ou Menos? in python
|
Solve C Mais ou Menos? in python
|
Python
|
mit
|
deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground
|
Solve C Mais ou Menos? in python
|
import sys
MIN_VITAMIN_C = 110
MAX_VITAMIN_C = 130
vitamin_c_catalogue = {
'suco de laranja': 120,
'morango fresco': 85,
'mamao': 85,
'goiaba vermelha': 70,
'manga': 56,
'laranja': 50,
'brocolis': 34,
}
for test in sys.stdin:
t = int(test)
if not t:
break
total_c_vitamin = 0
for _ in range(t):
line = input()
n, food = line.split(' ', 1)
n = int(n)
total_c_vitamin += n * vitamin_c_catalogue[food]
if total_c_vitamin < MIN_VITAMIN_C:
print(f'Mais {MIN_VITAMIN_C - total_c_vitamin} mg')
elif total_c_vitamin > MAX_VITAMIN_C:
print(f'Menos {total_c_vitamin - MAX_VITAMIN_C} mg')
else:
print(f'{total_c_vitamin} mg')
|
<commit_before><commit_msg>Solve C Mais ou Menos? in python<commit_after>
|
import sys
MIN_VITAMIN_C = 110
MAX_VITAMIN_C = 130
vitamin_c_catalogue = {
'suco de laranja': 120,
'morango fresco': 85,
'mamao': 85,
'goiaba vermelha': 70,
'manga': 56,
'laranja': 50,
'brocolis': 34,
}
for test in sys.stdin:
t = int(test)
if not t:
break
total_c_vitamin = 0
for _ in range(t):
line = input()
n, food = line.split(' ', 1)
n = int(n)
total_c_vitamin += n * vitamin_c_catalogue[food]
if total_c_vitamin < MIN_VITAMIN_C:
print(f'Mais {MIN_VITAMIN_C - total_c_vitamin} mg')
elif total_c_vitamin > MAX_VITAMIN_C:
print(f'Menos {total_c_vitamin - MAX_VITAMIN_C} mg')
else:
print(f'{total_c_vitamin} mg')
|
Solve C Mais ou Menos? in pythonimport sys
MIN_VITAMIN_C = 110
MAX_VITAMIN_C = 130
vitamin_c_catalogue = {
'suco de laranja': 120,
'morango fresco': 85,
'mamao': 85,
'goiaba vermelha': 70,
'manga': 56,
'laranja': 50,
'brocolis': 34,
}
for test in sys.stdin:
t = int(test)
if not t:
break
total_c_vitamin = 0
for _ in range(t):
line = input()
n, food = line.split(' ', 1)
n = int(n)
total_c_vitamin += n * vitamin_c_catalogue[food]
if total_c_vitamin < MIN_VITAMIN_C:
print(f'Mais {MIN_VITAMIN_C - total_c_vitamin} mg')
elif total_c_vitamin > MAX_VITAMIN_C:
print(f'Menos {total_c_vitamin - MAX_VITAMIN_C} mg')
else:
print(f'{total_c_vitamin} mg')
|
<commit_before><commit_msg>Solve C Mais ou Menos? in python<commit_after>import sys
MIN_VITAMIN_C = 110
MAX_VITAMIN_C = 130
vitamin_c_catalogue = {
'suco de laranja': 120,
'morango fresco': 85,
'mamao': 85,
'goiaba vermelha': 70,
'manga': 56,
'laranja': 50,
'brocolis': 34,
}
for test in sys.stdin:
t = int(test)
if not t:
break
total_c_vitamin = 0
for _ in range(t):
line = input()
n, food = line.split(' ', 1)
n = int(n)
total_c_vitamin += n * vitamin_c_catalogue[food]
if total_c_vitamin < MIN_VITAMIN_C:
print(f'Mais {MIN_VITAMIN_C - total_c_vitamin} mg')
elif total_c_vitamin > MAX_VITAMIN_C:
print(f'Menos {total_c_vitamin - MAX_VITAMIN_C} mg')
else:
print(f'{total_c_vitamin} mg')
|
|
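A worked input/output pair for the solution above, derived from its own catalogue: one laranja (50 mg) plus one manga (56 mg) totals 106 mg, which is 4 mg short of the 110 mg minimum.
# Worked example for the program above; values computed from its catalogue.
sample_stdin = '2\n1 laranja\n1 manga\n0\n'
# Piping sample_stdin into the program should print: Mais 4 mg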
27fca35a08278a44bb7ba693f222c6c182061872
|
Enemy.py
|
Enemy.py
|
import pygame
class Enemy(pygame.sprite.Sprite):
def __init__(self, x, y):
super().__init__()
self.image = pygame.image.load("images/enemy.png").convert_alpha()
self.rect = self.image.get_rect(center=(x, y))
def
|
Add the enemy file and start it up.
|
Add the enemy file and start it up.
|
Python
|
mit
|
di1111/mlg-fite
|
Add the enemy file and start it up.
|
import pygame
class Enemy(pygame.sprite.Sprite):
def __init__(self, x, y):
super().__init__()
self.image = pygame.image.load("images/enemy.png").convert_alpha()
self.rect = self.image.get_rect(center=(x, y))
def
|
<commit_before><commit_msg>Add the enemy file and start it up.<commit_after>
|
import pygame
class Enemy(pygame.sprite.Sprite):
def __init__(self, x, y):
super().__init__()
self.image = pygame.image.load("images/enemy.png").convert_alpha()
self.rect = self.image.get_rect(center=(x, y))
def
|
Add the enemy file and start it up.import pygame
class Enemy(pygame.sprite.Sprite):
def __init__(self, x, y):
super().__init__()
self.image = pygame.image.load("images/enemy.png").convert_alpha()
self.rect = self.image.get_rect(center=(x, y))
def
|
<commit_before><commit_msg>Add the enemy file and start it up.<commit_after>import pygame
class Enemy(pygame.sprite.Sprite):
def __init__(self, x, y):
super().__init__()
self.image = pygame.image.load("images/enemy.png").convert_alpha()
self.rect = self.image.get_rect(center=(x, y))
def
|
|
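A hedged completion of the truncated class in the record above — the update method and its behaviour are hypothetical, chosen only because pygame sprite groups conventionally call update() once per frame.
# Hypothetical completion of the truncated Enemy class above.
import pygame
class Enemy(pygame.sprite.Sprite):
    def __init__(self, x, y):
        super().__init__()
        self.image = pygame.image.load("images/enemy.png").convert_alpha()
        self.rect = self.image.get_rect(center=(x, y))
    def update(self):
        # Placeholder behaviour (assumed): drift left one pixel per frame.
        self.rect.x -= 1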
982cd61d7532365d9de56b308c7a4d8308302c15
|
tests/testapp/tests/test_model_create_with_generic.py
|
tests/testapp/tests/test_model_create_with_generic.py
|
try:
from django.contrib.contenttypes.fields import GenericForeignKey
except ImportError:
# Django 1.6
from django.contrib.contenttypes.generic import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.test import TestCase
from django_fsm import FSMField, transition
class Ticket(models.Model):
class Meta:
app_label = 'testapp'
class Task(models.Model):
class STATE:
NEW = 'new'
DONE = 'done'
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
causality = GenericForeignKey('content_type', 'object_id')
state = FSMField(default=STATE.NEW)
@transition(field=state, source=STATE.NEW, target=STATE.DONE)
def do(self):
pass
class Meta:
app_label = 'testapp'
class Test(TestCase):
def setUp(self):
self.ticket = Ticket.objects.create()
def test_model_objects_create(self):
"""Check a model with state field can be created
if one of the other fields is a property or a virtual field.
"""
Task.objects.create(causality=self.ticket)
|
Add a test to demonstrate issue with django 1.11
|
Add a test to demonstrate issue with django 1.11
If a model with a state field has other fields that access their field value
via a property or a Virtual Field, then creation of instances will fail.
|
Python
|
mit
|
kmmbvnr/django-fsm,kmmbvnr/django-fsm
|
Add a test to demonstrate issue with django 1.11
If a model with a state field has other fields that access their field value
via a property or a Virtual Field, then creation of instances will fail.
|
try:
from django.contrib.contenttypes.fields import GenericForeignKey
except ImportError:
# Django 1.6
from django.contrib.contenttypes.generic import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.test import TestCase
from django_fsm import FSMField, transition
class Ticket(models.Model):
class Meta:
app_label = 'testapp'
class Task(models.Model):
class STATE:
NEW = 'new'
DONE = 'done'
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
causality = GenericForeignKey('content_type', 'object_id')
state = FSMField(default=STATE.NEW)
@transition(field=state, source=STATE.NEW, target=STATE.DONE)
def do(self):
pass
class Meta:
app_label = 'testapp'
class Test(TestCase):
def setUp(self):
self.ticket = Ticket.objects.create()
def test_model_objects_create(self):
"""Check a model with state field can be created
if one of the other fields is a property or a virtual field.
"""
Task.objects.create(causality=self.ticket)
|
<commit_before><commit_msg>Add a test to demonstrate issue with django 1.11
If a model with a state field has other fields that access their field value
via a property or a Virtual Field, then creation of instances will fail.<commit_after>
|
try:
from django.contrib.contenttypes.fields import GenericForeignKey
except ImportError:
# Django 1.6
from django.contrib.contenttypes.generic import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.test import TestCase
from django_fsm import FSMField, transition
class Ticket(models.Model):
class Meta:
app_label = 'testapp'
class Task(models.Model):
class STATE:
NEW = 'new'
DONE = 'done'
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
causality = GenericForeignKey('content_type', 'object_id')
state = FSMField(default=STATE.NEW)
@transition(field=state, source=STATE.NEW, target=STATE.DONE)
def do(self):
pass
class Meta:
app_label = 'testapp'
class Test(TestCase):
def setUp(self):
self.ticket = Ticket.objects.create()
def test_model_objects_create(self):
"""Check a model with state field can be created
if one of the other fields is a property or a virtual field.
"""
Task.objects.create(causality=self.ticket)
|
Add a test to demonstrate issue with django 1.11
If a model with a state field has other fields that access their field value
via a property or a Virtual Field, then creation of instances will fail.try:
from django.contrib.contenttypes.fields import GenericForeignKey
except ImportError:
# Django 1.6
from django.contrib.contenttypes.generic import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.test import TestCase
from django_fsm import FSMField, transition
class Ticket(models.Model):
class Meta:
app_label = 'testapp'
class Task(models.Model):
class STATE:
NEW = 'new'
DONE = 'done'
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
causality = GenericForeignKey('content_type', 'object_id')
state = FSMField(default=STATE.NEW)
@transition(field=state, source=STATE.NEW, target=STATE.DONE)
def do(self):
pass
class Meta:
app_label = 'testapp'
class Test(TestCase):
def setUp(self):
self.ticket = Ticket.objects.create()
def test_model_objects_create(self):
"""Check a model with state field can be created
if one of the other fields is a property or a virtual field.
"""
Task.objects.create(causality=self.ticket)
|
<commit_before><commit_msg>Add a test to demonstrate issue with django 1.11
If a model with a state field has other fields that access their field value
via a property or a Virtual Field, then creation of instances will fail.<commit_after>try:
from django.contrib.contenttypes.fields import GenericForeignKey
except ImportError:
# Django 1.6
from django.contrib.contenttypes.generic import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.test import TestCase
from django_fsm import FSMField, transition
class Ticket(models.Model):
class Meta:
app_label = 'testapp'
class Task(models.Model):
class STATE:
NEW = 'new'
DONE = 'done'
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
causality = GenericForeignKey('content_type', 'object_id')
state = FSMField(default=STATE.NEW)
@transition(field=state, source=STATE.NEW, target=STATE.DONE)
def do(self):
pass
class Meta:
app_label = 'testapp'
class Test(TestCase):
def setUp(self):
self.ticket = Ticket.objects.create()
def test_model_objects_create(self):
"""Check a model with state field can be created
if one of the other fields is a property or a virtual field.
"""
Task.objects.create(causality=self.ticket)
|
|
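A brief sketch of exercising the transition declared in the record above; note that django-fsm changes the field value in memory only, so the save() call is the caller's responsibility.
# Hypothetical usage of the Task model above (run inside a Django test case).
ticket = Ticket.objects.create()
task = Task.objects.create(causality=ticket)
task.do()      # moves state from 'new' to 'done' in memory
task.save()    # persisting the transition is up to the caller
assert task.state == Task.STATE.DONE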
6e0f585a8f8433d4f6800cb1f093f97f8a1d4ff7
|
imageutils/__init__.py
|
imageutils/__init__.py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Image processing utilities for Astropy.
"""
# Affiliated packages may add whatever they like to this file, but
# should keep this content at the top.
# ----------------------------------------------------------------------------
from ._astropy_init import *
# ----------------------------------------------------------------------------
# For egg_info test builds to pass, put package imports here.
if not _ASTROPY_SETUP_:
from .scale_img import *
from .array_utils import *
from .sampling import *
__all__ = ['find_imgcuts', 'img_stats', 'rescale_img', 'scale_linear',
'scale_sqrt', 'scale_power', 'scale_log', 'scale_asinh',
'downsample', 'upsample', 'extract_array_2d', 'add_array_2d',
'subpixel_indices', 'fix_prf_nan']
|
Update imports for new functions
|
Update imports for new functions
|
Python
|
bsd-3-clause
|
mhvk/astropy,saimn/astropy,funbaker/astropy,bsipocz/astropy,tbabej/astropy,pllim/astropy,stargaser/astropy,AustereCuriosity/astropy,aleksandr-bakanov/astropy,dhomeier/astropy,funbaker/astropy,kelle/astropy,saimn/astropy,larrybradley/astropy,lpsinger/astropy,pllim/astropy,dhomeier/astropy,bsipocz/astropy,larrybradley/astropy,lpsinger/astropy,pllim/astropy,MSeifert04/astropy,astropy/astropy,mhvk/astropy,aleksandr-bakanov/astropy,MSeifert04/astropy,tbabej/astropy,tbabej/astropy,tbabej/astropy,saimn/astropy,stargaser/astropy,joergdietrich/astropy,astropy/astropy,kelle/astropy,kelle/astropy,dhomeier/astropy,MSeifert04/astropy,mhvk/astropy,StuartLittlefair/astropy,saimn/astropy,StuartLittlefair/astropy,lpsinger/astropy,larrybradley/astropy,tbabej/astropy,DougBurke/astropy,AustereCuriosity/astropy,dhomeier/astropy,lpsinger/astropy,AustereCuriosity/astropy,DougBurke/astropy,lpsinger/astropy,StuartLittlefair/astropy,aleksandr-bakanov/astropy,kelle/astropy,funbaker/astropy,DougBurke/astropy,joergdietrich/astropy,larrybradley/astropy,aleksandr-bakanov/astropy,astropy/astropy,joergdietrich/astropy,StuartLittlefair/astropy,mhvk/astropy,bsipocz/astropy,joergdietrich/astropy,AustereCuriosity/astropy,DougBurke/astropy,dhomeier/astropy,pllim/astropy,AustereCuriosity/astropy,bsipocz/astropy,funbaker/astropy,pllim/astropy,saimn/astropy,astropy/astropy,mhvk/astropy,kelle/astropy,stargaser/astropy,astropy/astropy,larrybradley/astropy,stargaser/astropy,MSeifert04/astropy,StuartLittlefair/astropy,joergdietrich/astropy
|
Update imports for new functions
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Image processing utilities for Astropy.
"""
# Affiliated packages may add whatever they like to this file, but
# should keep this content at the top.
# ----------------------------------------------------------------------------
from ._astropy_init import *
# ----------------------------------------------------------------------------
# For egg_info test builds to pass, put package imports here.
if not _ASTROPY_SETUP_:
from .scale_img import *
from .array_utils import *
from .sampling import *
__all__ = ['find_imgcuts', 'img_stats', 'rescale_img', 'scale_linear',
'scale_sqrt', 'scale_power', 'scale_log', 'scale_asinh',
'downsample', 'upsample', 'extract_array_2d', 'add_array_2d',
'subpixel_indices', 'fix_prf_nan']
|
<commit_before><commit_msg>Update imports for new functions<commit_after>
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Image processing utilities for Astropy.
"""
# Affiliated packages may add whatever they like to this file, but
# should keep this content at the top.
# ----------------------------------------------------------------------------
from ._astropy_init import *
# ----------------------------------------------------------------------------
# For egg_info test builds to pass, put package imports here.
if not _ASTROPY_SETUP_:
from .scale_img import *
from .array_utils import *
from .sampling import *
__all__ = ['find_imgcuts', 'img_stats', 'rescale_img', 'scale_linear',
'scale_sqrt', 'scale_power', 'scale_log', 'scale_asinh',
'downsample', 'upsample', 'extract_array_2d', 'add_array_2d',
'subpixel_indices', 'fix_prf_nan']
|
Update imports for new functions# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Image processing utilities for Astropy.
"""
# Affiliated packages may add whatever they like to this file, but
# should keep this content at the top.
# ----------------------------------------------------------------------------
from ._astropy_init import *
# ----------------------------------------------------------------------------
# For egg_info test builds to pass, put package imports here.
if not _ASTROPY_SETUP_:
from .scale_img import *
from .array_utils import *
from .sampling import *
__all__ = ['find_imgcuts', 'img_stats', 'rescale_img', 'scale_linear',
'scale_sqrt', 'scale_power', 'scale_log', 'scale_asinh',
'downsample', 'upsample', 'extract_array_2d', 'add_array_2d',
'subpixel_indices', 'fix_prf_nan']
|
<commit_before><commit_msg>Update imports for new functions<commit_after># Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Image processing utilities for Astropy.
"""
# Affiliated packages may add whatever they like to this file, but
# should keep this content at the top.
# ----------------------------------------------------------------------------
from ._astropy_init import *
# ----------------------------------------------------------------------------
# For egg_info test builds to pass, put package imports here.
if not _ASTROPY_SETUP_:
from .scale_img import *
from .array_utils import *
from .sampling import *
__all__ = ['find_imgcuts', 'img_stats', 'rescale_img', 'scale_linear',
'scale_sqrt', 'scale_power', 'scale_log', 'scale_asinh',
'downsample', 'upsample', 'extract_array_2d', 'add_array_2d',
'subpixel_indices', 'fix_prf_nan']
|
|
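A minimal illustration of the __all__ mechanism the file above relies on: a wildcard import binds only the listed names. Everything here is a toy, not the real imageutils API.
# Hypothetical package __init__ demonstrating __all__-gated wildcard imports.
__all__ = ['downsample', 'upsample']
def downsample():
    # Exported: 'from pkg import *' will bind this name.
    pass
def upsample():
    pass
def _prf_helper():
    # Not listed in __all__, so a wildcard import skips it.
    pass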
54b94346d2669347cf2a9a2b24df6b657cf80c5b
|
nisl/mask.py
|
nisl/mask.py
|
import numpy as np
from scipy import ndimage
###############################################################################
# Operating on connect component
###############################################################################
def largest_cc(mask):
""" Return the largest connected component of a 3D mask array.
Parameters
-----------
mask: 3D boolean array
3D array indicating a mask.
Returns
--------
mask: 3D boolean array
3D array indicating a mask, with only one connected component.
"""
# We use asarray to be able to work with masked arrays.
mask = np.asarray(mask)
labels, label_nb = ndimage.label(mask)
if not label_nb:
raise ValueError('No non-zero values: no connected components')
if label_nb == 1:
        return mask.astype(bool)
label_count = np.bincount(labels.ravel())
# discard 0 the 0 label
label_count[0] = 0
return labels == label_count.argmax()
###############################################################################
# Utilities to calculate masks
###############################################################################
def compute_mask(mean_volume, m=0.2, M=0.9, cc=True,
exclude_zeros=False):
"""
Compute a mask file from fMRI data in 3D or 4D ndarrays.
Compute and write the mask of an image based on the grey level
This is based on an heuristic proposed by T.Nichols:
find the least dense point of the histogram, between fractions
m and M of the total image histogram.
In case of failure, it is usually advisable to increase m.
Parameters
----------
mean_volume : 3D ndarray
mean EPI image, used to compute the threshold for the mask.
m : float, optional
lower fraction of the histogram to be discarded.
M: float, optional
upper fraction of the histogram to be discarded.
cc: boolean, optional
if cc is True, only the largest connect component is kept.
exclude_zeros: boolean, optional
Consider zeros as missing values for the computation of the
threshold. This option is useful if the images have been
resliced with a large padding of zeros.
Returns
-------
mask : 3D boolean ndarray
The brain mask
"""
sorted_input = np.sort(mean_volume.reshape(-1))
if exclude_zeros:
sorted_input = sorted_input[sorted_input != 0]
    limite_inf = int(np.floor(m * len(sorted_input)))
    limite_sup = int(np.floor(M * len(sorted_input)))
delta = sorted_input[limite_inf + 1:limite_sup + 1] \
- sorted_input[limite_inf:limite_sup]
ia = delta.argmax()
threshold = 0.5 * (sorted_input[ia + limite_inf]
+ sorted_input[ia + limite_inf + 1])
mask = (mean_volume >= threshold)
if cc:
mask = largest_cc(mask)
return mask.astype(bool)
|
Mask computation utilities (from nipy).
|
Mask computation utilities (from nipy).
|
Python
|
bsd-3-clause
|
abenicho/isvr
|
Mask computation utilities (from nipy).
|
import numpy as np
from scipy import ndimage
###############################################################################
# Operating on connect component
###############################################################################
def largest_cc(mask):
""" Return the largest connected component of a 3D mask array.
Parameters
-----------
mask: 3D boolean array
3D array indicating a mask.
Returns
--------
mask: 3D boolean array
3D array indicating a mask, with only one connected component.
"""
# We use asarray to be able to work with masked arrays.
mask = np.asarray(mask)
labels, label_nb = ndimage.label(mask)
if not label_nb:
raise ValueError('No non-zero values: no connected components')
if label_nb == 1:
        return mask.astype(bool)
label_count = np.bincount(labels.ravel())
# discard 0 the 0 label
label_count[0] = 0
return labels == label_count.argmax()
###############################################################################
# Utilities to calculate masks
###############################################################################
def compute_mask(mean_volume, m=0.2, M=0.9, cc=True,
exclude_zeros=False):
"""
Compute a mask file from fMRI data in 3D or 4D ndarrays.
Compute and write the mask of an image based on the grey level
This is based on an heuristic proposed by T.Nichols:
find the least dense point of the histogram, between fractions
m and M of the total image histogram.
In case of failure, it is usually advisable to increase m.
Parameters
----------
mean_volume : 3D ndarray
mean EPI image, used to compute the threshold for the mask.
m : float, optional
lower fraction of the histogram to be discarded.
M: float, optional
upper fraction of the histogram to be discarded.
cc: boolean, optional
if cc is True, only the largest connect component is kept.
exclude_zeros: boolean, optional
Consider zeros as missing values for the computation of the
threshold. This option is useful if the images have been
resliced with a large padding of zeros.
Returns
-------
mask : 3D boolean ndarray
The brain mask
"""
sorted_input = np.sort(mean_volume.reshape(-1))
if exclude_zeros:
sorted_input = sorted_input[sorted_input != 0]
    limite_inf = int(np.floor(m * len(sorted_input)))
    limite_sup = int(np.floor(M * len(sorted_input)))
delta = sorted_input[limite_inf + 1:limite_sup + 1] \
- sorted_input[limite_inf:limite_sup]
ia = delta.argmax()
threshold = 0.5 * (sorted_input[ia + limite_inf]
+ sorted_input[ia + limite_inf + 1])
mask = (mean_volume >= threshold)
if cc:
mask = largest_cc(mask)
return mask.astype(bool)
|
<commit_before><commit_msg>Mask computation utilities (from nipy).<commit_after>
|
import numpy as np
from scipy import ndimage
###############################################################################
# Operating on connect component
###############################################################################
def largest_cc(mask):
""" Return the largest connected component of a 3D mask array.
Parameters
-----------
mask: 3D boolean array
3D array indicating a mask.
Returns
--------
mask: 3D boolean array
3D array indicating a mask, with only one connected component.
"""
# We use asarray to be able to work with masked arrays.
mask = np.asarray(mask)
labels, label_nb = ndimage.label(mask)
if not label_nb:
raise ValueError('No non-zero values: no connected components')
if label_nb == 1:
        return mask.astype(bool)
label_count = np.bincount(labels.ravel())
# discard 0 the 0 label
label_count[0] = 0
return labels == label_count.argmax()
###############################################################################
# Utilities to calculate masks
###############################################################################
def compute_mask(mean_volume, m=0.2, M=0.9, cc=True,
exclude_zeros=False):
"""
Compute a mask file from fMRI data in 3D or 4D ndarrays.
Compute and write the mask of an image based on the grey level
This is based on an heuristic proposed by T.Nichols:
find the least dense point of the histogram, between fractions
m and M of the total image histogram.
In case of failure, it is usually advisable to increase m.
Parameters
----------
mean_volume : 3D ndarray
mean EPI image, used to compute the threshold for the mask.
m : float, optional
lower fraction of the histogram to be discarded.
M: float, optional
upper fraction of the histogram to be discarded.
cc: boolean, optional
if cc is True, only the largest connected component is kept.
exclude_zeros: boolean, optional
Consider zeros as missing values for the computation of the
threshold. This option is useful if the images have been
resliced with a large padding of zeros.
Returns
-------
mask : 3D boolean ndarray
The brain mask
"""
sorted_input = np.sort(mean_volume.reshape(-1))
if exclude_zeros:
sorted_input = sorted_input[sorted_input != 0]
limite_inf = np.floor(m * len(sorted_input))
limite_sup = np.floor(M * len(sorted_input))
delta = sorted_input[limite_inf + 1:limite_sup + 1] \
- sorted_input[limite_inf:limite_sup]
ia = delta.argmax()
threshold = 0.5 * (sorted_input[ia + limite_inf]
+ sorted_input[ia + limite_inf + 1])
mask = (mean_volume >= threshold)
if cc:
mask = largest_cc(mask)
return mask.astype(bool)
|
Mask computation utilities (from nipy).import numpy as np
from scipy import ndimage
###############################################################################
# Operating on connected components
###############################################################################
def largest_cc(mask):
""" Return the largest connected component of a 3D mask array.
Parameters
-----------
mask: 3D boolean array
3D array indicating a mask.
Returns
--------
mask: 3D boolean array
3D array indicating a mask, with only one connected component.
"""
# We use asarray to be able to work with masked arrays.
mask = np.asarray(mask)
labels, label_nb = ndimage.label(mask)
if not label_nb:
raise ValueError('No non-zero values: no connected components')
if label_nb == 1:
return mask.astype(np.bool)
label_count = np.bincount(labels.ravel())
# discard 0 the 0 label
label_count[0] = 0
return labels == label_count.argmax()
###############################################################################
# Utilities to calculate masks
###############################################################################
def compute_mask(mean_volume, m=0.2, M=0.9, cc=True,
exclude_zeros=False):
"""
Compute a mask file from fMRI data in 3D or 4D ndarrays.
Compute and write the mask of an image based on the grey level
This is based on a heuristic proposed by T. Nichols:
find the least dense point of the histogram, between fractions
m and M of the total image histogram.
In case of failure, it is usually advisable to increase m.
Parameters
----------
mean_volume : 3D ndarray
mean EPI image, used to compute the threshold for the mask.
m : float, optional
lower fraction of the histogram to be discarded.
M: float, optional
upper fraction of the histogram to be discarded.
cc: boolean, optional
if cc is True, only the largest connected component is kept.
exclude_zeros: boolean, optional
Consider zeros as missing values for the computation of the
threshold. This option is useful if the images have been
resliced with a large padding of zeros.
Returns
-------
mask : 3D boolean ndarray
The brain mask
"""
sorted_input = np.sort(mean_volume.reshape(-1))
if exclude_zeros:
sorted_input = sorted_input[sorted_input != 0]
limite_inf = np.floor(m * len(sorted_input))
limite_sup = np.floor(M * len(sorted_input))
delta = sorted_input[limite_inf + 1:limite_sup + 1] \
- sorted_input[limite_inf:limite_sup]
ia = delta.argmax()
threshold = 0.5 * (sorted_input[ia + limite_inf]
+ sorted_input[ia + limite_inf + 1])
mask = (mean_volume >= threshold)
if cc:
mask = largest_cc(mask)
return mask.astype(bool)
|
<commit_before><commit_msg>Mask computation utilities (from nipy).<commit_after>import numpy as np
from scipy import ndimage
###############################################################################
# Operating on connected components
###############################################################################
def largest_cc(mask):
""" Return the largest connected component of a 3D mask array.
Parameters
-----------
mask: 3D boolean array
3D array indicating a mask.
Returns
--------
mask: 3D boolean array
3D array indicating a mask, with only one connected component.
"""
# We use asarray to be able to work with masked arrays.
mask = np.asarray(mask)
labels, label_nb = ndimage.label(mask)
if not label_nb:
raise ValueError('No non-zero values: no connected components')
if label_nb == 1:
return mask.astype(np.bool)
label_count = np.bincount(labels.ravel())
# discard 0 the 0 label
label_count[0] = 0
return labels == label_count.argmax()
###############################################################################
# Utilities to calculate masks
###############################################################################
def compute_mask(mean_volume, m=0.2, M=0.9, cc=True,
exclude_zeros=False):
"""
Compute a mask file from fMRI data in 3D or 4D ndarrays.
Compute and write the mask of an image based on the grey level
This is based on a heuristic proposed by T. Nichols:
find the least dense point of the histogram, between fractions
m and M of the total image histogram.
In case of failure, it is usually advisable to increase m.
Parameters
----------
mean_volume : 3D ndarray
mean EPI image, used to compute the threshold for the mask.
m : float, optional
lower fraction of the histogram to be discarded.
M: float, optional
upper fraction of the histogram to be discarded.
cc: boolean, optional
if cc is True, only the largest connected component is kept.
exclude_zeros: boolean, optional
Consider zeros as missing values for the computation of the
threshold. This option is useful if the images have been
resliced with a large padding of zeros.
Returns
-------
mask : 3D boolean ndarray
The brain mask
"""
sorted_input = np.sort(mean_volume.reshape(-1))
if exclude_zeros:
sorted_input = sorted_input[sorted_input != 0]
limite_inf = np.floor(m * len(sorted_input))
limite_sup = np.floor(M * len(sorted_input))
delta = sorted_input[limite_inf + 1:limite_sup + 1] \
- sorted_input[limite_inf:limite_sup]
ia = delta.argmax()
threshold = 0.5 * (sorted_input[ia + limite_inf]
+ sorted_input[ia + limite_inf + 1])
mask = (mean_volume >= threshold)
if cc:
mask = largest_cc(mask)
return mask.astype(bool)
|
|
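A note on the mask record above: the heuristic sorts every voxel intensity and places the threshold at the widest gap (the least dense histogram point) between the m and M quantiles. Two portability caveats: np.floor() returns a float, so the slice indices need an explicit int() cast on Python 3 with modern NumPy, and the np.bool alias used in largest_cc was removed in NumPy 1.24 (plain bool works). Below is a minimal self-contained sketch of the same valley-thresholding idea on synthetic data; it is an illustration under those assumptions, not the nipy code itself.

import numpy as np

def valley_threshold(volume, m=0.2, M=0.9):
    # Same idea as compute_mask(): find the widest gap between sorted
    # intensities inside the [m, M] quantile band and threshold there.
    s = np.sort(volume.ravel())
    lo = int(np.floor(m * len(s)))          # int() cast needed on Python 3
    hi = int(np.floor(M * len(s)))
    delta = s[lo + 1:hi + 1] - s[lo:hi]     # gaps between consecutive values
    ia = int(delta.argmax())                # widest gap = least dense point
    return 0.5 * (s[ia + lo] + s[ia + lo + 1])

rng = np.random.default_rng(0)
vol = np.concatenate([rng.normal(0.1, 0.05, 4000),   # background-like voxels
                      rng.normal(0.8, 0.05, 1000)])  # tissue-like voxels
t = valley_threshold(vol)
print(t, (vol >= t).mean())  # threshold lands between the modes; ~20% kept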
f333b9c5741a7ffbf49caa0a6130831a834b944f
|
test_dotfiles.py
|
test_dotfiles.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import shutil
import tempfile
import unittest
from dotfiles import core
def touch(fname, times=None):
with file(fname, 'a'):
os.utime(fname, times)
class DotfilesTestCase(unittest.TestCase):
def setUp(self):
"""Create a temporary home directory."""
self.home = tempfile.mkdtemp()
# create a repository for the tests to use
self.repo = os.path.join(self.home, 'Dotfiles')
os.mkdir(self.repo)
def tearDown(self):
"""Delete the temporary home directory and its contents."""
shutil.rmtree(self.home)
def test_force_sync_directory(self):
"""Test forced sync when the dotfile is a directory.
I installed the lastpass chrome extension which stores a socket in
~/.lastpass. So I added that directory as an external to /tmp and
attempted a forced sync. An error occurred because sync() calls
os.remove() as it mistakenly assumes the dotfile is a file and not
a directory.
"""
os.mkdir(os.path.join(self.home, '.lastpass'))
externals = {'.lastpass': '/tmp'}
dotfiles = core.Dotfiles(home=self.home, repo=self.repo, prefix='',
ignore=[], externals=externals)
dotfiles.sync(force=True)
self.assertEqual(
os.path.realpath(os.path.join(self.home, '.lastpass')), '/tmp')
def test_move_repository(self):
"""Test the move() method for a Dotfiles repository."""
touch(os.path.join(self.repo, 'bashrc'))
dotfiles = core.Dotfiles(
home=self.home, repo=self.repo, prefix='',
ignore=[], force=True, externals={})
dotfiles.sync()
# make sure sync() did the right thing
self.assertEqual(
os.path.realpath(os.path.join(self.home, '.bashrc')),
os.path.join(self.repo, 'bashrc'))
target = os.path.join(self.home, 'MyDotfiles')
dotfiles.move(target)
self.assertTrue(os.path.exists(os.path.join(target, 'bashrc')))
self.assertEqual(
os.path.realpath(os.path.join(self.home, '.bashrc')),
os.path.join(target, 'bashrc'))
def suite():
suite = unittest.TestLoader().loadTestsFromTestCase(DotfilesTestCase)
return suite
if __name__ == '__main__':
unittest.TextTestRunner().run(suite())
|
Add unit tests for recent bugfix and move operation
|
Add unit tests for recent bugfix and move operation
|
Python
|
isc
|
Bklyn/dotfiles,aparente/Dotfiles,nilehmann/dotfiles-1,aparente/Dotfiles,aparente/Dotfiles,aparente/Dotfiles
|
Add unit tests for recent bugfix and move operation
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import shutil
import tempfile
import unittest
from dotfiles import core
def touch(fname, times=None):
with file(fname, 'a'):
os.utime(fname, times)
class DotfilesTestCase(unittest.TestCase):
def setUp(self):
"""Create a temporary home directory."""
self.home = tempfile.mkdtemp()
# create a repository for the tests to use
self.repo = os.path.join(self.home, 'Dotfiles')
os.mkdir(self.repo)
def tearDown(self):
"""Delete the temporary home directory and its contents."""
shutil.rmtree(self.home)
def test_force_sync_directory(self):
"""Test forced sync when the dotfile is a directory.
I installed the lastpass chrome extension which stores a socket in
~/.lastpass. So I added that directory as an external to /tmp and
attempted a forced sync. An error occurred because sync() calls
os.remove() as it mistakenly assumes the dotfile is a file and not
a directory.
"""
os.mkdir(os.path.join(self.home, '.lastpass'))
externals = {'.lastpass': '/tmp'}
dotfiles = core.Dotfiles(home=self.home, repo=self.repo, prefix='',
ignore=[], externals=externals)
dotfiles.sync(force=True)
self.assertEqual(
os.path.realpath(os.path.join(self.home, '.lastpass')), '/tmp')
def test_move_repository(self):
"""Test the move() method for a Dotfiles repository."""
touch(os.path.join(self.repo, 'bashrc'))
dotfiles = core.Dotfiles(
home=self.home, repo=self.repo, prefix='',
ignore=[], force=True, externals={})
dotfiles.sync()
# make sure sync() did the right thing
self.assertEqual(
os.path.realpath(os.path.join(self.home, '.bashrc')),
os.path.join(self.repo, 'bashrc'))
target = os.path.join(self.home, 'MyDotfiles')
dotfiles.move(target)
self.assertTrue(os.path.exists(os.path.join(target, 'bashrc')))
self.assertEqual(
os.path.realpath(os.path.join(self.home, '.bashrc')),
os.path.join(target, 'bashrc'))
def suite():
suite = unittest.TestLoader().loadTestsFromTestCase(DotfilesTestCase)
return suite
if __name__ == '__main__':
unittest.TextTestRunner().run(suite())
|
<commit_before><commit_msg>Add unit tests for recent bugfix and move operation<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import shutil
import tempfile
import unittest
from dotfiles import core
def touch(fname, times=None):
with file(fname, 'a'):
os.utime(fname, times)
class DotfilesTestCase(unittest.TestCase):
def setUp(self):
"""Create a temporary home directory."""
self.home = tempfile.mkdtemp()
# create a repository for the tests to use
self.repo = os.path.join(self.home, 'Dotfiles')
os.mkdir(self.repo)
def tearDown(self):
"""Delete the temporary home directory and its contents."""
shutil.rmtree(self.home)
def test_force_sync_directory(self):
"""Test forced sync when the dotfile is a directory.
I installed the lastpass chrome extension which stores a socket in
~/.lastpass. So I added that directory as an external to /tmp and
attempted a forced sync. An error occurred because sync() calls
os.remove() as it mistakenly assumes the dotfile is a file and not
a directory.
"""
os.mkdir(os.path.join(self.home, '.lastpass'))
externals = {'.lastpass': '/tmp'}
dotfiles = core.Dotfiles(home=self.home, repo=self.repo, prefix='',
ignore=[], externals=externals)
dotfiles.sync(force=True)
self.assertEqual(
os.path.realpath(os.path.join(self.home, '.lastpass')), '/tmp')
def test_move_repository(self):
"""Test the move() method for a Dotfiles repository."""
touch(os.path.join(self.repo, 'bashrc'))
dotfiles = core.Dotfiles(
home=self.home, repo=self.repo, prefix='',
ignore=[], force=True, externals={})
dotfiles.sync()
# make sure sync() did the right thing
self.assertEqual(
os.path.realpath(os.path.join(self.home, '.bashrc')),
os.path.join(self.repo, 'bashrc'))
target = os.path.join(self.home, 'MyDotfiles')
dotfiles.move(target)
self.assertTrue(os.path.exists(os.path.join(target, 'bashrc')))
self.assertEqual(
os.path.realpath(os.path.join(self.home, '.bashrc')),
os.path.join(target, 'bashrc'))
def suite():
suite = unittest.TestLoader().loadTestsFromTestCase(DotfilesTestCase)
return suite
if __name__ == '__main__':
unittest.TextTestRunner().run(suite())
|
Add unit tests for recent bugfix and move operation#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import shutil
import tempfile
import unittest
from dotfiles import core
def touch(fname, times=None):
with file(fname, 'a'):
os.utime(fname, times)
class DotfilesTestCase(unittest.TestCase):
def setUp(self):
"""Create a temporary home directory."""
self.home = tempfile.mkdtemp()
# create a repository for the tests to use
self.repo = os.path.join(self.home, 'Dotfiles')
os.mkdir(self.repo)
def tearDown(self):
"""Delete the temporary home directory and its contents."""
shutil.rmtree(self.home)
def test_force_sync_directory(self):
"""Test forced sync when the dotfile is a directory.
I installed the lastpass chrome extension which stores a socket in
~/.lastpass. So I added that directory as an external to /tmp and
attempted a forced sync. An error occurred because sync() calls
os.remove() as it mistakenly assumes the dotfile is a file and not
a directory.
"""
os.mkdir(os.path.join(self.home, '.lastpass'))
externals = {'.lastpass': '/tmp'}
dotfiles = core.Dotfiles(home=self.home, repo=self.repo, prefix='',
ignore=[], externals=externals)
dotfiles.sync(force=True)
self.assertEqual(
os.path.realpath(os.path.join(self.home, '.lastpass')), '/tmp')
def test_move_repository(self):
"""Test the move() method for a Dotfiles repository."""
touch(os.path.join(self.repo, 'bashrc'))
dotfiles = core.Dotfiles(
home=self.home, repo=self.repo, prefix='',
ignore=[], force=True, externals={})
dotfiles.sync()
# make sure sync() did the right thing
self.assertEqual(
os.path.realpath(os.path.join(self.home, '.bashrc')),
os.path.join(self.repo, 'bashrc'))
target = os.path.join(self.home, 'MyDotfiles')
dotfiles.move(target)
self.assertTrue(os.path.exists(os.path.join(target, 'bashrc')))
self.assertEqual(
os.path.realpath(os.path.join(self.home, '.bashrc')),
os.path.join(target, 'bashrc'))
def suite():
suite = unittest.TestLoader().loadTestsFromTestCase(DotfilesTestCase)
return suite
if __name__ == '__main__':
unittest.TextTestRunner().run(suite())
|
<commit_before><commit_msg>Add unit tests for recent bugfix and move operation<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import shutil
import tempfile
import unittest
from dotfiles import core
def touch(fname, times=None):
with file(fname, 'a'):
os.utime(fname, times)
class DotfilesTestCase(unittest.TestCase):
def setUp(self):
"""Create a temporary home directory."""
self.home = tempfile.mkdtemp()
# create a repository for the tests to use
self.repo = os.path.join(self.home, 'Dotfiles')
os.mkdir(self.repo)
def tearDown(self):
"""Delete the temporary home directory and its contents."""
shutil.rmtree(self.home)
def test_force_sync_directory(self):
"""Test forced sync when the dotfile is a directory.
I installed the lastpass chrome extension which stores a socket in
~/.lastpass. So I added that directory as an external to /tmp and
attempted a forced sync. An error occurred because sync() calls
os.remove() as it mistakenly assumes the dotfile is a file and not
a directory.
"""
os.mkdir(os.path.join(self.home, '.lastpass'))
externals = {'.lastpass': '/tmp'}
dotfiles = core.Dotfiles(home=self.home, repo=self.repo, prefix='',
ignore=[], externals=externals)
dotfiles.sync(force=True)
self.assertEqual(
os.path.realpath(os.path.join(self.home, '.lastpass')), '/tmp')
def test_move_repository(self):
"""Test the move() method for a Dotfiles repository."""
touch(os.path.join(self.repo, 'bashrc'))
dotfiles = core.Dotfiles(
home=self.home, repo=self.repo, prefix='',
ignore=[], force=True, externals={})
dotfiles.sync()
# make sure sync() did the right thing
self.assertEqual(
os.path.realpath(os.path.join(self.home, '.bashrc')),
os.path.join(self.repo, 'bashrc'))
target = os.path.join(self.home, 'MyDotfiles')
dotfiles.move(target)
self.assertTrue(os.path.exists(os.path.join(target, 'bashrc')))
self.assertEqual(
os.path.realpath(os.path.join(self.home, '.bashrc')),
os.path.join(target, 'bashrc'))
def suite():
suite = unittest.TestLoader().loadTestsFromTestCase(DotfilesTestCase)
return suite
if __name__ == '__main__':
unittest.TextTestRunner().run(suite())
|
|
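A note on the dotfiles record above: its touch() helper relies on the Python 2 file() builtin, which does not exist in Python 3. A hedged Python 3 port is sketched below, exercised with the same tmpdir-and-symlink assertion shape the tests use (the symlink step only approximates what dotfiles' sync() does).

import os
import shutil
import tempfile

def touch(fname, times=None):
    # Python 3 port of the record's helper: open() replaces file().
    with open(fname, 'a'):
        os.utime(fname, times)

home = tempfile.mkdtemp()
try:
    repo_file = os.path.join(home, 'bashrc')
    touch(repo_file)
    os.symlink(repo_file, os.path.join(home, '.bashrc'))  # stand-in for sync()
    assert (os.path.realpath(os.path.join(home, '.bashrc'))
            == os.path.realpath(repo_file))
finally:
    shutil.rmtree(home)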
2b0e13039dad8d116a5719540004bed317bb6960
|
tests/api/test_organizations.py
|
tests/api/test_organizations.py
|
# -*- coding: utf-8 -*-
"""pytest Licenses functions, fixtures and tests."""
import pytest
import ciscosparkapi
# Helper Functions
def list_organizations(api, max=None):
return list(api.organizations.list(max=max))
def get_organization_by_id(api, orgId):
return api.organizations.get(orgId)
def is_valid_organization(obj):
return isinstance(obj, ciscosparkapi.Organization) and obj.id is not None
def are_valid_organizations(iterable):
return all([is_valid_organization(obj) for obj in iterable])
# pytest Fixtures
@pytest.fixture(scope="session")
def organizations_list(api):
return list_organizations(api)
# Tests
class TestOrganizationsAPI(object):
"""Test OrganizationsAPI methods."""
def test_list_organizations(self, organizations_list):
assert are_valid_organizations(organizations_list)
def test_get_organization_by_id(self, api, organizations_list):
assert len(organizations_list) >= 1
org_id = organizations_list[0].id
org = get_organization_by_id(api, orgId=org_id)
assert is_valid_organization(org)
|
Add tests and fixtures for the Organizations API wrapper
|
Add tests and fixtures for the Organizations API wrapper
|
Python
|
mit
|
jbogarin/ciscosparkapi
|
Add tests and fixtures for the Organizations API wrapper
|
# -*- coding: utf-8 -*-
"""pytest Licenses functions, fixtures and tests."""
import pytest
import ciscosparkapi
# Helper Functions
def list_organizations(api, max=None):
return list(api.organizations.list(max=max))
def get_organization_by_id(api, orgId):
return api.organizations.get(orgId)
def is_valid_organization(obj):
return isinstance(obj, ciscosparkapi.Organization) and obj.id is not None
def are_valid_organizations(iterable):
return all([is_valid_organization(obj) for obj in iterable])
# pytest Fixtures
@pytest.fixture(scope="session")
def organizations_list(api):
return list_organizations(api)
# Tests
class TestOrganizationsAPI(object):
"""Test OrganizationsAPI methods."""
def test_list_organizations(self, organizations_list):
assert are_valid_organizations(organizations_list)
def test_get_organization_by_id(self, api, organizations_list):
assert len(organizations_list) >= 1
org_id = organizations_list[0].id
org = get_organization_by_id(api, orgId=org_id)
assert is_valid_organization(org)
|
<commit_before><commit_msg>Add tests and fixtures for the Organizations API wrapper<commit_after>
|
# -*- coding: utf-8 -*-
"""pytest Licenses functions, fixtures and tests."""
import pytest
import ciscosparkapi
# Helper Functions
def list_organizations(api, max=None):
return list(api.organizations.list(max=max))
def get_organization_by_id(api, orgId):
return api.organizations.get(orgId)
def is_valid_organization(obj):
return isinstance(obj, ciscosparkapi.Organization) and obj.id is not None
def are_valid_organizations(iterable):
return all([is_valid_organization(obj) for obj in iterable])
# pytest Fixtures
@pytest.fixture(scope="session")
def organizations_list(api):
return list_organizations(api)
# Tests
class TestOrganizationsAPI(object):
"""Test OrganizationsAPI methods."""
def test_list_organizations(self, organizations_list):
assert are_valid_organizations(organizations_list)
def test_get_organization_by_id(self, api, organizations_list):
assert len(organizations_list) >= 1
org_id = organizations_list[0].id
org = get_organization_by_id(api, orgId=org_id)
assert is_valid_organization(org)
|
Add tests and fixtures for the Organizations API wrapper# -*- coding: utf-8 -*-
"""pytest Licenses functions, fixtures and tests."""
import pytest
import ciscosparkapi
# Helper Functions
def list_organizations(api, max=None):
return list(api.organizations.list(max=max))
def get_organization_by_id(api, orgId):
return api.organizations.get(orgId)
def is_valid_organization(obj):
return isinstance(obj, ciscosparkapi.Organization) and obj.id is not None
def are_valid_organizations(iterable):
return all([is_valid_organization(obj) for obj in iterable])
# pytest Fixtures
@pytest.fixture(scope="session")
def organizations_list(api):
return list_organizations(api)
# Tests
class TestOrganizationsAPI(object):
"""Test OrganizationsAPI methods."""
def test_list_organizations(self, organizations_list):
assert are_valid_organizations(organizations_list)
def test_get_organization_by_id(self, api, organizations_list):
assert len(organizations_list) >= 1
org_id = organizations_list[0].id
org = get_organization_by_id(api, orgId=org_id)
assert is_valid_organization(org)
|
<commit_before><commit_msg>Add tests and fixtures for the Organizations API wrapper<commit_after># -*- coding: utf-8 -*-
"""pytest Licenses functions, fixtures and tests."""
import pytest
import ciscosparkapi
# Helper Functions
def list_organizations(api, max=None):
return list(api.organizations.list(max=max))
def get_organization_by_id(api, orgId):
return api.organizations.get(orgId)
def is_valid_organization(obj):
return isinstance(obj, ciscosparkapi.Organization) and obj.id is not None
def are_valid_organizations(iterable):
return all([is_valid_organization(obj) for obj in iterable])
# pytest Fixtures
@pytest.fixture(scope="session")
def organizations_list(api):
return list_organizations(api)
# Tests
class TestOrganizationsAPI(object):
"""Test OrganizationsAPI methods."""
def test_list_organizations(self, organizations_list):
assert are_valid_organizations(organizations_list)
def test_get_organization_by_id(self, api, organizations_list):
assert len(organizations_list) >= 1
org_id = organizations_list[0].id
org = get_organization_by_id(api, orgId=org_id)
assert is_valid_organization(org)
|
|
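The organizations record above layers module-level helpers under a session-scoped fixture so a single live API call feeds several tests. The sketch below reproduces that layering against a hand-rolled stub (StubAPI, StubOrg, and the org id are invented) so the pattern runs offline without Spark credentials.

import pytest

class StubOrg:
    def __init__(self, id):
        self.id = id

class StubAPI:
    class organizations:
        @staticmethod
        def list(max=None):
            return iter([StubOrg('Y2lzY28x')])

        @staticmethod
        def get(org_id):
            return StubOrg(org_id)

@pytest.fixture(scope='session')
def api():
    return StubAPI()

def test_get_first_org(api):
    orgs = list(api.organizations.list())
    assert orgs
    assert api.organizations.get(orgs[0].id).id == orgs[0].id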
5d3918c885f430e79e8283533ad5eb3a84ffecc7
|
blazar/db/migration/alembic_migrations/versions/75a74e4539cb_update_lease_status.py
|
blazar/db/migration/alembic_migrations/versions/75a74e4539cb_update_lease_status.py
|
# Copyright 2018 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""update lease status
Revision ID: 75a74e4539cb
Revises: e66f199a5414
Create Date: 2018-01-23 11:05:56.753579
"""
# revision identifiers, used by Alembic.
revision = '75a74e4539cb'
down_revision = 'e66f199a5414'
from blazar.db import api as db_api
from blazar.status import LeaseStatus as ls
def upgrade():
leases = db_api.lease_get_all()
for lease in leases:
db_api.lease_update(lease['id'],
{'status': ls.derive_stable_status(lease['id'])})
def downgrade():
leases = db_api.lease_get_all()
for lease in leases:
db_api.lease_update(lease['id'],
{'status': None})
|
Add migration code for updating lease status
|
Add migration code for updating lease status
Lease status was None before the status module was introduced.
This patch is for updating the None status.
Partially Implements: blueprint state-machine
Change-Id: I64cc07737d5c1c83a4f91d485a21c9a459305b9a
|
Python
|
apache-2.0
|
openstack/blazar,stackforge/blazar,openstack/blazar,ChameleonCloud/blazar,ChameleonCloud/blazar,stackforge/blazar
|
Add migration code for updating lease status
Lease status was None before the status module was introduced.
This patch is for updating the None status.
Partially Implements: blueprint state-machine
Change-Id: I64cc07737d5c1c83a4f91d485a21c9a459305b9a
|
# Copyright 2018 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""update lease status
Revision ID: 75a74e4539cb
Revises: e66f199a5414
Create Date: 2018-01-23 11:05:56.753579
"""
# revision identifiers, used by Alembic.
revision = '75a74e4539cb'
down_revision = 'e66f199a5414'
from blazar.db import api as db_api
from blazar.status import LeaseStatus as ls
def upgrade():
leases = db_api.lease_get_all()
for lease in leases:
db_api.lease_update(lease['id'],
{'status': ls.derive_stable_status(lease['id'])})
def downgrade():
leases = db_api.lease_get_all()
for lease in leases:
db_api.lease_update(lease['id'],
{'status': None})
|
<commit_before><commit_msg>Add migration code for updating lease status
Lease status was None before the status module was introduced.
This patch is for updating the None status.
Partially Implements: blueprint state-machine
Change-Id: I64cc07737d5c1c83a4f91d485a21c9a459305b9a<commit_after>
|
# Copyright 2018 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""update lease status
Revision ID: 75a74e4539cb
Revises: e66f199a5414
Create Date: 2018-01-23 11:05:56.753579
"""
# revision identifiers, used by Alembic.
revision = '75a74e4539cb'
down_revision = 'e66f199a5414'
from blazar.db import api as db_api
from blazar.status import LeaseStatus as ls
def upgrade():
leases = db_api.lease_get_all()
for lease in leases:
db_api.lease_update(lease['id'],
{'status': ls.derive_stable_status(lease['id'])})
def downgrade():
leases = db_api.lease_get_all()
for lease in leases:
db_api.lease_update(lease['id'],
{'status': None})
|
Add migration code for updating lease status
Lease status was None before the status module was introduced.
This patch is for updating the None status.
Partially Implements: blueprint state-machine
Change-Id: I64cc07737d5c1c83a4f91d485a21c9a459305b9a# Copyright 2018 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""update lease status
Revision ID: 75a74e4539cb
Revises: e66f199a5414
Create Date: 2018-01-23 11:05:56.753579
"""
# revision identifiers, used by Alembic.
revision = '75a74e4539cb'
down_revision = 'e66f199a5414'
from blazar.db import api as db_api
from blazar.status import LeaseStatus as ls
def upgrade():
leases = db_api.lease_get_all()
for lease in leases:
db_api.lease_update(lease['id'],
{'status': ls.derive_stable_status(lease['id'])})
def downgrade():
leases = db_api.lease_get_all()
for lease in leases:
db_api.lease_update(lease['id'],
{'status': None})
|
<commit_before><commit_msg>Add migration code for updating lease status
Lease status was None before the status module was introduced.
This patch is for updating the None status.
Partially Implements: blueprint state-machine
Change-Id: I64cc07737d5c1c83a4f91d485a21c9a459305b9a<commit_after># Copyright 2018 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""update lease status
Revision ID: 75a74e4539cb
Revises: e66f199a5414
Create Date: 2018-01-23 11:05:56.753579
"""
# revision identifiers, used by Alembic.
revision = '75a74e4539cb'
down_revision = 'e66f199a5414'
from blazar.db import api as db_api
from blazar.status import LeaseStatus as ls
def upgrade():
leases = db_api.lease_get_all()
for lease in leases:
db_api.lease_update(lease['id'],
{'status': ls.derive_stable_status(lease['id'])})
def downgrade():
leases = db_api.lease_get_all()
for lease in leases:
db_api.lease_update(lease['id'],
{'status': None})
|
|
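The blazar record above is a data migration: upgrade()/downgrade() rewrite rows through the project's DB API instead of altering schema. A generic sketch of the same shape using only Alembic's own helpers is below; the table definition, revision ids, and the 'STABLE' backfill value are illustrative, not blazar's.

from alembic import op
import sqlalchemy as sa

revision = '000000000000'      # placeholder ids
down_revision = None

# Lightweight table handle: just enough metadata to build UPDATE statements.
leases = sa.table('leases',
                  sa.column('id', sa.String),
                  sa.column('status', sa.String))

def upgrade():
    # Backfill a status for rows created before the column meant anything.
    op.execute(leases.update()
                     .where(leases.c.status.is_(None))
                     .values(status='STABLE'))

def downgrade():
    op.execute(leases.update().values(status=None))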
6279341682ae45a228302972dbd106a2e44e0b12
|
examples/example_test.py
|
examples/example_test.py
|
import unittest
from flask import Flask
from flask_json import json_response, FlaskJSON, JsonTestResponse
def our_app():
app = Flask(__name__)
app.test_value = 0
FlaskJSON(app)
@app.route('/increment')
def increment():
app.test_value += 1
return json_response(value=app.test_value)
return app
class OurAppTestCase(unittest.TestCase):
def setUp(self):
self.app = our_app()
self.app.config['TESTING'] = True
# We have to change response class manually since TESTING flag is
# set after Flask-JSON initialization.
self.app.response_class = JsonTestResponse
self.client = self.app.test_client()
def test_app(self):
r = self.client.get('/increment')
# Here is how we can access the JSON.
assert 'value' in r.json
assert r.json['value'] == 1
if __name__ == '__main__':
unittest.main()
|
Add example usage of the JsonTestResponse.
|
Add example usage of the JsonTestResponse.
|
Python
|
bsd-3-clause
|
craig552uk/flask-json
|
Add example usage of the JsonTestResponse.
|
import unittest
from flask import Flask
from flask_json import json_response, FlaskJSON, JsonTestResponse
def our_app():
app = Flask(__name__)
app.test_value = 0
FlaskJSON(app)
@app.route('/increment')
def increment():
app.test_value += 1
return json_response(value=app.test_value)
return app
class OurAppTestCase(unittest.TestCase):
def setUp(self):
self.app = our_app()
self.app.config['TESTING'] = True
# We have to change response class manually since TESTING flag is
# set after Flask-JSON initialization.
self.app.response_class = JsonTestResponse
self.client = self.app.test_client()
def test_app(self):
r = self.client.get('/increment')
# Here is how we can access the JSON.
assert 'value' in r.json
assert r.json['value'] == 1
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add example usage of the JsonTestResponse.<commit_after>
|
import unittest
from flask import Flask
from flask_json import json_response, FlaskJSON, JsonTestResponse
def our_app():
app = Flask(__name__)
app.test_value = 0
FlaskJSON(app)
@app.route('/increment')
def increment():
app.test_value += 1
return json_response(value=app.test_value)
return app
class OurAppTestCase(unittest.TestCase):
def setUp(self):
self.app = our_app()
self.app.config['TESTING'] = True
# We have to change response class manually since TESTING flag is
# set after Flask-JSON initialization.
self.app.response_class = JsonTestResponse
self.client = self.app.test_client()
def test_app(self):
r = self.client.get('/increment')
# Here is how we can access the JSON.
assert 'value' in r.json
assert r.json['value'] == 1
if __name__ == '__main__':
unittest.main()
|
Add example usage of the JsonTestResponse.import unittest
from flask import Flask
from flask_json import json_response, FlaskJSON, JsonTestResponse
def our_app():
app = Flask(__name__)
app.test_value = 0
FlaskJSON(app)
@app.route('/increment')
def increment():
app.test_value += 1
return json_response(value=app.test_value)
return app
class OurAppTestCase(unittest.TestCase):
def setUp(self):
self.app = our_app()
self.app.config['TESTING'] = True
# We have to change response class manually since TESTING flag is
# set after Flask-JSON initialization.
self.app.response_class = JsonTestResponse
self.client = self.app.test_client()
def test_app(self):
r = self.client.get('/increment')
# Here is how we can access the JSON.
assert 'value' in r.json
assert r.json['value'] == 1
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add example usage of the JsonTestResponse.<commit_after>import unittest
from flask import Flask
from flask_json import json_response, FlaskJSON, JsonTestResponse
def our_app():
app = Flask(__name__)
app.test_value = 0
FlaskJSON(app)
@app.route('/increment')
def increment():
app.test_value += 1
return json_response(value=app.test_value)
return app
class OurAppTestCase(unittest.TestCase):
def setUp(self):
self.app = our_app()
self.app.config['TESTING'] = True
# We have to change response class manually since TESTING flag is
# set after Flask-JSON initialization.
self.app.response_class = JsonTestResponse
self.client = self.app.test_client()
def test_app(self):
r = self.client.get('/increment')
# Here is how we can access the JSON.
assert 'value' in r.json
assert r.json['value'] == 1
if __name__ == '__main__':
unittest.main()
|
|
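The setUp() comment in the record above implies the manual response_class swap is only needed because TESTING is set after FlaskJSON(app). If that inference holds, setting the flag before initializing the extension lets it pick the test response class itself; the sketch below assumes exactly that and should be read as hedged, not as documented flask_json behavior. The /ping route and make_app() name are invented.

from flask import Flask
from flask_json import FlaskJSON, json_response

def make_app():
    app = Flask(__name__)
    app.config['TESTING'] = True   # set *before* the extension initializes
    FlaskJSON(app)

    @app.route('/ping')
    def ping():
        return json_response(ok=True)

    return app

client = make_app().test_client()
r = client.get('/ping')
assert r.get_json()['ok'] is True  # get_json() is plain Flask, no custom class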
2427dbad4fc0cfe7685dc2767069748d37262796
|
movienamer/identify.py
|
movienamer/identify.py
|
import os.path as path
import re
import Levenshtein
from .sanitize import sanitize
from .tmdb import search
def _gather(filename, directory=None, titles={}):
# Sanitize the input filename
name, year = sanitize(filename)
# Start with a basic search
results = search(name, year)
if year is not None and len(results) == 0:
# If no results are found when year is present,
# allow a tolerance of 1 in the year
results = search(name, year + 1)
results = results + search(name, year - 1)
# Try to find a result with zero error and return
zero_distance_results = []
for i, result in enumerate(results):
distance = Levenshtein.distance(
unicode(re.sub('[^a-zA-Z0-9]', '', name.lower())),
unicode(re.sub('[^a-zA-Z0-9]', '', result['title'].lower()))
)
# Update the results with the distance
result['distance'] = distance
results[i]['distance'] = distance
# Update the results with year
result['with_year'] = (year is not None)
results[i]['with_year'] = (year is not None)
# Add count field to the result
result['count'] = 1
results[i]['count'] = 1
if distance == 0:
zero_distance_results.append(result)
if len(zero_distance_results) > 0:
# Directly return results with zero error
return zero_distance_results
if year is not None and len(results) > 0:
# Directly return results which were queried with year
return results
# If there are neither zero-distance results nor a year,
# accumulate results from directory one level up
if directory is not None:
dirname = directory.split('/')[-1]
results_from_directory = identify(dirname)
results_to_be_removed = []
# Increment count for all duplicate results
for i, r1 in enumerate(results):
for r2 in results_from_directory:
if r1['popularity'] == r2['popularity']:
# Check with popularity since title can be duplicate
results[i]['count'] += r2['count']
results_from_directory.remove(r2)
break
results = results + results_from_directory
return results
def identify(filename, directory=None):
results = _gather(filename, directory)
max_distance = 1 + max([result['distance'] for result in results])
return sorted(
results,
lambda r: (r['count'] ** 1.1) \
* ((max_distance - r['distance'])) \
* ((1 + r['with_year'])) \
* ((r['popularity'])),
reverse=True
)
|
Add initial version of identification algorithm
|
Add initial version of identification algorithm
|
Python
|
mit
|
divijbindlish/movienamer
|
Add initial version of identification algorithm
|
import os.path as path
import re
import Levenshtein
from .sanitize import sanitize
from .tmdb import search
def _gather(filename, directory=None, titles={}):
# Sanitize the input filename
name, year = sanitize(filename)
# Start with a basic search
results = search(name, year)
if year is not None and len(results) == 0:
# If no results are found when year is present,
# allow a tolerance of 1 in the year
results = search(name, year + 1)
results = results + search(name, year - 1)
# Try to find a result with zero error and return
zero_distance_results = []
for i, result in enumerate(results):
distance = Levenshtein.distance(
unicode(re.sub('[^a-zA-Z0-9]', '', name.lower())),
unicode(re.sub('[^a-zA-Z0-9]', '', result['title'].lower()))
)
# Update the results with the distance
result['distance'] = distance
results[i]['distance'] = distance
# Update the results with year
result['with_year'] = (year is not None)
results[i]['with_year'] = (year is not None)
# Add count field to the result
result['count'] = 1
results[i]['count'] = 1
if distance == 0:
zero_distance_results.append(result)
if len(zero_distance_results) > 0:
# Directly return results with zero error
return zero_distance_results
if year is not None and len(results) > 0:
# Directly return results which were queried with year
return results
# If there are neither zero-distance results nor a year,
# accumulate results from directory one level up
if directory is not None:
dirname = directory.split('/')[-1]
results_from_directory = identify(dirname)
results_to_be_removed = []
# Increment count for all duplicate results
for i, r1 in enumerate(results):
for r2 in results_from_directory:
if r1['popularity'] == r2['popularity']:
# Check with popularity since title can be duplicate
results[i]['count'] += r2['count']
results_from_directory.remove(r2)
break
results = results + results_from_directory
return results
def identify(filename, directory=None):
results = _gather(filename, directory)
max_distance = 1 + max([result['distance'] for result in results])
return sorted(
results,
lambda r: (r['count'] ** 1.1) \
* ((max_distance - r['distance'])) \
* ((1 + r['with_year'])) \
* ((r['popularity'])),
reverse=True
)
|
<commit_before><commit_msg>Add initial version of identification algorithm<commit_after>
|
import os.path as path
import re
import Levenshtein
from .sanitize import sanitize
from .tmdb import search
def _gather(filename, directory=None, titles={}):
# Sanitize the input filename
name, year = sanitize(filename)
# Start with a basic search
results = search(name, year)
if year is not None and len(results) == 0:
# If no results are found when year is present,
# allow a tolerance of 1 in the year
results = search(name, year + 1)
results = results + search(name, year - 1)
# Try to find a result with zero error and return
zero_distance_results = []
for i, result in enumerate(results):
distance = Levenshtein.distance(
unicode(re.sub('[^a-zA-Z0-9]', '', name.lower())),
unicode(re.sub('[^a-zA-Z0-9]', '', result['title'].lower()))
)
# Update the results with the distance
result['distance'] = distance
results[i]['distance'] = distance
# Update the results with year
result['with_year'] = (year is not None)
results[i]['with_year'] = (year is not None)
# Add count field to the result
result['count'] = 1
results[i]['count'] = 1
if distance == 0:
zero_distance_results.append(result)
if len(zero_distance_results) > 0:
# Directly return results with zero error
return zero_distance_results
if year is not None and len(results) > 0:
# Directly return results which were queried with year
return results
# If there are neither zero-distance results nor a year,
# accumulate results from directory one level up
if directory is not None:
dirname = directory.split('/')[-1]
results_from_directory = identify(dirname)
results_to_be_removed = []
# Increment count for all duplicate results
for i, r1 in enumerate(results):
for r2 in results_from_directory:
if r1['popularity'] == r2['popularity']:
# Check with popularity since title can be duplicate
results[i]['count'] += r2['count']
results_from_directory.remove(r2)
break
results = results + results_from_directory
return results
def identify(filename, directory=None):
results = _gather(filename, directory)
max_distance = 1 + max([result['distance'] for result in results])
return sorted(
results,
lambda r: (r['count'] ** 1.1) \
* ((max_distance - r['distance'])) \
* ((1 + r['with_year'])) \
* ((r['popularity'])),
reverse=True
)
|
Add initial version of identification algorithmimport os.path as path
import re
import Levenshtein
from .sanitize import sanitize
from .tmdb import search
def _gather(filename, directory=None, titles={}):
# Sanitize the input filename
name, year = sanitize(filename)
# Start with a basic search
results = search(name, year)
if year is not None and len(results) == 0:
# If no results are found when year is present,
# allow a tolerance of 1 in the year
results = search(name, year + 1)
results = results + search(name, year - 1)
# Try to find a result with zero error and return
zero_distance_results = []
for i, result in enumerate(results):
distance = Levenshtein.distance(
unicode(re.sub('[^a-zA-Z0-9]', '', name.lower())),
unicode(re.sub('[^a-zA-Z0-9]', '', result['title'].lower()))
)
# Update the results with the distance
result['distance'] = distance
results[i]['distance'] = distance
# Update the results with year
result['with_year'] = (year is not None)
results[i]['with_year'] = (year is not None)
# Add count field to the result
result['count'] = 1
results[i]['count'] = 1
if distance == 0:
zero_distance_results.append(result)
if len(zero_distance_results) > 0:
# Directly return results with zero error
return zero_distance_results
if year is not None and len(results) > 0:
# Directly return results which were queried with year
return results
# If there are neither zero-distance results nor a year,
# accumulate results from directory one level up
if directory is not None:
dirname = directory.split('/')[-1]
results_from_directory = identify(dirname)
results_to_be_removed = []
# Increment count for all duplicate results
for i, r1 in enumerate(results):
for r2 in results_from_directory:
if r1['popularity'] == r2['popularity']:
# Check with popularity since title can be duplicate
results[i]['count'] += r2['count']
results_from_directory.remove(r2)
break
results = results + results_from_directory
return results
def identify(filename, directory=None):
results = _gather(filename, directory)
max_distance = 1 + max([result['distance'] for result in results])
return sorted(
results,
lambda r: (r['count'] ** 1.1) \
* ((max_distance - r['distance'])) \
* ((1 + r['with_year'])) \
* ((r['popularity'])),
reverse=True
)
|
<commit_before><commit_msg>Add initial version of identification algorithm<commit_after>import os.path as path
import re
import Levenshtein
from .sanitize import sanitize
from .tmdb import search
def _gather(filename, directory=None, titles={}):
# Sanitize the input filename
name, year = sanitize(filename)
# Start with a basic search
results = search(name, year)
if year is not None and len(results) == 0:
# If no results are found when year is present,
# allow a tolerance of 1 in the year
results = search(name, year + 1)
results = results + search(name, year - 1)
# Try to find a result with zero error and return
zero_distance_results = []
for i, result in enumerate(results):
distance = Levenshtein.distance(
unicode(re.sub('[^a-zA-Z0-9]', '', name.lower())),
unicode(re.sub('[^a-zA-Z0-9]', '', result['title'].lower()))
)
# Update the results with the distance
result['distance'] = distance
results[i]['distance'] = distance
# Update the results with year
result['with_year'] = (year is not None)
results[i]['with_year'] = (year is not None)
# Add count field to the result
result['count'] = 1
results[i]['count'] = 1
if distance == 0:
zero_distance_results.append(result)
if len(zero_distance_results) > 0:
# Directly return results with zero error
return zero_distance_results
if year is not None and len(results) > 0:
# Directly return results which were queried with year
return results
# If there are neither zero-distance results nor a year,
# accumulate results from directory one level up
if directory is not None:
dirname = directory.split('/')[-1]
results_from_directory = identify(dirname)
results_to_be_removed = []
# Increment count for all duplicate results
for i, r1 in enumerate(results):
for r2 in results_from_directory:
if r1['popularity'] == r2['popularity']:
# Check with popularity since title can be duplicate
results[i]['count'] += r2['count']
results_from_directory.remove(r2)
break
results = results + results_from_directory
return results
def identify(filename, directory=None):
results = _gather(filename, directory)
max_distance = 1 + max([result['distance'] for result in results])
return sorted(
results,
lambda r: (r['count'] ** 1.1) \
* ((max_distance - r['distance'])) \
* ((1 + r['with_year'])) \
* ((r['popularity'])),
reverse=True
)
|
|
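Two Python-2-isms in the movienamer record above are worth flagging: unicode() does not exist in Python 3, and sorted() is passed the key function positionally, which Python 3 rejects outright (and Python 2 would have treated as a cmp function). The ranking formula itself ports cleanly once key= is spelled out, as this toy-data sketch shows.

# Toy results; field names match the record, values are invented.
results = [
    {'title': 'Foo', 'count': 2, 'distance': 1, 'with_year': True,
     'popularity': 8.5},
    {'title': 'Foo Bar', 'count': 1, 'distance': 0, 'with_year': False,
     'popularity': 3.0},
]
max_distance = 1 + max(r['distance'] for r in results)

ranked = sorted(
    results,
    key=lambda r: (r['count'] ** 1.1)
                  * (max_distance - r['distance'])
                  * (1 + r['with_year'])        # bool coerces to 0/1
                  * r['popularity'],
    reverse=True,
)
print([r['title'] for r in ranked])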
83e136a0e0d93d1dde4966322a3b51f453d0a1ba
|
tcflib/examples/csv_exporter.py
|
tcflib/examples/csv_exporter.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import csv
from io import StringIO
from collections import OrderedDict
from tcflib.service import ExportingWorker, run_as_cli
class CSVExporter(ExportingWorker):
def export(self):
columns = OrderedDict()
columns['tokenID'] = [token.id for token in self.corpus.tokens]
columns['token'] = [token.text for token in self.corpus.tokens]
if hasattr(self.corpus, 'postags'):
columns['POStag'] = [token.tag for token in self.corpus.tokens]
if hasattr(self.corpus, 'lemmas'):
columns['lemma'] = [token.lemma for token in self.corpus.tokens]
if hasattr(self.corpus, 'wsd'):
columns['wordsenses'] = [', '.join(token.wordsenses)
for token in self.corpus.tokens]
if hasattr(self.corpus, 'namedentities'):
entities = []
for token in self.corpus.tokens:
if not token.entity:
entities.append('')
elif token == token.entity.tokens[0]:
entities.append('B-{}'.format(token.entity.class_))
else:
entities.append('I-{}'.format(token.entity.class_))
columns['NamedEntity'] = entities
# Write to CSV
with StringIO(newline='') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(list(columns.keys()))
for row in zip(*columns.values()):
writer.writerow(row)
outstring = csvfile.getvalue()
return outstring.encode('utf-8')
if __name__ == '__main__':
run_as_cli(CSVExporter)
|
Add simple CSV exporter to examples.
|
Add simple CSV exporter to examples.
|
Python
|
mit
|
SeNeReKo/TCFlib
|
Add simple CSV exporter to examples.
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import csv
from io import StringIO
from collections import OrderedDict
from tcflib.service import ExportingWorker, run_as_cli
class CSVExporter(ExportingWorker):
def export(self):
columns = OrderedDict()
columns['tokenID'] = [token.id for token in self.corpus.tokens]
columns['token'] = [token.text for token in self.corpus.tokens]
if hasattr(self.corpus, 'postags'):
columns['POStag'] = [token.tag for token in self.corpus.tokens]
if hasattr(self.corpus, 'lemmas'):
columns['lemma'] = [token.lemma for token in self.corpus.tokens]
if hasattr(self.corpus, 'wsd'):
columns['wordsenses'] = [', '.join(token.wordsenses)
for token in self.corpus.tokens]
if hasattr(self.corpus, 'namedentities'):
entities = []
for token in self.corpus.tokens:
if not token.entity:
entities.append('')
elif token == token.entity.tokens[0]:
entities.append('B-{}'.format(token.entity.class_))
else:
entities.append('I-{}'.format(token.entity.class_))
columns['NamedEntity'] = entities
# Write to CSV
with StringIO(newline='') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(list(columns.keys()))
for row in zip(*columns.values()):
writer.writerow(row)
outstring = csvfile.getvalue()
return outstring.encode('utf-8')
if __name__ == '__main__':
run_as_cli(CSVExporter)
|
<commit_before><commit_msg>Add simple CSV exporter to examples.<commit_after>
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import csv
from io import StringIO
from collections import OrderedDict
from tcflib.service import ExportingWorker, run_as_cli
class CSVExporter(ExportingWorker):
def export(self):
columns = OrderedDict()
columns['tokenID'] = [token.id for token in self.corpus.tokens]
columns['token'] = [token.text for token in self.corpus.tokens]
if hasattr(self.corpus, 'postags'):
columns['POStag'] = [token.tag for token in self.corpus.tokens]
if hasattr(self.corpus, 'lemmas'):
columns['lemma'] = [token.lemma for token in self.corpus.tokens]
if hasattr(self.corpus, 'wsd'):
columns['wordsenses'] = [', '.join(token.wordsenses)
for token in self.corpus.tokens]
if hasattr(self.corpus, 'namedentities'):
entities = []
for token in self.corpus.tokens:
if not token.entity:
entities.append('')
elif token == token.entity.tokens[0]:
entities.append('B-{}'.format(token.entity.class_))
else:
entities.append('I-{}'.format(token.entity.class_))
columns['NamedEntity'] = entities
# Write to CSV
with StringIO(newline='') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(list(columns.keys()))
for row in zip(*columns.values()):
writer.writerow(row)
outstring = csvfile.getvalue()
return outstring.encode('utf-8')
if __name__ == '__main__':
run_as_cli(CSVExporter)
|
Add simple CSV exporter to examples.#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import csv
from io import StringIO
from collections import OrderedDict
from tcflib.service import ExportingWorker, run_as_cli
class CSVExporter(ExportingWorker):
def export(self):
columns = OrderedDict()
columns['tokenID'] = [token.id for token in self.corpus.tokens]
columns['token'] = [token.text for token in self.corpus.tokens]
if hasattr(self.corpus, 'postags'):
columns['POStag'] = [token.tag for token in self.corpus.tokens]
if hasattr(self.corpus, 'lemmas'):
columns['lemma'] = [token.lemma for token in self.corpus.tokens]
if hasattr(self.corpus, 'wsd'):
columns['wordsenses'] = [', '.join(token.wordsenses)
for token in self.corpus.tokens]
if hasattr(self.corpus, 'namedentities'):
entities = []
for token in self.corpus.tokens:
if not token.entity:
entities.append('')
elif token == token.entity.tokens[0]:
entities.append('B-{}'.format(token.entity.class_))
else:
entities.append('I-{}'.format(token.entity.class_))
columns['NamedEntity'] = entities
# Write to CSV
with StringIO(newline='') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(list(columns.keys()))
for row in zip(*columns.values()):
writer.writerow(row)
outstring = csvfile.getvalue()
return outstring.encode('utf-8')
if __name__ == '__main__':
run_as_cli(CSVExporter)
|
<commit_before><commit_msg>Add simple CSV exporter to examples.<commit_after>#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import csv
from io import StringIO
from collections import OrderedDict
from tcflib.service import ExportingWorker, run_as_cli
class CSVExporter(ExportingWorker):
def export(self):
columns = OrderedDict()
columns['tokenID'] = [token.id for token in self.corpus.tokens]
columns['token'] = [token.text for token in self.corpus.tokens]
if hasattr(self.corpus, 'postags'):
columns['POStag'] = [token.tag for token in self.corpus.tokens]
if hasattr(self.corpus, 'lemmas'):
columns['lemma'] = [token.lemma for token in self.corpus.tokens]
if hasattr(self.corpus, 'wsd'):
columns['wordsenses'] = [', '.join(token.wordsenses)
for token in self.corpus.tokens]
if hasattr(self.corpus, 'namedentities'):
entities = []
for token in self.corpus.tokens:
if not token.entity:
entities.append('')
elif token == token.entity.tokens[0]:
entities.append('B-{}'.format(token.entity.class_))
else:
entities.append('I-{}'.format(token.entity.class_))
columns['NamedEntity'] = entities
# Write to CSV
with StringIO(newline='') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(list(columns.keys()))
for row in zip(*columns.values()):
writer.writerow(row)
outstring = csvfile.getvalue()
return outstring.encode('utf-8')
if __name__ == '__main__':
run_as_cli(CSVExporter)
|
|
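The exporter record above builds parallel per-token column lists, then zip-transposes them into CSV rows through an in-memory StringIO. That core move is standard-library only, so it can be shown standalone; the token values below are invented, and writerows() stands in for the record's explicit row loop.

import csv
from io import StringIO
from collections import OrderedDict

columns = OrderedDict()
columns['tokenID'] = ['t1', 't2', 't3']
columns['token'] = ['Colorless', 'green', 'ideas']
columns['POStag'] = ['ADJ', 'ADJ', 'NOUN']

with StringIO(newline='') as csvfile:
    writer = csv.writer(csvfile)
    writer.writerow(list(columns.keys()))
    writer.writerows(zip(*columns.values()))   # one CSV row per token
    payload = csvfile.getvalue().encode('utf-8')

print(payload.decode('utf-8'))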
1c5fef3a34ed421610a4e9a38feb07e6545e5d13
|
tests/rules/test_dirty_untar.py
|
tests/rules/test_dirty_untar.py
|
import os
import pytest
import tarfile
from thefuck.rules.dirty_untar import match, get_new_command, side_effect
from tests.utils import Command
@pytest.fixture
def tar_error(tmpdir):
def fixture(filename):
path = os.path.join(str(tmpdir), filename)
def reset(path):
with tarfile.TarFile(path, 'w') as archive:
for file in ('a', 'b', 'c'):
with open(file, 'w') as f:
f.write('*')
archive.add(file)
os.remove(file)
with tarfile.TarFile(path, 'r') as archive:
archive.extractall()
os.chdir(str(tmpdir))
reset(path)
assert(set(os.listdir('.')) == {filename, 'a', 'b', 'c'})
return fixture
parametrize_filename = pytest.mark.parametrize('filename', [
'foo.tar',
'foo.tar.gz',
'foo.tgz'])
parametrize_script = pytest.mark.parametrize('script, fixed', [
('tar xvf {}', 'mkdir -p foo && tar xvf {} -C foo'),
('tar -xvf {}', 'mkdir -p foo && tar -xvf {} -C foo'),
('tar --extract -f {}', 'mkdir -p foo && tar --extract -f {} -C foo')])
@parametrize_filename
@parametrize_script
def test_match(tar_error, filename, script, fixed):
tar_error(filename)
assert match(Command(script=script.format(filename)), None)
@parametrize_filename
@parametrize_script
def test_side_effect(tar_error, filename, script, fixed):
tar_error(filename)
side_effect(Command(script=script.format(filename)), None)
assert(os.listdir('.') == [filename])
@parametrize_filename
@parametrize_script
def test_get_new_command(tar_error, filename, script, fixed):
tar_error(filename)
assert get_new_command(Command(script=script.format(filename)), None) == fixed.format(filename)
|
Add tests for the `dirty_untar` rule
|
Add tests for the `dirty_untar` rule
|
Python
|
mit
|
PLNech/thefuck,AntonChankin/thefuck,SimenB/thefuck,gogobebe2/thefuck,BertieJim/thefuck,lawrencebenson/thefuck,barneyElDinosaurio/thefuck,vanita5/thefuck,mcarton/thefuck,redreamality/thefuck,subajat1/thefuck,mbbill/thefuck,vanita5/thefuck,qingying5810/thefuck,thinkerchan/thefuck,bigplus/thefuck,mlk/thefuck,AntonChankin/thefuck,SimenB/thefuck,subajat1/thefuck,MJerty/thefuck,ostree/thefuck,bugaevc/thefuck,princeofdarkness76/thefuck,Clpsplug/thefuck,manashmndl/thefuck,thesoulkiller/thefuck,LawrenceHan/thefuck,NguyenHoaiNam/thefuck,sekaiamber/thefuck,beni55/thefuck,mlk/thefuck,Clpsplug/thefuck,hxddh/thefuck,PLNech/thefuck,scorphus/thefuck,levythu/thefuck,barneyElDinosaurio/thefuck,nvbn/thefuck,Aeron/thefuck,bigplus/thefuck,thinkerchan/thefuck,manashmndl/thefuck,mcarton/thefuck,MJerty/thefuck,roth1002/thefuck,princeofdarkness76/thefuck,scorphus/thefuck,BertieJim/thefuck,beni55/thefuck,zhangzhishan/thefuck,hxddh/thefuck,lawrencebenson/thefuck,levythu/thefuck,LawrenceHan/thefuck,nvbn/thefuck,artiya4u/thefuck,thesoulkiller/thefuck,ostree/thefuck,roth1002/thefuck,redreamality/thefuck
|
Add tests for the `dirty_untar` rule
|
import os
import pytest
import tarfile
from thefuck.rules.dirty_untar import match, get_new_command, side_effect
from tests.utils import Command
@pytest.fixture
def tar_error(tmpdir):
def fixture(filename):
path = os.path.join(str(tmpdir), filename)
def reset(path):
with tarfile.TarFile(path, 'w') as archive:
for file in ('a', 'b', 'c'):
with open(file, 'w') as f:
f.write('*')
archive.add(file)
os.remove(file)
with tarfile.TarFile(path, 'r') as archive:
archive.extractall()
os.chdir(str(tmpdir))
reset(path)
assert(set(os.listdir('.')) == {filename, 'a', 'b', 'c'})
return fixture
parametrize_filename = pytest.mark.parametrize('filename', [
'foo.tar',
'foo.tar.gz',
'foo.tgz'])
parametrize_script = pytest.mark.parametrize('script, fixed', [
('tar xvf {}', 'mkdir -p foo && tar xvf {} -C foo'),
('tar -xvf {}', 'mkdir -p foo && tar -xvf {} -C foo'),
('tar --extract -f {}', 'mkdir -p foo && tar --extract -f {} -C foo')])
@parametrize_filename
@parametrize_script
def test_match(tar_error, filename, script, fixed):
tar_error(filename)
assert match(Command(script=script.format(filename)), None)
@parametrize_filename
@parametrize_script
def test_side_effect(tar_error, filename, script, fixed):
tar_error(filename)
side_effect(Command(script=script.format(filename)), None)
assert(os.listdir('.') == [filename])
@parametrize_filename
@parametrize_script
def test_get_new_command(tar_error, filename, script, fixed):
tar_error(filename)
assert get_new_command(Command(script=script.format(filename)), None) == fixed.format(filename)
|
<commit_before><commit_msg>Add tests for the `dirty_untar` rule<commit_after>
|
import os
import pytest
import tarfile
from thefuck.rules.dirty_untar import match, get_new_command, side_effect
from tests.utils import Command
@pytest.fixture
def tar_error(tmpdir):
def fixture(filename):
path = os.path.join(str(tmpdir), filename)
def reset(path):
with tarfile.TarFile(path, 'w') as archive:
for file in ('a', 'b', 'c'):
with open(file, 'w') as f:
f.write('*')
archive.add(file)
os.remove(file)
with tarfile.TarFile(path, 'r') as archive:
archive.extractall()
os.chdir(str(tmpdir))
reset(path)
assert(set(os.listdir('.')) == {filename, 'a', 'b', 'c'})
return fixture
parametrize_filename = pytest.mark.parametrize('filename', [
'foo.tar',
'foo.tar.gz',
'foo.tgz'])
parametrize_script = pytest.mark.parametrize('script, fixed', [
('tar xvf {}', 'mkdir -p foo && tar xvf {} -C foo'),
('tar -xvf {}', 'mkdir -p foo && tar -xvf {} -C foo'),
('tar --extract -f {}', 'mkdir -p foo && tar --extract -f {} -C foo')])
@parametrize_filename
@parametrize_script
def test_match(tar_error, filename, script, fixed):
tar_error(filename)
assert match(Command(script=script.format(filename)), None)
@parametrize_filename
@parametrize_script
def test_side_effect(tar_error, filename, script, fixed):
tar_error(filename)
side_effect(Command(script=script.format(filename)), None)
assert(os.listdir('.') == [filename])
@parametrize_filename
@parametrize_script
def test_get_new_command(tar_error, filename, script, fixed):
tar_error(filename)
assert get_new_command(Command(script=script.format(filename)), None) == fixed.format(filename)
|
Add tests for the `dirty_untar` ruleimport os
import pytest
import tarfile
from thefuck.rules.dirty_untar import match, get_new_command, side_effect
from tests.utils import Command
@pytest.fixture
def tar_error(tmpdir):
def fixture(filename):
path = os.path.join(str(tmpdir), filename)
def reset(path):
with tarfile.TarFile(path, 'w') as archive:
for file in ('a', 'b', 'c'):
with open(file, 'w') as f:
f.write('*')
archive.add(file)
os.remove(file)
with tarfile.TarFile(path, 'r') as archive:
archive.extractall()
os.chdir(str(tmpdir))
reset(path)
assert(set(os.listdir('.')) == {filename, 'a', 'b', 'c'})
return fixture
parametrize_filename = pytest.mark.parametrize('filename', [
'foo.tar',
'foo.tar.gz',
'foo.tgz'])
parametrize_script = pytest.mark.parametrize('script, fixed', [
('tar xvf {}', 'mkdir -p foo && tar xvf {} -C foo'),
('tar -xvf {}', 'mkdir -p foo && tar -xvf {} -C foo'),
('tar --extract -f {}', 'mkdir -p foo && tar --extract -f {} -C foo')])
@parametrize_filename
@parametrize_script
def test_match(tar_error, filename, script, fixed):
tar_error(filename)
assert match(Command(script=script.format(filename)), None)
@parametrize_filename
@parametrize_script
def test_side_effect(tar_error, filename, script, fixed):
tar_error(filename)
side_effect(Command(script=script.format(filename)), None)
assert(os.listdir('.') == [filename])
@parametrize_filename
@parametrize_script
def test_get_new_command(tar_error, filename, script, fixed):
tar_error(filename)
assert get_new_command(Command(script=script.format(filename)), None) == fixed.format(filename)
|
<commit_before><commit_msg>Add tests for the `dirty_untar` rule<commit_after>import os
import pytest
import tarfile
from thefuck.rules.dirty_untar import match, get_new_command, side_effect
from tests.utils import Command
@pytest.fixture
def tar_error(tmpdir):
def fixture(filename):
path = os.path.join(str(tmpdir), filename)
def reset(path):
with tarfile.TarFile(path, 'w') as archive:
for file in ('a', 'b', 'c'):
with open(file, 'w') as f:
f.write('*')
archive.add(file)
os.remove(file)
with tarfile.TarFile(path, 'r') as archive:
archive.extractall()
os.chdir(str(tmpdir))
reset(path)
assert(set(os.listdir('.')) == {filename, 'a', 'b', 'c'})
return fixture
parametrize_filename = pytest.mark.parametrize('filename', [
'foo.tar',
'foo.tar.gz',
'foo.tgz'])
parametrize_script = pytest.mark.parametrize('script, fixed', [
('tar xvf {}', 'mkdir -p foo && tar xvf {} -C foo'),
('tar -xvf {}', 'mkdir -p foo && tar -xvf {} -C foo'),
('tar --extract -f {}', 'mkdir -p foo && tar --extract -f {} -C foo')])
@parametrize_filename
@parametrize_script
def test_match(tar_error, filename, script, fixed):
tar_error(filename)
assert match(Command(script=script.format(filename)), None)
@parametrize_filename
@parametrize_script
def test_side_effect(tar_error, filename, script, fixed):
tar_error(filename)
side_effect(Command(script=script.format(filename)), None)
assert(os.listdir('.') == [filename])
@parametrize_filename
@parametrize_script
def test_get_new_command(tar_error, filename, script, fixed):
tar_error(filename)
assert get_new_command(Command(script=script.format(filename)), None) == fixed.format(filename)
|
|
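A side note on the testing pattern in the record above: a pytest fixture that returns a factory function, combined with stacked parametrize decorators, gives every filename/script pair a fresh archive to work on. A minimal self-contained sketch of that pattern (names like make_file and suffix are illustrative, not from the thefuck repo):

import os
import pytest

@pytest.fixture
def make_file(tmpdir):
    # Fixture factory: the test picks the filename at call time.
    def factory(name):
        path = os.path.join(str(tmpdir), name)
        with open(path, 'w') as f:
            f.write('data')
        return path
    return factory

@pytest.mark.parametrize('suffix', ['.tar', '.tar.gz', '.tgz'])
def test_file_is_created(make_file, suffix):
    assert os.path.exists(make_file('archive' + suffix))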
62fb38d0860b5feeee39764b6c66f5ceed39b984
|
alembic_migration/versions/077ddf78a1f3_fix_protected_docs_versions.py
|
alembic_migration/versions/077ddf78a1f3_fix_protected_docs_versions.py
|
"""Fix protected docs versions
Revision ID: 077ddf78a1f3
Revises: 9739938498a8
Create Date: 2017-10-30 12:05:51.679435
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '077ddf78a1f3'
down_revision = '9739938498a8'
branch_labels = None
depends_on = None
def upgrade():
op.execute("""
with versions_from_archives as (
select document_id, max(version) as version
from guidebook.documents_archives
group by document_id
)
update guidebook.documents as d
set version = va.version
from versions_from_archives va
where d.document_id = va.document_id""")
def downgrade():
# Not reversible
pass
|
Fix versions of protected/unprotected documents
|
Fix versions of protected/unprotected documents
|
Python
|
agpl-3.0
|
c2corg/v6_api,c2corg/v6_api,c2corg/v6_api
|
Fix versions of protected/unprotected documents
|
"""Fix protected docs versions
Revision ID: 077ddf78a1f3
Revises: 9739938498a8
Create Date: 2017-10-30 12:05:51.679435
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '077ddf78a1f3'
down_revision = '9739938498a8'
branch_labels = None
depends_on = None
def upgrade():
op.execute("""
with versions_from_archives as (
select document_id, max(version) as version
from guidebook.documents_archives
group by document_id
)
update guidebook.documents as d
set version = va.version
from versions_from_archives va
where d.document_id = va.document_id""")
def downgrade():
# Not reversible
pass
|
<commit_before><commit_msg>Fix versions of protected/unprotected documents<commit_after>
|
"""Fix protected docs versions
Revision ID: 077ddf78a1f3
Revises: 9739938498a8
Create Date: 2017-10-30 12:05:51.679435
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '077ddf78a1f3'
down_revision = '9739938498a8'
branch_labels = None
depends_on = None
def upgrade():
op.execute("""
with versions_from_archives as (
select document_id, max(version) as version
from guidebook.documents_archives
group by document_id
)
update guidebook.documents as d
set version = va.version
from versions_from_archives va
where d.document_id = va.document_id""")
def downgrade():
# Not reversible
pass
|
Fix versions of protected/unprotected documents"""Fix protected docs versions
Revision ID: 077ddf78a1f3
Revises: 9739938498a8
Create Date: 2017-10-30 12:05:51.679435
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '077ddf78a1f3'
down_revision = '9739938498a8'
branch_labels = None
depends_on = None
def upgrade():
op.execute("""
with versions_from_archives as (
select document_id, max(version) as version
from guidebook.documents_archives
group by document_id
)
update guidebook.documents as d
set version = va.version
from versions_from_archives va
where d.document_id = va.document_id""")
def downgrade():
# Not reversible
pass
|
<commit_before><commit_msg>Fix versions of protected/unprotected documents<commit_after>"""Fix protected docs versions
Revision ID: 077ddf78a1f3
Revises: 9739938498a8
Create Date: 2017-10-30 12:05:51.679435
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '077ddf78a1f3'
down_revision = '9739938498a8'
branch_labels = None
depends_on = None
def upgrade():
op.execute("""
with versions_from_archives as (
select document_id, max(version) as version
from guidebook.documents_archives
group by document_id
)
update guidebook.documents as d
set version = va.version
from versions_from_archives va
where d.document_id = va.document_id""")
def downgrade():
# Not reversible
pass
|
|
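The migration in this record back-fills a column with raw SQL through op.execute, which keeps data migrations independent of the current model code. A rough skeleton of that approach (revision ids and table names here are placeholders, not c2corg values):

from alembic import op

revision = '000000000000'   # placeholder
down_revision = None
branch_labels = None
depends_on = None

def upgrade():
    # Correlated UPDATE from an aggregate, as in the migration above.
    op.execute("""
        update t set version = s.version
        from (select id, max(version) as version
              from t_archive group by id) s
        where t.id = s.id""")

def downgrade():
    # Data back-fills are usually not reversible; say so explicitly.
    pass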
b39dd2afea1f4662e17a927e7e6aa41e850f7470
|
lib/gen-hangul.py
|
lib/gen-hangul.py
|
#!/usr/bin/python3
# Input: https://www.unicode.org/Public/UNIDATA/Jamo.txt
import io
import re
class Builder(object):
def __init__(self):
pass
def read(self, infile):
chars = []
for line in infile:
if line.startswith('#'):
continue
line = line.strip()
if len(line) == 0:
continue
data, _comment = line.split('#', 1)
codepoint, short_name = data.split(';')
short_name = short_name.strip()
chars.append((codepoint, short_name))
return chars
def write(self, data):
print('''\
struct HangulCharacter
{
gunichar uc;
const char *short_name;
};''')
print('static const struct HangulCharacter hangul_chars[] =\n {')
s = ''
offset = 0
for codepoint, short_name in data:
print(' {{ 0x{0}, "{1}" }},'.format(codepoint, short_name))
print(' };')
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='build')
parser.add_argument('infile', type=argparse.FileType('r'),
help='input file')
args = parser.parse_args()
builder = Builder()
# FIXME: argparse.FileType(encoding=...) is available since Python 3.4
data = builder.read(io.open(args.infile.name, encoding='utf_8_sig'))
builder.write(data)
|
Add a script for generating jamo character table
|
lib: Add a script for generating jamo character table
|
Python
|
bsd-3-clause
|
GNOME/gnome-characters,GNOME/gnome-characters,GNOME/gnome-characters,GNOME/gnome-characters,GNOME/gnome-characters
|
lib: Add a script for generating jamo character table
|
#!/usr/bin/python3
# Input: https://www.unicode.org/Public/UNIDATA/Jamo.txt
import io
import re
class Builder(object):
def __init__(self):
pass
def read(self, infile):
chars = []
for line in infile:
if line.startswith('#'):
continue
line = line.strip()
if len(line) == 0:
continue
data, _comment = line.split('#', 1)
codepoint, short_name = data.split(';')
short_name = short_name.strip()
chars.append((codepoint, short_name))
return chars
def write(self, data):
print('''\
struct HangulCharacter
{
gunichar uc;
const char *short_name;
};''')
print('static const struct HangulCharacter hangul_chars[] =\n {')
s = ''
offset = 0
for codepoint, short_name in data:
print(' {{ 0x{0}, "{1}" }},'.format(codepoint, short_name))
print(' };')
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='build')
parser.add_argument('infile', type=argparse.FileType('r'),
help='input file')
args = parser.parse_args()
builder = Builder()
# FIXME: argparse.FileType(encoding=...) is available since Python 3.4
data = builder.read(io.open(args.infile.name, encoding='utf_8_sig'))
builder.write(data)
|
<commit_before><commit_msg>lib: Add a script for generating jamo character table<commit_after>
|
#!/usr/bin/python3
# Input: https://www.unicode.org/Public/UNIDATA/Jamo.txt
import io
import re
class Builder(object):
def __init__(self):
pass
def read(self, infile):
chars = []
for line in infile:
if line.startswith('#'):
continue
line = line.strip()
if len(line) == 0:
continue
data, _comment = line.split('#', 1)
codepoint, short_name = data.split(';')
short_name = short_name.strip()
chars.append((codepoint, short_name))
return chars
def write(self, data):
print('''\
struct HangulCharacter
{
gunichar uc;
const char *short_name;
};''')
print('static const struct HangulCharacter hangul_chars[] =\n {')
s = ''
offset = 0
for codepoint, short_name in data:
print(' {{ 0x{0}, "{1}" }},'.format(codepoint, short_name))
print(' };')
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='build')
parser.add_argument('infile', type=argparse.FileType('r'),
help='input file')
args = parser.parse_args()
builder = Builder()
# FIXME: argparse.FileType(encoding=...) is available since Python 3.4
data = builder.read(io.open(args.infile.name, encoding='utf_8_sig'))
builder.write(data)
|
lib: Add a script for generating jamo character table#!/usr/bin/python3
# Input: https://www.unicode.org/Public/UNIDATA/Jamo.txt
import io
import re
class Builder(object):
def __init__(self):
pass
def read(self, infile):
chars = []
for line in infile:
if line.startswith('#'):
continue
line = line.strip()
if len(line) == 0:
continue
data, _comment = line.split('#', 1)
codepoint, short_name = data.split(';')
short_name = short_name.strip()
chars.append((codepoint, short_name))
return chars
def write(self, data):
print('''\
struct HangulCharacter
{
gunichar uc;
const char *short_name;
};''')
print('static const struct HangulCharacter hangul_chars[] =\n {')
s = ''
offset = 0
for codepoint, short_name in data:
print(' {{ 0x{0}, "{1}" }},'.format(codepoint, short_name))
print(' };')
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='build')
parser.add_argument('infile', type=argparse.FileType('r'),
help='input file')
args = parser.parse_args()
builder = Builder()
# FIXME: argparse.FileType(encoding=...) is available since Python 3.4
data = builder.read(io.open(args.infile.name, encoding='utf_8_sig'))
builder.write(data)
|
<commit_before><commit_msg>lib: Add a script for generating jamo character table<commit_after>#!/usr/bin/python3
# Input: https://www.unicode.org/Public/UNIDATA/Jamo.txt
import io
import re
class Builder(object):
def __init__(self):
pass
def read(self, infile):
chars = []
for line in infile:
if line.startswith('#'):
continue
line = line.strip()
if len(line) == 0:
continue
data, _comment = line.split('#', 1)
codepoint, short_name = data.split(';')
short_name = short_name.strip()
chars.append((codepoint, short_name))
return chars
def write(self, data):
print('''\
struct HangulCharacter
{
gunichar uc;
const char *short_name;
};''')
print('static const struct HangulCharacter hangul_chars[] =\n {')
s = ''
offset = 0
for codepoint, short_name in data:
print(' {{ 0x{0}, "{1}" }},'.format(codepoint, short_name))
print(' };')
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='build')
parser.add_argument('infile', type=argparse.FileType('r'),
help='input file')
args = parser.parse_args()
builder = Builder()
# FIXME: argparse.FileType(encoding=...) is available since Python 3.4
data = builder.read(io.open(args.infile.name, encoding='utf_8_sig'))
builder.write(data)
|
|
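Jamo.txt uses the common Unicode Character Database layout: 'codepoint; value # comment', plus pure comment lines and blanks. A dependency-free parser for that layout, sketched with the maxsplit-1 convention the script above relies on:

def parse_ucd_line(line):
    # Drop the trailing comment, if any, then split on ';'.
    data = line.split('#', 1)[0].strip()
    if not data:
        return None  # blank line or pure comment
    codepoint, value = (part.strip() for part in data.split(';', 1))
    return codepoint, value

assert parse_ucd_line('1100; G  # HANGUL CHOSEONG KIYEOK') == ('1100', 'G')
assert parse_ucd_line('# header comment') is None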
72e69f3535c7e2cd82cdda62636eabd7421ebddf
|
generative/tests/compare_test/concat_first/dump_hiddens.py
|
generative/tests/compare_test/concat_first/dump_hiddens.py
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import subprocess
if __name__ == "__main__":
for hiddens_dim in [512, 256, 128, 64, 32, 16]:
print('Dumping files for (%d)' % hiddens_dim)
model_path = '/mnt/visual_communication_dataset/trained_models_5_30_18/hiddens_fc6/%d/model_best.pth.tar' % hiddens_dim
out_dir = './dump_hiddens_outputs/%d/' % hiddens_dim
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
command = 'CUDA_VISIBLE_DEVICES=7 python dump.py {model} --train-test-split-dir ./train_test_split/1 --out-dir {outdir} --average-labels --overwrite-layer fc6 --cuda'.format(model=model_path, outdir=out_dir)
subprocess.call(command, shell=True)
|
Add dump script for all hiddens
|
Add dump script for all hiddens
|
Python
|
mit
|
judithfan/pix2svg
|
Add dump script for all hiddens
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import subprocess
if __name__ == "__main__":
for hiddens_dim in [512, 256, 128, 64, 32, 16]:
print('Dumping files for (%d)' % hiddens_dim)
model_path = '/mnt/visual_communication_dataset/trained_models_5_30_18/hiddens_fc6/%d/model_best.pth.tar' % hiddens_dim
out_dir = './dump_hiddens_outputs/%d/' % hiddens_dim
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
command = 'CUDA_VISIBLE_DEVICES=7 python dump.py {model} --train-test-split-dir ./train_test_split/1 --out-dir {outdir} --average-labels --overwrite-layer fc6 --cuda'.format(model=model_path, outdir=out_dir)
subprocess.call(command, shell=True)
|
<commit_before><commit_msg>Add dump script for all hiddens<commit_after>
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import subprocess
if __name__ == "__main__":
for hiddens_dim in [512, 256, 128, 64, 32, 16]:
print('Dumping files for (%d)' % hiddens_dim)
model_path = '/mnt/visual_communication_dataset/trained_models_5_30_18/hiddens_fc6/%d/model_best.pth.tar' % hiddens_dim
out_dir = './dump_hiddens_outputs/%d/' % hiddens_dim
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
command = 'CUDA_VISIBLE_DEVICES=7 python dump.py {model} --train-test-split-dir ./train_test_split/1 --out-dir {outdir} --average-labels --overwrite-layer fc6 --cuda'.format(model=model_path, outdir=out_dir)
subprocess.call(command, shell=True)
|
Add dump script for all hiddensfrom __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import subprocess
if __name__ == "__main__":
for hiddens_dim in [512, 256, 128, 64, 32, 16]:
print('Dumping files for (%d)' % hiddens_dim)
model_path = '/mnt/visual_communication_dataset/trained_models_5_30_18/hiddens_fc6/%d/model_best.pth.tar' % hiddens_dim
out_dir = './dump_hiddens_outputs/%d/' % hiddens_dim
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
command = 'CUDA_VISIBLE_DEVICES=7 python dump.py {model} --train-test-split-dir ./train_test_split/1 --out-dir {outdir} --average-labels --overwrite-layer fc6 --cuda'.format(model=model_path, outdir=out_dir)
subprocess.call(command, shell=True)
|
<commit_before><commit_msg>Add dump script for all hiddens<commit_after>from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import subprocess
if __name__ == "__main__":
for hiddens_dim in [512, 256, 128, 64, 32, 16]:
print('Dumping files for (%d)' % hiddens_dim)
model_path = '/mnt/visual_communication_dataset/trained_models_5_30_18/hiddens_fc6/%d/model_best.pth.tar' % hiddens_dim
out_dir = './dump_hiddens_outputs/%d/' % hiddens_dim
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
command = 'CUDA_VISIBLE_DEVICES=7 python dump.py {model} --train-test-split-dir ./train_test_split/1 --out-dir {outdir} --average-labels --overwrite-layer fc6 --cuda'.format(model=model_path, outdir=out_dir)
subprocess.call(command, shell=True)
|
|
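The sweep in this record formats one shell string per hidden size and calls it with shell=True. A sketch of the same loop with an argument list and an explicit environment, which sidesteps shell quoting (paths and flags are placeholders; makedirs(exist_ok=True) needs Python 3):

import os
import subprocess

for dim in (512, 256, 128, 64, 32, 16):
    out_dir = os.path.join('dump_hiddens_outputs', str(dim))
    os.makedirs(out_dir, exist_ok=True)
    env = dict(os.environ, CUDA_VISIBLE_DEVICES='7')
    cmd = ['python', 'dump.py', 'model_%d.pth.tar' % dim,
           '--out-dir', out_dir, '--cuda']
    subprocess.call(cmd, env=env)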
df784323d0da737755def4015840d118e3c8e595
|
nettests/core/http_body_length.py
|
nettests/core/http_body_length.py
|
# -*- encoding: utf-8 -*-
#
# :authors: Arturo Filastò
# :licence: see LICENSE
from twisted.internet import defer
from twisted.python import usage
from ooni.templates import httpt
class UsageOptions(usage.Options):
optParameters = [
['url', 'u', None, 'Specify a single URL to test.'],
['factor', 'f', 0.8, 'What factor should be used for triggering censorship (0.8 == 80%)']
]
class HTTPBodyLength(httpt.HTTPTest):
"""
Performs two GET requests to the set of sites to be tested for
censorship, one over a known good control channel (Tor), the other over the
test network.
We then look at the response body lengths and see if the control response
differs from the experiment response by a certain factor.
"""
name = "HTTP Body length test"
author = "Arturo Filastò"
version = "0.1"
usageOptions = UsageOptions
inputFile = ['file', 'f', None,
'List of URLs to perform GET requests to']
# These values are used for determining censorship based on response body
# lengths
control_body_length = None
experiment_body_length = None
def setUp(self):
"""
Check for inputs.
"""
if self.input:
self.url = self.input
elif self.localOptions['url']:
self.url = self.localOptions['url']
else:
raise Exception("No input specified")
self.factor = self.localOptions['factor']
def compare_body_lengths(self):
body_length_a = self.control_body_length
body_length_b = self.experiment_body_length
rel = float(body_length_a)/float(body_length_b)
if rel > 1:
rel = 1/rel
self.report['body_proportion'] = rel
self.report['factor'] = self.factor
if rel < self.factor:
self.report['censorship'] = True
else:
self.report['censorship'] = False
def test_get(self):
def errback(failure):
log.err("There was an error while testing %s" % self.url)
log.exception(failure)
def control_body(result):
self.control_body_length = len(result)
if self.experiment_body_length:
self.compare_body_lengths()
def experiment_body(result):
self.experiment_body_length = len(result)
if self.control_body_length:
self.compare_body_lengths()
dl = []
experiment_request = self.doRequest(self.url, method="GET",
body_processor=experiment_body)
control_request = self.doRequest(self.url, method="GET",
use_tor=True, body_processor=control_body)
dl.append(experiment_request)
dl.append(control_request)
d = defer.DeferredList(dl)
return d
|
Add test that detects censorship in HTTP pages based on HTTP body length
|
Add test that detects censorship in HTTP pages based on HTTP body length
|
Python
|
bsd-2-clause
|
juga0/ooni-probe,juga0/ooni-probe,lordappsec/ooni-probe,kdmurray91/ooni-probe,0xPoly/ooni-probe,Karthikeyan-kkk/ooni-probe,Karthikeyan-kkk/ooni-probe,Karthikeyan-kkk/ooni-probe,Karthikeyan-kkk/ooni-probe,lordappsec/ooni-probe,juga0/ooni-probe,lordappsec/ooni-probe,0xPoly/ooni-probe,juga0/ooni-probe,lordappsec/ooni-probe,0xPoly/ooni-probe,kdmurray91/ooni-probe,kdmurray91/ooni-probe,0xPoly/ooni-probe,kdmurray91/ooni-probe
|
Add test that detects censorship in HTTP pages based on HTTP body length
|
# -*- encoding: utf-8 -*-
#
# :authors: Arturo Filastò
# :licence: see LICENSE
from twisted.internet import defer
from twisted.python import usage
from ooni.templates import httpt
class UsageOptions(usage.Options):
optParameters = [
['url', 'u', None, 'Specify a single URL to test.'],
['factor', 'f', 0.8, 'What factor should be used for triggering censorship (0.8 == 80%)']
]
class HTTPBodyLength(httpt.HTTPTest):
"""
Performs two GET requests to the set of sites to be tested for
censorship, one over a known good control channel (Tor), the other over the
test network.
We then look at the response body lengths and see if the control response
differs from the experiment response by a certain factor.
"""
name = "HTTP Body length test"
author = "Arturo Filastò"
version = "0.1"
usageOptions = UsageOptions
inputFile = ['file', 'f', None,
'List of URLs to perform GET requests to']
# These values are used for determining censorship based on response body
# lengths
control_body_length = None
experiment_body_length = None
def setUp(self):
"""
Check for inputs.
"""
if self.input:
self.url = self.input
elif self.localOptions['url']:
self.url = self.localOptions['url']
else:
raise Exception("No input specified")
self.factor = self.localOptions['factor']
def compare_body_lengths(self):
body_length_a = self.control_body_length
body_length_b = self.experiment_body_length
rel = float(body_length_a)/float(body_length_b)
if rel > 1:
rel = 1/rel
self.report['body_proportion'] = rel
self.report['factor'] = self.factor
if rel < self.factor:
self.report['censorship'] = True
else:
self.report['censorship'] = False
def test_get(self):
def errback(failure):
log.err("There was an error while testing %s" % self.url)
log.exception(failure)
def control_body(result):
self.control_body_length = len(result)
if self.experiment_body_length:
self.compare_body_lengths()
def experiment_body(result):
self.experiment_body_length = len(result)
if self.control_body_length:
self.compare_body_lengths()
dl = []
experiment_request = self.doRequest(self.url, method="GET",
body_processor=experiment_body)
control_request = self.doRequest(self.url, method="GET",
use_tor=True, body_processor=control_body)
dl.append(experiment_request)
dl.append(control_request)
d = defer.DeferredList(dl)
return d
|
<commit_before><commit_msg>Add test that detects censorship in HTTP pages based on HTTP body length<commit_after>
|
# -*- encoding: utf-8 -*-
#
# :authors: Arturo Filastò
# :licence: see LICENSE
from twisted.internet import defer
from twisted.python import usage
from ooni.templates import httpt
class UsageOptions(usage.Options):
optParameters = [
['url', 'u', None, 'Specify a single URL to test.'],
['factor', 'f', 0.8, 'What factor should be used for triggering censorship (0.8 == 80%)']
]
class HTTPBodyLength(httpt.HTTPTest):
"""
Performs two GET requests to the set of sites to be tested for
censorship, one over a known good control channel (Tor), the other over the
test network.
We then look at the response body lengths and see if the control response
differs from the experiment response by a certain factor.
"""
name = "HTTP Body length test"
author = "Arturo Filastò"
version = "0.1"
usageOptions = UsageOptions
inputFile = ['file', 'f', None,
'List of URLs to perform GET requests to']
# These values are used for determining censorship based on response body
# lengths
control_body_length = None
experiment_body_length = None
def setUp(self):
"""
Check for inputs.
"""
if self.input:
self.url = self.input
elif self.localOptions['url']:
self.url = self.localOptions['url']
else:
raise Exception("No input specified")
self.factor = self.localOptions['factor']
def compare_body_lengths(self):
body_length_a = self.control_body_length
body_length_b = self.experiment_body_length
rel = float(body_length_a)/float(body_length_b)
if rel > 1:
rel = 1/rel
self.report['body_proportion'] = rel
self.report['factor'] = self.factor
if rel < self.factor:
self.report['censorship'] = True
else:
self.report['censorship'] = False
def test_get(self):
def errback(failure):
log.err("There was an error while testing %s" % self.url)
log.exception(failure)
def control_body(result):
self.control_body_length = len(result)
if self.experiment_body_length:
self.compare_body_lengths()
def experiment_body(result):
self.experiment_body_length = len(result)
if self.control_body_length:
self.compare_body_lengths()
dl = []
experiment_request = self.doRequest(self.url, method="GET",
body_processor=experiment_body)
control_request = self.doRequest(self.url, method="GET",
use_tor=True, body_processor=control_body)
dl.append(experiment_request)
dl.append(control_request)
d = defer.DeferredList(dl)
return d
|
Add test that detects censorship in HTTP pages based on HTTP body length# -*- encoding: utf-8 -*-
#
# :authors: Arturo Filastò
# :licence: see LICENSE
from twisted.internet import defer
from twisted.python import usage
from ooni.templates import httpt
class UsageOptions(usage.Options):
optParameters = [
['url', 'u', None, 'Specify a single URL to test.'],
['factor', 'f', 0.8, 'What factor should be used for triggering censorship (0.8 == 80%)']
]
class HTTPBodyLength(httpt.HTTPTest):
"""
Performs two GET requests to the set of sites to be tested for
censorship, one over a known good control channel (Tor), the other over the
test network.
We then look at the response body lengths and see if the control response
differs from the experiment response by a certain factor.
"""
name = "HTTP Body length test"
author = "Arturo Filastò"
version = "0.1"
usageOptions = UsageOptions
inputFile = ['file', 'f', None,
'List of URLs to perform GET requests to']
# These values are used for determining censorship based on response body
# lengths
control_body_length = None
experiment_body_length = None
def setUp(self):
"""
Check for inputs.
"""
if self.input:
self.url = self.input
elif self.localOptions['url']:
self.url = self.localOptions['url']
else:
raise Exception("No input specified")
self.factor = self.localOptions['factor']
def compare_body_lengths(self):
body_length_a = self.control_body_length
body_length_b = self.experiment_body_length
rel = float(body_length_a)/float(body_length_b)
if rel > 1:
rel = 1/rel
self.report['body_proportion'] = rel
self.report['factor'] = self.factor
if rel < self.factor:
self.report['censorship'] = True
else:
self.report['censorship'] = False
def test_get(self):
def errback(failure):
log.err("There was an error while testing %s" % self.url)
log.exception(failure)
def control_body(result):
self.control_body_length = len(result)
if self.experiment_body_length:
self.compare_body_lengths()
def experiment_body(result):
self.experiment_body_length = len(result)
if self.control_body_length:
self.compare_body_lengths()
dl = []
experiment_request = self.doRequest(self.url, method="GET",
body_processor=experiment_body)
control_request = self.doRequest(self.url, method="GET",
use_tor=True, body_processor=control_body)
dl.append(experiment_request)
dl.append(control_request)
d = defer.DeferredList(dl)
return d
|
<commit_before><commit_msg>Add test that detects censorship in HTTP pages based on HTTP body length<commit_after># -*- encoding: utf-8 -*-
#
# :authors: Arturo Filastò
# :licence: see LICENSE
from twisted.internet import defer
from twisted.python import usage
from ooni.templates import httpt
class UsageOptions(usage.Options):
optParameters = [
['url', 'u', None, 'Specify a single URL to test.'],
['factor', 'f', 0.8, 'What factor should be used for triggering censorship (0.8 == 80%)']
]
class HTTPBodyLength(httpt.HTTPTest):
"""
Performs two GET requests to the set of sites to be tested for
censorship, one over a known good control channel (Tor), the other over the
test network.
We then look at the response body lengths and see if the control response
differs from the experiment response by a certain factor.
"""
name = "HTTP Body length test"
author = "Arturo Filastò"
version = "0.1"
usageOptions = UsageOptions
inputFile = ['file', 'f', None,
'List of URLs to perform GET requests to']
# These values are used for determining censorship based on response body
# lengths
control_body_length = None
experiment_body_length = None
def setUp(self):
"""
Check for inputs.
"""
if self.input:
self.url = self.input
elif self.localOptions['url']:
self.url = self.localOptions['url']
else:
raise Exception("No input specified")
self.factor = self.localOptions['factor']
def compare_body_lengths(self):
body_length_a = self.control_body_length
body_length_b = self.experiment_body_length
rel = float(body_length_a)/float(body_length_b)
if rel > 1:
rel = 1/rel
self.report['body_proportion'] = rel
self.report['factor'] = self.factor
if rel < self.factor:
self.report['censorship'] = True
else:
self.report['censorship'] = False
def test_get(self):
def errback(failure):
log.err("There was an error while testing %s" % self.url)
log.exception(failure)
def control_body(result):
self.control_body_length = len(result)
if self.experiment_body_length:
self.compare_body_lengths()
def experiment_body(result):
self.experiment_body_length = len(result)
if self.control_body_length:
self.compare_body_lengths()
dl = []
experiment_request = self.doRequest(self.url, method="GET",
body_processor=experiment_body)
control_request = self.doRequest(self.url, method="GET",
use_tor=True, body_processor=control_body)
dl.append(experiment_request)
dl.append(control_request)
d = defer.DeferredList(dl)
return d
|
|
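The censorship heuristic in this test boils down to a symmetric ratio of the two body lengths checked against a threshold. Pulled out as a pure function it is trivially unit-testable (a sketch, not the ooni-probe API; a zero-length body would need a guard before dividing):

def body_proportion(control_len, experiment_len):
    # Symmetric ratio in (0, 1]: argument order does not matter.
    rel = float(control_len) / float(experiment_len)
    return 1 / rel if rel > 1 else rel

def looks_censored(control_len, experiment_len, factor=0.8):
    return body_proportion(control_len, experiment_len) < factor

assert looks_censored(10000, 100)       # tiny block page vs. real body
assert not looks_censored(10000, 9500)  # ordinary size variation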
f970198596d8c20c89701fbcce38fd5736096e86
|
namegen/markov.py
|
namegen/markov.py
|
#!/usr/bin/env python
"""
Module which produces a readable name from 256 bits of random data
(i.e. sha-256 hash)
"""
MAXWORDLEN=12
#
# Modules which contain probability dictionaries
# generated by genmarkov script
#
from surname_hash import surname
from female_hash import female
from male_hash import male
#
import operator
gendernames=[male,female]
class RandError(Exception):
"""
Raised if all bits are exhausted
"""
pass
def getrandom(count):
"""
Extracts the necessary amount of randomness (not necessarily
an integer number of bits) from the given input
"""
global data
if data==0:
raise RandError("No more random data")
data,r=divmod(data,count)
return r
def morerandom():
"""
Checks if there are some random bits left
"""
global data
return data!=0
def mkword(x):
"""
Computes a word from the given dictionary.
Randomly chooses the first and second letter
with the probabilities of their occurrence after whitespace,
and then chooses the next letter until a space is encountered
"""
count=0
global MAXWORDLEN
first=x[' ']
count=reduce(operator.add,map(len,first.itervalues()))
i=getrandom(count)
for letter in sorted(first):
if i<len(first[letter]):
word=letter+first[letter][i]
break
i-=len(first[letter])
while word[-1]!=' ':
y=x[word[-2]][word[-1]]
word+=y[getrandom(len(y))]
if len(word)>= MAXWORDLEN:
word+=" "
break
return word.title()
def mkname(hash):
"""
Gets 64 hexadecimal digits and computes name from it
"""
global data
if (len(hash)<64):
raise ValueError("To few random data: "+hash)
data=int(hash,16)
x=gendernames[getrandom(2)]
name=mkword(surname)
more=True
while more:
try:
name+=mkword(x)
except RandError:
more= False
return name.strip()
if __name__ == "__main__":
#
# Test main routine which reads random data from
#
import sys
from base64 import b16encode
from os import urandom
if len(sys.argv)>1 and sys.argv[1].isdigit():
for i in xrange(0,int(sys.argv[1])):
d=b16encode(urandom(32))
try:
print mkname(d)
except RandError:
print "Not enough random: "+d
else:
for i in xrange(0,20):
d=b16encode(urandom(32))
print d,mkname(d)
|
Set maximal word length limit
|
Set maximal word length limit
|
Python
|
agpl-3.0
|
cheshirenet/cheshirenet
|
Set maximal word length limit
|
#!/usr/bin/env python
"""
Module which produces a readable name from 256 bits of random data
(i.e. sha-256 hash)
"""
MAXWORDLEN=12
#
# Modules which contain probability dictionaries
# generated by genmarkov script
#
from surname_hash import surname
from female_hash import female
from male_hash import male
#
import operator
gendernames=[male,female]
class RandError(Exception):
"""
Raised if all bits are exhausted
"""
pass
def getrandom(count):
"""
Extracts the necessary amount of randomness (not necessarily
an integer number of bits) from the given input
"""
global data
if data==0:
raise RandError("No more random data")
data,r=divmod(data,count)
return r
def morerandom():
"""
Checks if there are some random bits left
"""
global data
return data!=0
def mkword(x):
"""
Computes a word from the given dictionary.
Randomly chooses the first and second letter
with the probabilities of their occurrence after whitespace,
and then chooses the next letter until a space is encountered
"""
count=0
global MAXWORDLEN
first=x[' ']
count=reduce(operator.add,map(len,first.itervalues()))
i=getrandom(count)
for letter in sorted(first):
if i<len(first[letter]):
word=letter+first[letter][i]
break
i-=len(first[letter])
while word[-1]!=' ':
y=x[word[-2]][word[-1]]
word+=y[getrandom(len(y))]
if len(word)>= MAXWORDLEN:
word+=" "
break
return word.title()
def mkname(hash):
"""
Gets 64 hexadecimal digits and computes name from it
"""
global data
if (len(hash)<64):
raise ValueError("To few random data: "+hash)
data=int(hash,16)
x=gendernames[getrandom(2)]
name=mkword(surname)
more=True
while more:
try:
name+=mkword(x)
except RandError:
more= False
return name.strip()
if __name__ == "__main__":
#
# Test main routine which reads random data from
#
import sys
from base64 import b16encode
from os import urandom
if len(sys.argv)>1 and sys.argv[1].isdigit():
for i in xrange(0,int(sys.argv[1])):
d=b16encode(urandom(32))
try:
print mkname(d)
except RandError:
print "Not enough random: "+d
else:
for i in xrange(0,20):
d=b16encode(urandom(32))
print d,mkname(d)
|
<commit_before><commit_msg>Set maximal word length limit<commit_after>
|
#!/usr/bin/env python
"""
Module which produces a readable name from 256 bits of random data
(i.e. sha-256 hash)
"""
MAXWORDLEN=12
#
# Modules which contain probability dictionaries
# generated by genmarkov script
#
from surname_hash import surname
from female_hash import female
from male_hash import male
#
import operator
gendernames=[male,female]
class RandError(Exception):
"""
Raised if all bits are exhausted
"""
pass
def getrandom(count):
"""
Extracts the necessary amount of randomness (not necessarily
an integer number of bits) from the given input
"""
global data
if data==0:
raise RandError("No more random data")
data,r=divmod(data,count)
return r
def morerandom():
"""
Checks if there are some random bits left
"""
global data
return data!=0
def mkword(x):
"""
Computes a word from the given dictionary.
Randomly chooses the first and second letter
with the probabilities of their occurrence after whitespace,
and then chooses the next letter until a space is encountered
"""
count=0
global MAXWORDLEN
first=x[' ']
count=reduce(operator.add,map(len,first.itervalues()))
i=getrandom(count)
for letter in sorted(first):
if i<len(first[letter]):
word=letter+first[letter][i]
break
i-=len(first[letter])
while word[-1]!=' ':
y=x[word[-2]][word[-1]]
word+=y[getrandom(len(y))]
if len(word)>= MAXWORDLEN:
word+=" "
break
return word.title()
def mkname(hash):
"""
Gets 64 hexadecimal digits and computes name from it
"""
global data
if (len(hash)<64):
raise ValueError("To few random data: "+hash)
data=int(hash,16)
x=gendernames[getrandom(2)]
name=mkword(surname)
more=True
while more:
try:
name+=mkword(x)
except RandError:
more= False
return name.strip()
if __name__ == "__main__":
#
# Test main routine which reads random data from
#
import sys
from base64 import b16encode
from os import urandom
if len(sys.argv)>1 and sys.argv[1].isdigit():
for i in xrange(0,int(sys.argv[1])):
d=b16encode(urandom(32))
try:
print mkname(d)
except RandError:
print "Not enough random: "+d
else:
for i in xrange(0,20):
d=b16encode(urandom(32))
print d,mkname(d)
|
Set maximal word length limit#!/usr/bin/env python
"""
Module which produces a readable name from 256 bits of random data
(i.e. sha-256 hash)
"""
MAXWORDLEN=12
#
# Modules which contain probability dictionaries
# generated by genmarkov script
#
from surname_hash import surname
from female_hash import female
from male_hash import male
#
import operator
gendernames=[male,female]
class RandError(Exception):
"""
Raised if all bits are exhausted
"""
pass
def getrandom(count):
"""
Extracts the necessary amount of randomness (not necessarily
an integer number of bits) from the given input
"""
global data
if data==0:
raise RandError("No more random data")
data,r=divmod(data,count)
return r
def morerandom():
"""
Checks if there are some random bits left
"""
global data
return data!=0
def mkword(x):
"""
Computes a word from the given dictionary.
Randomly chooses the first and second letter
with the probabilities of their occurrence after whitespace,
and then chooses the next letter until a space is encountered
"""
count=0
global MAXWORDLEN
first=x[' ']
count=reduce(operator.add,map(len,first.itervalues()))
i=getrandom(count)
for letter in sorted(first):
if i<len(first[letter]):
word=letter+first[letter][i]
break
i-=len(first[letter])
while word[-1]!=' ':
y=x[word[-2]][word[-1]]
word+=y[getrandom(len(y))]
if len(word)>= MAXWORDLEN:
word+=" "
break
return word.title()
def mkname(hash):
"""
Gets 64 hexadecimal digits and computes name from it
"""
global data
if (len(hash)<64):
raise ValueError("To few random data: "+hash)
data=int(hash,16)
x=gendernames[getrandom(2)]
name=mkword(surname)
more=True
while more:
try:
name+=mkword(x)
except RandError:
more= False
return name.strip()
if __name__ == "__main__":
#
# Test main routine which reads random data from
#
import sys
from base64 import b16encode
from os import urandom
if len(sys.argv)>1 and sys.argv[1].isdigit():
for i in xrange(0,int(sys.argv[1])):
d=b16encode(urandom(32))
try:
print mkname(d)
except RandError:
print "Not enough random: "+d
else:
for i in xrange(0,20):
d=b16encode(urandom(32))
print d,mkname(d)
|
<commit_before><commit_msg>Set maximal word length limit<commit_after>#!/usr/bin/env python
"""
Module which produces a readable name from 256 bits of random data
(i.e. sha-256 hash)
"""
MAXWORDLEN=12
#
# Modules which contain probability dictionaries
# generated by genmarkov script
#
from surname_hash import surname
from female_hash import female
from male_hash import male
#
import operator
gendernames=[male,female]
class RandError(Exception):
"""
Raised if all bits are exhausted
"""
pass
def getrandom(count):
"""
Extracts the necessary amount of randomness (not necessarily
an integer number of bits) from the given input
"""
global data
if data==0:
raise RandError("No more random data")
data,r=divmod(data,count)
return r
def morerandom():
"""
Checks if there are some random bits left
"""
global data
return data!=0
def mkword(x):
"""
Computes a word from the given dictionary.
Randomly chooses the first and second letter
with the probabilities of their occurrence after whitespace,
and then chooses the next letter until a space is encountered
"""
count=0
global MAXWORDLEN
first=x[' ']
count=reduce(operator.add,map(len,first.itervalues()))
i=getrandom(count)
for letter in sorted(first):
if i<len(first[letter]):
word=letter+first[letter][i]
break
i-=len(first[letter])
while word[-1]!=' ':
y=x[word[-2]][word[-1]]
word+=y[getrandom(len(y))]
if len(word)>= MAXWORDLEN:
word+=" "
break
return word.title()
def mkname(hash):
"""
Gets 64 hexadecimal digits and computes name from it
"""
global data
if (len(hash)<64):
raise ValueError("To few random data: "+hash)
data=int(hash,16)
x=gendernames[getrandom(2)]
name=mkword(surname)
more=True
while more:
try:
name+=mkword(x)
except RandError:
more= False
return name.strip()
if __name__ == "__main__":
#
# Test main routine which reads random data from
#
import sys
from base64 import b16encode
from os import urandom
if len(sys.argv)>1 and sys.argv[1].isdigit():
for i in xrange(0,int(sys.argv[1])):
d=b16encode(urandom(32))
try:
print mkname(d)
except RandError:
print "Not enough random: "+d
else:
for i in xrange(0,20):
d=b16encode(urandom(32))
print d,mkname(d)
|
|
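The generator in this record treats the 256-bit hash as one big integer and peels choices off it with divmod, so a given hash always produces the same name until its entropy runs out. The core trick in isolation (a sketch; the constant stands in for a sha-256 digest):

def choices_from_int(data, sizes):
    # Each divmod yields a remainder usable as an index into `size` options.
    picks = []
    for size in sizes:
        if data == 0:
            raise ValueError('entropy exhausted')
        data, r = divmod(data, size)
        picks.append(r)
    return picks

data = int('a3' * 32, 16)
print(choices_from_int(data, [2, 26, 26, 26]))  # gender pick, then letters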
3601a0dc9d762e17c24e0dbf86ee1ef4a00c49cd
|
yithlibraryserver/tests/test_security.py
|
yithlibraryserver/tests/test_security.py
|
from pyramid.httpexceptions import HTTPBadRequest, HTTPUnauthorized
from yithlibraryserver import testing
from yithlibraryserver.security import authorize_user
class AuthorizationTests(testing.TestCase):
clean_collections = ('access_codes', 'users')
def test_authorize_user(self):
request = testing.FakeRequest({})
# The authorization header is required
self.assertRaises(HTTPUnauthorized, authorize_user, request)
request = testing.FakeRequest({'Authorization': 'Basic foobar'})
# Only the bearer method is allowed
self.assertRaises(HTTPBadRequest, authorize_user, request)
request = testing.FakeRequest({
'Authorization': 'Bearer 1234',
}, self.db)
# Invalid code
self.assertRaises(HTTPUnauthorized, authorize_user, request)
access_code_id = self.db.access_codes.insert({
'code': '1234',
'user': 'user1',
}, safe=True)
request = testing.FakeRequest({
'Authorization': 'Bearer 1234',
}, self.db)
# Invalid user
self.assertRaises(HTTPUnauthorized, authorize_user, request)
user_id = self.db.users.insert({
'username': 'user1',
}, safe=True)
self.db.access_codes.update({'_id': access_code_id}, {
'$set': {'user': user_id},
}, safe=True)
request = testing.FakeRequest({
'Authorization': 'Bearer 1234',
}, self.db)
# Valid user
authorized_user = authorize_user(request)
self.assertEqual(authorized_user['username'], 'user1')
|
Add tests for the authorize_user function
|
Add tests for the authorize_user function
|
Python
|
agpl-3.0
|
lorenzogil/yith-library-server,Yaco-Sistemas/yith-library-server,Yaco-Sistemas/yith-library-server,lorenzogil/yith-library-server,lorenzogil/yith-library-server,Yaco-Sistemas/yith-library-server
|
Add tests for the authorize_user function
|
from pyramid.httpexceptions import HTTPBadRequest, HTTPUnauthorized
from yithlibraryserver import testing
from yithlibraryserver.security import authorize_user
class AuthorizationTests(testing.TestCase):
clean_collections = ('access_codes', 'users')
def test_authorize_user(self):
request = testing.FakeRequest({})
# The authorization header is required
self.assertRaises(HTTPUnauthorized, authorize_user, request)
request = testing.FakeRequest({'Authorization': 'Basic foobar'})
# Only the bearer method is allowed
self.assertRaises(HTTPBadRequest, authorize_user, request)
request = testing.FakeRequest({
'Authorization': 'Bearer 1234',
}, self.db)
# Invalid code
self.assertRaises(HTTPUnauthorized, authorize_user, request)
access_code_id = self.db.access_codes.insert({
'code': '1234',
'user': 'user1',
}, safe=True)
request = testing.FakeRequest({
'Authorization': 'Bearer 1234',
}, self.db)
# Invalid user
self.assertRaises(HTTPUnauthorized, authorize_user, request)
user_id = self.db.users.insert({
'username': 'user1',
}, safe=True)
self.db.access_codes.update({'_id': access_code_id}, {
'$set': {'user': user_id},
}, safe=True)
request = testing.FakeRequest({
'Authorization': 'Bearer 1234',
}, self.db)
# Valid user
authorized_user = authorize_user(request)
self.assertEqual(authorized_user['username'], 'user1')
|
<commit_before><commit_msg>Add tests for the authorize_user function<commit_after>
|
from pyramid.httpexceptions import HTTPBadRequest, HTTPUnauthorized
from yithlibraryserver import testing
from yithlibraryserver.security import authorize_user
class AuthorizationTests(testing.TestCase):
clean_collections = ('access_codes', 'users')
def test_authorize_user(self):
request = testing.FakeRequest({})
# The authorization header is required
self.assertRaises(HTTPUnauthorized, authorize_user, request)
request = testing.FakeRequest({'Authorization': 'Basic foobar'})
# Only the bearer method is allowed
self.assertRaises(HTTPBadRequest, authorize_user, request)
request = testing.FakeRequest({
'Authorization': 'Bearer 1234',
}, self.db)
# Invalid code
self.assertRaises(HTTPUnauthorized, authorize_user, request)
access_code_id = self.db.access_codes.insert({
'code': '1234',
'user': 'user1',
}, safe=True)
request = testing.FakeRequest({
'Authorization': 'Bearer 1234',
}, self.db)
# Invalid user
self.assertRaises(HTTPUnauthorized, authorize_user, request)
user_id = self.db.users.insert({
'username': 'user1',
}, safe=True)
self.db.access_codes.update({'_id': access_code_id}, {
'$set': {'user': user_id},
}, safe=True)
request = testing.FakeRequest({
'Authorization': 'Bearer 1234',
}, self.db)
# Valid user
authorized_user = authorize_user(request)
self.assertEqual(authorized_user['username'], 'user1')
|
Add tests for the authorize_user functionfrom pyramid.httpexceptions import HTTPBadRequest, HTTPUnauthorized
from yithlibraryserver import testing
from yithlibraryserver.security import authorize_user
class AuthorizationTests(testing.TestCase):
clean_collections = ('access_codes', 'users')
def test_authorize_user(self):
request = testing.FakeRequest({})
# The authorization header is required
self.assertRaises(HTTPUnauthorized, authorize_user, request)
request = testing.FakeRequest({'Authorization': 'Basic foobar'})
# Only the bearer method is allowed
self.assertRaises(HTTPBadRequest, authorize_user, request)
request = testing.FakeRequest({
'Authorization': 'Bearer 1234',
}, self.db)
# Invalid code
self.assertRaises(HTTPUnauthorized, authorize_user, request)
access_code_id = self.db.access_codes.insert({
'code': '1234',
'user': 'user1',
}, safe=True)
request = testing.FakeRequest({
'Authorization': 'Bearer 1234',
}, self.db)
# Invalid user
self.assertRaises(HTTPUnauthorized, authorize_user, request)
user_id = self.db.users.insert({
'username': 'user1',
}, safe=True)
self.db.access_codes.update({'_id': access_code_id}, {
'$set': {'user': user_id},
}, safe=True)
request = testing.FakeRequest({
'Authorization': 'Bearer 1234',
}, self.db)
# Valid user
authorized_user = authorize_user(request)
self.assertEqual(authorized_user['username'], 'user1')
|
<commit_before><commit_msg>Add tests for the authorize_user function<commit_after>from pyramid.httpexceptions import HTTPBadRequest, HTTPUnauthorized
from yithlibraryserver import testing
from yithlibraryserver.security import authorize_user
class AuthorizationTests(testing.TestCase):
clean_collections = ('access_codes', 'users')
def test_authorize_user(self):
request = testing.FakeRequest({})
# The authorization header is required
self.assertRaises(HTTPUnauthorized, authorize_user, request)
request = testing.FakeRequest({'Authorization': 'Basic foobar'})
# Only the bearer method is allowed
self.assertRaises(HTTPBadRequest, authorize_user, request)
request = testing.FakeRequest({
'Authorization': 'Bearer 1234',
}, self.db)
# Invalid code
self.assertRaises(HTTPUnauthorized, authorize_user, request)
access_code_id = self.db.access_codes.insert({
'code': '1234',
'user': 'user1',
}, safe=True)
request = testing.FakeRequest({
'Authorization': 'Bearer 1234',
}, self.db)
# Invalid user
self.assertRaises(HTTPUnauthorized, authorize_user, request)
user_id = self.db.users.insert({
'username': 'user1',
}, safe=True)
self.db.access_codes.update({'_id': access_code_id}, {
'$set': {'user': user_id},
}, safe=True)
request = testing.FakeRequest({
'Authorization': 'Bearer 1234',
}, self.db)
# Valid user
authorized_user = authorize_user(request)
self.assertEqual(authorized_user['username'], 'user1')
|
|
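The tests in this record walk the failure ladder of bearer-token auth: missing header, wrong scheme, unknown code, dangling user, then success. The header-parsing step they exercise looks roughly like this (a sketch, not the yith-library-server implementation):

def extract_bearer_token(headers):
    auth = headers.get('Authorization')
    if auth is None:
        raise LookupError('authorization header is required')  # -> 401
    scheme, _, token = auth.partition(' ')
    if scheme.lower() != 'bearer' or not token:
        raise ValueError('only the bearer scheme is allowed')  # -> 400
    return token

assert extract_bearer_token({'Authorization': 'Bearer 1234'}) == '1234'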
40dd078b5e176ae5039bf20dcb50350e8f065808
|
recognition/scrollError.py
|
recognition/scrollError.py
|
from sense_hat import SenseHat
import sys
sense = SenseHat()
sense.show_message(sys.stdin.read(), scroll_speed=.08, text_colour=[255, 0, 0])
|
Create python script to scroll error messages
|
Create python script to scroll error messages
|
Python
|
mit
|
jeffstephens/pi-resto,jeffstephens/pi-resto
|
Create python script to scroll error messages
|
from sense_hat import SenseHat
import sys
sense = SenseHat()
sense.show_message(sys.stdin.read(), scroll_speed=.08, text_colour=[255, 0, 0])
|
<commit_before><commit_msg>Create python script to scroll error messages<commit_after>
|
from sense_hat import SenseHat
import sys
sense = SenseHat()
sense.show_message(sys.stdin.read(), scroll_speed=.08, text_colour=[255, 0, 0])
|
Create python script to scroll error messagesfrom sense_hat import SenseHat
import sys
sense = SenseHat()
sense.show_message(sys.stdin.read(), scroll_speed=.08, text_colour=[255, 0, 0])
|
<commit_before><commit_msg>Create python script to scroll error messages<commit_after>from sense_hat import SenseHat
import sys
sense = SenseHat()
sense.show_message(sys.stdin.read(), scroll_speed=.08, text_colour=[255, 0, 0])
|
|
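Because the script reads stdin it slots into a pipeline, e.g. some_command 2>&1 | python scrollError.py. A guarded variant of the same idea (runs only on a Raspberry Pi with the Sense HAT attached; the fallback text is illustrative):

import sys
from sense_hat import SenseHat

message = sys.stdin.read().strip() or 'no error text received'
SenseHat().show_message(message, scroll_speed=.08, text_colour=[255, 0, 0])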
b0c03b86d606c85dd1cab1ad9e9678e1057d0ae1
|
Lib/fontTools/pens/ttGlyphPen.py
|
Lib/fontTools/pens/ttGlyphPen.py
|
from __future__ import print_function, division, absolute_import
from array import array
from fontTools.misc.py23 import *
from fontTools.pens.basePen import AbstractPen
from fontTools.ttLib.tables import ttProgram
from fontTools.ttLib.tables._g_l_y_f import Glyph
from fontTools.ttLib.tables._g_l_y_f import GlyphComponent
from fontTools.ttLib.tables._g_l_y_f import GlyphCoordinates
__all__ = ["TTGlyphPen"]
class TTGlyphPen(AbstractPen):
"""Pen used for drawing to a TrueType glyph."""
def __init__(self):
self.points = []
self.endPts = []
self.types = []
self.components = []
def _addPoint(self, pt, onCurve):
self.points.append([int(coord) for coord in pt])
self.types.append(onCurve)
def lineTo(self, pt):
self._addPoint(pt, 1)
def moveTo(self, pt):
assert (not self.points) or (self.endPts[-1] == len(self.points) - 1)
self.lineTo(pt)
def qCurveTo(self, *points):
for pt in points[:-1]:
self._addPoint(pt, 0)
self._addPoint(points[-1], 1)
def closePath(self):
endPt = len(self.points) - 1
# ignore anchors
if endPt == 0 or (self.endPts and endPt == self.endPts[-1] + 1):
self.points.pop()
self.types.pop()
return
self.endPts.append(endPt)
def endPath(self):
# TrueType contours are always "closed"
self.closePath()
def addComponent(self, glyphName, transformation):
component = GlyphComponent()
component.glyphName = glyphName
component.transform = (transformation[:2], transformation[2:4])
component.x, component.y = [int(n) for n in transformation[4:]]
component.flags = 0
self.components.append(component)
def glyph(self):
glyph = Glyph()
glyph.coordinates = GlyphCoordinates(self.points)
glyph.endPtsOfContours = self.endPts
glyph.flags = array("B", self.types)
glyph.components = self.components
# TrueType glyphs can't have both contours and components
if glyph.components:
glyph.numberOfContours = -1
else:
glyph.numberOfContours = len(glyph.endPtsOfContours)
glyph.program = ttProgram.Program()
glyph.program.fromBytecode("")
return glyph
|
Add pen which draws to TrueType glyphs.
|
Add pen which draws to TrueType glyphs.
|
Python
|
mit
|
googlefonts/fonttools,fonttools/fonttools
|
Add pen which draws to TrueType glyphs.
|
from __future__ import print_function, division, absolute_import
from array import array
from fontTools.misc.py23 import *
from fontTools.pens.basePen import AbstractPen
from fontTools.ttLib.tables import ttProgram
from fontTools.ttLib.tables._g_l_y_f import Glyph
from fontTools.ttLib.tables._g_l_y_f import GlyphComponent
from fontTools.ttLib.tables._g_l_y_f import GlyphCoordinates
__all__ = ["TTGlyphPen"]
class TTGlyphPen(AbstractPen):
"""Pen used for drawing to a TrueType glyph."""
def __init__(self):
self.points = []
self.endPts = []
self.types = []
self.components = []
def _addPoint(self, pt, onCurve):
self.points.append([int(coord) for coord in pt])
self.types.append(onCurve)
def lineTo(self, pt):
self._addPoint(pt, 1)
def moveTo(self, pt):
assert (not self.points) or (self.endPts[-1] == len(self.points) - 1)
self.lineTo(pt)
def qCurveTo(self, *points):
for pt in points[:-1]:
self._addPoint(pt, 0)
self._addPoint(points[-1], 1)
def closePath(self):
endPt = len(self.points) - 1
# ignore anchors
if endPt == 0 or (self.endPts and endPt == self.endPts[-1] + 1):
self.points.pop()
self.types.pop()
return
self.endPts.append(endPt)
def endPath(self):
# TrueType contours are always "closed"
self.closePath()
def addComponent(self, glyphName, transformation):
component = GlyphComponent()
component.glyphName = glyphName
component.transform = (transformation[:2], transformation[2:4])
component.x, component.y = [int(n) for n in transformation[4:]]
component.flags = 0
self.components.append(component)
def glyph(self):
glyph = Glyph()
glyph.coordinates = GlyphCoordinates(self.points)
glyph.endPtsOfContours = self.endPts
glyph.flags = array("B", self.types)
glyph.components = self.components
# TrueType glyphs can't have both contours and components
if glyph.components:
glyph.numberOfContours = -1
else:
glyph.numberOfContours = len(glyph.endPtsOfContours)
glyph.program = ttProgram.Program()
glyph.program.fromBytecode("")
return glyph
|
<commit_before><commit_msg>Add pen which draws to TrueType glyphs.<commit_after>
|
from __future__ import print_function, division, absolute_import
from array import array
from fontTools.misc.py23 import *
from fontTools.pens.basePen import AbstractPen
from fontTools.ttLib.tables import ttProgram
from fontTools.ttLib.tables._g_l_y_f import Glyph
from fontTools.ttLib.tables._g_l_y_f import GlyphComponent
from fontTools.ttLib.tables._g_l_y_f import GlyphCoordinates
__all__ = ["TTGlyphPen"]
class TTGlyphPen(AbstractPen):
"""Pen used for drawing to a TrueType glyph."""
def __init__(self):
self.points = []
self.endPts = []
self.types = []
self.components = []
def _addPoint(self, pt, onCurve):
self.points.append([int(coord) for coord in pt])
self.types.append(onCurve)
def lineTo(self, pt):
self._addPoint(pt, 1)
def moveTo(self, pt):
assert (not self.points) or (self.endPts[-1] == len(self.points) - 1)
self.lineTo(pt)
def qCurveTo(self, *points):
for pt in points[:-1]:
self._addPoint(pt, 0)
self._addPoint(points[-1], 1)
def closePath(self):
endPt = len(self.points) - 1
# ignore anchors
if endPt == 0 or (self.endPts and endPt == self.endPts[-1] + 1):
self.points.pop()
self.types.pop()
return
self.endPts.append(endPt)
def endPath(self):
# TrueType contours are always "closed"
self.closePath()
def addComponent(self, glyphName, transformation):
component = GlyphComponent()
component.glyphName = glyphName
component.transform = (transformation[:2], transformation[2:4])
component.x, component.y = [int(n) for n in transformation[4:]]
component.flags = 0
self.components.append(component)
def glyph(self):
glyph = Glyph()
glyph.coordinates = GlyphCoordinates(self.points)
glyph.endPtsOfContours = self.endPts
glyph.flags = array("B", self.types)
glyph.components = self.components
# TrueType glyphs can't have both contours and components
if glyph.components:
glyph.numberOfContours = -1
else:
glyph.numberOfContours = len(glyph.endPtsOfContours)
glyph.program = ttProgram.Program()
glyph.program.fromBytecode("")
return glyph
|
Add pen which draws to TrueType glyphs.from __future__ import print_function, division, absolute_import
from array import array
from fontTools.misc.py23 import *
from fontTools.pens.basePen import AbstractPen
from fontTools.ttLib.tables import ttProgram
from fontTools.ttLib.tables._g_l_y_f import Glyph
from fontTools.ttLib.tables._g_l_y_f import GlyphComponent
from fontTools.ttLib.tables._g_l_y_f import GlyphCoordinates
__all__ = ["TTGlyphPen"]
class TTGlyphPen(AbstractPen):
"""Pen used for drawing to a TrueType glyph."""
def __init__(self):
self.points = []
self.endPts = []
self.types = []
self.components = []
def _addPoint(self, pt, onCurve):
self.points.append([int(coord) for coord in pt])
self.types.append(onCurve)
def lineTo(self, pt):
self._addPoint(pt, 1)
def moveTo(self, pt):
assert (not self.points) or (self.endPts[-1] == len(self.points) - 1)
self.lineTo(pt)
def qCurveTo(self, *points):
for pt in points[:-1]:
self._addPoint(pt, 0)
self._addPoint(points[-1], 1)
def closePath(self):
endPt = len(self.points) - 1
# ignore anchors
if endPt == 0 or (self.endPts and endPt == self.endPts[-1] + 1):
self.points.pop()
self.types.pop()
return
self.endPts.append(endPt)
def endPath(self):
# TrueType contours are always "closed"
self.closePath()
def addComponent(self, glyphName, transformation):
component = GlyphComponent()
component.glyphName = glyphName
component.transform = (transformation[:2], transformation[2:4])
component.x, component.y = [int(n) for n in transformation[4:]]
component.flags = 0
self.components.append(component)
def glyph(self):
glyph = Glyph()
glyph.coordinates = GlyphCoordinates(self.points)
glyph.endPtsOfContours = self.endPts
glyph.flags = array("B", self.types)
glyph.components = self.components
# TrueType glyphs can't have both contours and components
if glyph.components:
glyph.numberOfContours = -1
else:
glyph.numberOfContours = len(glyph.endPtsOfContours)
glyph.program = ttProgram.Program()
glyph.program.fromBytecode("")
return glyph
|
<commit_before><commit_msg>Add pen which draws to TrueType glyphs.<commit_after>from __future__ import print_function, division, absolute_import
from array import array
from fontTools.misc.py23 import *
from fontTools.pens.basePen import AbstractPen
from fontTools.ttLib.tables import ttProgram
from fontTools.ttLib.tables._g_l_y_f import Glyph
from fontTools.ttLib.tables._g_l_y_f import GlyphComponent
from fontTools.ttLib.tables._g_l_y_f import GlyphCoordinates
__all__ = ["TTGlyphPen"]
class TTGlyphPen(AbstractPen):
"""Pen used for drawing to a TrueType glyph."""
def __init__(self):
self.points = []
self.endPts = []
self.types = []
self.components = []
def _addPoint(self, pt, onCurve):
self.points.append([int(coord) for coord in pt])
self.types.append(onCurve)
def lineTo(self, pt):
self._addPoint(pt, 1)
def moveTo(self, pt):
assert (not self.points) or (self.endPts[-1] == len(self.points) - 1)
self.lineTo(pt)
def qCurveTo(self, *points):
for pt in points[:-1]:
self._addPoint(pt, 0)
self._addPoint(points[-1], 1)
def closePath(self):
endPt = len(self.points) - 1
# ignore anchors
if endPt == 0 or (self.endPts and endPt == self.endPts[-1] + 1):
self.points.pop()
self.types.pop()
return
self.endPts.append(endPt)
def endPath(self):
# TrueType contours are always "closed"
self.closePath()
def addComponent(self, glyphName, transformation):
component = GlyphComponent()
component.glyphName = glyphName
component.transform = (transformation[:2], transformation[2:4])
component.x, component.y = [int(n) for n in transformation[4:]]
component.flags = 0
self.components.append(component)
def glyph(self):
glyph = Glyph()
glyph.coordinates = GlyphCoordinates(self.points)
glyph.endPtsOfContours = self.endPts
glyph.flags = array("B", self.types)
glyph.components = self.components
# TrueType glyphs can't have both contours and components
if glyph.components:
glyph.numberOfContours = -1
else:
glyph.numberOfContours = len(glyph.endPtsOfContours)
glyph.program = ttProgram.Program()
glyph.program.fromBytecode("")
return glyph
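For illustration, a minimal usage sketch of the pen defined above (it assumes fontTools is importable and the class is in scope); a single square contour yields one contour and no components:
```python
pen = TTGlyphPen()
pen.moveTo((0, 0))          # opens the contour
pen.lineTo((0, 100))
pen.lineTo((100, 100))
pen.lineTo((100, 0))
pen.closePath()             # records the contour's end point
glyph = pen.glyph()         # assembles a fontTools Glyph
assert glyph.numberOfContours == 1
```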
|
|
08447fa344e21d6d704c6f195ad2b7405fa8f916
|
saleor/order/test_order.py
|
saleor/order/test_order.py
|
from .models import Order
def test_total_property():
order = Order(total_net=20, total_tax=5)
assert order.total.gross == 25
assert order.total.tax == 5
assert order.total.net == 20
|
Add test for total property
|
Add test for total property
|
Python
|
bsd-3-clause
|
UITools/saleor,car3oon/saleor,itbabu/saleor,tfroehlich82/saleor,tfroehlich82/saleor,maferelo/saleor,laosunhust/saleor,jreigel/saleor,itbabu/saleor,UITools/saleor,car3oon/saleor,KenMutemi/saleor,maferelo/saleor,HyperManTT/ECommerceSaleor,itbabu/saleor,spartonia/saleor,KenMutemi/saleor,laosunhust/saleor,jreigel/saleor,rodrigozn/CW-Shop,rchav/vinerack,mociepka/saleor,jreigel/saleor,rodrigozn/CW-Shop,KenMutemi/saleor,spartonia/saleor,HyperManTT/ECommerceSaleor,mociepka/saleor,UITools/saleor,maferelo/saleor,tfroehlich82/saleor,car3oon/saleor,rodrigozn/CW-Shop,HyperManTT/ECommerceSaleor,mociepka/saleor,laosunhust/saleor,spartonia/saleor,spartonia/saleor,UITools/saleor,laosunhust/saleor,rchav/vinerack,rchav/vinerack,UITools/saleor
|
Add test for total property
|
from .models import Order
def test_total_property():
order = Order(total_net=20, total_tax=5)
assert order.total.gross == 25
assert order.total.tax == 5
assert order.total.net == 20
|
<commit_before><commit_msg>Add test for total property<commit_after>
|
from .models import Order
def test_total_property():
order = Order(total_net=20, total_tax=5)
assert order.total.gross == 25
assert order.total.tax == 5
assert order.total.net == 20
|
Add test for total propertyfrom .models import Order
def test_total_property():
order = Order(total_net=20, total_tax=5)
assert order.total.gross == 25
assert order.total.tax == 5
assert order.total.net == 20
|
<commit_before><commit_msg>Add test for total property<commit_after>from .models import Order
def test_total_property():
order = Order(total_net=20, total_tax=5)
assert order.total.gross == 25
assert order.total.tax == 5
assert order.total.net == 20
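For context, a hedged sketch of the sort of `total` property these assertions exercise; the real saleor Order model is more elaborate, so everything below is illustrative only:
```python
class Order:
    def __init__(self, total_net=0, total_tax=0):
        self.total_net = total_net
        self.total_tax = total_tax

    @property
    def total(self):
        # Minimal value object exposing net/tax/gross, as the test expects.
        class Price:
            def __init__(self, net, tax):
                self.net, self.tax, self.gross = net, tax, net + tax
        return Price(self.total_net, self.total_tax)
```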
|
|
ace26ab5e713fabd02f4f481956c47640f50b166
|
tempest/tests/lib/services/volume/v2/test_limits_client.py
|
tempest/tests/lib/services/volume/v2/test_limits_client.py
|
# Copyright 2017 FiberHome Telecommunication Technologies CO.,LTD
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.services.volume.v2 import limits_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
class TestLimitsClient(base.BaseServiceTest):
FAKE_LIMIT_INFO = {
"limits": {
"rate": [],
"absolute": {
"totalSnapshotsUsed": 0,
"maxTotalBackups": 10,
"maxTotalVolumeGigabytes": 1000,
"maxTotalSnapshots": 10,
"maxTotalBackupGigabytes": 1000,
"totalBackupGigabytesUsed": 0,
"maxTotalVolumes": 10,
"totalVolumesUsed": 0,
"totalBackupsUsed": 0,
"totalGigabytesUsed": 0
}
}
}
def setUp(self):
super(TestLimitsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = limits_client.LimitsClient(fake_auth,
'volume',
'regionOne')
def _test_show_limits(self, bytes_body=False):
self.check_service_client_function(
self.client.show_limits,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_LIMIT_INFO,
bytes_body)
def test_show_limits_with_str_body(self):
self._test_show_limits()
def test_show_limits_with_bytes_body(self):
self._test_show_limits(bytes_body=True)
|
Add unit test for volume limits client
|
Add unit test for volume limits client
This patch adds unit test for volume v2 limits client.
Partially Implements: blueprint tempest-lib-missing-test-coverage
Change-Id: I08a982758fcdd364e1790e1049d0022f04e4bcc7
|
Python
|
apache-2.0
|
vedujoshi/tempest,openstack/tempest,cisco-openstack/tempest,cisco-openstack/tempest,Juniper/tempest,masayukig/tempest,openstack/tempest,Juniper/tempest,masayukig/tempest,vedujoshi/tempest
|
Add unit test for volume limits client
This patch adds unit test for volume v2 limits client.
Partially Implements: blueprint tempest-lib-missing-test-coverage
Change-Id: I08a982758fcdd364e1790e1049d0022f04e4bcc7
|
# Copyright 2017 FiberHome Telecommunication Technologies CO.,LTD
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.services.volume.v2 import limits_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
class TestLimitsClient(base.BaseServiceTest):
FAKE_LIMIT_INFO = {
"limits": {
"rate": [],
"absolute": {
"totalSnapshotsUsed": 0,
"maxTotalBackups": 10,
"maxTotalVolumeGigabytes": 1000,
"maxTotalSnapshots": 10,
"maxTotalBackupGigabytes": 1000,
"totalBackupGigabytesUsed": 0,
"maxTotalVolumes": 10,
"totalVolumesUsed": 0,
"totalBackupsUsed": 0,
"totalGigabytesUsed": 0
}
}
}
def setUp(self):
super(TestLimitsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = limits_client.LimitsClient(fake_auth,
'volume',
'regionOne')
def _test_show_limits(self, bytes_body=False):
self.check_service_client_function(
self.client.show_limits,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_LIMIT_INFO,
bytes_body)
def test_show_limits_with_str_body(self):
self._test_show_limits()
def test_show_limits_with_bytes_body(self):
self._test_show_limits(bytes_body=True)
|
<commit_before><commit_msg>Add unit test for volume limits client
This patch adds unit test for volume v2 limits client.
Partially Implements: blueprint tempest-lib-missing-test-coverage
Change-Id: I08a982758fcdd364e1790e1049d0022f04e4bcc7<commit_after>
|
# Copyright 2017 FiberHome Telecommunication Technologies CO.,LTD
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.services.volume.v2 import limits_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
class TestLimitsClient(base.BaseServiceTest):
FAKE_LIMIT_INFO = {
"limits": {
"rate": [],
"absolute": {
"totalSnapshotsUsed": 0,
"maxTotalBackups": 10,
"maxTotalVolumeGigabytes": 1000,
"maxTotalSnapshots": 10,
"maxTotalBackupGigabytes": 1000,
"totalBackupGigabytesUsed": 0,
"maxTotalVolumes": 10,
"totalVolumesUsed": 0,
"totalBackupsUsed": 0,
"totalGigabytesUsed": 0
}
}
}
def setUp(self):
super(TestLimitsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = limits_client.LimitsClient(fake_auth,
'volume',
'regionOne')
def _test_show_limits(self, bytes_body=False):
self.check_service_client_function(
self.client.show_limits,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_LIMIT_INFO,
bytes_body)
def test_show_limits_with_str_body(self):
self._test_show_limits()
def test_show_limits_with_bytes_body(self):
self._test_show_limits(bytes_body=True)
|
Add unit test for volume limits client
This patch adds unit test for volume v2 limits client.
Partially Implements: blueprint tempest-lib-missing-test-coverage
Change-Id: I08a982758fcdd364e1790e1049d0022f04e4bcc7# Copyright 2017 FiberHome Telecommunication Technologies CO.,LTD
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.services.volume.v2 import limits_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
class TestLimitsClient(base.BaseServiceTest):
FAKE_LIMIT_INFO = {
"limits": {
"rate": [],
"absolute": {
"totalSnapshotsUsed": 0,
"maxTotalBackups": 10,
"maxTotalVolumeGigabytes": 1000,
"maxTotalSnapshots": 10,
"maxTotalBackupGigabytes": 1000,
"totalBackupGigabytesUsed": 0,
"maxTotalVolumes": 10,
"totalVolumesUsed": 0,
"totalBackupsUsed": 0,
"totalGigabytesUsed": 0
}
}
}
def setUp(self):
super(TestLimitsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = limits_client.LimitsClient(fake_auth,
'volume',
'regionOne')
def _test_show_limits(self, bytes_body=False):
self.check_service_client_function(
self.client.show_limits,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_LIMIT_INFO,
bytes_body)
def test_show_limits_with_str_body(self):
self._test_show_limits()
def test_show_limits_with_bytes_body(self):
self._test_show_limits(bytes_body=True)
|
<commit_before><commit_msg>Add unit test for volume limits client
This patch adds unit test for volume v2 limits client.
Partially Implements: blueprint tempest-lib-missing-test-coverage
Change-Id: I08a982758fcdd364e1790e1049d0022f04e4bcc7<commit_after># Copyright 2017 FiberHome Telecommunication Technologies CO.,LTD
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.services.volume.v2 import limits_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
class TestLimitsClient(base.BaseServiceTest):
FAKE_LIMIT_INFO = {
"limits": {
"rate": [],
"absolute": {
"totalSnapshotsUsed": 0,
"maxTotalBackups": 10,
"maxTotalVolumeGigabytes": 1000,
"maxTotalSnapshots": 10,
"maxTotalBackupGigabytes": 1000,
"totalBackupGigabytesUsed": 0,
"maxTotalVolumes": 10,
"totalVolumesUsed": 0,
"totalBackupsUsed": 0,
"totalGigabytesUsed": 0
}
}
}
def setUp(self):
super(TestLimitsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = limits_client.LimitsClient(fake_auth,
'volume',
'regionOne')
def _test_show_limits(self, bytes_body=False):
self.check_service_client_function(
self.client.show_limits,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_LIMIT_INFO,
bytes_body)
def test_show_limits_with_str_body(self):
self._test_show_limits()
def test_show_limits_with_bytes_body(self):
self._test_show_limits(bytes_body=True)
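As a reading aid, a hedged sketch of the show_limits method this test mocks; the real tempest client may differ in detail:
```python
class LimitsClient(rest_client.RestClient):  # assumes tempest.lib.common.rest_client
    def show_limits(self):
        resp, body = self.get('limits')      # GET /v2/{tenant_id}/limits
        body = json.loads(body)
        self.expected_success(200, resp.status)
        return rest_client.ResponseBody(resp, body)
```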
|
|
e4b9c43d53121d2b21c4b864fcc74674b0b6dfc1
|
scratchpad/Interpolator.py
|
scratchpad/Interpolator.py
|
class Interpolator:
def __init__(self):
self.data = []
def addIndexValue(self, index, value):
self.data.append((index, value))
def valueAtIndex(self, target_index):
if target_index < self.data[0][0]:
return None
elif self.data[-1][0] < target_index:
return None
else:
start = None
end = None
for (index, value) in self.data:
if index == target_index:
return value
else:
if index <= target_index:
start = (index, value)
elif target_index < index:
end = (index, value)
break
index_delta = end[0] - start[0]
percent = (target_index - start[0]) / index_delta
value_delta = end[1] - start[1]
return start[1] + value_delta * percent
|
Create class to interpolate values between indexes
|
Create class to interpolate values between indexes
|
Python
|
mit
|
thelonious/g2x,gizmo-cda/g2x,gizmo-cda/g2x,thelonious/g2x,gizmo-cda/g2x,gizmo-cda/g2x
|
Create class to interpolate values between indexes
|
class Interpolator:
def __init__(self):
self.data = []
def addIndexValue(self, index, value):
self.data.append((index, value))
def valueAtIndex(self, target_index):
if target_index < self.data[0][0]:
return None
elif self.data[-1][0] < target_index:
return None
else:
start = None
end = None
for (index, value) in self.data:
if index == target_index:
return value
else:
if index <= target_index:
start = (index, value)
elif target_index < index:
end = (index, value)
break
index_delta = end[0] - start[0]
percent = (target_index - start[0]) / index_delta
value_delta = end[1] - start[1]
return start[1] + value_delta * percent
|
<commit_before><commit_msg>Create class to interpolate values between indexes<commit_after>
|
class Interpolator:
def __init__(self):
self.data = []
def addIndexValue(self, index, value):
self.data.append((index, value))
def valueAtIndex(self, target_index):
if target_index < self.data[0][0]:
return None
elif self.data[-1][0] < target_index:
return None
else:
start = None
end = None
for (index, value) in self.data:
if index == target_index:
return value
else:
if index <= target_index:
start = (index, value)
elif target_index < index:
end = (index, value)
break
index_delta = end[0] - start[0]
percent = (target_index - start[0]) / index_delta
value_delta = end[1] - start[1]
return start[1] + value_delta * percent
|
Create class to interpolate values between indexesclass Interpolator:
def __init__(self):
self.data = []
def addIndexValue(self, index, value):
self.data.append((index, value))
def valueAtIndex(self, target_index):
if target_index < self.data[0][0]:
return None
elif self.data[-1][0] < target_index:
return None
else:
start = None
end = None
for (index, value) in self.data:
if index == target_index:
return value
else:
if index <= target_index:
start = (index, value)
elif target_index < index:
end = (index, value)
break
index_delta = end[0] - start[0]
percent = (target_index - start[0]) / index_delta
value_delta = end[1] - start[1]
return start[1] + value_delta * percent
|
<commit_before><commit_msg>Create class to interpolate values between indexes<commit_after>class Interpolator:
def __init__(self):
self.data = []
def addIndexValue(self, index, value):
self.data.append((index, value))
def valueAtIndex(self, target_index):
if target_index < self.data[0][0]:
return None
elif self.data[-1][0] < target_index:
return None
else:
start = None
end = None
for (index, value) in self.data:
if index == target_index:
return value
else:
if index <= target_index:
start = (index, value)
elif target_index < index:
end = (index, value)
break
index_delta = end[0] - start[0]
percent = (target_index - start[0]) / index_delta
value_delta = end[1] - start[1]
return start[1] + value_delta * percent
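A usage sketch for the class above; note that valueAtIndex scans linearly and assumes index/value pairs were added in ascending index order:
```python
interp = Interpolator()
interp.addIndexValue(0, 0.0)
interp.addIndexValue(10, 100.0)
print(interp.valueAtIndex(2.5))   # 25.0  -- linear interpolation
print(interp.valueAtIndex(10))    # 100.0 -- exact match
print(interp.valueAtIndex(-1))    # None  -- below the known range
```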
|
|
36781fb1b04a3d2fd3162ea88969244faab22a60
|
open511/utils/postgis.py
|
open511/utils/postgis.py
|
from django.db import connection
def gml_to_ewkt(gml_string, force_2D=False):
cursor = connection.cursor()
if force_2D:
sql = 'SELECT ST_AsEWKT(ST_Force_2D(ST_GeomFromGML(%s)))'
else:
sql = 'SELECT ST_AsEWKT(ST_GeomFromGML(%s))'
cursor.execute(sql, [gml_string])
return cursor.fetchone()[0]
|
Convert GML to EWKT, via PostGIS
|
Convert GML to EWKT, via PostGIS
|
Python
|
mit
|
Open511/open511-server,Open511/open511-server,Open511/open511-server
|
Convert GML to EWKT, via PostGIS
|
from django.db import connection
def gml_to_ewkt(gml_string, force_2D=False):
cursor = connection.cursor()
if force_2D:
sql = 'SELECT ST_AsEWKT(ST_Force_2D(ST_GeomFromGML(%s)))'
else:
sql = 'SELECT ST_AsEWKT(ST_GeomFromGML(%s))'
cursor.execute(sql, [gml_string])
return cursor.fetchone()[0]
|
<commit_before><commit_msg>Convert GML to EWKT, via PostGIS<commit_after>
|
from django.db import connection
def gml_to_ewkt(gml_string, force_2D=False):
cursor = connection.cursor()
if force_2D:
sql = 'SELECT ST_AsEWKT(ST_Force_2D(ST_GeomFromGML(%s)))'
else:
sql = 'SELECT ST_AsEWKT(ST_GeomFromGML(%s))'
cursor.execute(sql, [gml_string])
return cursor.fetchone()[0]
|
Convert GML to EWKT, via PostGISfrom django.db import connection
def gml_to_ewkt(gml_string, force_2D=False):
cursor = connection.cursor()
if force_2D:
sql = 'SELECT ST_AsEWKT(ST_Force_2D(ST_GeomFromGML(%s)))'
else:
sql = 'SELECT ST_AsEWKT(ST_GeomFromGML(%s))'
cursor.execute(sql, [gml_string])
return cursor.fetchone()[0]
|
<commit_before><commit_msg>Convert GML to EWKT, via PostGIS<commit_after>from django.db import connection
def gml_to_ewkt(gml_string, force_2D=False):
cursor = connection.cursor()
if force_2D:
sql = 'SELECT ST_AsEWKT(ST_Force_2D(ST_GeomFromGML(%s)))'
else:
sql = 'SELECT ST_AsEWKT(ST_GeomFromGML(%s))'
cursor.execute(sql, [gml_string])
return cursor.fetchone()[0]
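For illustration, a hedged call sketch; it needs a Django connection to a PostGIS database, and both the GML snippet and the EWKT shown are assumptions:
```python
gml = '<gml:Point srsName="EPSG:4326"><gml:pos>45.5 -73.5</gml:pos></gml:Point>'
print(gml_to_ewkt(gml))                 # e.g. 'SRID=4326;POINT(45.5 -73.5)'
print(gml_to_ewkt(gml, force_2D=True))  # drops any Z coordinate via ST_Force_2D
```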
|
|
fcc92760db0d1dc56aca70aff69b34a29c9e8e6c
|
tests/test_util.py
|
tests/test_util.py
|
from lib import util
def test_cachedproperty():
class Target:
def __init__(self):
self.call_count = 0
@util.cachedproperty
def prop(self):
self.call_count += 1
return self.call_count
t = Target()
assert t.prop == t.prop == 1
def test_deep_getsizeof():
int_t = util.deep_getsizeof(1)
assert util.deep_getsizeof([1, 1]) > 2 * int_t
assert util.deep_getsizeof({1: 1}) > 2 * int_t
assert util.deep_getsizeof({1: {1: 1}}) > 3 * int_t
class Base:
pass
class A(Base):
pass
class B(Base):
pass
def test_subclasses():
assert util.subclasses(Base) == [A, B]
def test_chunks():
assert list(util.chunks([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]
def test_increment_byte_string():
assert util.increment_byte_string(b'1') == b'2'
assert util.increment_byte_string(b'\x01\x01') == b'\x01\x02'
assert util.increment_byte_string(b'\xff\xff') == b'\x01\x00\x00'
|
Add unit tests for the methods in util
|
Add unit tests for the methods in util
|
Python
|
mit
|
shsmith/electrumx,thelazier/electrumx,Groestlcoin/electrumx-grs,erasmospunk/electrumx,shsmith/electrumx,bauerj/electrumx,thelazier/electrumx,erasmospunk/electrumx,Crowndev/electrumx,Groestlcoin/electrumx-grs,bauerj/electrumx,Crowndev/electrumx
|
Add unit tests for the methods in util
|
from lib import util
def test_cachedproperty():
class Target:
def __init__(self):
self.call_count = 0
@util.cachedproperty
def prop(self):
self.call_count += 1
return self.call_count
t = Target()
assert t.prop == t.prop == 1
def test_deep_getsizeof():
int_t = util.deep_getsizeof(1)
assert util.deep_getsizeof([1, 1]) > 2 * int_t
assert util.deep_getsizeof({1: 1}) > 2 * int_t
assert util.deep_getsizeof({1: {1: 1}}) > 3 * int_t
class Base:
pass
class A(Base):
pass
class B(Base):
pass
def test_subclasses():
assert util.subclasses(Base) == [A, B]
def test_chunks():
assert list(util.chunks([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]
def test_increment_byte_string():
assert util.increment_byte_string(b'1') == b'2'
assert util.increment_byte_string(b'\x01\x01') == b'\x01\x02'
assert util.increment_byte_string(b'\xff\xff') == b'\x01\x00\x00'
|
<commit_before><commit_msg>Add unit tests for the methods in util<commit_after>
|
from lib import util
def test_cachedproperty():
class Target:
def __init__(self):
self.call_count = 0
@util.cachedproperty
def prop(self):
self.call_count += 1
return self.call_count
t = Target()
assert t.prop == t.prop == 1
def test_deep_getsizeof():
int_t = util.deep_getsizeof(1)
assert util.deep_getsizeof([1, 1]) > 2 * int_t
assert util.deep_getsizeof({1: 1}) > 2 * int_t
assert util.deep_getsizeof({1: {1: 1}}) > 3 * int_t
class Base:
pass
class A(Base):
pass
class B(Base):
pass
def test_subclasses():
assert util.subclasses(Base) == [A, B]
def test_chunks():
assert list(util.chunks([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]
def test_increment_byte_string():
assert util.increment_byte_string(b'1') == b'2'
assert util.increment_byte_string(b'\x01\x01') == b'\x01\x02'
assert util.increment_byte_string(b'\xff\xff') == b'\x01\x00\x00'
|
Add unit tests for the methods in utilfrom lib import util
def test_cachedproperty():
class Target:
def __init__(self):
self.call_count = 0
@util.cachedproperty
def prop(self):
self.call_count += 1
return self.call_count
t = Target()
assert t.prop == t.prop == 1
def test_deep_getsizeof():
int_t = util.deep_getsizeof(1)
assert util.deep_getsizeof([1, 1]) > 2 * int_t
assert util.deep_getsizeof({1: 1}) > 2 * int_t
assert util.deep_getsizeof({1: {1: 1}}) > 3 * int_t
class Base:
pass
class A(Base):
pass
class B(Base):
pass
def test_subclasses():
assert util.subclasses(Base) == [A, B]
def test_chunks():
assert list(util.chunks([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]
def test_increment_byte_string():
assert util.increment_byte_string(b'1') == b'2'
assert util.increment_byte_string(b'\x01\x01') == b'\x01\x02'
assert util.increment_byte_string(b'\xff\xff') == b'\x01\x00\x00'
|
<commit_before><commit_msg>Add unit tests for the methods in util<commit_after>from lib import util
def test_cachedproperty():
class Target:
def __init__(self):
self.call_count = 0
@util.cachedproperty
def prop(self):
self.call_count += 1
return self.call_count
t = Target()
assert t.prop == t.prop == 1
def test_deep_getsizeof():
int_t = util.deep_getsizeof(1)
assert util.deep_getsizeof([1, 1]) > 2 * int_t
assert util.deep_getsizeof({1: 1}) > 2 * int_t
assert util.deep_getsizeof({1: {1: 1}}) > 3 * int_t
class Base:
pass
class A(Base):
pass
class B(Base):
pass
def test_subclasses():
assert util.subclasses(Base) == [A, B]
def test_chunks():
assert list(util.chunks([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]
def test_increment_byte_string():
assert util.increment_byte_string(b'1') == b'2'
assert util.increment_byte_string(b'\x01\x01') == b'\x01\x02'
assert util.increment_byte_string(b'\xff\xff') == b'\x01\x00\x00'
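As a reading aid, a hedged sketch of an increment_byte_string consistent with the three assertions above; lib.util's real implementation may differ:
```python
def increment_byte_string(b):
    # Treat the bytes as a big-endian integer, add one, and grow by a
    # byte on overflow (b'\xff\xff' -> b'\x01\x00\x00').
    n = int.from_bytes(b, 'big') + 1
    length = max(len(b), (n.bit_length() + 7) // 8)
    return n.to_bytes(length, 'big')
```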
|
|
6e805995a165f923c1c4f71c163c64a245f9a3d5
|
setup.py
|
setup.py
|
from distutils.core import setup
setup(name='dimreducer',
version='1.0',
description='Dimension reduction methods',
py_modules=['dimreducer'],
)
setup(name='multiphenotype_utils',
version='1.0',
description='Utility functions for all methods',
py_modules=['multiphenotype_utils'],
)
setup(name='general_autoencoder',
version='1.0',
description='Autoencoder base class',
py_modules=['general_autoencoder'],
)
setup(name='standard_autoencoder',
version='1.0',
description='Standard autoencoder',
py_modules=['standard_autoencoder'],
)
setup(name='variational_autoencoder',
version='1.0',
description='VAE',
py_modules=['variational_autoencoder'],
)
setup(name='variational_age_autoencoder',
version='1.0',
description='VAE with age',
py_modules=['variational_age_autoencoder'],
)
|
Add simple distutils script for modules
|
Add simple distutils script for modules
|
Python
|
mit
|
epierson9/multiphenotype_methods
|
Add simple distutils script for modules
|
from distutils.core import setup
setup(name='dimreducer',
version='1.0',
description='Dimension reduction methods',
py_modules=['dimreducer'],
)
setup(name='multiphenotype_utils',
version='1.0',
description='Utility functions for all methods',
py_modules=['multiphenotype_utils'],
)
setup(name='general_autoencoder',
version='1.0',
description='Autoencoder base class',
py_modules=['general_autoencoder'],
)
setup(name='standard_autoencoder',
version='1.0',
description='Standard autoencoder',
py_modules=['standard_autoencoder'],
)
setup(name='variational_autoencoder',
version='1.0',
description='VAE',
py_modules=['variational_autoencoder'],
)
setup(name='variational_age_autoencoder',
version='1.0',
description='VAE with age',
py_modules=['variational_age_autoencoder'],
)
|
<commit_before><commit_msg>Add simple distutils script for modules<commit_after>
|
from distutils.core import setup
setup(name='dimreducer',
version='1.0',
description='Dimension reduction methods',
py_modules=['dimreducer'],
)
setup(name='multiphenotype_utils',
version='1.0',
description='Utility functions for all methods',
py_modules=['multiphenotype_utils'],
)
setup(name='general_autoencoder',
version='1.0',
description='Autoencoder base class',
py_modules=['general_autoencoder'],
)
setup(name='standard_autoencoder',
version='1.0',
description='Standard autoencoder',
py_modules=['standard_autoencoder'],
)
setup(name='variational_autoencoder',
version='1.0',
description='VAE',
py_modules=['variational_autoencoder'],
)
setup(name='variational_age_autoencoder',
version='1.0',
description='VAE with age',
py_modules=['variational_age_autoencoder'],
)
|
Add simple distutils script for modulesfrom distutils.core import setup
setup(name='dimreducer',
version='1.0',
description='Dimension reduction methods',
py_modules=['dimreducer'],
)
setup(name='multiphenotype_utils',
version='1.0',
description='Utility functions for all methods',
py_modules=['multiphenotype_utils'],
)
setup(name='general_autoencoder',
version='1.0',
description='Autoencoder base class',
py_modules=['general_autoencoder'],
)
setup(name='standard_autoencoder',
version='1.0',
description='Standard autoencoder',
py_modules=['standard_autoencoder'],
)
setup(name='variational_autoencoder',
version='1.0',
description='VAE',
py_modules=['variational_autoencoder'],
)
setup(name='variational_age_autoencoder',
version='1.0',
description='VAE with age',
py_modules=['variational_age_autoencoder'],
)
|
<commit_before><commit_msg>Add simple distutils script for modules<commit_after>from distutils.core import setup
setup(name='dimreducer',
version='1.0',
description='Dimension reduction methods',
py_modules=['dimreducer'],
)
setup(name='multiphenotype_utils',
version='1.0',
description='Utility functions for all methods',
py_modules=['multiphenotype_utils'],
)
setup(name='general_autoencoder',
version='1.0',
description='Autoencoder base class',
py_modules=['general_autoencoder'],
)
setup(name='standard_autoencoder',
version='1.0',
description='Standard autoencoder',
py_modules=['standard_autoencoder'],
)
setup(name='variational_autoencoder',
version='1.0',
description='VAE',
py_modules=['variational_autoencoder'],
)
setup(name='variational_age_autoencoder',
version='1.0',
description='VAE with age',
py_modules=['variational_age_autoencoder'],
)
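As a design note, distutils expects one setup() call per script; each call above re-runs the requested command. A hedged sketch of the equivalent single-call form (the distribution name is hypothetical):
```python
from distutils.core import setup

setup(
    name='multiphenotype_methods',  # hypothetical distribution name
    version='1.0',
    description='Dimension reduction methods and autoencoders',
    py_modules=[
        'dimreducer', 'multiphenotype_utils', 'general_autoencoder',
        'standard_autoencoder', 'variational_autoencoder',
        'variational_age_autoencoder',
    ],
)
```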
|
|
c6c6594cda35aaa15f1efb9f336548671b0028c5
|
rmake/lib/twisted_extras/tools.py
|
rmake/lib/twisted_extras/tools.py
|
#
# Copyright (c) rPath, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from twisted.internet import defer
class Serializer(object):
def __init__(self):
self._lock = defer.DeferredLock()
self._waiting = {}
def call(self, func, collapsible=False):
d = self._lock.acquire()
self._waiting[d] = collapsible
@d.addCallback
def _locked(_):
if collapsible and len(self._waiting) > 1:
# Superseded
return
return func()
@d.addBoth
def _unlock(result):
del self._waiting[d]  # drop our entry before releasing, so the next
self._lock.release()  # waiter sees an accurate count of queued calls
return result
return d
|
Add generic serializer tool for plugins to use
|
Add generic serializer tool for plugins to use
|
Python
|
apache-2.0
|
sassoftware/rmake3,sassoftware/rmake3,sassoftware/rmake3
|
Add generic serializer tool for plugins to use
|
#
# Copyright (c) rPath, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from twisted.internet import defer
class Serializer(object):
def __init__(self):
self._lock = defer.DeferredLock()
self._waiting = {}
def call(self, func, collapsible=False):
d = self._lock.acquire()
self._waiting[d] = collapsible
@d.addCallback
def _locked(_):
if collapsible and len(self._waiting) > 1:
# Superseded
return
return func()
@d.addBoth
def _unlock(result):
del self._waiting[d]  # drop our entry before releasing, so the next
self._lock.release()  # waiter sees an accurate count of queued calls
return result
return d
|
<commit_before><commit_msg>Add generic serializer tool for plugins to use<commit_after>
|
#
# Copyright (c) rPath, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from twisted.internet import defer
class Serializer(object):
def __init__(self):
self._lock = defer.DeferredLock()
self._waiting = {}
def call(self, func, collapsible=False):
d = self._lock.acquire()
self._waiting[d] = collapsible
@d.addCallback
def _locked(_):
if collapsible and len(self._waiting) > 1:
# Superseded
return
return func()
@d.addBoth
def _unlock(result):
del self._waiting[d]  # drop our entry before releasing, so the next
self._lock.release()  # waiter sees an accurate count of queued calls
return result
return d
|
Add generic serializer tool for plugins to use#
# Copyright (c) rPath, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from twisted.internet import defer
class Serializer(object):
def __init__(self):
self._lock = defer.DeferredLock()
self._waiting = {}
def call(self, func, collapsible=False):
d = self._lock.acquire()
self._waiting[d] = collapsible
@d.addCallback
def _locked(_):
if collapsible and len(self._waiting) > 1:
# Superseded
return
return func()
@d.addBoth
def _unlock(result):
del self._waiting[d]  # drop our entry before releasing, so the next
self._lock.release()  # waiter sees an accurate count of queued calls
return result
return d
|
<commit_before><commit_msg>Add generic serializer tool for plugins to use<commit_after>#
# Copyright (c) rPath, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from twisted.internet import defer
class Serializer(object):
def __init__(self):
self._lock = defer.DeferredLock()
self._waiting = {}
def call(self, func, collapsible=False):
d = self._lock.acquire()
self._waiting[d] = collapsible
@d.addCallback
def _locked(_):
if collapsible and len(self._waiting) > 1:
# Superseded
return
return func()
@d.addBoth
def _unlock(result):
del self._waiting[d]  # drop our entry before releasing, so the next
self._lock.release()  # waiter sees an accurate count of queued calls
return result
return d
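A hedged usage sketch; it needs a running Twisted reactor, and do_work/refresh are hypothetical callables that may return Deferreds:
```python
s = Serializer()
s.call(do_work)                    # runs first, holding the lock
s.call(refresh, collapsible=True)  # skipped: a newer call queues behind it
s.call(refresh, collapsible=True)  # runs once the lock is free
```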
|
|
1d25676049994db266129b1a1c98cec3acbba0ca
|
goodtablesio/models/subscription.py
|
goodtablesio/models/subscription.py
|
import logging
import datetime
from sqlalchemy import (
Column, Unicode, DateTime, Boolean, ForeignKey)
from sqlalchemy.orm import relationship
from goodtablesio.models.base import Base, BaseModelMixin, make_uuid
log = logging.getLogger(__name__)
class Subscription(Base, BaseModelMixin):
__tablename__ = 'subscriptions'
id = Column(Unicode, primary_key=True, default=make_uuid)
plan_id = Column(Unicode, ForeignKey('plans.id'))
user_id = Column(Unicode, ForeignKey('users.id'))
active = Column(Boolean, default=True)
started = Column(DateTime(timezone=True), default=datetime.datetime.utcnow)
expires = Column(DateTime(timezone=True))
finished = Column(DateTime(timezone=True))
plan = relationship(
'Plan', primaryjoin='Subscription.plan_id == Plan.id')
|
Add missing file on last merge
|
Add missing file on last merge
|
Python
|
agpl-3.0
|
frictionlessdata/goodtables.io,frictionlessdata/goodtables.io,frictionlessdata/goodtables.io,frictionlessdata/goodtables.io
|
Add missing file on last merge
|
import logging
import datetime
from sqlalchemy import (
Column, Unicode, DateTime, Boolean, ForeignKey)
from sqlalchemy.orm import relationship
from goodtablesio.models.base import Base, BaseModelMixin, make_uuid
log = logging.getLogger(__name__)
class Subscription(Base, BaseModelMixin):
__tablename__ = 'subscriptions'
id = Column(Unicode, primary_key=True, default=make_uuid)
plan_id = Column(Unicode, ForeignKey('plans.id'))
user_id = Column(Unicode, ForeignKey('users.id'))
active = Column(Boolean, default=True)
started = Column(DateTime(timezone=True), default=datetime.datetime.utcnow)
expires = Column(DateTime(timezone=True))
finished = Column(DateTime(timezone=True))
plan = relationship(
'Plan', primaryjoin='Subscription.plan_id == Plan.id')
|
<commit_before><commit_msg>Add missing file on last merge<commit_after>
|
import logging
import datetime
from sqlalchemy import (
Column, Unicode, DateTime, Boolean, ForeignKey)
from sqlalchemy.orm import relationship
from goodtablesio.models.base import Base, BaseModelMixin, make_uuid
log = logging.getLogger(__name__)
class Subscription(Base, BaseModelMixin):
__tablename__ = 'subscriptions'
id = Column(Unicode, primary_key=True, default=make_uuid)
plan_id = Column(Unicode, ForeignKey('plans.id'))
user_id = Column(Unicode, ForeignKey('users.id'))
active = Column(Boolean, default=True)
started = Column(DateTime(timezone=True), default=datetime.datetime.utcnow)
expires = Column(DateTime(timezone=True))
finished = Column(DateTime(timezone=True))
plan = relationship(
'Plan', primaryjoin='Subscription.plan_id == Plan.id')
|
Add missing file on last mergeimport logging
import datetime
from sqlalchemy import (
Column, Unicode, DateTime, Boolean, ForeignKey)
from sqlalchemy.orm import relationship
from goodtablesio.models.base import Base, BaseModelMixin, make_uuid
log = logging.getLogger(__name__)
class Subscription(Base, BaseModelMixin):
__tablename__ = 'subscriptions'
id = Column(Unicode, primary_key=True, default=make_uuid)
plan_id = Column(Unicode, ForeignKey('plans.id'))
user_id = Column(Unicode, ForeignKey('users.id'))
active = Column(Boolean, default=True)
started = Column(DateTime(timezone=True), default=datetime.datetime.utcnow)
expires = Column(DateTime(timezone=True))
finished = Column(DateTime(timezone=True))
plan = relationship(
'Plan', primaryjoin='Subscription.plan_id == Plan.id')
|
<commit_before><commit_msg>Add missing file on last merge<commit_after>import logging
import datetime
from sqlalchemy import (
Column, Unicode, DateTime, Boolean, ForeignKey)
from sqlalchemy.orm import relationship
from goodtablesio.models.base import Base, BaseModelMixin, make_uuid
log = logging.getLogger(__name__)
class Subscription(Base, BaseModelMixin):
__tablename__ = 'subscriptions'
id = Column(Unicode, primary_key=True, default=make_uuid)
plan_id = Column(Unicode, ForeignKey('plans.id'))
user_id = Column(Unicode, ForeignKey('users.id'))
active = Column(Boolean, default=True)
started = Column(DateTime(timezone=True), default=datetime.datetime.utcnow)
expires = Column(DateTime(timezone=True))
finished = Column(DateTime(timezone=True))
plan = relationship(
'Plan', primaryjoin='Subscription.plan_id == Plan.id')
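For illustration, a hedged query sketch against this model; `session` and `user` are assumed to come from the surrounding application:
```python
active_sub = (
    session.query(Subscription)
    .filter_by(user_id=user.id, active=True)
    .first()
)
plan = active_sub.plan if active_sub else None  # via the Plan relationship
```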
|
|
5f051f2ae1b105d6cc58d1cac760cb5d20908c3b
|
valai/translate.py
|
valai/translate.py
|
# * coding: utf8 *
#
# (C) 2020 Muthiah Annamalai <ezhillang@gmail.com>
#
# Uses the IIT-Bombay service on the web.
#
import json
import requests
from urllib.parse import quote
from functools import lru_cache
@lru_cache(maxsize=1024)
def en2ta(text):
"""translate from English to Tamil"""
return IITB_translator('en', 'ta', text)
@lru_cache(maxsize=1024)
def ta2en(text):
"""translate from Tamil to English"""
return IITB_translator('ta', 'en', text)
def IITB_translator(src_lang,dest_lang,_text):
text = quote(_text)
URLFMT = 'http://www.cfilt.iitb.ac.in/indicnlpweb/indicnlpws/translate/{0}/{1}/{2}/'
url = URLFMT.format(src_lang.lower(),dest_lang.lower(),text)
response = requests.get(url)
return response.json()[dest_lang.lower()]
if __name__ == "__main__":
print(ta2en('கவிதை மிக அழகாக இருக்கிறது'))
print(en2ta('world is not flat'))
|
Support rudimentary translation service from IIT Bombay via web API.
|
Support rudimentary translation service from IIT Bombay
via web API.
me: கவிதை மிக அழகாக இருக்கிறது
it: கவிதை of the most beautiful
me: good morning
it: நல்ல காலை
me: world is not flat
it: உலக சமதளமான கிடைக்கிறது.
|
Python
|
mit
|
Ezhil-Language-Foundation/open-tamil,Ezhil-Language-Foundation/open-tamil,Ezhil-Language-Foundation/open-tamil,arcturusannamalai/open-tamil,arcturusannamalai/open-tamil,arcturusannamalai/open-tamil,arcturusannamalai/open-tamil,Ezhil-Language-Foundation/open-tamil,arcturusannamalai/open-tamil,arcturusannamalai/open-tamil,Ezhil-Language-Foundation/open-tamil,arcturusannamalai/open-tamil,Ezhil-Language-Foundation/open-tamil,Ezhil-Language-Foundation/open-tamil,Ezhil-Language-Foundation/open-tamil,Ezhil-Language-Foundation/open-tamil
|
Support rudimentary translation service from IIT Bombay
via web API.
me: கவிதை மிக அழகாக இருக்கிறது
it: கவிதை of the most beautiful
me: good morning
it: நல்ல காலை
me: world is not flat
it: உலக சமதளமான கிடைக்கிறது.
|
# * coding: utf8 *
#
# (C) 2020 Muthiah Annamalai <ezhillang@gmail.com>
#
# Uses the IIT-Bombay service on the web.
#
import json
import requests
from urllib.parse import quote
from functools import lru_cache
@lru_cache(maxsize=1024)
def en2ta(text):
"""translate from English to Tamil"""
return IITB_translator('en', 'ta', text)
@lru_cache(maxsize=1024)
def ta2en(text):
"""translate from Tamil to English"""
return IITB_translator('ta', 'en', text)
def IITB_translator(src_lang,dest_lang,_text):
text = quote(_text)
URLFMT = 'http://www.cfilt.iitb.ac.in/indicnlpweb/indicnlpws/translate/{0}/{1}/{2}/'
url = URLFMT.format(src_lang.lower(),dest_lang.lower(),text)
response = requests.get(url)
return response.json()[dest_lang.lower()]
if __name__ == "__main__":
print(ta2en('கவிதை மிக அழகாக இருக்கிறது'))
print(en2ta('world is not flat'))
|
<commit_before><commit_msg>Support rudimentary translation service from IIT Bombay
via web API.
me: கவிதை மிக அழகாக இருக்கிறது
it: கவிதை of the most beautiful
me: good morning
it: நல்ல காலை
me: world is not flat
it: உலக சமதளமான கிடைக்கிறது.<commit_after>
|
# * coding: utf8 *
#
# (C) 2020 Muthiah Annamalai <ezhillang@gmail.com>
#
# Uses the IIT-Bombay service on the web.
#
import json
import requests
from urllib.parse import quote
from functools import lru_cache
@lru_cache(maxsize=1024)
def en2ta(text):
"""translate from English to Tamil"""
return IITB_translator('en', 'ta', text)
@lru_cache(maxsize=1024)
def ta2en(text):
"""translate from Tamil to English"""
return IITB_translator('ta', 'en', text)
def IITB_translator(src_lang,dest_lang,_text):
text = quote(_text)
URLFMT = 'http://www.cfilt.iitb.ac.in/indicnlpweb/indicnlpws/translate/{0}/{1}/{2}/'
url = URLFMT.format(src_lang.lower(),dest_lang.lower(),text)
response = requests.get(url)
return response.json()[dest_lang.lower()]
if __name__ == "__main__":
print(ta2en('கவிதை மிக அழகாக இருக்கிறது'))
print(en2ta('world is not flat'))
|
Support rudimentary translation service from IIT Bombay
via web API.
me: கவிதை மிக அழகாக இருக்கிறது
it: கவிதை of the most beautiful
me: good morning
it: நல்ல காலை
me: world is not flat
it: உலக சமதளமான கிடைக்கிறது.# * coding: utf8 *
#
# (C) 2020 Muthiah Annamalai <ezhillang@gmail.com>
#
# Uses the IIT-Bombay service on the web.
#
import json
import requests
from urllib.parse import quote
from functools import lru_cache
@lru_cache(maxsize=1024)
def en2ta(text):
"""translate from English to Tamil"""
return IITB_translator('en', 'ta', text)
@lru_cache(maxsize=1024)
def ta2en(text):
"""translate from Tamil to English"""
return IITB_translator('ta', 'en', text)
def IITB_translator(src_lang,dest_lang,_text):
text = quote(_text)
URLFMT = 'http://www.cfilt.iitb.ac.in/indicnlpweb/indicnlpws/translate/{0}/{1}/{2}/'
url = URLFMT.format(src_lang.lower(),dest_lang.lower(),text)
response = requests.get(url)
return response.json()[dest_lang.lower()]
if __name__ == "__main__":
print(ta2en('கவிதை மிக அழகாக இருக்கிறது'))
print(en2ta('world is not flat'))
|
<commit_before><commit_msg>Support rudimentary translation service from IIT Bombay
via web API.
me: கவிதை மிக அழகாக இருக்கிறது
it: கவிதை of the most beautiful
me: good morning
it: நல்ல காலை
me: world is not flat
it: உலக சமதளமான கிடைக்கிறது.<commit_after># * coding: utf8 *
#
# (C) 2020 Muthiah Annamalai <ezhillang@gmail.com>
#
# Uses the IIT-Bombay service on the web.
#
import json
import requests
from urllib.parse import quote
from functools import lru_cache
@lru_cache(maxsize=1024)
def en2ta(text):
"""translate from English to Tamil"""
return IITB_translator('en', 'ta', text)
@lru_cache(maxsize=1024)
def ta2en(text):
"""translate from Tamil to English"""
return IITB_translator('ta', 'en', text)
def IITB_translator(src_lang,dest_lang,_text):
text = quote(_text)
URLFMT = 'http://www.cfilt.iitb.ac.in/indicnlpweb/indicnlpws/translate/{0}/{1}/{2}/'
url = URLFMT.format(src_lang.lower(),dest_lang.lower(),text)
response = requests.get(url)
return response.json()[dest_lang.lower()]
if __name__ == "__main__":
print(ta2en('கவிதை மிக அழகாக இருக்கிறது'))
print(en2ta('world is not flat'))
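Because both translators are wrapped in lru_cache, repeated lookups are served locally; a brief sketch:
```python
en2ta('good morning')   # first call hits the IIT-B web service
en2ta('good morning')   # answered from the LRU cache, no network
en2ta.cache_clear()     # discard cached translations when needed
```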
|
|
27ed68923579c5afff0c70b025deb8b73d448aa8
|
indicators/migrations/0013_set_all_calculation_type_to_numeric.py
|
indicators/migrations/0013_set_all_calculation_type_to_numeric.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-07-04 09:56
from __future__ import unicode_literals
from django.db import migrations
from ..models import Indicator
def set_calculation_type(apps, schema_editor):
Indicator.objects.all().update(
calculation_type=Indicator.CALC_TYPE_NUMERIC)
class Migration(migrations.Migration):
dependencies = [
('indicators', '0012_auto_20180704_0256'),
]
operations = [
migrations.RunPython(set_calculation_type),
]
|
Set calculation type of all indicators to Number
|
Set calculation type of all indicators to Number
|
Python
|
apache-2.0
|
toladata/TolaActivity,toladata/TolaActivity,toladata/TolaActivity,toladata/TolaActivity
|
Set calculation type of all indicators to Number
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-07-04 09:56
from __future__ import unicode_literals
from django.db import migrations
from ..models import Indicator
def set_calculation_type(apps, schema_editor):
Indicator.objects.all().update(
calculation_type=Indicator.CALC_TYPE_NUMERIC)
class Migration(migrations.Migration):
dependencies = [
('indicators', '0012_auto_20180704_0256'),
]
operations = [
migrations.RunPython(set_calculation_type),
]
|
<commit_before><commit_msg>Set calculation type of all indicators to Number<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-07-04 09:56
from __future__ import unicode_literals
from django.db import migrations
from ..models import Indicator
def set_calculation_type(apps, schema_editor):
Indicator.objects.all().update(
calculation_type=Indicator.CALC_TYPE_NUMERIC)
class Migration(migrations.Migration):
dependencies = [
('indicators', '0012_auto_20180704_0256'),
]
operations = [
migrations.RunPython(set_calculation_type),
]
|
Set calculation type of all indicators to Number# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-07-04 09:56
from __future__ import unicode_literals
from django.db import migrations
from ..models import Indicator
def set_calculation_type(apps, schema_editor):
Indicator.objects.all().update(
calculation_type=Indicator.CALC_TYPE_NUMERIC)
class Migration(migrations.Migration):
dependencies = [
('indicators', '0012_auto_20180704_0256'),
]
operations = [
migrations.RunPython(set_calculation_type),
]
|
<commit_before><commit_msg>Set calculation type of all indicators to Number<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-07-04 09:56
from __future__ import unicode_literals
from django.db import migrations
from ..models import Indicator
def set_calculation_type(apps, schema_editor):
Indicator.objects.all().update(
calculation_type=Indicator.CALC_TYPE_NUMERIC)
class Migration(migrations.Migration):
dependencies = [
('indicators', '0012_auto_20180704_0256'),
]
operations = [
migrations.RunPython(set_calculation_type),
]
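A hedged variant for comparison: Django convention fetches the historical model via apps.get_model (the live Indicator import above can break once the model changes) and makes RunPython reversible; the calculation-type literal is an assumption:
```python
def set_calculation_type(apps, schema_editor):
    Indicator = apps.get_model('indicators', 'Indicator')  # historical model
    Indicator.objects.all().update(calculation_type='numeric')  # assumed value

class Migration(migrations.Migration):
    dependencies = [('indicators', '0012_auto_20180704_0256')]
    operations = [
        migrations.RunPython(set_calculation_type,
                             reverse_code=migrations.RunPython.noop),
    ]
```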
|
|
834516acf7b5cfbbb0f728f8b725bea120b5f5b3
|
post_receive.py
|
post_receive.py
|
import re
import os
import sys
import json
from subprocess import Popen, PIPE
from httplib2 import Http
postURL = "http://localhost:2069/json"
pwd = os.getcwd()
if len(sys.argv) <= 3:
print("Usage: post-receive [old] [new] [ref]")
exit()
old, new, ref = sys.argv[1:4]
m = re.match(r"^.*/([^/]+)$", pwd)
if not m:
print("Could not figure out which project this is :(", project)
exit()
project = m.group(1)
print("Posting commit message for project " + project)
process = Popen(["git", "show", "--name-only", new], stdout=PIPE)
#process = Popen(["ls", "-la"], stdout=PIPE)
exit_code = os.waitpid(process.pid, 0)
output = process.communicate()[0]
output = """
Author: Humbedooh <humbedooh@apache.org>
Stuffs: Mooo
Log message goes here
"""
commit = {'ref': ref, 'repository': "git", 'hash': new, 'project': project}
headers, commit['log'] = output.split("\n\n", 2)
parsed = dict(re.findall(r"(?P<name>[^:\n]+): (?P<value>[^\r\n]+)", headers))
author = re.match(r"^(.+) <(.+)>$", parsed.get("Author", "?? <??@??>"))
if author:
commit['author'] = author.group(1)
commit['email'] = author.group(2)
else:
commit['author'] = "Unknown"
commit['email'] = "unknown@unknown"
data = json.dumps(commit) + "\n\n"
print(data)
Http().request(postURL, "PUT", data)
|
Add python version of the post-receive hook
|
Add python version of the post-receive hook
|
Python
|
apache-2.0
|
Humbedooh/gitpubsub
|
Add python version of the post-receive hook
|
import re
import os
import sys
import json
from subprocess import Popen, PIPE
from httplib2 import Http
postURL = "http://localhost:2069/json"
pwd = os.getcwd()
if len(sys.argv) <= 3:
print("Usage: post-receive [old] [new] [ref]")
exit()
old, new, ref = sys.argv[1:4]
m = re.match(r"^.*/([^/]+)$", pwd)
if not m:
print("Could not figure out which project this is :(", project)
exit()
project = m.group(1)
print("Posting commit message for project " + project)
process = Popen(["git", "show", "--name-only", new], stdout=PIPE)
#process = Popen(["ls", "-la"], stdout=PIPE)
exit_code = os.waitpid(process.pid, 0)
output = process.communicate()[0]
output = """
Author: Humbedooh <humbedooh@apache.org>
Stuffs: Mooo
Log message goes here
"""
commit = {'ref': ref, 'repository': "git", 'hash': new, 'project': project}
headers, commit['log'] = output.split("\n\n", 2)
parsed = dict(re.findall(r"(?P<name>[^:\n]+): (?P<value>[^\r\n]+)", headers))
author = re.match(r"^(.+) <(.+)>$", parsed.get("Author", "?? <??@??>"))
if author:
commit['author'] = author.group(1)
commit['email'] = author.group(2)
else:
commit['author'] = "Unknown"
commit['email'] = "unknown@unknown"
data = json.dumps(commit) + "\n\n"
print(data)
Http().request(postURL, "PUT", data)
|
<commit_before><commit_msg>Add python version of the post-receive hook<commit_after>
|
import re
import os
import sys
import json
from subprocess import Popen, PIPE
from httplib2 import Http
postURL = "http://localhost:2069/json"
pwd = os.getcwd()
if len(sys.argv) <= 3:
print("Usage: post-receive [old] [new] [ref]")
exit()
old, new, ref = sys.argv[1:4]
m = re.match(r"^.*/([^/]+)$", pwd)
if not m:
print("Could not figure out which project this is :(", project)
exit()
project = m.group(1)
print("Posting commit message for project " + project)
process = Popen(["git", "show", "--name-only", new], stdout=PIPE)
#process = Popen(["ls", "-la"], stdout=PIPE)
exit_code = os.waitpid(process.pid, 0)
output = process.communicate()[0]
output = """
Author: Humbedooh <humbedooh@apache.org>
Stuffs: Mooo
Log message goes here
"""
commit = {'ref': ref, 'repository': "git", 'hash': new, 'project': project}
headers, commit['log'] = output.split("\n\n", 2)
parsed = dict(re.findall(r"(?P<name>[^:\n]+): (?P<value>[^\r\n]+)", headers))
author = re.match(r"^(.+) <(.+)>$", parsed.get("Author", "?? <??@??>"))
if author:
commit['author'] = author.group(1)
commit['email'] = author.group(2)
else:
commit['author'] = "Unknown"
commit['email'] = "unknown@unknown"
data = json.dumps(commit) + "\n\n"
print(data)
Http().request(postURL, "PUT", data)
|
Add python version of the post-receive hookimport re
import os
import sys
import json
from subprocess import Popen, PIPE
from httplib2 import Http
postURL = "http://localhost:2069/json"
pwd = os.getcwd()
if len(sys.argv) <= 3:
print("Usage: post-receive [old] [new] [ref]")
exit()
old, new, ref = sys.argv[1:4]
m = re.match(r"^.*/([^/]+)$", pwd)
if not m:
print("Could not figure out which project this is :(", project)
exit()
project = m.group(1)
print("Posting commit message for project " + project)
process = Popen(["git", "show", "--name-only", new], stdout=PIPE)
#process = Popen(["ls", "-la"], stdout=PIPE)
exit_code = os.waitpid(process.pid, 0)
output = process.communicate()[0]
output = """
Author: Humbedooh <humbedooh@apache.org>
Stuffs: Mooo
Log message goes here
"""
commit = {'ref': ref, 'repository': "git", 'hash': new, 'project': project}
headers, commit['log'] = output.split("\n\n", 2)
parsed = dict(re.findall(r"(?P<name>[^:\n]+): (?P<value>[^\r\n]+)", headers))
author = re.match(r"^(.+) <(.+)>$", parsed.get("Author", "?? <??@??>"))
if author:
commit['author'] = author.group(1)
commit['email'] = author.group(2)
else:
commit['author'] = "Unknown"
commit['email'] = "unknown@unknown"
data = json.dumps(commit) + "\n\n"
print(data)
Http().request(postURL, "PUT", data)
|
<commit_before><commit_msg>Add python version of the post-receive hook<commit_after>import re
import os
import sys
import json
from subprocess import Popen, PIPE
from httplib2 import Http
postURL = "http://localhost:2069/json"
pwd = os.getcwd()
if len(sys.argv) <= 3:
print("Usage: post-receive [old] [new] [ref]")
exit()
old, new, ref = sys.argv[1:4]
m = re.match(r"^.*/([^/]+)$", pwd)
if not m:
print("Could not figure out which project this is :(", project)
exit()
project = m.group(1)
print("Posting commit message for project " + project)
process = Popen(["git", "show", "--name-only", new], stdout=PIPE)
#process = Popen(["ls", "-la"], stdout=PIPE)
exit_code = os.waitpid(process.pid, 0)
output = process.communicate()[0]
output = """
Author: Humbedooh <humbedooh@apache.org>
Stuffs: Mooo
Log message goes here
"""
commit = {'ref': ref, 'repository': "git", 'hash': new, 'project': project}
headers, commit['log'] = output.split("\n\n", 1)
parsed = dict(re.findall(r"(?P<name>[^:\n]+): (?P<value>[^\r\n]+)", headers))
author = re.match(r"^(.+) <(.+)>$", parsed.get("Author", "?? <??@??>"))
if author:
commit['author'] = author.group(1)
commit['email'] = author.group(2)
else:
commit['author'] = "Unknown"
commit['email'] = "unknown@unknown"
data = json.dumps(commit) + "\n\n"
print(data)
Http().request(postURL, "PUT", data)
|
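A quick way to sanity-check the header parsing used by this hook is to feed the same regex a hand-written sample (the sample text below is illustrative, not real repository output):

import re
sample = ("commit abc123\n"
          "Author: Jane Doe <jane@example.org>\n"
          "Date: Mon Jan 1 00:00:00 2024\n"
          "\n"
          "Fix the frobnicator\n")
headers, log = sample.split("\n\n", 1)
parsed = dict(re.findall(r"(?P<name>[^:\n]+): (?P<value>[^\r\n]+)", headers))
print(parsed["Author"])  # Jane Doe <jane@example.org>
print(log.strip())       # Fix the frobnicator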
|
c7c3ab0a4013df99b928351040f1156b07ba6767
|
tests/unit/utils/test_tokens.py
|
tests/unit/utils/test_tokens.py
|
from flask import current_app
from itsdangerous import TimedJSONWebSignatureSerializer
from flaskbb.utils.tokens import make_token, get_token_status
def test_make_token(user):
token = make_token(user, "test")
s = TimedJSONWebSignatureSerializer(current_app.config['SECRET_KEY'])
unpacked_token = s.loads(token)
assert user.id == unpacked_token["id"]
assert "test" == unpacked_token["op"]
def test_valid_token_status(user):
token = make_token(user, "valid_test")
expired, invalid, token_user = get_token_status(token, "valid_test")
assert not expired
assert not invalid
assert token_user == user
def test_token_status_with_data(user):
token = make_token(user, "test_data")
expired, invalid, token_user, data = \
get_token_status(token, "test_data", return_data=True)
assert user.id == data["id"]
assert "test_data" == data["op"]
def test_token_operation(user):
token = make_token(user, "operation_test")
expired, invalid, token_user = get_token_status(token, "invalid_op")
assert invalid
assert not expired
assert not token_user
def test_invalid_token_status(user):
token = "this-is-not-a-token"
expired, invalid, token_user, data = \
get_token_status(token, "invalid_test", return_data=True)
assert invalid
assert not expired
assert not token_user
assert data is None
def test_expired_token_status(user):
token = make_token(user, "expired_test", -1)
expired, invalid, token_user = get_token_status(token, "expired_test")
assert expired
assert not invalid
assert not token_user
|
Add some tests for the tokens
|
Add some tests for the tokens
|
Python
|
bsd-3-clause
|
realityone/flaskbb,realityone/flaskbb,realityone/flaskbb
|
Add some tests for the tokens
|
from flask import current_app
from itsdangerous import TimedJSONWebSignatureSerializer
from flaskbb.utils.tokens import make_token, get_token_status
def test_make_token(user):
token = make_token(user, "test")
s = TimedJSONWebSignatureSerializer(current_app.config['SECRET_KEY'])
unpacked_token = s.loads(token)
assert user.id == unpacked_token["id"]
assert "test" == unpacked_token["op"]
def test_valid_token_status(user):
token = make_token(user, "valid_test")
expired, invalid, token_user = get_token_status(token, "valid_test")
assert not expired
assert not invalid
assert token_user == user
def test_token_status_with_data(user):
token = make_token(user, "test_data")
expired, invalid, token_user, data = \
get_token_status(token, "test_data", return_data=True)
assert user.id == data["id"]
assert "test_data" == data["op"]
def test_token_operation(user):
token = make_token(user, "operation_test")
expired, invalid, token_user = get_token_status(token, "invalid_op")
assert invalid
assert not expired
assert not token_user
def test_invalid_token_status(user):
token = "this-is-not-a-token"
expired, invalid, token_user, data = \
get_token_status(token, "invalid_test", return_data=True)
assert invalid
assert not expired
assert not token_user
assert data is None
def test_expired_token_status(user):
token = make_token(user, "expired_test", -1)
expired, invalid, token_user = get_token_status(token, "expired_test")
assert expired
assert not invalid
assert not token_user
|
<commit_before><commit_msg>Add some tests for the tokens<commit_after>
|
from flask import current_app
from itsdangerous import TimedJSONWebSignatureSerializer
from flaskbb.utils.tokens import make_token, get_token_status
def test_make_token(user):
token = make_token(user, "test")
s = TimedJSONWebSignatureSerializer(current_app.config['SECRET_KEY'])
unpacked_token = s.loads(token)
assert user.id == unpacked_token["id"]
assert "test" == unpacked_token["op"]
def test_valid_token_status(user):
token = make_token(user, "valid_test")
expired, invalid, token_user = get_token_status(token, "valid_test")
assert not expired
assert not invalid
assert token_user == user
def test_token_status_with_data(user):
token = make_token(user, "test_data")
expired, invalid, token_user, data = \
get_token_status(token, "test_data", return_data=True)
assert user.id == data["id"]
assert "test_data" == data["op"]
def test_token_operation(user):
token = make_token(user, "operation_test")
expired, invalid, token_user = get_token_status(token, "invalid_op")
assert invalid
assert not expired
assert not token_user
def test_invalid_token_status(user):
token = "this-is-not-a-token"
expired, invalid, token_user, data = \
get_token_status(token, "invalid_test", return_data=True)
assert invalid
assert not expired
assert not token_user
assert data is None
def test_expired_token_status(user):
token = make_token(user, "expired_test", -1)
expired, invalid, token_user = get_token_status(token, "expired_test")
assert expired
assert not invalid
assert not token_user
|
Add some tests for the tokensfrom flask import current_app
from itsdangerous import TimedJSONWebSignatureSerializer
from flaskbb.utils.tokens import make_token, get_token_status
def test_make_token(user):
token = make_token(user, "test")
s = TimedJSONWebSignatureSerializer(current_app.config['SECRET_KEY'])
unpacked_token = s.loads(token)
assert user.id == unpacked_token["id"]
assert "test" == unpacked_token["op"]
def test_valid_token_status(user):
token = make_token(user, "valid_test")
expired, invalid, token_user = get_token_status(token, "valid_test")
assert not expired
assert not invalid
assert token_user == user
def test_token_status_with_data(user):
token = make_token(user, "test_data")
expired, invalid, token_user, data = \
get_token_status(token, "test_data", return_data=True)
assert user.id == data["id"]
assert "test_data" == data["op"]
def test_token_operation(user):
token = make_token(user, "operation_test")
expired, invalid, token_user = get_token_status(token, "invalid_op")
assert invalid
assert not expired
assert not token_user
def test_invalid_token_status(user):
token = "this-is-not-a-token"
expired, invalid, token_user, data = \
get_token_status(token, "invalid_test", return_data=True)
assert invalid
assert not expired
assert not token_user
assert data is None
def test_expired_token_status(user):
token = make_token(user, "expired_test", -1)
expired, invalid, token_user = get_token_status(token, "expired_test")
assert expired
assert not invalid
assert not token_user
|
<commit_before><commit_msg>Add some tests for the tokens<commit_after>from flask import current_app
from itsdangerous import TimedJSONWebSignatureSerializer
from flaskbb.utils.tokens import make_token, get_token_status
def test_make_token(user):
token = make_token(user, "test")
s = TimedJSONWebSignatureSerializer(current_app.config['SECRET_KEY'])
unpacked_token = s.loads(token)
assert user.id == unpacked_token["id"]
assert "test" == unpacked_token["op"]
def test_valid_token_status(user):
token = make_token(user, "valid_test")
expired, invalid, token_user = get_token_status(token, "valid_test")
assert not expired
assert not invalid
assert token_user == user
def test_token_status_with_data(user):
token = make_token(user, "test_data")
expired, invalid, token_user, data = \
get_token_status(token, "test_data", return_data=True)
assert user.id == data["id"]
assert "test_data" == data["op"]
def test_token_operation(user):
token = make_token(user, "operation_test")
expired, invalid, token_user = get_token_status(token, "invalid_op")
assert invalid
assert not expired
assert not token_user
def test_invalid_token_status(user):
token = "this-is-not-a-token"
expired, invalid, token_user, data = \
get_token_status(token, "invalid_test", return_data=True)
assert invalid
assert not expired
assert not token_user
assert data is None
def test_expired_token_status(user):
token = make_token(user, "expired_test", -1)
expired, invalid, token_user = get_token_status(token, "expired_test")
assert expired
assert not invalid
assert not token_user
|
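The helpers exercised by these tests are not included in the record; a minimal sketch that would satisfy the assertions (the User import path and query style are assumptions, not quoted flaskbb code) could look like:

from flask import current_app
from itsdangerous import (TimedJSONWebSignatureSerializer,
                          SignatureExpired, BadSignature)
from flaskbb.user.models import User  # assumed model location

def make_token(user, operation, expire=3600):
    # Sign the user id and the intended operation into an expiring token.
    s = TimedJSONWebSignatureSerializer(current_app.config['SECRET_KEY'], expire)
    return s.dumps({'id': user.id, 'op': operation})

def get_token_status(token, operation, return_data=False):
    # Unpack a token into (expired, invalid, user[, payload]).
    s = TimedJSONWebSignatureSerializer(current_app.config['SECRET_KEY'])
    user, data = None, None
    expired, invalid = False, False
    try:
        data = s.loads(token)
    except SignatureExpired:       # must come first: subclass of BadSignature
        expired = True
    except BadSignature:           # any other malformed or forged token
        invalid = True
    if data is not None:
        if data.get('op') == operation:
            user = User.query.get(data['id'])
        else:
            invalid = True         # valid signature, wrong operation
    if return_data:
        return expired, invalid, user, data
    return expired, invalid, user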
|
0848197b3c9ff8d09575b85b5e3a2ca1aac6f6c5
|
app/drivers/pycolator/splitmerge.py
|
app/drivers/pycolator/splitmerge.py
|
from app.drivers.basedrivers import PycolatorDriver
from app.preparation import pycolator as preparation
from app.readers import pycolator as readers
class SplitDriver(PycolatorDriver):
def __init__(self, **kwargs):
super(SplitDriver, self).__init__(**kwargs)
self.targetsuffix = kwargs.get('targetsuffix', '_target.xml')
self.decoysuffix = kwargs.get('decoysuffix', '_decoy.xml')
def prepare(self):
self.ns, self.static_xml = self.prepare_percolator_output(self.fn)
def run(self):
td = {'target': self.targetsuffix, 'decoy': self.decoysuffix}
for filter_type in ['target', 'decoy']:
self.prepare()
self.set_features(filter_type)
self.outsuffix = td[filter_type]
self.write(filter_type)
def set_features(self, filter_type):
""" Calls splitter to split percolator output into target/decoy elements.
Writes two new xml files with features. Currently only psms and
peptides. Proteins not here, since one cannot do protein inference
before having merged and remapped multifraction data anyway.
"""
elements_to_split = {'psm': self.allpsms, 'peptide': self.allpeps}
self.features = preparation.split_target_decoy(elements_to_split,
self.ns, filter_type)
class MergeDriver(PycolatorDriver):
"""Base class for merging multiple percolator fractions under different
sorts of filtering. It writes a single percolator out xml from multiple fractions.
Namespace and static xml come from first percolator file.
Make sure fractions are from same percolator run."""
outsuffix = '_merged.xml'
def __init__(self, **kwargs):
super(MergeDriver, self).__init__(**kwargs)
self.mergefiles = [self.fn]
self.mergefiles.extend(kwargs.get('multifile_input') or [])
def prepare(self):
self.ns, self.static_xml = self.prepare_percolator_output(self.fn)
def set_features(self):
""""Merge all psms and peptides"""
allpsms_str = readers.generate_psms_multiple_fractions_strings(
self.mergefiles, self.ns)
allpeps_str = readers.generate_peptides_multiple_fractions_strings(
self.mergefiles, self.ns)
self.features = {'psm': allpsms_str, 'peptide': allpeps_str}
|
Put split and merge in own module too
|
Put split and merge in own module too
|
Python
|
mit
|
glormph/msstitch
|
Put split and merge in own module too
|
from app.drivers.basedrivers import PycolatorDriver
from app.preparation import pycolator as preparation
from app.readers import pycolator as readers
class SplitDriver(PycolatorDriver):
def __init__(self, **kwargs):
super(SplitDriver, self).__init__(**kwargs)
self.targetsuffix = kwargs.get('targetsuffix', '_target.xml')
self.decoysuffix = kwargs.get('decoysuffix', '_decoy.xml')
def prepare(self):
self.ns, self.static_xml = self.prepare_percolator_output(self.fn)
def run(self):
td = {'target': self.targetsuffix, 'decoy': self.decoysuffix}
for filter_type in ['target', 'decoy']:
self.prepare()
self.set_features(filter_type)
self.outsuffix = td[filter_type]
self.write(filter_type)
def set_features(self, filter_type):
""" Calls splitter to split percolator output into target/decoy elements.
Writes two new xml files with features. Currently only psms and
peptides. Proteins not here, since one cannot do protein inference
before having merged and remapped multifraction data anyway.
"""
elements_to_split = {'psm': self.allpsms, 'peptide': self.allpeps}
self.features = preparation.split_target_decoy(elements_to_split,
self.ns, filter_type)
class MergeDriver(PycolatorDriver):
"""Base class for merging multiple percolator fractions under different
sorts of filtering. It writes a single percolator out xml from multiple fractions.
Namespace and static xml come from first percolator file.
Make sure fractions are from same percolator run."""
outsuffix = '_merged.xml'
def __init__(self, **kwargs):
super(MergeDriver, self).__init__(**kwargs)
self.mergefiles = [self.fn]
self.mergefiles.extend(kwargs.get('multifile_input') or [])
def prepare(self):
self.ns, self.static_xml = self.prepare_percolator_output(self.fn)
def set_features(self):
""""Merge all psms and peptides"""
allpsms_str = readers.generate_psms_multiple_fractions_strings(
self.mergefiles, self.ns)
allpeps_str = readers.generate_peptides_multiple_fractions_strings(
self.mergefiles, self.ns)
self.features = {'psm': allpsms_str, 'peptide': allpeps_str}
|
<commit_before><commit_msg>Put split and merge in own module too<commit_after>
|
from app.drivers.basedrivers import PycolatorDriver
from app.preparation import pycolator as preparation
from app.readers import pycolator as readers
class SplitDriver(PycolatorDriver):
def __init__(self, **kwargs):
super(SplitDriver, self).__init__(**kwargs)
self.targetsuffix = kwargs.get('targetsuffix', '_target.xml')
self.decoysuffix = kwargs.get('decoysuffix', '_decoy.xml')
def prepare(self):
self.ns, self.static_xml = self.prepare_percolator_output(self.fn)
def run(self):
td = {'target': self.targetsuffix, 'decoy': self.decoysuffix}
for filter_type in ['target', 'decoy']:
self.prepare()
self.set_features(filter_type)
self.outsuffix = td[filter_type]
self.write(filter_type)
def set_features(self, filter_type):
""" Calls splitter to split percolator output into target/decoy elements.
Writes two new xml files with features. Currently only psms and
peptides. Proteins not here, since one cannot do protein inference
before having merged and remapped multifraction data anyway.
"""
elements_to_split = {'psm': self.allpsms, 'peptide': self.allpeps}
self.features = preparation.split_target_decoy(elements_to_split,
self.ns, filter_type)
class MergeDriver(PycolatorDriver):
"""Base class for merging multiple percolator fractions under different
sorts of filtering. It writes a single percolator out xml from multiple fractions.
Namespace and static xml come from first percolator file.
Make sure fractions are from same percolator run."""
outsuffix = '_merged.xml'
def __init__(self, **kwargs):
super(MergeDriver, self).__init__(**kwargs)
self.mergefiles = [self.fn]
self.mergefiles.extend(kwargs.get('multifile_input') or [])
def prepare(self):
self.ns, self.static_xml = self.prepare_percolator_output(self.fn)
def set_features(self):
""""Merge all psms and peptides"""
allpsms_str = readers.generate_psms_multiple_fractions_strings(
self.mergefiles, self.ns)
allpeps_str = readers.generate_peptides_multiple_fractions_strings(
self.mergefiles, self.ns)
self.features = {'psm': allpsms_str, 'peptide': allpeps_str}
|
Put split and merge in own module toofrom app.drivers.basedrivers import PycolatorDriver
from app.preparation import pycolator as preparation
from app.readers import pycolator as readers
class SplitDriver(PycolatorDriver):
def __init__(self, **kwargs):
super(SplitDriver, self).__init__(**kwargs)
self.targetsuffix = kwargs.get('targetsuffix', '_target.xml')
self.decoysuffix = kwargs.get('decoysuffix', '_decoy.xml')
def prepare(self):
self.ns, self.static_xml = self.prepare_percolator_output(self.fn)
def run(self):
td = {'target': self.targetsuffix, 'decoy': self.decoysuffix}
for filter_type in ['target', 'decoy']:
self.prepare()
self.set_features(filter_type)
self.outsuffix = td[filter_type]
self.write(filter_type)
def set_features(self, filter_type):
""" Calls splitter to split percolator output into target/decoy elements.
Writes two new xml files with features. Currently only psms and
peptides. Proteins not here, since one cannot do protein inference
before having merged and remapped multifraction data anyway.
"""
elements_to_split = {'psm': self.allpsms, 'peptide': self.allpeps}
self.features = preparation.split_target_decoy(elements_to_split,
self.ns, filter_type)
class MergeDriver(PycolatorDriver):
"""Base class for merging multiple percolator fractions under different
sorts of filtering. It writes a single percolator out xml from multiple fractions.
Namespace and static xml come from first percolator file.
Make sure fractions are from same percolator run."""
outsuffix = '_merged.xml'
def __init__(self, **kwargs):
super(MergeDriver, self).__init__(**kwargs)
self.mergefiles = [self.fn]
self.mergefiles.extend(kwargs.get('multifile_input') or [])
def prepare(self):
self.ns, self.static_xml = self.prepare_percolator_output(self.fn)
def set_features(self):
""""Merge all psms and peptides"""
allpsms_str = readers.generate_psms_multiple_fractions_strings(
self.mergefiles, self.ns)
allpeps_str = readers.generate_peptides_multiple_fractions_strings(
self.mergefiles, self.ns)
self.features = {'psm': allpsms_str, 'peptide': allpeps_str}
|
<commit_before><commit_msg>Put split and merge in own module too<commit_after>from app.drivers.basedrivers import PycolatorDriver
from app.preparation import pycolator as preparation
from app.readers import pycolator as readers
class SplitDriver(PycolatorDriver):
def __init__(self, **kwargs):
super(SplitDriver, self).__init__(**kwargs)
self.targetsuffix = kwargs.get('targetsuffix', '_target.xml')
self.decoysuffix = kwargs.get('decoysuffix', '_decoy.xml')
def prepare(self):
self.ns, self.static_xml = self.prepare_percolator_output(self.fn)
def run(self):
td = {'target': self.targetsuffix, 'decoy': self.decoysuffix}
for filter_type in ['target', 'decoy']:
self.prepare()
self.set_features(filter_type)
self.outsuffix = td[filter_type]
self.write(filter_type)
def set_features(self, filter_type):
""" Calls splitter to split percolator output into target/decoy elements.
Writes two new xml files with features. Currently only psms and
peptides. Proteins not here, since one cannot do protein inference
before having merged and remapped multifraction data anyway.
"""
elements_to_split = {'psm': self.allpsms, 'peptide': self.allpeps}
self.features = preparation.split_target_decoy(elements_to_split,
self.ns, filter_type)
class MergeDriver(PycolatorDriver):
"""Base class for merging multiple percolator fractions under different
sorts of filtering. It writes a single percolator out xml from multiple fractions.
Namespace and static xml come from first percolator file.
Make sure fractions are from same percolator run."""
outsuffix = '_merged.xml'
def __init__(self, **kwargs):
super(MergeDriver, self).__init__(**kwargs)
self.mergefiles = [self.fn]
self.mergefiles.extend(kwargs.get('multifile_input') or [])
def prepare(self):
self.ns, self.static_xml = self.prepare_percolator_output(self.fn)
def set_features(self):
""""Merge all psms and peptides"""
allpsms_str = readers.generate_psms_multiple_fractions_strings(
self.mergefiles, self.ns)
allpeps_str = readers.generate_peptides_multiple_fractions_strings(
self.mergefiles, self.ns)
self.features = {'psm': allpsms_str, 'peptide': allpeps_str}
|
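For orientation, a hedged sketch of driving these classes (the fn keyword and the write()/run() plumbing live in the PycolatorDriver base class, which is not shown in this record; the filenames are illustrative):

# Split one percolator output file into target and decoy XML files.
splitter = SplitDriver(fn='fraction1_perco.xml')
splitter.run()  # run() handles prepare/write; output names assume the base
                # class appends targetsuffix/decoysuffix to the input name

# Merge several fractions into one percolator output file.
merger = MergeDriver(fn='fraction1_perco.xml',
                     multifile_input=['fraction2_perco.xml', 'fraction3_perco.xml'])
merger.prepare()
merger.set_features()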
|
5dd9cc55368e9f5bd8c79f74f3c7c1fc84a6bd8b
|
common/migrations/0010_auto_20200529_0514.py
|
common/migrations/0010_auto_20200529_0514.py
|
# Generated by Django 2.2.12 on 2020-05-29 05:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('common', '0009_upload_hosting'),
]
operations = [
migrations.AlterField(
model_name='upload',
name='destination',
field=models.CharField(max_length=256, verbose_name='destination path'),
),
]
|
Add common migration (unrelated to branch)
|
Add common migration (unrelated to branch)
|
Python
|
agpl-3.0
|
lutris/website,lutris/website,lutris/website,lutris/website
|
Add common migration (unrelated to branch)
|
# Generated by Django 2.2.12 on 2020-05-29 05:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('common', '0009_upload_hosting'),
]
operations = [
migrations.AlterField(
model_name='upload',
name='destination',
field=models.CharField(max_length=256, verbose_name='destination path'),
),
]
|
<commit_before><commit_msg>Add common migration (unrelated to branch)<commit_after>
|
# Generated by Django 2.2.12 on 2020-05-29 05:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('common', '0009_upload_hosting'),
]
operations = [
migrations.AlterField(
model_name='upload',
name='destination',
field=models.CharField(max_length=256, verbose_name='destination path'),
),
]
|
Add common migration (unrelated to branch)# Generated by Django 2.2.12 on 2020-05-29 05:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('common', '0009_upload_hosting'),
]
operations = [
migrations.AlterField(
model_name='upload',
name='destination',
field=models.CharField(max_length=256, verbose_name='destination path'),
),
]
|
<commit_before><commit_msg>Add common migration (unrelated to branch)<commit_after># Generated by Django 2.2.12 on 2020-05-29 05:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('common', '0009_upload_hosting'),
]
operations = [
migrations.AlterField(
model_name='upload',
name='destination',
field=models.CharField(max_length=256, verbose_name='destination path'),
),
]
|
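Applying or reverting this migration uses the standard Django commands (app label and number taken from the migration above):

python manage.py migrate common 0010   # apply
python manage.py migrate common 0009   # revert to the previous state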
|
fa0afad07f34f350233ae2a4f1654faef9bc1814
|
utils/benchmark/Strings/PySort.py
|
utils/benchmark/Strings/PySort.py
|
words=[
u"James", u"John", u"Robert", u"Michael", u"William", u"David", u"Richard", u"Joseph",
u"Charles", u"Thomas", u"Christopher", u"Daniel", u"Matthew", u"Donald", u"Anthony",
u"Paul", u"Mark", u"George", u"Steven", u"Kenneth", u"Andrew", u"Edward", u"Brian",
u"Joshua", u"Kevin", u"Ronald", u"Timothy", u"Jason", u"Jeffrey", u"Gary", u"Ryan",
u"Nicholas", u"Eric", u"Stephen", u"Jacob", u"Larry", u"Frank", u"Jonathan", u"Scott",
u"Justin", u"Raymond", u"Brandon", u"Gregory", u"Samuel", u"Patrick", u"Benjamin",
u"Jack", u"Dennis", u"Jerry", u"Alexander", u"Tyler", u"Douglas", u"Henry", u"Peter",
u"Walter", u"Aaron", u"Jose", u"Adam", u"Harold", u"Zachary", u"Nathan", u"Carl",
u"Kyle", u"Arthur", u"Gerald", u"Lawrence", u"Roger", u"Albert", u"Keith", u"Jeremy",
u"Terry", u"Joe", u"Sean", u"Willie", u"Jesse", u"Ralph", u"Billy", u"Austin", u"Bruce",
u"Christian", u"Roy", u"Bryan", u"Eugene", u"Louis", u"Harry", u"Wayne", u"Ethan",
u"Jordan", u"Russell", u"Alan", u"Philip", u"Randy", u"Juan", u"Howard", u"Vincent",
u"Bobby", u"Dylan", u"Johnny", u"Phillip", u"Craig"]
# This is a phone book record.
class Record:
def __init__(self, firstname, lastname):
self.first = firstname
self.last = lastname
def __lt__(self, other):
if self.last < other.last:
return True
if self.last > other.last:
return False
return self.first < other.first
Records = []
for first in words:
for last in words:
Records.append(Record(first, last))
for i in xrange(100):
y = Records[:]
y = sorted(y)
#for w in y:
# print w.first, w.last
|
Add a python version for the phonebook benchmark
|
Add a python version for the phonebook benchmark
Swift SVN r17601
|
Python
|
apache-2.0
|
SwiftAndroid/swift,ahoppen/swift,uasys/swift,karwa/swift,gottesmm/swift,ken0nek/swift,xwu/swift,huonw/swift,natecook1000/swift,jckarter/swift,ken0nek/swift,allevato/swift,hughbe/swift,tinysun212/swift-windows,gmilos/swift,MukeshKumarS/Swift,shahmishal/swift,sschiau/swift,austinzheng/swift,adrfer/swift,uasys/swift,shajrawi/swift,bitjammer/swift,tjw/swift,allevato/swift,calebd/swift,return/swift,benlangmuir/swift,gribozavr/swift,tjw/swift,johnno1962d/swift,nathawes/swift,nathawes/swift,danielmartin/swift,nathawes/swift,bitjammer/swift,xedin/swift,calebd/swift,jtbandes/swift,kstaring/swift,lorentey/swift,tardieu/swift,dduan/swift,CodaFi/swift,jopamer/swift,gregomni/swift,shahmishal/swift,gmilos/swift,manavgabhawala/swift,shajrawi/swift,natecook1000/swift,tkremenek/swift,JGiola/swift,jckarter/swift,JGiola/swift,cbrentharris/swift,calebd/swift,karwa/swift,zisko/swift,slavapestov/swift,ahoppen/swift,ken0nek/swift,swiftix/swift.old,airspeedswift/swift,emilstahl/swift,Ivacker/swift,tinysun212/swift-windows,ben-ng/swift,danielmartin/swift,apple/swift,amraboelela/swift,gottesmm/swift,return/swift,apple/swift,swiftix/swift.old,khizkhiz/swift,gregomni/swift,LeoShimonaka/swift,Jnosh/swift,austinzheng/swift,russbishop/swift,amraboelela/swift,felix91gr/swift,kentya6/swift,JGiola/swift,xwu/swift,mightydeveloper/swift,tardieu/swift,apple/swift,frootloops/swift,jckarter/swift,brentdax/swift,felix91gr/swift,adrfer/swift,karwa/swift,practicalswift/swift,alblue/swift,CodaFi/swift,xedin/swift,LeoShimonaka/swift,huonw/swift,swiftix/swift,parkera/swift,jmgc/swift,parkera/swift,aschwaighofer/swift,dduan/swift,lorentey/swift,JaSpa/swift,tinysun212/swift-windows,cbrentharris/swift,jmgc/swift,kstaring/swift,austinzheng/swift,LeoShimonaka/swift,ben-ng/swift,therealbnut/swift,kentya6/swift,bitjammer/swift,johnno1962d/swift,gottesmm/swift,johnno1962d/swift,felix91gr/swift,kperryua/swift,johnno1962d/swift,jopamer/swift,milseman/swift,return/swift,stephentyrone/swift,kstaring/swift,SwiftAndroid/swift,shahmishal/swift,codestergit/swift,modocache/swift,nathawes/swift,tkremenek/swift,tkremenek/swift,gregomni/swift,swiftix/swift,Jnosh/swift,rudkx/swift,JaSpa/swift,Ivacker/swift,kusl/swift,gottesmm/swift,devincoughlin/swift,frootloops/swift,jtbandes/swift,ken0nek/swift,cbrentharris/swift,shahmishal/swift,therealbnut/swift,gottesmm/swift,atrick/swift,codestergit/swift,practicalswift/swift,ken0nek/swift,shahmishal/swift,shahmishal/swift,cbrentharris/swift,huonw/swift,russbishop/swift,gmilos/swift,JGiola/swift,therealbnut/swift,IngmarStein/swift,Ivacker/swift,JaSpa/swift,frootloops/swift,tinysun212/swift-windows,harlanhaskins/swift,deyton/swift,nathawes/swift,xwu/swift,milseman/swift,LeoShimonaka/swift,gregomni/swift,SwiftAndroid/swift,mightydeveloper/swift,swiftix/swift.old,KrishMunot/swift,ahoppen/swift,therealbnut/swift,MukeshKumarS/Swift,jmgc/swift,emilstahl/swift,deyton/swift,danielmartin/swift,emilstahl/swift,zisko/swift,codestergit/swift,sdulal/swift,Ivacker/swift,therealbnut/swift,amraboelela/swift,kentya6/swift,return/swift,shajrawi/swift,alblue/swift,airspeedswift/swift,huonw/swift,JaSpa/swift,therealbnut/swift,calebd/swift,tkremenek/swift,manavgabhawala/swift,khizkhiz/swift,gribozavr/swift,tinysun212/swift-windows,KrishMunot/swift,emilstahl/swift,dduan/swift,jopamer/swift,ken0nek/swift,aschwaighofer/swift,Jnosh/swift,dduan/swift,mightydeveloper/swift,arvedviehweger/swift,zisko/swift,alblue/swift,russbishop/swift,kperryua/swift,kstaring/swift,CodaFi/swift,jckarter/swift,sschiau/swift,LeoShimonaka/swift,stephentyrone/swif
t,parkera/swift,swiftix/swift.old,sdulal/swift,djwbrown/swift,amraboelela/swift,Jnosh/swift,airspeedswift/swift,return/swift,adrfer/swift,rudkx/swift,kperryua/swift,natecook1000/swift,JaSpa/swift,kusl/swift,JGiola/swift,atrick/swift,CodaFi/swift,gmilos/swift,jmgc/swift,lorentey/swift,amraboelela/swift,JGiola/swift,devincoughlin/swift,allevato/swift,tinysun212/swift-windows,modocache/swift,benlangmuir/swift,parkera/swift,hooman/swift,uasys/swift,natecook1000/swift,LeoShimonaka/swift,swiftix/swift.old,apple/swift,jopamer/swift,tjw/swift,djwbrown/swift,danielmartin/swift,allevato/swift,uasys/swift,swiftix/swift.old,ahoppen/swift,Jnosh/swift,arvedviehweger/swift,cbrentharris/swift,return/swift,alblue/swift,bitjammer/swift,modocache/swift,IngmarStein/swift,alblue/swift,harlanhaskins/swift,djwbrown/swift,modocache/swift,huonw/swift,russbishop/swift,tardieu/swift,OscarSwanros/swift,jopamer/swift,adrfer/swift,IngmarStein/swift,karwa/swift,milseman/swift,atrick/swift,glessard/swift,jtbandes/swift,xedin/swift,practicalswift/swift,hughbe/swift,slavapestov/swift,codestergit/swift,huonw/swift,felix91gr/swift,benlangmuir/swift,alblue/swift,lorentey/swift,manavgabhawala/swift,alblue/swift,practicalswift/swift,gribozavr/swift,MukeshKumarS/Swift,deyton/swift,brentdax/swift,xedin/swift,harlanhaskins/swift,brentdax/swift,aschwaighofer/swift,johnno1962d/swift,johnno1962d/swift,tardieu/swift,codestergit/swift,kperryua/swift,codestergit/swift,devincoughlin/swift,kusl/swift,shajrawi/swift,mightydeveloper/swift,swiftix/swift,sschiau/swift,IngmarStein/swift,tkremenek/swift,kusl/swift,roambotics/swift,jopamer/swift,SwiftAndroid/swift,tjw/swift,deyton/swift,felix91gr/swift,shajrawi/swift,austinzheng/swift,tjw/swift,bitjammer/swift,apple/swift,glessard/swift,ken0nek/swift,ben-ng/swift,gribozavr/swift,jmgc/swift,jckarter/swift,CodaFi/swift,airspeedswift/swift,kentya6/swift,sdulal/swift,dreamsxin/swift,hughbe/swift,shahmishal/swift,gregomni/swift,LeoShimonaka/swift,adrfer/swift,parkera/swift,jckarter/swift,practicalswift/swift,kentya6/swift,stephentyrone/swift,tinysun212/swift-windows,LeoShimonaka/swift,russbishop/swift,swiftix/swift.old,harlanhaskins/swift,apple/swift,devincoughlin/swift,uasys/swift,kusl/swift,hughbe/swift,austinzheng/swift,milseman/swift,return/swift,tardieu/swift,ahoppen/swift,rudkx/swift,sschiau/swift,swiftix/swift,dduan/swift,OscarSwanros/swift,milseman/swift,austinzheng/swift,kentya6/swift,IngmarStein/swift,deyton/swift,parkera/swift,CodaFi/swift,modocache/swift,tjw/swift,jtbandes/swift,karwa/swift,glessard/swift,felix91gr/swift,ben-ng/swift,shajrawi/swift,karwa/swift,lorentey/swift,zisko/swift,manavgabhawala/swift,stephentyrone/swift,khizkhiz/swift,roambotics/swift,brentdax/swift,hughbe/swift,kentya6/swift,jmgc/swift,mightydeveloper/swift,hughbe/swift,jtbandes/swift,jckarter/swift,ben-ng/swift,OscarSwanros/swift,devincoughlin/swift,Ivacker/swift,xedin/swift,deyton/swift,hooman/swift,benlangmuir/swift,kentya6/swift,SwiftAndroid/swift,slavapestov/swift,stephentyrone/swift,brentdax/swift,hooman/swift,brentdax/swift,roambotics/swift,stephentyrone/swift,aschwaighofer/swift,rudkx/swift,jtbandes/swift,tkremenek/swift,manavgabhawala/swift,calebd/swift,milseman/swift,atrick/swift,sdulal/swift,allevato/swift,manavgabhawala/swift,djwbrown/swift,devincoughlin/swift,lorentey/swift,KrishMunot/swift,hooman/swift,djwbrown/swift,KrishMunot/swift,rudkx/swift,sdulal/swift,MukeshKumarS/Swift,dreamsxin/swift,amraboelela/swift,nathawes/swift,stephentyrone/swift,gmilos/swift,danielmartin/swift,hooman/swift,roambotics/s
wift,rudkx/swift,JaSpa/swift,gottesmm/swift,SwiftAndroid/swift,harlanhaskins/swift,airspeedswift/swift,dduan/swift,Jnosh/swift,mightydeveloper/swift,sdulal/swift,practicalswift/swift,swiftix/swift,MukeshKumarS/Swift,johnno1962d/swift,arvedviehweger/swift,cbrentharris/swift,xwu/swift,practicalswift/swift,IngmarStein/swift,therealbnut/swift,hooman/swift,frootloops/swift,kperryua/swift,emilstahl/swift,deyton/swift,nathawes/swift,kstaring/swift,xwu/swift,kusl/swift,gmilos/swift,uasys/swift,felix91gr/swift,emilstahl/swift,arvedviehweger/swift,arvedviehweger/swift,kperryua/swift,modocache/swift,adrfer/swift,codestergit/swift,slavapestov/swift,gribozavr/swift,sdulal/swift,jopamer/swift,milseman/swift,mightydeveloper/swift,xedin/swift,khizkhiz/swift,cbrentharris/swift,kusl/swift,harlanhaskins/swift,OscarSwanros/swift,amraboelela/swift,tardieu/swift,glessard/swift,arvedviehweger/swift,practicalswift/swift,allevato/swift,djwbrown/swift,karwa/swift,tardieu/swift,zisko/swift,xedin/swift,djwbrown/swift,sschiau/swift,xwu/swift,slavapestov/swift,tkremenek/swift,dduan/swift,kusl/swift,hughbe/swift,OscarSwanros/swift,gregomni/swift,lorentey/swift,manavgabhawala/swift,glessard/swift,calebd/swift,khizkhiz/swift,kstaring/swift,danielmartin/swift,KrishMunot/swift,parkera/swift,aschwaighofer/swift,roambotics/swift,calebd/swift,huonw/swift,OscarSwanros/swift,cbrentharris/swift,glessard/swift,adrfer/swift,parkera/swift,tjw/swift,swiftix/swift,shajrawi/swift,sschiau/swift,Jnosh/swift,emilstahl/swift,gribozavr/swift,IngmarStein/swift,frootloops/swift,mightydeveloper/swift,KrishMunot/swift,ben-ng/swift,benlangmuir/swift,aschwaighofer/swift,kstaring/swift,swiftix/swift,modocache/swift,khizkhiz/swift,atrick/swift,xwu/swift,sdulal/swift,khizkhiz/swift,frootloops/swift,devincoughlin/swift,MukeshKumarS/Swift,CodaFi/swift,zisko/swift,ahoppen/swift,hooman/swift,natecook1000/swift,airspeedswift/swift,karwa/swift,harlanhaskins/swift,roambotics/swift,slavapestov/swift,SwiftAndroid/swift,danielmartin/swift,uasys/swift,emilstahl/swift,russbishop/swift,jtbandes/swift,jmgc/swift,Ivacker/swift,OscarSwanros/swift,sschiau/swift,gmilos/swift,Ivacker/swift,kperryua/swift,frootloops/swift,xedin/swift,gottesmm/swift,lorentey/swift,natecook1000/swift,gribozavr/swift,sschiau/swift,aschwaighofer/swift,MukeshKumarS/Swift,shahmishal/swift,russbishop/swift,austinzheng/swift,gribozavr/swift,natecook1000/swift,slavapestov/swift,zisko/swift,brentdax/swift,KrishMunot/swift,ben-ng/swift,Ivacker/swift,allevato/swift,devincoughlin/swift,benlangmuir/swift,shajrawi/swift,arvedviehweger/swift,airspeedswift/swift,atrick/swift,JaSpa/swift,bitjammer/swift,swiftix/swift.old,bitjammer/swift
|
Add a python version for the phonebook benchmark
Swift SVN r17601
|
words=[
u"James", u"John", u"Robert", u"Michael", u"William", u"David", u"Richard", u"Joseph",
u"Charles", u"Thomas", u"Christopher", u"Daniel", u"Matthew", u"Donald", u"Anthony",
u"Paul", u"Mark", u"George", u"Steven", u"Kenneth", u"Andrew", u"Edward", u"Brian",
u"Joshua", u"Kevin", u"Ronald", u"Timothy", u"Jason", u"Jeffrey", u"Gary", u"Ryan",
u"Nicholas", u"Eric", u"Stephen", u"Jacob", u"Larry", u"Frank", u"Jonathan", u"Scott",
u"Justin", u"Raymond", u"Brandon", u"Gregory", u"Samuel", u"Patrick", u"Benjamin",
u"Jack", u"Dennis", u"Jerry", u"Alexander", u"Tyler", u"Douglas", u"Henry", u"Peter",
u"Walter", u"Aaron", u"Jose", u"Adam", u"Harold", u"Zachary", u"Nathan", u"Carl",
u"Kyle", u"Arthur", u"Gerald", u"Lawrence", u"Roger", u"Albert", u"Keith", u"Jeremy",
u"Terry", u"Joe", u"Sean", u"Willie", u"Jesse", u"Ralph", u"Billy", u"Austin", u"Bruce",
u"Christian", u"Roy", u"Bryan", u"Eugene", u"Louis", u"Harry", u"Wayne", u"Ethan",
u"Jordan", u"Russell", u"Alan", u"Philip", u"Randy", u"Juan", u"Howard", u"Vincent",
u"Bobby", u"Dylan", u"Johnny", u"Phillip", u"Craig"]
# This is a phone book record.
class Record:
def __init__(self, firstname, lastname):
self.first = firstname
self.last = lastname
def __lt__(self, other):
if self.last < other.last:
return True
if self.last > other.last:
return False
return self.first < other.first
Records = []
for first in words:
for last in words:
Records.append(Record(first, last))
for i in xrange(100):
y = Records[:]
y = sorted(y)
#for w in y:
# print w.first, w.last
|
<commit_before><commit_msg>Add a python version for the phonebook benchmark
Swift SVN r17601<commit_after>
|
words=[
u"James", u"John", u"Robert", u"Michael", u"William", u"David", u"Richard", u"Joseph",
u"Charles", u"Thomas", u"Christopher", u"Daniel", u"Matthew", u"Donald", u"Anthony",
u"Paul", u"Mark", u"George", u"Steven", u"Kenneth", u"Andrew", u"Edward", u"Brian",
u"Joshua", u"Kevin", u"Ronald", u"Timothy", u"Jason", u"Jeffrey", u"Gary", u"Ryan",
u"Nicholas", u"Eric", u"Stephen", u"Jacob", u"Larry", u"Frank", u"Jonathan", u"Scott",
u"Justin", u"Raymond", u"Brandon", u"Gregory", u"Samuel", u"Patrick", u"Benjamin",
u"Jack", u"Dennis", u"Jerry", u"Alexander", u"Tyler", u"Douglas", u"Henry", u"Peter",
u"Walter", u"Aaron", u"Jose", u"Adam", u"Harold", u"Zachary", u"Nathan", u"Carl",
u"Kyle", u"Arthur", u"Gerald", u"Lawrence", u"Roger", u"Albert", u"Keith", u"Jeremy",
u"Terry", u"Joe", u"Sean", u"Willie", u"Jesse", u"Ralph", u"Billy", u"Austin", u"Bruce",
u"Christian", u"Roy", u"Bryan", u"Eugene", u"Louis", u"Harry", u"Wayne", u"Ethan",
u"Jordan", u"Russell", u"Alan", u"Philip", u"Randy", u"Juan", u"Howard", u"Vincent",
u"Bobby", u"Dylan", u"Johnny", u"Phillip", u"Craig"]
# This is a phone book record.
class Record:
def __init__(self, firstname, lastname):
self.first = firstname
self.last = lastname
def __lt__(self, other):
if self.last < other.last:
return True
if self.last > other.last:
return False
return self.first < other.first
Records = []
for first in words:
for last in words:
Records.append(Record(first, last))
for i in xrange(100):
y = Records[:]
y = sorted(y)
#for w in y:
# print w.first, w.last
|
Add a python version for the phonebook benchmark
Swift SVN r17601
words=[
u"James", u"John", u"Robert", u"Michael", u"William", u"David", u"Richard", u"Joseph",
u"Charles", u"Thomas", u"Christopher", u"Daniel", u"Matthew", u"Donald", u"Anthony",
u"Paul", u"Mark", u"George", u"Steven", u"Kenneth", u"Andrew", u"Edward", u"Brian",
u"Joshua", u"Kevin", u"Ronald", u"Timothy", u"Jason", u"Jeffrey", u"Gary", u"Ryan",
u"Nicholas", u"Eric", u"Stephen", u"Jacob", u"Larry", u"Frank", u"Jonathan", u"Scott",
u"Justin", u"Raymond", u"Brandon", u"Gregory", u"Samuel", u"Patrick", u"Benjamin",
u"Jack", u"Dennis", u"Jerry", u"Alexander", u"Tyler", u"Douglas", u"Henry", u"Peter",
u"Walter", u"Aaron", u"Jose", u"Adam", u"Harold", u"Zachary", u"Nathan", u"Carl",
u"Kyle", u"Arthur", u"Gerald", u"Lawrence", u"Roger", u"Albert", u"Keith", u"Jeremy",
u"Terry", u"Joe", u"Sean", u"Willie", u"Jesse", u"Ralph", u"Billy", u"Austin", u"Bruce",
u"Christian", u"Roy", u"Bryan", u"Eugene", u"Louis", u"Harry", u"Wayne", u"Ethan",
u"Jordan", u"Russell", u"Alan", u"Philip", u"Randy", u"Juan", u"Howard", u"Vincent",
u"Bobby", u"Dylan", u"Johnny", u"Phillip", u"Craig"]
# This is a phone book record.
class Record:
def __init__(self, firstname, lastname):
self.first = firstname
self.last = lastname
def __lt__(self, other):
if self.last < other.last:
return True
if self.last > other.last:
return False
return self.first < other.first
Records = []
for first in words:
for last in words:
Records.append(Record(first, last))
for i in xrange(100):
y = Records[:]
y = sorted(y)
#for w in y:
# print w.first, w.last
|
<commit_before><commit_msg>Add a python version for the phonebook benchmark
Swift SVN r17601<commit_after>
words=[
u"James", u"John", u"Robert", u"Michael", u"William", u"David", u"Richard", u"Joseph",
u"Charles", u"Thomas", u"Christopher", u"Daniel", u"Matthew", u"Donald", u"Anthony",
u"Paul", u"Mark", u"George", u"Steven", u"Kenneth", u"Andrew", u"Edward", u"Brian",
u"Joshua", u"Kevin", u"Ronald", u"Timothy", u"Jason", u"Jeffrey", u"Gary", u"Ryan",
u"Nicholas", u"Eric", u"Stephen", u"Jacob", u"Larry", u"Frank", u"Jonathan", u"Scott",
u"Justin", u"Raymond", u"Brandon", u"Gregory", u"Samuel", u"Patrick", u"Benjamin",
u"Jack", u"Dennis", u"Jerry", u"Alexander", u"Tyler", u"Douglas", u"Henry", u"Peter",
u"Walter", u"Aaron", u"Jose", u"Adam", u"Harold", u"Zachary", u"Nathan", u"Carl",
u"Kyle", u"Arthur", u"Gerald", u"Lawrence", u"Roger", u"Albert", u"Keith", u"Jeremy",
u"Terry", u"Joe", u"Sean", u"Willie", u"Jesse", u"Ralph", u"Billy", u"Austin", u"Bruce",
u"Christian", u"Roy", u"Bryan", u"Eugene", u"Louis", u"Harry", u"Wayne", u"Ethan",
u"Jordan", u"Russell", u"Alan", u"Philip", u"Randy", u"Juan", u"Howard", u"Vincent",
u"Bobby", u"Dylan", u"Johnny", u"Phillip", u"Craig"]
# This is a phone book record.
class Record:
def __init__(self, firstname, lastname):
self.first = firstname
self.last = lastname
def __lt__(self, other):
if self.last < other.last:
return True
if self.last > other.last:
return False
return self.first < other.first
Records = []
for first in words:
for last in words:
Records.append(Record(first, last))
for i in xrange(100):
y = Records[:]
y = sorted(y)
#for w in y:
# print w.first, w.last
|
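For comparison, the same ordering can be produced with a key function instead of __lt__, which avoids a Python-level method call per comparison and is typically noticeably faster in CPython (works on both Python 2 and 3):

y = sorted(Records, key=lambda r: (r.last, r.first))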
|
2e9f43d1c1679355e2d7d452137ddf7fb2bbdedf
|
tests/async-send-get-test.py
|
tests/async-send-get-test.py
|
#!/usr/bin/env python
"""
Send a message and confirm you can retrieve it with Basic.Get
Test Steps:
1) Connect to broker - start_test
2) Open Channel - on_connected
3) Declare Queue - on_channel_open
4) Send test message - on_queue_declared
5) Call basic get - on_queue_declared
6) Validate that sent message and basic get body are the same - check_message
"""
import utils.async as async
import nose
from pika.adapters import SelectConnection
channel = None
confirmed = False
connection = None
queue = None
ADAPTER = SelectConnection
HOST = 'localhost'
PORT = 5672
@nose.tools.timed(2)
def start_test():
global confirmed, connection
confirmed = False
connection = async.connect(ADAPTER, HOST, PORT, on_connected)
connection.ioloop.start()
if not confirmed:
assert False
pass
@nose.tools.nottest
def on_connected(connection):
global connected
connected = connection.is_open()
if connected:
async.channel(connection, on_channel_open)
@nose.tools.nottest
def on_channel_open(channel_):
global channel, queue
channel = channel_
queue = async.queue_name()
async.queue_declare(channel, queue, on_queue_declared)
@nose.tools.nottest
def on_queue_declared(frame):
global channel, queue
test_message = async.send_test_message(channel, queue)
def check_message(channel_number, method, header, body):
global connection, confirmed
if body == test_message:
confirmed = True
connection.ioloop.stop()
channel.basic_get(callback=check_message, queue=queue)
|
Test Basic.Publish -> Basic.Get message passing
|
Test Basic.Publish -> Basic.Get message passing
|
Python
|
bsd-3-clause
|
Zephor5/pika,vrtsystems/pika,renshawbay/pika-python3,jstnlef/pika,hugoxia/pika,vitaly-krugl/pika,skftn/pika,fkarb/pika-python3,reddec/pika,shinji-s/pika,benjamin9999/pika,pika/pika,zixiliuyue/pika,knowsis/pika,Tarsbot/pika
|
Test Basic.Publish -> Basic.Get message passing
|
#!/usr/bin/env python
"""
Send a message and confirm you can retrieve it with Basic.Get
Test Steps:
1) Connect to broker - start_test
2) Open Channel - on_connected
3) Declare Queue - on_channel_open
4) Send test message - on_queue_declared
5) Call basic get - on_queue_declared
6) Validate that sent message and basic get body are the same - check_message
"""
import utils.async as async
import nose
from pika.adapters import SelectConnection
channel = None
confirmed = False
connection = None
queue = None
ADAPTER = SelectConnection
HOST = 'localhost'
PORT = 5672
@nose.tools.timed(2)
def start_test():
global confirmed, connection
confirmed = False
connection = async.connect(ADAPTER, HOST, PORT, on_connected)
connection.ioloop.start()
if not confirmed:
assert False
pass
@nose.tools.nottest
def on_connected(connection):
global connected
connected = connection.is_open()
if connected:
async.channel(connection, on_channel_open)
@nose.tools.nottest
def on_channel_open(channel_):
global channel, queue
channel = channel_
queue = async.queue_name()
async.queue_declare(channel, queue, on_queue_declared)
@nose.tools.nottest
def on_queue_declared(frame):
global channel, queue
test_message = async.send_test_message(channel, queue)
def check_message(channel_number, method, header, body):
global connection, confirmed
if body == test_message:
confirmed = True
connection.ioloop.stop()
channel.basic_get(callback=check_message, queue=queue)
|
<commit_before><commit_msg>Test Basic.Publish -> Basic.Get message passing<commit_after>
|
#!/usr/bin/env python
"""
Send a message and confirm you can retrieve it with Basic.Get
Test Steps:
1) Connect to broker - start_test
2) Open Channel - on_connected
3) Declare Queue - on_channel_open
4) Send test message - on_queue_declared
5) Call basic get - on_queue_declared
6) Validate that sent message and basic get body are the same - check_message
"""
import utils.async as async
import nose
from pika.adapters import SelectConnection
channel = None
confirmed = False
connection = None
queue = None
ADAPTER = SelectConnection
HOST = 'localhost'
PORT = 5672
@nose.tools.timed(2)
def start_test():
global confirmed, connection
confirmed = False
connection = async.connect(ADAPTER, HOST, PORT, on_connected)
connection.ioloop.start()
if not confirmed:
assert False
pass
@nose.tools.nottest
def on_connected(connection):
global connected
connected = connection.is_open()
if connected:
async.channel(connection, on_channel_open)
@nose.tools.nottest
def on_channel_open(channel_):
global channel, queue
channel = channel_
queue = async.queue_name()
async.queue_declare(channel, queue, on_queue_declared)
@nose.tools.nottest
def on_queue_declared(frame):
global channel, queue
test_message = async.send_test_message(channel, queue)
def check_message(channel_number, method, header, body):
global connection, confirmed
if body == test_message:
confirmed = True
connection.ioloop.stop()
channel.basic_get(callback=check_message, queue=queue)
|
Test Basic.Publish -> Basic.Get message passing#!/usr/bin/env python
"""
Send a message and confirm you can retrieve it with Basic.Get
Test Steps:
1) Connect to broker - start_test
2) Open Channel - on_connected
3) Declare Queue - on_channel_open
4) Send test message - on_queue_declared
5) Call basic get - on_queue_declared
6) Validate that sent message and basic get body are the same - check_message
"""
import utils.async as async
import nose
from pika.adapters import SelectConnection
channel = None
confirmed = False
connection = None
queue = None
ADAPTER = SelectConnection
HOST = 'localhost'
PORT = 5672
@nose.tools.timed(2)
def start_test():
global confirmed, connection
confirmed = False
connection = async.connect(ADAPTER, HOST, PORT, on_connected)
connection.ioloop.start()
if not confirmed:
assert False
pass
@nose.tools.nottest
def on_connected(connection):
global connected
connected = connection.is_open()
if connected:
async.channel(connection, on_channel_open)
@nose.tools.nottest
def on_channel_open(channel_):
global channel, queue
channel = channel_
queue = async.queue_name()
async.queue_declare(channel, queue, on_queue_declared)
@nose.tools.nottest
def on_queue_declared(frame):
global channel, queue
test_message = async.send_test_message(channel, queue)
def check_message(channel_number, method, header, body):
global connection, confirmed
if body == test_message:
confirmed = True
connection.ioloop.stop()
channel.basic_get(callback=check_message, queue=queue)
|
<commit_before><commit_msg>Test Basic.Publish -> Basic.Get message passing<commit_after>#!/usr/bin/env python
"""
Send a message and confirm you can retrieve it with Basic.Get
Test Steps:
1) Connect to broker - start_test
2) Open Channel - on_connected
3) Declare Queue - on_channel_open
4) Send test message - on_queue_declared
5) Call basic get - on_queue_declared
6) Validate that sent message and basic get body are the same - check_message
"""
import utils.async as async
import nose
from pika.adapters import SelectConnection
channel = None
confirmed = False
connection = None
queue = None
ADAPTER = SelectConnection
HOST = 'localhost'
PORT = 5672
@nose.tools.timed(2)
def start_test():
global confirmed, connection
confirmed = False
connection = async.connect(ADAPTER, HOST, PORT, on_connected)
connection.ioloop.start()
if not confirmed:
assert False
pass
@nose.tools.nottest
def on_connected(connection):
global connected
connected = connection.is_open()
if connected:
async.channel(connection, on_channel_open)
@nose.tools.nottest
def on_channel_open(channel_):
global channel, queue
channel = channel_
queue = async.queue_name()
async.queue_declare(channel, queue, on_queue_declared)
@nose.tools.nottest
def on_queue_declared(frame):
global channel, queue
test_message = async.send_test_message(channel, queue)
def check_message(channel_number, method, header, body):
global connection, confirmed
if body == test_message:
confirmed = True
connection.ioloop.stop()
channel.basic_get(callback=check_message, queue=queue)
|
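The same publish-then-get round trip is easier to follow with pika's blocking adapter; a sketch against the modern 1.x API (the queue name is illustrative, and a real test would retry basic_get if the broker has not routed the message yet):

import pika
conn = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
ch = conn.channel()
ch.queue_declare(queue='send-get-test')
ch.basic_publish(exchange='', routing_key='send-get-test', body=b'hello')
method, properties, body = ch.basic_get(queue='send-get-test', auto_ack=True)
assert body == b'hello'
conn.close()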
|
e7e51333133dd561e8a746144c29c6635d8a982a
|
migrations/versions/320f4eb0698b_add_proposal_image.py
|
migrations/versions/320f4eb0698b_add_proposal_image.py
|
"""add proposal image
Revision ID: 320f4eb0698b
Revises: 26ef95fc6f2c
Create Date: 2015-03-31 15:55:20.062624
"""
# revision identifiers, used by Alembic.
revision = '320f4eb0698b'
down_revision = '26ef95fc6f2c'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('proposal', sa.Column('image', sa.String(length=150), nullable=False))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('proposal', 'image')
### end Alembic commands ###
|
Add migration to add column for proposal image filename
|
Add migration to add column for proposal image filename
|
Python
|
agpl-3.0
|
fairdemocracy/vilfredo-core
|
Add migration to add column for proposal image filename
|
"""add proposal image
Revision ID: 320f4eb0698b
Revises: 26ef95fc6f2c
Create Date: 2015-03-31 15:55:20.062624
"""
# revision identifiers, used by Alembic.
revision = '320f4eb0698b'
down_revision = '26ef95fc6f2c'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('proposal', sa.Column('image', sa.String(length=150), nullable=False))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('proposal', 'image')
### end Alembic commands ###
|
<commit_before><commit_msg>Add migration to add column for proposal image filename<commit_after>
|
"""add proposal image
Revision ID: 320f4eb0698b
Revises: 26ef95fc6f2c
Create Date: 2015-03-31 15:55:20.062624
"""
# revision identifiers, used by Alembic.
revision = '320f4eb0698b'
down_revision = '26ef95fc6f2c'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('proposal', sa.Column('image', sa.String(length=150), nullable=False))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('proposal', 'image')
### end Alembic commands ###
|
Add migration to add column for proposal image filename"""add proposal image
Revision ID: 320f4eb0698b
Revises: 26ef95fc6f2c
Create Date: 2015-03-31 15:55:20.062624
"""
# revision identifiers, used by Alembic.
revision = '320f4eb0698b'
down_revision = '26ef95fc6f2c'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('proposal', sa.Column('image', sa.String(length=150), nullable=False))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('proposal', 'image')
### end Alembic commands ###
|
<commit_before><commit_msg>Add migration to add column for proposal image filename<commit_after>"""add proposal image
Revision ID: 320f4eb0698b
Revises: 26ef95fc6f2c
Create Date: 2015-03-31 15:55:20.062624
"""
# revision identifiers, used by Alembic.
revision = '320f4eb0698b'
down_revision = '26ef95fc6f2c'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('proposal', sa.Column('image', sa.String(length=150), nullable=False))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('proposal', 'image')
### end Alembic commands ###
|
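One adjustment worth making before running this against a populated table: adding a NOT NULL column without a default fails on most backends, so the usual fix is to supply a server_default (the empty string here is an assumption about what the application tolerates):

op.add_column('proposal', sa.Column('image', sa.String(length=150), nullable=False, server_default=''))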
|
fcf0ed3c4e2deb9ce1d6a758dc18e6a03542eb59
|
candidates/management/commands/candidates_parties_with_multiple_emblems.py
|
candidates/management/commands/candidates_parties_with_multiple_emblems.py
|
from django.core.management.base import BaseCommand
from candidates.popit import create_popit_api_object, popit_unwrap_pagination
class Command(BaseCommand):
def handle(self, *args, **options):
api = create_popit_api_object()
for org in popit_unwrap_pagination(
api.organizations,
per_page=100
):
org.pop('versions', None)
org.pop('memberships', None)
images = org.get('images', [])
if len(images) < 2:
continue
print "====================================================="
print len(images), org['id'], org['name'].encode('utf-8')
for image in images:
print ' --'
print ' ' + image['notes'].encode('utf-8')
print ' ' + image['url']
|
Add a script to find parties with multiple emblems (logos) from the EC
|
Add a script to find parties with multiple emblems (logos) from the EC
This was useful at some point, apparently!
|
Python
|
agpl-3.0
|
mysociety/yournextmp-popit,mysociety/yournextmp-popit,DemocracyClub/yournextrepresentative,mysociety/yournextrepresentative,datamade/yournextmp-popit,neavouli/yournextrepresentative,YoQuieroSaber/yournextrepresentative,neavouli/yournextrepresentative,mysociety/yournextmp-popit,DemocracyClub/yournextrepresentative,YoQuieroSaber/yournextrepresentative,YoQuieroSaber/yournextrepresentative,YoQuieroSaber/yournextrepresentative,neavouli/yournextrepresentative,mysociety/yournextmp-popit,openstate/yournextrepresentative,datamade/yournextmp-popit,datamade/yournextmp-popit,datamade/yournextmp-popit,openstate/yournextrepresentative,mysociety/yournextrepresentative,datamade/yournextmp-popit,openstate/yournextrepresentative,DemocracyClub/yournextrepresentative,openstate/yournextrepresentative,neavouli/yournextrepresentative,neavouli/yournextrepresentative,mysociety/yournextrepresentative,mysociety/yournextrepresentative,openstate/yournextrepresentative,YoQuieroSaber/yournextrepresentative,mysociety/yournextrepresentative,mysociety/yournextmp-popit
|
Add a script to find parties with multiple emblems (logos) from the EC
This was useful at some point, apparently!
|
from django.core.management.base import BaseCommand
from candidates.popit import create_popit_api_object, popit_unwrap_pagination
class Command(BaseCommand):
def handle(self, *args, **options):
api = create_popit_api_object()
for org in popit_unwrap_pagination(
api.organizations,
per_page=100
):
org.pop('versions', None)
org.pop('memberships', None)
images = org.get('images', [])
if len(images) < 2:
continue
print "====================================================="
print len(images), org['id'], org['name'].encode('utf-8')
for image in images:
print ' --'
print ' ' + image['notes'].encode('utf-8')
print ' ' + image['url']
|
<commit_before><commit_msg>Add a script to find parties with multiple emblems (logos) from the EC
This was useful at some point, apparently!<commit_after>
|
from django.core.management.base import BaseCommand
from candidates.popit import create_popit_api_object, popit_unwrap_pagination
class Command(BaseCommand):
def handle(self, *args, **options):
api = create_popit_api_object()
for org in popit_unwrap_pagination(
api.organizations,
per_page=100
):
org.pop('versions', None)
org.pop('memberships', None)
images = org.get('images', [])
if len(images) < 2:
continue
print "====================================================="
print len(images), org['id'], org['name'].encode('utf-8')
for image in images:
print ' --'
print ' ' + image['notes'].encode('utf-8')
print ' ' + image['url']
|
Add a script to find parties with multiple emblems (logos) from the EC
This was useful at some point, apparently!from django.core.management.base import BaseCommand
from candidates.popit import create_popit_api_object, popit_unwrap_pagination
class Command(BaseCommand):
def handle(self, *args, **options):
api = create_popit_api_object()
for org in popit_unwrap_pagination(
api.organizations,
per_page=100
):
org.pop('versions', None)
org.pop('memberships', None)
images = org.get('images', [])
if len(images) < 2:
continue
print "====================================================="
print len(images), org['id'], org['name'].encode('utf-8')
for image in images:
print ' --'
print ' ' + image['notes'].encode('utf-8')
print ' ' + image['url']
|
<commit_before><commit_msg>Add a script to find parties with multiple emblems (logos) from the EC
This was useful at some point, apparently!<commit_after>from django.core.management.base import BaseCommand
from candidates.popit import create_popit_api_object, popit_unwrap_pagination
class Command(BaseCommand):
def handle(self, *args, **options):
api = create_popit_api_object()
for org in popit_unwrap_pagination(
api.organizations,
per_page=100
):
org.pop('versions', None)
org.pop('memberships', None)
images = org.get('images', [])
if len(images) < 2:
continue
print "====================================================="
print len(images), org['id'], org['name'].encode('utf-8')
for image in images:
print ' --'
print ' ' + image['notes'].encode('utf-8')
print ' ' + image['url']
|
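Since Django derives a management command's name from its module name, the script above runs as (output goes to stdout):

python manage.py candidates_parties_with_multiple_emblems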
|
0f6e065a70bcd1f9dd64dfa04c13cb0065e33c13
|
src/autobot/src/navigator_test.py
|
src/autobot/src/navigator_test.py
|
#!/usr/bin/env python
import unittest
import mock
from autobot.msg import detected_object
from navigator import *
def fake_stopCar():
return True
def fake_srvTogglePathFinder(state):
return
def fake_setWallDist(dist, wall):
return
class NavigatorTest(unittest.TestCase):
@mock.patch('navigator.setWallDist',
side_effect=fake_setWallDist)
@mock.patch('navigator.srvTogglePathFinder',
side_effect=fake_srvTogglePathFinder)
@mock.patch('navigator.stopCar', side_effect=fake_stopCar)
def testPersonInFront(self, fake_stopCar,
fake_srvTogglePathFinder,
fake_setWallDist):
global OBJECT_MAP
global PATH_STATE
OBJECT_MAP.addToMap('person', 10, 50, 1.2)
OBJECT_MAP.addToMap('cat', 10, 50, 60)
OBJECT_MAP.addToMap('bat', 10, 50, 65)
PATH_STATE.enabled = True
onDecisionInterval(None)
fake_setWallDist.assert_not_called()
fake_stopCar.assert_called()
|
Add basic test for navigator
|
Add basic test for navigator
|
Python
|
mit
|
atkvo/masters-bot,atkvo/masters-bot,atkvo/masters-bot,atkvo/masters-bot,atkvo/masters-bot
|
Add basic test for navigator
|
#!/usr/bin/env python
import unittest
import mock
from autobot.msg import detected_object
from navigator import *
def fake_stopCar():
return True
def fake_srvTogglePathFinder(state):
return
def fake_setWallDist(dist, wall):
return
class NavigatorTest(unittest.TestCase):
@mock.patch('navigator.setWallDist',
side_effect=fake_setWallDist)
@mock.patch('navigator.srvTogglePathFinder',
side_effect=fake_srvTogglePathFinder)
@mock.patch('navigator.stopCar', side_effect=fake_stopCar)
def testPersonInFront(self, fake_stopCar,
fake_srvTogglePathFinder,
fake_setWallDist):
global OBJECT_MAP
global PATH_STATE
OBJECT_MAP.addToMap('person', 10, 50, 1.2)
OBJECT_MAP.addToMap('cat', 10, 50, 60)
OBJECT_MAP.addToMap('bat', 10, 50, 65)
PATH_STATE.enabled = True
onDecisionInterval(None)
fake_setWallDist.assert_not_called()
fake_stopCar.assert_called()
|
<commit_before><commit_msg>Add basic test for navigator<commit_after>
|
#!/usr/bin/env python
import unittest
import mock
from autobot.msg import detected_object
from navigator import *
def fake_stopCar():
return True
def fake_srvTogglePathFinder(state):
return
def fake_setWallDist(dist, wall):
return
class NavigatorTest(unittest.TestCase):
@mock.patch('navigator.setWallDist',
side_effect=fake_setWallDist)
@mock.patch('navigator.srvTogglePathFinder',
side_effect=fake_srvTogglePathFinder)
@mock.patch('navigator.stopCar', side_effect=fake_stopCar)
def testPersonInFront(self, fake_stopCar,
fake_srvTogglePathFinder,
fake_setWallDist):
global OBJECT_MAP
global PATH_STATE
OBJECT_MAP.addToMap('person', 10, 50, 1.2)
OBJECT_MAP.addToMap('cat', 10, 50, 60)
OBJECT_MAP.addToMap('bat', 10, 50, 65)
PATH_STATE.enabled = True
onDecisionInterval(None)
fake_setWallDist.assert_not_called()
fake_stopCar.assert_called()
|
Add basic test for navigator#!/usr/bin/env python
import unittest
import mock
from autobot.msg import detected_object
from navigator import *
def fake_stopCar():
return True
def fake_srvTogglePathFinder(state):
return
def fake_setWallDist(dist, wall):
return
class NavigatorTest(unittest.TestCase):
@mock.patch('navigator.setWallDist',
side_effect=fake_setWallDist)
@mock.patch('navigator.srvTogglePathFinder',
side_effect=fake_srvTogglePathFinder)
@mock.patch('navigator.stopCar', side_effect=fake_stopCar)
def testPersonInFront(self, fake_stopCar,
fake_srvTogglePathFinder,
fake_setWallDist):
global OBJECT_MAP
global PATH_STATE
OBJECT_MAP.addToMap('person', 10, 50, 1.2)
OBJECT_MAP.addToMap('cat', 10, 50, 60)
OBJECT_MAP.addToMap('bat', 10, 50, 65)
PATH_STATE.enabled = True
onDecisionInterval(None)
fake_setWallDist.assert_not_called()
fake_stopCar.assert_called()
|
<commit_before><commit_msg>Add basic test for navigator<commit_after>#!/usr/bin/env python
import unittest
import mock
from autobot.msg import detected_object
from navigator import *
def fake_stopCar():
return True
def fake_srvTogglePathFinder(state):
return
def fake_setWallDist(dist, wall):
return
class NavigatorTest(unittest.TestCase):
@mock.patch('navigator.setWallDist',
side_effect=fake_setWallDist)
@mock.patch('navigator.srvTogglePathFinder',
side_effect=fake_srvTogglePathFinder)
@mock.patch('navigator.stopCar', side_effect=fake_stopCar)
def testPersonInFront(self, fake_stopCar,
fake_srvTogglePathFinder,
fake_setWallDist):
global OBJECT_MAP
global PATH_STATE
OBJECT_MAP.addToMap('person', 10, 50, 1.2)
OBJECT_MAP.addToMap('cat', 10, 50, 60)
OBJECT_MAP.addToMap('bat', 10, 50, 65)
PATH_STATE.enabled = True
onDecisionInterval(None)
fake_setWallDist.assert_not_called()
fake_stopCar.assert_called()
|
|
a5012c9fb81768e85b555b52264baa11efc17ba1
|
test/test_select_taxa.py
|
test/test_select_taxa.py
|
import logging
import os
import tempfile
import unittest
import select_taxa
class Test(unittest.TestCase):
def setUp(self):
self.longMessage = True
logging.root.setLevel(logging.DEBUG)
def test_main(self):
'''
Select a single genome and assert the download log file contains the correct output for it.
'''
# Setup arguments
        target = tempfile.mkstemp()[1]
try:
args = ('--genomes=13960 --genomes-file=' + target).split()
# Write to argument file
select_taxa.main(args)
# Assert contents
with open(target) as reader:
contents = reader.read()
self.assertIn('17745.1\tEscherichia coli E24377A', contents)
finally:
os.remove(target)
|
Add unittest for select_taxa that runs main and selects a single genome
|
Add unittest for select_taxa that runs main and selects a single genome
|
Python
|
mit
|
ODoSE/odose.nl
|
Add unittest for select_taxa that runs main and selects a single genome
|
import logging
import os
import tempfile
import unittest
import select_taxa
class Test(unittest.TestCase):
def setUp(self):
self.longMessage = True
logging.root.setLevel(logging.DEBUG)
def test_main(self):
'''
Select a single genome and assert the download log file contains the correct output for it.
'''
# Setup arguments
        target = tempfile.mkstemp()[1]
try:
args = ('--genomes=13960 --genomes-file=' + target).split()
# Write to argument file
select_taxa.main(args)
# Assert contents
with open(target) as reader:
contents = reader.read()
self.assertIn('17745.1\tEscherichia coli E24377A', contents)
finally:
os.remove(target)
|
<commit_before><commit_msg>Add unittest for select_taxa that runs main and selects a single genome<commit_after>
|
import logging
import os
import tempfile
import unittest
import select_taxa
class Test(unittest.TestCase):
def setUp(self):
self.longMessage = True
logging.root.setLevel(logging.DEBUG)
def test_main(self):
'''
Select a single genome and assert the download log file contains the correct output for it.
'''
# Setup arguments
        target = tempfile.mkstemp()[1]
try:
args = ('--genomes=13960 --genomes-file=' + target).split()
# Write to argument file
select_taxa.main(args)
# Assert contents
with open(target) as reader:
contents = reader.read()
self.assertIn('17745.1\tEscherichia coli E24377A', contents)
finally:
os.remove(target)
|
Add unittest for select_taxa that runs main and selects a single genomeimport logging
import os
import tempfile
import unittest
import select_taxa
class Test(unittest.TestCase):
def setUp(self):
self.longMessage = True
logging.root.setLevel(logging.DEBUG)
def test_main(self):
'''
Select a single genome and assert the download log file contains the correct output for it.
'''
# Setup arguments
        target = tempfile.mkstemp()[1]
try:
args = ('--genomes=13960 --genomes-file=' + target).split()
# Write to argument file
select_taxa.main(args)
# Assert contents
with open(target) as reader:
contents = reader.read()
self.assertIn('17745.1\tEscherichia coli E24377A', contents)
finally:
os.remove(target)
|
<commit_before><commit_msg>Add unittest for select_taxa that runs main and selects a single genome<commit_after>import logging
import os
import tempfile
import unittest
import select_taxa
class Test(unittest.TestCase):
def setUp(self):
self.longMessage = True
logging.root.setLevel(logging.DEBUG)
def test_main(self):
'''
Select a single genome and assert the download log file contains the correct output for it.
'''
# Setup arguments
        target = tempfile.mkstemp()[1]
try:
args = ('--genomes=13960 --genomes-file=' + target).split()
# Write to argument file
select_taxa.main(args)
# Assert contents
with open(target) as reader:
contents = reader.read()
self.assertIn('17745.1\tEscherichia coli E24377A', contents)
finally:
os.remove(target)
|
|
5692f64619bf009cf92bf0a8c6f77bf82f0e3d02
|
tests/test_regression.py
|
tests/test_regression.py
|
# Copyright: See the LICENSE file.
"""Regression tests related to issues found with the project"""
import datetime
import typing as T
import unittest
import factory
# Example objects
# ===============
class Author(T.NamedTuple):
fullname: str
pseudonym: T.Optional[str] = None
class Book(T.NamedTuple):
title: str
author: Author
class PublishedBook(T.NamedTuple):
book: Book
published_on: datetime.date
countries: T.List[str]
class FakerRegressionTests(unittest.TestCase):
def test_locale_issue(self):
"""Regression test for `KeyError: 'locale'`
See #785 #786 #787 #788 #790 #796.
"""
class AuthorFactory(factory.Factory):
class Meta:
model = Author
class Params:
unknown = factory.Trait(
fullname="",
)
fullname = factory.Faker("name")
public_author = AuthorFactory(unknown=False)
self.assertIsNone(public_author.pseudonym)
unknown_author = AuthorFactory(unknown=True)
self.assertEqual("", unknown_author.fullname)
|
Add a new regression testing module
|
Add a new regression testing module
That module should hold all tests used when reproducing (and fixing) an
issue.
|
Python
|
mit
|
FactoryBoy/factory_boy
|
Add a new regression testing module
That module should hold all tests used when reproducing (and fixing) an
issue.
|
# Copyright: See the LICENSE file.
"""Regression tests related to issues found with the project"""
import datetime
import typing as T
import unittest
import factory
# Example objects
# ===============
class Author(T.NamedTuple):
fullname: str
pseudonym: T.Optional[str] = None
class Book(T.NamedTuple):
title: str
author: Author
class PublishedBook(T.NamedTuple):
book: Book
published_on: datetime.date
countries: T.List[str]
class FakerRegressionTests(unittest.TestCase):
def test_locale_issue(self):
"""Regression test for `KeyError: 'locale'`
See #785 #786 #787 #788 #790 #796.
"""
class AuthorFactory(factory.Factory):
class Meta:
model = Author
class Params:
unknown = factory.Trait(
fullname="",
)
fullname = factory.Faker("name")
public_author = AuthorFactory(unknown=False)
self.assertIsNone(public_author.pseudonym)
unknown_author = AuthorFactory(unknown=True)
self.assertEqual("", unknown_author.fullname)
|
<commit_before><commit_msg>Add a new regression testing module
That module should hold all tests used when reproducing (and fixing) an
issue.<commit_after>
|
# Copyright: See the LICENSE file.
"""Regression tests related to issues found with the project"""
import datetime
import typing as T
import unittest
import factory
# Example objects
# ===============
class Author(T.NamedTuple):
fullname: str
pseudonym: T.Optional[str] = None
class Book(T.NamedTuple):
title: str
author: Author
class PublishedBook(T.NamedTuple):
book: Book
published_on: datetime.date
countries: T.List[str]
class FakerRegressionTests(unittest.TestCase):
def test_locale_issue(self):
"""Regression test for `KeyError: 'locale'`
See #785 #786 #787 #788 #790 #796.
"""
class AuthorFactory(factory.Factory):
class Meta:
model = Author
class Params:
unknown = factory.Trait(
fullname="",
)
fullname = factory.Faker("name")
public_author = AuthorFactory(unknown=False)
self.assertIsNone(public_author.pseudonym)
unknown_author = AuthorFactory(unknown=True)
self.assertEqual("", unknown_author.fullname)
|
Add a new regression testing module
That module should hold all tests used when reproducing (and fixing) an
issue.# Copyright: See the LICENSE file.
"""Regression tests related to issues found with the project"""
import datetime
import typing as T
import unittest
import factory
# Example objects
# ===============
class Author(T.NamedTuple):
fullname: str
pseudonym: T.Optional[str] = None
class Book(T.NamedTuple):
title: str
author: Author
class PublishedBook(T.NamedTuple):
book: Book
published_on: datetime.date
countries: T.List[str]
class FakerRegressionTests(unittest.TestCase):
def test_locale_issue(self):
"""Regression test for `KeyError: 'locale'`
See #785 #786 #787 #788 #790 #796.
"""
class AuthorFactory(factory.Factory):
class Meta:
model = Author
class Params:
unknown = factory.Trait(
fullname="",
)
fullname = factory.Faker("name")
public_author = AuthorFactory(unknown=False)
self.assertIsNone(public_author.pseudonym)
unknown_author = AuthorFactory(unknown=True)
self.assertEqual("", unknown_author.fullname)
|
<commit_before><commit_msg>Add a new regression testing module
That module should hold all tests used when reproducing (and fixing) an
issue.<commit_after># Copyright: See the LICENSE file.
"""Regression tests related to issues found with the project"""
import datetime
import typing as T
import unittest
import factory
# Example objects
# ===============
class Author(T.NamedTuple):
fullname: str
pseudonym: T.Optional[str] = None
class Book(T.NamedTuple):
title: str
author: Author
class PublishedBook(T.NamedTuple):
book: Book
published_on: datetime.date
countries: T.List[str]
class FakerRegressionTests(unittest.TestCase):
def test_locale_issue(self):
"""Regression test for `KeyError: 'locale'`
See #785 #786 #787 #788 #790 #796.
"""
class AuthorFactory(factory.Factory):
class Meta:
model = Author
class Params:
unknown = factory.Trait(
fullname="",
)
fullname = factory.Faker("name")
public_author = AuthorFactory(unknown=False)
self.assertIsNone(public_author.pseudonym)
unknown_author = AuthorFactory(unknown=True)
self.assertEqual("", unknown_author.fullname)
|
|
122eb3c6eb9f8467fc5d3325f0e5c58cc285cb50
|
token-hexkey.py
|
token-hexkey.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specif
# a script to convert a given key in hex format to token using RandomPartitioner
import hashlib
import binascii
import sys
if len(sys.argv) < 2:
print "usage: python token.py <key in hex format>"
sys.exit(1)
key = binascii.unhexlify(sys.argv[1])
# Calculate MD5 digest and convert it to hex format
digest = hashlib.md5(key).hexdigest()
# Convert the hash digest to 2's complement form
token = long(digest, 16)
bits = 128
if ((token & (1 << (bits - 1))) != 0):
token = token - (1 << bits)
# Convert the resulting number to unsigned form
print abs(token)
|
Add a script to convert hex formatted key to token using random partitioner
|
Add a script to convert hex formatted key to token using random partitioner
|
Python
|
apache-2.0
|
bharatendra/ctools
|
Add a script to convert hex formatted key to token using random partitioner
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specif
# a script to convert a given key in hex format to token using RandomPartitioner
import hashlib
import binascii
import sys
if len(sys.argv) < 2:
print "usage: python token.py <key in hex format>"
sys.exit(1)
key = binascii.unhexlify(sys.argv[1])
# Calculate MD5 digest and convert it to hex format
digest = hashlib.md5(key).hexdigest()
# Convert the hash digest to 2's complement form
token = long(digest, 16)
bits = 128
if ((token & (1 << (bits - 1))) != 0):
token = token - (1 << bits)
# Convert the resulting number to unsigned form
print abs(token)
|
<commit_before><commit_msg>Add a script to convert hex formatted key to token using random partitioner<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specif
# a script to convert a given key in hex format to token using RandomPartitioner
import hashlib
import binascii
import sys
if len(sys.argv) < 2:
print "usage: python token.py <key in hex format>"
sys.exit(1)
key = binascii.unhexlify(sys.argv[1])
# Calculate MD5 digest and convert it to hex format
digest = hashlib.md5(key).hexdigest()
# Convert the hash digest to 2's complement form
token = long(digest, 16)
bits = 128
if ((token & (1 << (bits - 1))) != 0):
token = token - (1 << bits)
# Convert the resulting number to unsigned form
print abs(token)
|
Add a script to convert hex formatted key to token using random partitioner#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specif
# a script to convert a given key in hex format to token using RandomPartitioner
import hashlib
import binascii
import sys
if len(sys.argv) < 2:
print "usage: python token.py <key in hex format>"
sys.exit(1)
key = binascii.unhexlify(sys.argv[1])
# Calculate MD5 digest and convert it to hex format
digest = hashlib.md5(key).hexdigest()
# Convert the hash digest to 2's complement form
token = long(digest, 16)
bits = 128
if ((token & (1 << (bits - 1))) != 0):
token = token - (1 << bits)
# Convert the resulting number to unsigned form
print abs(token)
|
<commit_before><commit_msg>Add a script to convert hex formatted key to token using random partitioner<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specif
# a script to convert a given key in hex format to token using RandomPartitioner
import hashlib
import binascii
import sys
if len(sys.argv) < 2:
print "usage: python token.py <key in hex format>"
sys.exit(1)
key = binascii.unhexlify(sys.argv[1])
# Calculate MD5 digest and convert it to hex format
digest = hashlib.md5(key).hexdigest()
# Convert the hash digest to 2's complement form
token = long(digest, 16)
bits = 128
if ((token & (1 << (bits - 1))) != 0):
token = token - (1 << bits)
# Convert the resulting number to unsigned form
print abs(token)
|
|
81f983c833d9858ad23f589367bf601babddf858
|
elements/activation_functions.py
|
elements/activation_functions.py
|
import theano
import theano.tensor as T
"""
A set of activation functions for Neural Network layers.
They're in the form of class so we can take advantage of constructor
to set initial value for some parameters.
"""
def tanh(x):
"""
tanh function (-1 to 1)
@input: x, theano shared variable.
@output: element-wise tanh of x
"""
return T.tanh(x)
def sigmoid(x):
"""
sigmoid function (0 to 1, (tanh(x)+1)/2).
@input: x, theano shared variable.
@output: element-wise sigmoid of x
"""
return (T.tanh(x)+1)/2
def linier(x):
"""
linier function.
@input: x, theano shared variable.
@output: x
"""
return x
def relu_generator(alpha=0):
"""
this function returns a relu function with proper alpha value.
@input: alpha, slope of negative side of ReLU.
@output: ReLU function
"""
def relu(x):
"""
rectified linier function (-alpha*x if x<0, x if x>0).
@input: x, theano shared variable.
@output: x<0?-alpha*x:x
"""
return T.nnet.relu(x, alpha)
return relu
# TODO:
# add RBF activation function
#
# def RBF(x):
# """
# radial basis function.
# @input: x, theano shared variable.
# @output: Not Implimented
# """
|
Add some useful activation functions.
|
Add some useful activation functions.
|
Python
|
mit
|
mmohaveri/DeepNetTookKit
|
Add some useful activation functions.
|
import theano
import theano.tensor as T
"""
A set of activation functions for Neural Network layers.
They're in the form of class so we can take advantage of constructor
to set initial value for some parameters.
"""
def tanh(x):
"""
tanh function (-1 to 1)
@input: x, theano shared variable.
@output: element-wise tanh of x
"""
return T.tanh(x)
def sigmoid(x):
"""
sigmoid function (0 to 1, (tanh(x)+1)/2).
@input: x, theano shared variable.
@output: element-wise sigmoid of x
"""
return (T.tanh(x)+1)/2
def linier(x):
"""
linier function.
@input: x, theano shared variable.
@output: x
"""
return x
def relu_generator(alpha=0):
"""
this function returns a relu function with proper alpha value.
@input: alpha, slope of negative side of ReLU.
@output: ReLU function
"""
def relu(x):
"""
rectified linier function (-alpha*x if x<0, x if x>0).
@input: x, theano shared variable.
@output: x<0?-alpha*x:x
"""
return T.nnet.relu(x, alpha)
return relu
# TODO:
# add RBF activation function
#
# def RBF(x):
# """
# radial basis function.
# @input: x, theano shared variable.
# @output: Not Implimented
# """
|
<commit_before><commit_msg>Add some useful activation functions.<commit_after>
|
import theano
import theano.tensor as T
"""
A set of activation functions for Neural Network layers.
They're in the form of class so we can take advantage of constructor
to set initial value for some parameters.
"""
def tanh(x):
"""
tanh function (-1 to 1)
@input: x, theano shared variable.
@output: element-wise tanh of x
"""
return T.tanh(x)
def sigmoid(x):
"""
sigmoid function (0 to 1, (tanh(x)+1)/2).
@input: x, theano shared variable.
@output: element-wise sigmoid of x
"""
return (T.tanh(x)+1)/2
def linier(x):
"""
linier function.
@input: x, theano shared variable.
@output: x
"""
return x
def relu_generator(alpha=0):
"""
this function returns a relu function with proper alpha value.
@input: alpha, slope of negative side of ReLU.
@output: ReLU function
"""
def relu(x):
"""
rectified linier function (-alpha*x if x<0, x if x>0).
@input: x, theano shared variable.
@output: x<0?-alpha*x:x
"""
return T.nnet.relu(x, alpha)
return relu
# TODO:
# add RBF activation function
#
# def RBF(x):
# """
# radial basis function.
# @input: x, theano shared variable.
# @output: Not Implimented
# """
|
Add some useful activation functions.import theano
import theano.tensor as T
"""
A set of activation functions for Neural Network layers.
They're in the form of class so we can take advantage of constructor
to set initial value for some parameters.
"""
def tanh(x):
"""
tanh function (-1 to 1)
@input: x, theano shared variable.
@output: element-wise tanh of x
"""
return T.tanh(x)
def sigmoid(x):
"""
sigmoid function (0 to 1, (tanh(x)+1)/2).
@input: x, theano shared variable.
@output: element-wise sigmoid of x
"""
return (T.tanh(x)+1)/2
def linier(x):
"""
linier function.
@input: x, theano shared variable.
@output: x
"""
return x
def relu_generator(alpha=0):
"""
this function returns a relu function with proper alpha value.
@input: alpha, slope of negative side of ReLU.
@output: ReLU function
"""
def relu(x):
"""
rectified linier function (-alpha*x if x<0, x if x>0).
@input: x, theano shared variable.
@output: x<0?-alpha*x:x
"""
return T.nnet.relu(x, alpha)
return relu
# TODO:
# add RBF activation function
#
# def RBF(x):
# """
# radial basis function.
# @input: x, theano shared variable.
# @output: Not Implimented
# """
|
<commit_before><commit_msg>Add some useful activation functions.<commit_after>import theano
import theano.tensor as T
"""
A set of activation functions for Neural Network layers.
They're in the form of class so we can take advantage of constructor
to set initial value for some parameters.
"""
def tanh(x):
"""
tanh function (-1 to 1)
@input: x, theano shared variable.
@output: element-wise tanh of x
"""
return T.tanh(x)
def sigmoid(x):
"""
sigmoid function (0 to 1, (tanh(x)+1)/2).
@input: x, theano shared variable.
@output: element-wise sigmoid of x
"""
return (T.tanh(x)+1)/2
def linier(x):
"""
linier function.
@input: x, theano shared variable.
@output: x
"""
return x
def relu_generator(alpha=0):
"""
this function returns a relu function with proper alpha value.
@input: alpha, slope of negative side of ReLU.
@output: ReLU function
"""
def relu(x):
"""
rectified linier function (-alpha*x if x<0, x if x>0).
@input: x, theano shared variable.
@output: x<0?-alpha*x:x
"""
return T.nnet.relu(x, alpha)
return relu
# TODO:
# add RBF activation function
#
# def RBF(x):
# """
# radial basis function.
# @input: x, theano shared variable.
# @output: Not Implimented
# """
|
|
dbc20f37c7fb1dd00c90ac54d2021fb1ba3b5eda
|
exam.py
|
exam.py
|
import time
import sys
from groupy.client import Client
def read_token_from_file(filename):
with open(filename) as f:
return f.read().strip()
def test_groups(groups):
for group in groups:
print(group)
print('Members:')
for member in group.members[:5]:
print(member)
print('Recent messages:')
for message in group.messages.list()[:5]:
print(message)
print('Leaderboard (day):')
for message in group.leaderboard.list_day()[:5]:
print(message.favorited_by)
print('Gallery:')
for message in group.gallery.list()[:5]:
print(message.attachments)
print()
def test_messages(messages):
for message in messages:
print(message)
print(message.attachments)
print('Liking...', message.like())
time.sleep(1) # you get rate limited by liking/unliking too fast
print('Unliking...', message.unlike())
def test_chats(chats):
for chat in chats:
print(chat)
print('Recent messages:')
for message in chat.messages.list():
print(message)
def main(*args):
token_file = args[0]
token = read_token_from_file(token_file)
client = Client.from_token(token)
groups = list(client.groups.list().autopage())
test_group_ids = ('12268264', '27205597', '27205784', '35799100')
target_groups = []
for group in groups:
if group.id in test_group_ids:
print('Found {0} (id={0.group_id})'.format(group))
target_groups.append(group)
if len(target_groups) < len(test_group_ids):
raise Exception('could not find group test groups')
chats = list(client.chats.list())
test_chat_ids = ('14529712+14612048',)
target_chats = []
for chat in chats:
if chat.last_message['conversation_id'] in test_chat_ids:
print('Found {}'.format(chat))
            target_chats.append(chat)
if len(target_chats) < len(test_chat_ids):
raise Exception('could not find group test chats')
target_messages = []
for group in target_groups:
target_messages.append(group.messages.list()[0])
for chat in target_chats:
target_messages.append(chat.messages.list()[0])
print_header('test groups')
test_groups(target_groups)
print_header('test chats')
test_chats(target_chats)
print_header('test messages')
test_messages(target_messages)
def print_header(header):
print('\n')
print('=' * 50)
print('| {}'.format(header))
print('=' * 50)
print()
if __name__ == '__main__':
main(*sys.argv[1:])
|
Add some end-to-end functional tests
|
Add some end-to-end functional tests
Since running these tests is not part of the automated tests,
and since the test runner picks up any file that has a name
containing "test" I have to be clever and call it "exam.py."
|
Python
|
apache-2.0
|
rhgrant10/Groupy
|
Add some end-to-end functional tests
Since running these tests is not part of the automated tests,
and since the test runner picks up any file that has a name
containing "test" I have to be clever and call it "exam.py."
|
import time
import sys
from groupy.client import Client
def read_token_from_file(filename):
with open(filename) as f:
return f.read().strip()
def test_groups(groups):
for group in groups:
print(group)
print('Members:')
for member in group.members[:5]:
print(member)
print('Recent messages:')
for message in group.messages.list()[:5]:
print(message)
print('Leaderboard (day):')
for message in group.leaderboard.list_day()[:5]:
print(message.favorited_by)
print('Gallery:')
for message in group.gallery.list()[:5]:
print(message.attachments)
print()
def test_messages(messages):
for message in messages:
print(message)
print(message.attachments)
print('Liking...', message.like())
time.sleep(1) # you get rate limited by liking/unliking too fast
print('Unliking...', message.unlike())
def test_chats(chats):
for chat in chats:
print(chat)
print('Recent messages:')
for message in chat.messages.list():
print(message)
def main(*args):
token_file = args[0]
token = read_token_from_file(token_file)
client = Client.from_token(token)
groups = list(client.groups.list().autopage())
test_group_ids = ('12268264', '27205597', '27205784', '35799100')
target_groups = []
for group in groups:
if group.id in test_group_ids:
print('Found {0} (id={0.group_id})'.format(group))
target_groups.append(group)
if len(target_groups) < len(test_group_ids):
raise Exception('could not find group test groups')
chats = list(client.chats.list())
test_chat_ids = ('14529712+14612048',)
target_chats = []
for chat in chats:
if chat.last_message['conversation_id'] in test_chat_ids:
print('Found {}'.format(chat))
            target_chats.append(chat)
if len(target_chats) < len(test_chat_ids):
raise Exception('could not find group test chats')
target_messages = []
for group in target_groups:
target_messages.append(group.messages.list()[0])
for chat in target_chats:
target_messages.append(chat.messages.list()[0])
print_header('test groups')
test_groups(target_groups)
print_header('test chats')
test_chats(target_chats)
print_header('test messages')
test_messages(target_messages)
def print_header(header):
print('\n')
print('=' * 50)
print('| {}'.format(header))
print('=' * 50)
print()
if __name__ == '__main__':
main(*sys.argv[1:])
|
<commit_before><commit_msg>Add some end-to-end functional tests
Since running these tests is not part of the automated tests,
and since the test runner picks up any file that has a name
containing "test" I have to be clever and call it "exam.py."<commit_after>
|
import time
import sys
from groupy.client import Client
def read_token_from_file(filename):
with open(filename) as f:
return f.read().strip()
def test_groups(groups):
for group in groups:
print(group)
print('Members:')
for member in group.members[:5]:
print(member)
print('Recent messages:')
for message in group.messages.list()[:5]:
print(message)
print('Leaderboard (day):')
for message in group.leaderboard.list_day()[:5]:
print(message.favorited_by)
print('Gallery:')
for message in group.gallery.list()[:5]:
print(message.attachments)
print()
def test_messages(messages):
for message in messages:
print(message)
print(message.attachments)
print('Liking...', message.like())
time.sleep(1) # you get rate limited by liking/unliking too fast
print('Unliking...', message.unlike())
def test_chats(chats):
for chat in chats:
print(chat)
print('Recent messages:')
for message in chat.messages.list():
print(message)
def main(*args):
token_file = args[0]
token = read_token_from_file(token_file)
client = Client.from_token(token)
groups = list(client.groups.list().autopage())
test_group_ids = ('12268264', '27205597', '27205784', '35799100')
target_groups = []
for group in groups:
if group.id in test_group_ids:
print('Found {0} (id={0.group_id})'.format(group))
target_groups.append(group)
if len(target_groups) < len(test_group_ids):
raise Exception('could not find group test groups')
chats = list(client.chats.list())
test_chat_ids = ('14529712+14612048',)
target_chats = []
for chat in chats:
if chat.last_message['conversation_id'] in test_chat_ids:
print('Found {}'.format(chat))
            target_chats.append(chat)
if len(target_chats) < len(test_chat_ids):
raise Exception('could not find group test chats')
target_messages = []
for group in target_groups:
target_messages.append(group.messages.list()[0])
for chat in target_chats:
target_messages.append(chat.messages.list()[0])
print_header('test groups')
test_groups(target_groups)
print_header('test chats')
test_chats(target_chats)
print_header('test messages')
test_messages(target_messages)
def print_header(header):
print('\n')
print('=' * 50)
print('| {}'.format(header))
print('=' * 50)
print()
if __name__ == '__main__':
main(*sys.argv[1:])
|
Add some end-to-end functional tests
Since running these tests is not part of the automated tests,
and since the test runner picks up any file that has a name
containing "test" I have to be clever and call it "exam.py."import time
import sys
from groupy.client import Client
def read_token_from_file(filename):
with open(filename) as f:
return f.read().strip()
def test_groups(groups):
for group in groups:
print(group)
print('Members:')
for member in group.members[:5]:
print(member)
print('Recent messages:')
for message in group.messages.list()[:5]:
print(message)
print('Leaderboard (day):')
for message in group.leaderboard.list_day()[:5]:
print(message.favorited_by)
print('Gallery:')
for message in group.gallery.list()[:5]:
print(message.attachments)
print()
def test_messages(messages):
for message in messages:
print(message)
print(message.attachments)
print('Liking...', message.like())
time.sleep(1) # you get rate limited by liking/unliking too fast
print('Unliking...', message.unlike())
def test_chats(chats):
for chat in chats:
print(chat)
print('Recent messages:')
for message in chat.messages.list():
print(message)
def main(*args):
token_file = args[0]
token = read_token_from_file(token_file)
client = Client.from_token(token)
groups = list(client.groups.list().autopage())
test_group_ids = ('12268264', '27205597', '27205784', '35799100')
target_groups = []
for group in groups:
if group.id in test_group_ids:
print('Found {0} (id={0.group_id})'.format(group))
target_groups.append(group)
if len(target_groups) < len(test_group_ids):
raise Exception('could not find group test groups')
chats = list(client.chats.list())
test_chat_ids = ('14529712+14612048',)
target_chats = []
for chat in chats:
if chat.last_message['conversation_id'] in test_chat_ids:
print('Found {}'.format(chat))
            target_chats.append(chat)
if len(target_chats) < len(test_chat_ids):
raise Exception('could not find group test chats')
target_messages = []
for group in target_groups:
target_messages.append(group.messages.list()[0])
for chat in target_chats:
target_messages.append(chat.messages.list()[0])
print_header('test groups')
test_groups(target_groups)
print_header('test chats')
test_chats(target_chats)
print_header('test messages')
test_messages(target_messages)
def print_header(header):
print('\n')
print('=' * 50)
print('| {}'.format(header))
print('=' * 50)
print()
if __name__ == '__main__':
main(*sys.argv[1:])
|
<commit_before><commit_msg>Add some end-to-end functional tests
Since running these tests is not part of the automated tests,
and since the test runner picks up any file that has a name
containing "test" I have to be clever and call it "exam.py."<commit_after>import time
import sys
from groupy.client import Client
def read_token_from_file(filename):
with open(filename) as f:
return f.read().strip()
def test_groups(groups):
for group in groups:
print(group)
print('Members:')
for member in group.members[:5]:
print(member)
print('Recent messages:')
for message in group.messages.list()[:5]:
print(message)
print('Leaderboard (day):')
for message in group.leaderboard.list_day()[:5]:
print(message.favorited_by)
print('Gallery:')
for message in group.gallery.list()[:5]:
print(message.attachments)
print()
def test_messages(messages):
for message in messages:
print(message)
print(message.attachments)
print('Liking...', message.like())
time.sleep(1) # you get rate limited by liking/unliking too fast
print('Unliking...', message.unlike())
def test_chats(chats):
for chat in chats:
print(chat)
print('Recent messages:')
for message in chat.messages.list():
print(message)
def main(*args):
token_file = args[0]
token = read_token_from_file(token_file)
client = Client.from_token(token)
groups = list(client.groups.list().autopage())
test_group_ids = ('12268264', '27205597', '27205784', '35799100')
target_groups = []
for group in groups:
if group.id in test_group_ids:
print('Found {0} (id={0.group_id})'.format(group))
target_groups.append(group)
if len(target_groups) < len(test_group_ids):
raise Exception('could not find group test groups')
chats = list(client.chats.list())
test_chat_ids = ('14529712+14612048',)
target_chats = []
for chat in chats:
if chat.last_message['conversation_id'] in test_chat_ids:
print('Found {}'.format(chat))
            target_chats.append(chat)
if len(target_chats) < len(test_chat_ids):
raise Exception('could not find group test chats')
target_messages = []
for group in target_groups:
target_messages.append(group.messages.list()[0])
for chat in target_chats:
target_messages.append(chat.messages.list()[0])
print_header('test groups')
test_groups(target_groups)
print_header('test chats')
test_chats(target_chats)
print_header('test messages')
test_messages(target_messages)
def print_header(header):
print('\n')
print('=' * 50)
print('| {}'.format(header))
print('=' * 50)
print()
if __name__ == '__main__':
main(*sys.argv[1:])
|
|
64139e0a41c1b1da81e9b5e244b2d7095c4a7a2b
|
core/management/commands/delete_old_sessions.py
|
core/management/commands/delete_old_sessions.py
|
from datetime import datetime
from django.core.management.base import BaseCommand
from django.contrib.sessions.models import Session
"""
>>> def clean(count):
... for idx, s in enumerate(Session.objects.filter(expire_date__lt=now)[:count+1]):
... s.delete()
... if str(idx).endswith('000'): print idx
... print "{0} records left".format(Session.objects.filter(expire_date__lt=now).count())
...
"""
class Command(NoArgsCommand):
args = '<count count ...>'
help = "Delete old sessions"
def handle(self, *args, **options):
old_sessions = Session.objects.filter(expire_date__lt=datetime.now())
self.stdout.write("Deleting {0} expired sessions".format(
old_sessions.count()
)
for index, session in enumerate(old_sessions):
session.delete()
if str(idx).endswith('000'):
self.stdout.write("{0} records deleted".format(index)
self.stdout.write("{0} expired sessions remaining".format(
Session.objects.filter(expire_date__lt=datetime.now())
)
|
Add delete old sessions command
|
Add delete old sessions command
|
Python
|
mit
|
nanuxbe/djangopackages,QLGu/djangopackages,nanuxbe/djangopackages,QLGu/djangopackages,pydanny/djangopackages,QLGu/djangopackages,pydanny/djangopackages,pydanny/djangopackages,nanuxbe/djangopackages
|
Add delete old sessions command
|
from datetime import datetime
from django.core.management.base import BaseCommand
from django.contrib.sessions.models import Session
"""
>>> def clean(count):
... for idx, s in enumerate(Session.objects.filter(expire_date__lt=now)[:count+1]):
... s.delete()
... if str(idx).endswith('000'): print idx
... print "{0} records left".format(Session.objects.filter(expire_date__lt=now).count())
...
"""
class Command(BaseCommand):
args = '<count count ...>'
help = "Delete old sessions"
def handle(self, *args, **options):
old_sessions = Session.objects.filter(expire_date__lt=datetime.now())
self.stdout.write("Deleting {0} expired sessions".format(
old_sessions.count()
)
for index, session in enumerate(old_sessions):
session.delete()
if str(idx).endswith('000'):
self.stdout.write("{0} records deleted".format(index)
self.stdout.write("{0} expired sessions remaining".format(
Session.objects.filter(expire_date__lt=datetime.now())
)
|
<commit_before><commit_msg>Add delete old sessions command<commit_after>
|
from datetime import datetime
from django.core.management.base import BaseCommand
from django.contrib.sessions.models import Session
"""
>>> def clean(count):
... for idx, s in enumerate(Session.objects.filter(expire_date__lt=now)[:count+1]):
... s.delete()
... if str(idx).endswith('000'): print idx
... print "{0} records left".format(Session.objects.filter(expire_date__lt=now).count())
...
"""
class Command(BaseCommand):
args = '<count count ...>'
help = "Delete old sessions"
def handle(self, *args, **options):
old_sessions = Session.objects.filter(expire_date__lt=datetime.now())
self.stdout.write("Deleting {0} expired sessions".format(
old_sessions.count()
)
for index, session in enumerate(old_sessions):
session.delete()
if str(idx).endswith('000'):
self.stdout.write("{0} records deleted".format(index)
self.stdout.write("{0} expired sessions remaining".format(
Session.objects.filter(expire_date__lt=datetime.now())
)
|
Add delete old sessions commandfrom datetime import datetime
from django.core.management.base import BaseCommand
from django.contrib.sessions.models import Session
"""
>>> def clean(count):
... for idx, s in enumerate(Session.objects.filter(expire_date__lt=now)[:count+1]):
... s.delete()
... if str(idx).endswith('000'): print idx
... print "{0} records left".format(Session.objects.filter(expire_date__lt=now).count())
...
"""
class Command(BaseCommand):
args = '<count count ...>'
help = "Delete old sessions"
def handle(self, *args, **options):
old_sessions = Session.objects.filter(expire_date__lt=datetime.now())
self.stdout.write("Deleting {0} expired sessions".format(
old_sessions.count()
)
for index, session in enumerate(old_sessions):
session.delete()
if str(idx).endswith('000'):
self.stdout.write("{0} records deleted".format(index)
self.stdout.write("{0} expired sessions remaining".format(
Session.objects.filter(expire_date__lt=datetime.now())
)
|
<commit_before><commit_msg>Add delete old sessions command<commit_after>from datetime import datetime
from django.core.management.base import BaseCommand
from django.contrib.sessions.models import Session
"""
>>> def clean(count):
... for idx, s in enumerate(Session.objects.filter(expire_date__lt=now)[:count+1]):
... s.delete()
... if str(idx).endswith('000'): print idx
... print "{0} records left".format(Session.objects.filter(expire_date__lt=now).count())
...
"""
class Command(BaseCommand):
args = '<count count ...>'
help = "Delete old sessions"
def handle(self, *args, **options):
old_sessions = Session.objects.filter(expire_date__lt=datetime.now())
self.stdout.write("Deleting {0} expired sessions".format(
old_sessions.count()
)
for index, session in enumerate(old_sessions):
session.delete()
if str(idx).endswith('000'):
self.stdout.write("{0} records deleted".format(index)
self.stdout.write("{0} expired sessions remaining".format(
Session.objects.filter(expire_date__lt=datetime.now())
)
|
|
05bf0cd188d4666c9c0aeb56a95d7867f25952c2
|
demo_dqn_continuous.py
|
demo_dqn_continuous.py
|
import argparse
import chainer
from chainer import serializers
import gym
import numpy as np
import random_seed
import env_modifiers
import q_function
def eval_single_run(env, model, phi):
test_r = 0
obs = env.reset()
done = False
while not done:
s = chainer.Variable(np.expand_dims(phi(obs), 0))
qout = model(s)
a = qout.greedy_actions.data[0]
obs, r, done, info = env.step(a)
test_r += r
return test_r
def main():
import logging
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser()
parser.add_argument('model', type=str)
parser.add_argument('--env', type=str, default='Pendulum-v0')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--n-runs', type=int, default=10)
parser.add_argument('--window-visible', action='store_true')
parser.add_argument('--render', action='store_true')
parser.set_defaults(render=False)
args = parser.parse_args()
random_seed.set_random_seed(args.seed)
env = gym.make(args.env)
timestep_limit = env.spec.timestep_limit
env_modifiers.make_timestep_limited(env, timestep_limit)
if args.render:
env_modifiers.make_rendered(env)
obs_size = np.asarray(env.observation_space.shape).prod()
action_size = np.asarray(env.action_space.shape).prod()
q_func = q_function.FCSIContinuousQFunction(
obs_size, action_size, 100, 2, env.action_space)
serializers.load_hdf5(args.model, q_func)
scores = []
def phi(obs):
return obs.astype(np.float32)
for i in range(args.n_runs):
score = eval_single_run(env, q_func, phi)
print('Run {}: {}'.format(i, score))
scores.append(score)
print('Average: {}'.format(sum(scores) / args.n_runs))
if __name__ == '__main__':
main()
|
Add a script for dqn continuous task demo
|
Add a script for dqn continuous task demo
|
Python
|
mit
|
toslunar/chainerrl,toslunar/chainerrl
|
Add a script for dqn continuous task demo
|
import argparse
import chainer
from chainer import serializers
import gym
import numpy as np
import random_seed
import env_modifiers
import q_function
def eval_single_run(env, model, phi):
test_r = 0
obs = env.reset()
done = False
while not done:
s = chainer.Variable(np.expand_dims(phi(obs), 0))
qout = model(s)
a = qout.greedy_actions.data[0]
obs, r, done, info = env.step(a)
test_r += r
return test_r
def main():
import logging
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser()
parser.add_argument('model', type=str)
parser.add_argument('--env', type=str, default='Pendulum-v0')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--n-runs', type=int, default=10)
parser.add_argument('--window-visible', action='store_true')
parser.add_argument('--render', action='store_true')
parser.set_defaults(render=False)
args = parser.parse_args()
random_seed.set_random_seed(args.seed)
env = gym.make(args.env)
timestep_limit = env.spec.timestep_limit
env_modifiers.make_timestep_limited(env, timestep_limit)
if args.render:
env_modifiers.make_rendered(env)
obs_size = np.asarray(env.observation_space.shape).prod()
action_size = np.asarray(env.action_space.shape).prod()
q_func = q_function.FCSIContinuousQFunction(
obs_size, action_size, 100, 2, env.action_space)
serializers.load_hdf5(args.model, q_func)
scores = []
def phi(obs):
return obs.astype(np.float32)
for i in range(args.n_runs):
score = eval_single_run(env, q_func, phi)
print('Run {}: {}'.format(i, score))
scores.append(score)
print('Average: {}'.format(sum(scores) / args.n_runs))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a script for dqn continuous task demo<commit_after>
|
import argparse
import chainer
from chainer import serializers
import gym
import numpy as np
import random_seed
import env_modifiers
import q_function
def eval_single_run(env, model, phi):
test_r = 0
obs = env.reset()
done = False
while not done:
s = chainer.Variable(np.expand_dims(phi(obs), 0))
qout = model(s)
a = qout.greedy_actions.data[0]
obs, r, done, info = env.step(a)
test_r += r
return test_r
def main():
import logging
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser()
parser.add_argument('model', type=str)
parser.add_argument('--env', type=str, default='Pendulum-v0')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--n-runs', type=int, default=10)
parser.add_argument('--window-visible', action='store_true')
parser.add_argument('--render', action='store_true')
parser.set_defaults(render=False)
args = parser.parse_args()
random_seed.set_random_seed(args.seed)
env = gym.make(args.env)
timestep_limit = env.spec.timestep_limit
env_modifiers.make_timestep_limited(env, timestep_limit)
if args.render:
env_modifiers.make_rendered(env)
obs_size = np.asarray(env.observation_space.shape).prod()
action_size = np.asarray(env.action_space.shape).prod()
q_func = q_function.FCSIContinuousQFunction(
obs_size, action_size, 100, 2, env.action_space)
serializers.load_hdf5(args.model, q_func)
scores = []
def phi(obs):
return obs.astype(np.float32)
for i in range(args.n_runs):
score = eval_single_run(env, q_func, phi)
print('Run {}: {}'.format(i, score))
scores.append(score)
print('Average: {}'.format(sum(scores) / args.n_runs))
if __name__ == '__main__':
main()
|
Add a script for dqn continuous task demoimport argparse
import chainer
from chainer import serializers
import gym
import numpy as np
import random_seed
import env_modifiers
import q_function
def eval_single_run(env, model, phi):
test_r = 0
obs = env.reset()
done = False
while not done:
s = chainer.Variable(np.expand_dims(phi(obs), 0))
qout = model(s)
a = qout.greedy_actions.data[0]
obs, r, done, info = env.step(a)
test_r += r
return test_r
def main():
import logging
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser()
parser.add_argument('model', type=str)
parser.add_argument('--env', type=str, default='Pendulum-v0')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--n-runs', type=int, default=10)
parser.add_argument('--window-visible', action='store_true')
parser.add_argument('--render', action='store_true')
parser.set_defaults(render=False)
args = parser.parse_args()
random_seed.set_random_seed(args.seed)
env = gym.make(args.env)
timestep_limit = env.spec.timestep_limit
env_modifiers.make_timestep_limited(env, timestep_limit)
if args.render:
env_modifiers.make_rendered(env)
obs_size = np.asarray(env.observation_space.shape).prod()
action_size = np.asarray(env.action_space.shape).prod()
q_func = q_function.FCSIContinuousQFunction(
obs_size, action_size, 100, 2, env.action_space)
serializers.load_hdf5(args.model, q_func)
scores = []
def phi(obs):
return obs.astype(np.float32)
for i in range(args.n_runs):
score = eval_single_run(env, q_func, phi)
print('Run {}: {}'.format(i, score))
scores.append(score)
print('Average: {}'.format(sum(scores) / args.n_runs))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a script for dqn continuous task demo<commit_after>import argparse
import chainer
from chainer import serializers
import gym
import numpy as np
import random_seed
import env_modifiers
import q_function
def eval_single_run(env, model, phi):
test_r = 0
obs = env.reset()
done = False
while not done:
s = chainer.Variable(np.expand_dims(phi(obs), 0))
qout = model(s)
a = qout.greedy_actions.data[0]
obs, r, done, info = env.step(a)
test_r += r
return test_r
def main():
import logging
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser()
parser.add_argument('model', type=str)
parser.add_argument('--env', type=str, default='Pendulum-v0')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--n-runs', type=int, default=10)
parser.add_argument('--window-visible', action='store_true')
parser.add_argument('--render', action='store_true')
parser.set_defaults(render=False)
args = parser.parse_args()
random_seed.set_random_seed(args.seed)
env = gym.make(args.env)
timestep_limit = env.spec.timestep_limit
env_modifiers.make_timestep_limited(env, timestep_limit)
if args.render:
env_modifiers.make_rendered(env)
obs_size = np.asarray(env.observation_space.shape).prod()
action_size = np.asarray(env.action_space.shape).prod()
q_func = q_function.FCSIContinuousQFunction(
obs_size, action_size, 100, 2, env.action_space)
serializers.load_hdf5(args.model, q_func)
scores = []
def phi(obs):
return obs.astype(np.float32)
for i in range(args.n_runs):
score = eval_single_run(env, q_func, phi)
print('Run {}: {}'.format(i, score))
scores.append(score)
print('Average: {}'.format(sum(scores) / args.n_runs))
if __name__ == '__main__':
main()
|
|
c39b95eebb402d1d0137448b3f0efd9b6d7ec169
|
tests/managers/test_repository.py
|
tests/managers/test_repository.py
|
from unittest import TestCase
from mock import MagicMock, patch
from nose.tools import eq_
from pyolite.managers.repository import RepositoryManager
class TestRepositoryManager(TestCase):
def test_get_repository(self):
mocked_repository = MagicMock()
mocked_repository.get_by_name.return_value = 'my_repo'
mocked_path = MagicMock()
mocked_git = MagicMock()
with patch.multiple('pyolite.managers.manager',
Path=MagicMock(return_value=mocked_path),
Git=MagicMock(return_value=mocked_git)):
with patch.multiple('pyolite.managers.repository',
Repository=mocked_repository):
repos = RepositoryManager('/path/to/admin/repo/')
eq_(repos.get('my_repo'), 'my_repo')
mocked_repository.get_by_name.assert_called_once_with('my_repo',
mocked_path,
mocked_git)
|
Test that the repository manager retrieves a repository when we look one up
|
Test that the repository manager retrieves a repository when we look one up
|
Python
|
bsd-2-clause
|
shawkinsl/pyolite,PressLabs/pyolite
|
Test that the repository manager retrieves a repository when we look one up
|
from unittest import TestCase
from mock import MagicMock, patch
from nose.tools import eq_
from pyolite.managers.repository import RepositoryManager
class TestRepositoryManager(TestCase):
def test_get_repository(self):
mocked_repository = MagicMock()
mocked_repository.get_by_name.return_value = 'my_repo'
mocked_path = MagicMock()
mocked_git = MagicMock()
with patch.multiple('pyolite.managers.manager',
Path=MagicMock(return_value=mocked_path),
Git=MagicMock(return_value=mocked_git)):
with patch.multiple('pyolite.managers.repository',
Repository=mocked_repository):
repos = RepositoryManager('/path/to/admin/repo/')
eq_(repos.get('my_repo'), 'my_repo')
mocked_repository.get_by_name.assert_called_once_with('my_repo',
mocked_path,
mocked_git)
|
<commit_before><commit_msg>Test that the repository manager retrieves a repository when we look one up<commit_after>
|
from unittest import TestCase
from mock import MagicMock, patch
from nose.tools import eq_
from pyolite.managers.repository import RepositoryManager
class TestRepositoryManager(TestCase):
def test_get_repository(self):
mocked_repository = MagicMock()
mocked_repository.get_by_name.return_value = 'my_repo'
mocked_path = MagicMock()
mocked_git = MagicMock()
with patch.multiple('pyolite.managers.manager',
Path=MagicMock(return_value=mocked_path),
Git=MagicMock(return_value=mocked_git)):
with patch.multiple('pyolite.managers.repository',
Repository=mocked_repository):
repos = RepositoryManager('/path/to/admin/repo/')
eq_(repos.get('my_repo'), 'my_repo')
mocked_repository.get_by_name.assert_called_once_with('my_repo',
mocked_path,
mocked_git)
|
Test that the repository manager retrieves a repository when we look one upfrom unittest import TestCase
from mock import MagicMock, patch
from nose.tools import eq_
from pyolite.managers.repository import RepositoryManager
class TestRepositoryManager(TestCase):
def test_get_repository(self):
mocked_repository = MagicMock()
mocked_repository.get_by_name.return_value = 'my_repo'
mocked_path = MagicMock()
mocked_git = MagicMock()
with patch.multiple('pyolite.managers.manager',
Path=MagicMock(return_value=mocked_path),
Git=MagicMock(return_value=mocked_git)):
with patch.multiple('pyolite.managers.repository',
Repository=mocked_repository):
repos = RepositoryManager('/path/to/admin/repo/')
eq_(repos.get('my_repo'), 'my_repo')
mocked_repository.get_by_name.assert_called_once_with('my_repo',
mocked_path,
mocked_git)
|
<commit_before><commit_msg>Test that the repository manager retrieves a repository when we look one up<commit_after>from unittest import TestCase
from mock import MagicMock, patch
from nose.tools import eq_
from pyolite.managers.repository import RepositoryManager
class TestRepositoryManager(TestCase):
def test_get_repository(self):
mocked_repository = MagicMock()
mocked_repository.get_by_name.return_value = 'my_repo'
mocked_path = MagicMock()
mocked_git = MagicMock()
with patch.multiple('pyolite.managers.manager',
Path=MagicMock(return_value=mocked_path),
Git=MagicMock(return_value=mocked_git)):
with patch.multiple('pyolite.managers.repository',
Repository=mocked_repository):
repos = RepositoryManager('/path/to/admin/repo/')
eq_(repos.get('my_repo'), 'my_repo')
mocked_repository.get_by_name.assert_called_once_with('my_repo',
mocked_path,
mocked_git)
|
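A rough, hypothetical sketch of the manager/repository pattern the test above exercises. The names mirror the test's mocks, but this is not the actual pyolite source:

class Repository:
    @classmethod
    def get_by_name(cls, name, path, git):
        # Look the repository up in the gitolite-admin tree (stubbed out here).
        return name

class RepositoryManager:
    def __init__(self, admin_repository):
        self.path = admin_repository  # stands in for the mocked Path
        self.git = None               # stands in for the mocked Git

    def get(self, name):
        return Repository.get_by_name(name, self.path, self.git)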
|
9ad755263fe12fa16c0b27381893c380626c85d8
|
bindings/pyroot/test/conversions.py
|
bindings/pyroot/test/conversions.py
|
import unittest
import ROOT
cppcode = """
void stringViewConv(std::string_view) {};
"""
class ListInitialization(unittest.TestCase):
@classmethod
def setUpClass(cls):
ROOT.gInterpreter.Declare(cppcode)
def test_string_view_conv(self):
ROOT.stringViewConv("pyString")
if __name__ == '__main__':
unittest.main()
|
Add unittest for string_view conversion
|
[PyROOT] Add unittest for string_view conversion
|
Python
|
lgpl-2.1
|
olifre/root,olifre/root,karies/root,olifre/root,zzxuanyuan/root,karies/root,olifre/root,root-mirror/root,karies/root,zzxuanyuan/root,olifre/root,root-mirror/root,zzxuanyuan/root,karies/root,karies/root,root-mirror/root,root-mirror/root,zzxuanyuan/root,karies/root,olifre/root,root-mirror/root,zzxuanyuan/root,root-mirror/root,zzxuanyuan/root,olifre/root,olifre/root,zzxuanyuan/root,root-mirror/root,zzxuanyuan/root,olifre/root,karies/root,zzxuanyuan/root,root-mirror/root,root-mirror/root,karies/root,zzxuanyuan/root,karies/root,zzxuanyuan/root,olifre/root,zzxuanyuan/root,karies/root,root-mirror/root,karies/root,root-mirror/root,olifre/root
|
[PyROOT] Add unittest for string_view conversion
|
import unittest
import ROOT
cppcode = """
void stringViewConv(std::string_view) {};
"""
class ListInitialization(unittest.TestCase):
@classmethod
def setUpClass(cls):
ROOT.gInterpreter.Declare(cppcode)
def test_string_view_conv(self):
ROOT.stringViewConv("pyString")
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>[PyROOT] Add unittest for string_view conversion<commit_after>
|
import unittest
import ROOT
cppcode = """
void stringViewConv(std::string_view) {};
"""
class ListInitialization(unittest.TestCase):
@classmethod
def setUpClass(cls):
ROOT.gInterpreter.Declare(cppcode)
def test_string_view_conv(self):
ROOT.stringViewConv("pyString")
if __name__ == '__main__':
unittest.main()
|
[PyROOT] Add unittest for string_view conversionimport unittest
import ROOT
cppcode = """
void stringViewConv(std::string_view) {};
"""
class ListInitialization(unittest.TestCase):
@classmethod
def setUpClass(cls):
ROOT.gInterpreter.Declare(cppcode)
def test_string_view_conv(self):
ROOT.stringViewConv("pyString")
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>[PyROOT] Add unittest for string_view conversion<commit_after>import unittest
import ROOT
cppcode = """
void stringViewConv(std::string_view) {};
"""
class ListInitialization(unittest.TestCase):
@classmethod
def setUpClass(cls):
ROOT.gInterpreter.Declare(cppcode)
def test_string_view_conv(self):
ROOT.stringViewConv("pyString")
if __name__ == '__main__':
unittest.main()
|
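The test relies on cppyy implicitly converting a Python str to std::string_view. A minimal standalone check along the same lines, assuming a working ROOT installation; stringViewLen is an illustrative helper, not part of ROOT:

import ROOT

# Declare a helper that returns the argument's length, so the
# conversion can be verified rather than merely exercised.
ROOT.gInterpreter.Declare("""
size_t stringViewLen(std::string_view s) { return s.size(); }
""")

assert ROOT.stringViewLen("pyString") == len("pyString")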
|
9eb5f67a954888c4e14789b5b8acc785c789a77c
|
oidc_provider/management/commands/creatersakey.py
|
oidc_provider/management/commands/creatersakey.py
|
from Crypto.PublicKey import RSA
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
help = 'Randomly generate a new RSA key for the OpenID server'
def handle(self, *args, **options):
try:
key = RSA.generate(1024)
file_path = settings.BASE_DIR + '/OIDC_RSA_KEY.pem'
with open(file_path, 'w') as f:
f.write(key.exportKey('PEM'))
self.stdout.write('RSA key successfully created at: ' + file_path)
except Exception as e:
self.stdout.write('Something goes wrong: ' + e.message)
|
Add a command for creating rsa key.
|
Add a command for creating rsa key.
|
Python
|
mit
|
juanifioren/django-oidc-provider,wojtek-fliposports/django-oidc-provider,nmohoric/django-oidc-provider,nmohoric/django-oidc-provider,ByteInternet/django-oidc-provider,juanifioren/django-oidc-provider,wojtek-fliposports/django-oidc-provider,bunnyinc/django-oidc-provider,bunnyinc/django-oidc-provider,wayward710/django-oidc-provider,torreco/django-oidc-provider,wayward710/django-oidc-provider,torreco/django-oidc-provider,ByteInternet/django-oidc-provider
|
Add a command for creating rsa key.
|
from Crypto.PublicKey import RSA
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
help = 'Randomly generate a new RSA key for the OpenID server'
def handle(self, *args, **options):
try:
key = RSA.generate(1024)
file_path = settings.BASE_DIR + '/OIDC_RSA_KEY.pem'
with open(file_path, 'w') as f:
f.write(key.exportKey('PEM'))
self.stdout.write('RSA key successfully created at: ' + file_path)
except Exception as e:
self.stdout.write('Something goes wrong: ' + e.message)
|
<commit_before><commit_msg>Add a command for creating rsa key.<commit_after>
|
from Crypto.PublicKey import RSA
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
help = 'Randomly generate a new RSA key for the OpenID server'
def handle(self, *args, **options):
try:
key = RSA.generate(1024)
file_path = settings.BASE_DIR + '/OIDC_RSA_KEY.pem'
with open(file_path, 'w') as f:
f.write(key.exportKey('PEM'))
self.stdout.write('RSA key successfully created at: ' + file_path)
except Exception as e:
self.stdout.write('Something goes wrong: ' + e.message)
|
Add a command for creating rsa key.from Crypto.PublicKey import RSA
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
help = 'Randomly generate a new RSA key for the OpenID server'
def handle(self, *args, **options):
try:
key = RSA.generate(1024)
file_path = settings.BASE_DIR + '/OIDC_RSA_KEY.pem'
with open(file_path, 'w') as f:
f.write(key.exportKey('PEM'))
self.stdout.write('RSA key successfully created at: ' + file_path)
except Exception as e:
self.stdout.write('Something goes wrong: ' + e.message)
|
<commit_before><commit_msg>Add a command for creating rsa key.<commit_after>from Crypto.PublicKey import RSA
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
help = 'Randomly generate a new RSA key for the OpenID server'
def handle(self, *args, **options):
try:
key = RSA.generate(1024)
file_path = settings.BASE_DIR + '/OIDC_RSA_KEY.pem'
with open(file_path, 'w') as f:
f.write(key.exportKey('PEM'))
self.stdout.write('RSA key successfully created at: ' + file_path)
except Exception as e:
self.stdout.write('Something goes wrong: ' + e.message)
|
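Once the command has written OIDC_RSA_KEY.pem, the key can presumably be read back with the same PyCrypto API. A minimal sketch, with the file path assumed to match settings.BASE_DIR used above:

from Crypto.PublicKey import RSA

# Load the PEM written by the management command (path is illustrative).
with open('/path/to/project/OIDC_RSA_KEY.pem') as f:
    key = RSA.importKey(f.read())

print(key.has_private())  # True: the exported PEM includes the private key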
|
38bfc1a536f43ece367a49a62501b57c89f689a1
|
django-server/feel/core/db/reset.py
|
django-server/feel/core/db/reset.py
|
from django.db.models.base import ModelBase
from quiz.models import Quiz, ShortAnswer, Choice, QuizAttempt
from codequiz.models import CodeQuiz, CodeQuizAttempt
from concept.models import Concept, ConceptSection
from course.models import Course, CourseSlug, CourseConcept, ConceptDependency
def reset():
for key, item in globals().items():
if type(item) == ModelBase and item != ModelBase:
Model = item
Model.objects.all().delete()
if __name__ == '__main__':
reset()
|
Add script to delete tables.
|
Data: Add script to delete tables.
|
Python
|
mit
|
pixyj/feel,pixyj/feel,pixyj/feel,pixyj/feel,pixyj/feel
|
Data: Add script to delete tables.
|
from django.db.models.base import ModelBase
from quiz.models import Quiz, ShortAnswer, Choice, QuizAttempt
from codequiz.models import CodeQuiz, CodeQuizAttempt
from concept.models import Concept, ConceptSection
from course.models import Course, CourseSlug, CourseConcept, ConceptDependency
def reset():
for key, item in globals().items():
if type(item) == ModelBase and item != ModelBase:
Model = item
Model.objects.all().delete()
if __name__ == '__main__':
reset()
|
<commit_before><commit_msg>Data: Add script to delete tables.<commit_after>
|
from django.db.models.base import ModelBase
from quiz.models import Quiz, ShortAnswer, Choice, QuizAttempt
from codequiz.models import CodeQuiz, CodeQuizAttempt
from concept.models import Concept, ConceptSection
from course.models import Course, CourseSlug, CourseConcept, ConceptDependency
def reset():
for key, item in globals().items():
if type(item) == ModelBase and item != ModelBase:
Model = item
Model.objects.all().delete()
if __name__ == '__main__':
reset()
|
Data: Add script to delete tables.from django.db.models.base import ModelBase
from quiz.models import Quiz, ShortAnswer, Choice, QuizAttempt
from codequiz.models import CodeQuiz, CodeQuizAttempt
from concept.models import Concept, ConceptSection
from course.models import Course, CourseSlug, CourseConcept, ConceptDependency
def reset():
for key, item in globals().items():
if type(item) == ModelBase and item != ModelBase:
Model = item
Model.objects.all().delete()
if __name__ == '__main__':
reset()
|
<commit_before><commit_msg>Data: Add script to delete tables.<commit_after>from django.db.models.base import ModelBase
from quiz.models import Quiz, ShortAnswer, Choice, QuizAttempt
from codequiz.models import CodeQuiz, CodeQuizAttempt
from concept.models import Concept, ConceptSection
from course.models import Course, CourseSlug, CourseConcept, ConceptDependency
def reset():
for key, item in globals().items():
if type(item) == ModelBase and item != ModelBase:
Model = item
Model.objects.all().delete()
if __name__ == '__main__':
reset()
|
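The script works because imported Django model classes are instances of ModelBase, so scanning globals() finds exactly the models imported at the top. A self-contained illustration of that introspection pattern, using a plain metaclass in place of Django's:

class Meta(type):
    pass

class A(metaclass=Meta):
    pass

unrelated = object()

# Mirrors the reset() loop: pick up classes built by the metaclass,
# but skip the metaclass itself and unrelated globals.
found = [v for v in globals().values() if type(v) == Meta and v is not Meta]
assert found == [A]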
|
9386236d41298ed8888a6774f40a15d44b7e53fe
|
data_log/management/commands/generate_report_fixture.py
|
data_log/management/commands/generate_report_fixture.py
|
from django.core.management.base import BaseCommand
from django.core import serializers
from data_log import models
import json
class Command(BaseCommand):
help = 'Create Data Log Report fixtures'
def handle(self, *args, **kwargs):
self.stdout.write('Creating fixtures for Data Log Reports...')
JSONSerializer = serializers.get_serializer("json")
j = JSONSerializer()
data = []
models_to_serialize = [
models.LevelReport,
models.SummonReport,
models.MagicShopRefreshReport,
models.MagicBoxCraftingReport,
models.WishReport,
models.RuneCraftingReport
]
for model in models_to_serialize:
data += json.loads(j.serialize(model.objects.order_by('-generated_on')[:100]))
data += json.loads(j.serialize(models.Report.objects.order_by('-generated_on')[:1000]))
with open("fixture_reports.json", "w+") as f:
json.dump(data, f)
self.stdout.write(self.style.SUCCESS('Done!'))
|
Create command for Data Log Report fixtures
|
Create command for Data Log Report fixtures
|
Python
|
apache-2.0
|
porksmash/swarfarm,porksmash/swarfarm,porksmash/swarfarm,porksmash/swarfarm
|
Create command for Data Log Report fixtures
|
from django.core.management.base import BaseCommand
from django.core import serializers
from data_log import models
import json
class Command(BaseCommand):
help = 'Create Data Log Report fixtures'
def handle(self, *args, **kwargs):
self.stdout.write('Creating fixtures for Data Log Reports...')
JSONSerializer = serializers.get_serializer("json")
j = JSONSerializer()
data = []
models_to_serialize = [
models.LevelReport,
models.SummonReport,
models.MagicShopRefreshReport,
models.MagicBoxCraftingReport,
models.WishReport,
models.RuneCraftingReport
]
for model in models_to_serialize:
data += json.loads(j.serialize(model.objects.order_by('-generated_on')[:100]))
data += json.loads(j.serialize(models.Report.objects.order_by('-generated_on')[:1000]))
with open("fixture_reports.json", "w+") as f:
json.dump(data, f)
self.stdout.write(self.style.SUCCESS('Done!'))
|
<commit_before><commit_msg>Create command for Data Log Report fixtures<commit_after>
|
from django.core.management.base import BaseCommand
from django.core import serializers
from data_log import models
import json
class Command(BaseCommand):
help = 'Create Data Log Report fixtures'
def handle(self, *args, **kwargs):
self.stdout.write('Creating fixtures for Data Log Reports...')
JSONSerializer = serializers.get_serializer("json")
j = JSONSerializer()
data = []
models_to_serialize = [
models.LevelReport,
models.SummonReport,
models.MagicShopRefreshReport,
models.MagicBoxCraftingReport,
models.WishReport,
models.RuneCraftingReport
]
for model in models_to_serialize:
data += json.loads(j.serialize(model.objects.order_by('-generated_on')[:100]))
data += json.loads(j.serialize(models.Report.objects.order_by('-generated_on')[:1000]))
with open("fixture_reports.json", "w+") as f:
json.dump(data, f)
self.stdout.write(self.style.SUCCESS('Done!'))
|
Create command for Data Log Report fixturesfrom django.core.management.base import BaseCommand
from django.core import serializers
from data_log import models
import json
class Command(BaseCommand):
help = 'Create Data Log Report fixtures'
def handle(self, *args, **kwargs):
self.stdout.write('Creating fixtures for Data Log Reports...')
JSONSerializer = serializers.get_serializer("json")
j = JSONSerializer()
data = []
models_to_serialize = [
models.LevelReport,
models.SummonReport,
models.MagicShopRefreshReport,
models.MagicBoxCraftingReport,
models.WishReport,
models.RuneCraftingReport
]
for model in models_to_serialize:
data += json.loads(j.serialize(model.objects.order_by('-generated_on')[:100]))
data += json.loads(j.serialize(models.Report.objects.order_by('-generated_on')[:1000]))
with open("fixture_reports.json", "w+") as f:
json.dump(data, f)
self.stdout.write(self.style.SUCCESS('Done!'))
|
<commit_before><commit_msg>Create command for Data Log Report fixtures<commit_after>from django.core.management.base import BaseCommand
from django.core import serializers
from data_log import models
import json
class Command(BaseCommand):
help = 'Create Data Log Report fixtures'
def handle(self, *args, **kwargs):
self.stdout.write('Creating fixtures for Data Log Reports...')
JSONSerializer = serializers.get_serializer("json")
j = JSONSerializer()
data = []
models_to_serialize = [
models.LevelReport,
models.SummonReport,
models.MagicShopRefreshReport,
models.MagicBoxCraftingReport,
models.WishReport,
models.RuneCraftingReport
]
for model in models_to_serialize:
data += json.loads(j.serialize(model.objects.order_by('-generated_on')[:100]))
data += json.loads(j.serialize(models.Report.objects.order_by('-generated_on')[:1000]))
with open("fixture_reports.json", "w+") as f:
json.dump(data, f)
self.stdout.write(self.style.SUCCESS('Done!'))
|
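The dumped file is an ordinary Django JSON fixture, so it can presumably be restored with loaddata. A sketch using call_command, with the filename taken from the command above:

from django.core.management import call_command

# Reload the serialized reports, e.g. into a test database.
call_command('loaddata', 'fixture_reports.json')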
|
496754c54005cf7e1b49ada8e612207f5e2846ff
|
python/example_code/sqs/dead_letter_queue.py
|
python/example_code/sqs/dead_letter_queue.py
|
# Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import json
import boto3
# Create SQS client
sqs = boto3.client('sqs')
queue_url = 'SOURCE_QUEUE_URL'
dead_letter_queue_arn = 'DEAD_LETTER_QUEUE_ARN'
redrive_policy = {
'deadLetterTargetArn': dead_letter_queue_arn,
'maxReceiveCount': '10'
}
# Configure queue to send messages to dead letter queue
sqs.set_queue_attributes(
QueueUrl=queue_url,
Attributes={
'RedrivePolicy': json.dumps(redrive_policy)
}
)
|
Add dead letter SQS queue example
|
Add dead letter SQS queue example
|
Python
|
apache-2.0
|
awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,imshashank/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,imshashank/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,imshashank/aws-doc-sdk-examples,imshashank/aws-doc-sdk-examples,imshashank/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,imshashank/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,imshashank/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,imshashank/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,imshashank/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples
|
Add dead letter SQS queue example
|
# Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import json
import boto3
# Create SQS client
sqs = boto3.client('sqs')
queue_url = 'SOURCE_QUEUE_URL'
dead_letter_queue_arn = 'DEAD_LETTER_QUEUE_ARN'
redrive_policy = {
'deadLetterTargetArn': dead_letter_queue_arn,
'maxReceiveCount': '10'
}
# Configure queue to send messages to dead letter queue
sqs.set_queue_attributes(
QueueUrl=queue_url,
Attributes={
'RedrivePolicy': json.dumps(redrive_policy)
}
)
|
<commit_before><commit_msg>Add dead letter SQS queue example<commit_after>
|
# Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import json
import boto3
# Create SQS client
sqs = boto3.client('sqs')
queue_url = 'SOURCE_QUEUE_URL'
dead_letter_queue_arn = 'DEAD_LETTER_QUEUE_ARN'
redrive_policy = {
'deadLetterTargetArn': dead_letter_queue_arn,
'maxReceiveCount': '10'
}
# Configure queue to send messages to dead letter queue
sqs.set_queue_attributes(
QueueUrl=queue_url,
Attributes={
'RedrivePolicy': json.dumps(redrive_policy)
}
)
|
Add dead letter SQS queue example# Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import json
import boto3
# Create SQS client
sqs = boto3.client('sqs')
queue_url = 'SOURCE_QUEUE_URL'
dead_letter_queue_arn = 'DEAD_LETTER_QUEUE_ARN'
redrive_policy = {
'deadLetterTargetArn': dead_letter_queue_arn,
'maxReceiveCount': '10'
}
# Configure queue to send messages to dead letter queue
sqs.set_queue_attributes(
QueueUrl=queue_url,
Attributes={
'RedrivePolicy': json.dumps(redrive_policy)
}
)
|
<commit_before><commit_msg>Add dead letter SQS queue example<commit_after># Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import json
import boto3
# Create SQS client
sqs = boto3.client('sqs')
queue_url = 'SOURCE_QUEUE_URL'
dead_letter_queue_arn = 'DEAD_LETTER_QUEUE_ARN'
redrive_policy = {
'deadLetterTargetArn': dead_letter_queue_arn,
'maxReceiveCount': '10'
}
# Configure queue to send messages to dead letter queue
sqs.set_queue_attributes(
QueueUrl=queue_url,
Attributes={
'RedrivePolicy': json.dumps(redrive_policy)
}
)
|
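The example assumes the dead-letter queue already exists. For completeness, a hedged sketch of creating one and fetching the ARN plugged in above; the queue name is illustrative:

import boto3

sqs = boto3.client('sqs')

# Create the dead-letter queue and look up its ARN.
dlq_url = sqs.create_queue(QueueName='my-dead-letter-queue')['QueueUrl']
dlq_arn = sqs.get_queue_attributes(
    QueueUrl=dlq_url,
    AttributeNames=['QueueArn']
)['Attributes']['QueueArn']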
|
1718926c99692fefb90627c55589990cd0e0225b
|
wagtail/project_template/home/migrations/0002_create_homepage.py
|
wagtail/project_template/home/migrations/0002_create_homepage.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def create_homepage(apps, schema_editor):
# Get models
ContentType = apps.get_model('contenttypes.ContentType')
Page = apps.get_model('wagtailcore.Page')
Site = apps.get_model('wagtailcore.Site')
HomePage = apps.get_model('home.HomePage')
# Delete the default homepage
Page.objects.get(id=2).delete()
# Create content type for homepage model
homepage_content_type, created = ContentType.objects.get_or_create(
model='homepage', app_label='home')
# Create a new homepage
homepage = HomePage.objects.create(
title="Homepage",
slug='home',
content_type=homepage_content_type,
path='00010001',
depth=2,
numchild=0,
url_path='/home/',
)
# Create a site with the new homepage set as the root
Site.objects.create(
hostname='localhost', root_page=homepage, is_default_site=True)
class Migration(migrations.Migration):
dependencies = [
('home', '0001_initial'),
]
operations = [
migrations.RunPython(create_homepage),
]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def create_homepage(apps, schema_editor):
# Get models
ContentType = apps.get_model('contenttypes.ContentType')
Page = apps.get_model('wagtailcore.Page')
Site = apps.get_model('wagtailcore.Site')
HomePage = apps.get_model('home.HomePage')
# Delete the default homepage
# If migration is run multiple times, it may have already been deleted
Page.objects.filter(id=2).delete()
# Create content type for homepage model
homepage_content_type, __ = ContentType.objects.get_or_create(
model='homepage', app_label='home')
# Create a new homepage
homepage = HomePage.objects.create(
title="Homepage",
slug='home',
content_type=homepage_content_type,
path='00010001',
depth=2,
numchild=0,
url_path='/home/',
)
# Create a site with the new homepage set as the root
Site.objects.create(
hostname='localhost', root_page=homepage, is_default_site=True)
def remove_homepage(apps, schema_editor):
# Get models
ContentType = apps.get_model('contenttypes.ContentType')
HomePage = apps.get_model('home.HomePage')
# Delete the default homepage
# Page and Site objects CASCADE
HomePage.objects.filter(slug='home', depth=2).delete()
# Delete content type for homepage model
ContentType.objects.filter(model='homepage', app_label='home').delete()
class Migration(migrations.Migration):
dependencies = [
('home', '0001_initial'),
]
operations = [
migrations.RunPython(create_homepage, remove_homepage),
]
|
Make migrations in project_template home app reversible
|
Make migrations in project_template home app reversible
|
Python
|
bsd-3-clause
|
rsalmaso/wagtail,FlipperPA/wagtail,chrxr/wagtail,nutztherookie/wagtail,mikedingjan/wagtail,nimasmi/wagtail,rsalmaso/wagtail,Toshakins/wagtail,mikedingjan/wagtail,thenewguy/wagtail,mikedingjan/wagtail,iansprice/wagtail,nilnvoid/wagtail,nimasmi/wagtail,mikedingjan/wagtail,nutztherookie/wagtail,zerolab/wagtail,FlipperPA/wagtail,mixxorz/wagtail,nimasmi/wagtail,torchbox/wagtail,torchbox/wagtail,torchbox/wagtail,gasman/wagtail,jnns/wagtail,gasman/wagtail,Toshakins/wagtail,jnns/wagtail,jnns/wagtail,timorieber/wagtail,nealtodd/wagtail,FlipperPA/wagtail,wagtail/wagtail,gasman/wagtail,Toshakins/wagtail,nilnvoid/wagtail,takeflight/wagtail,nilnvoid/wagtail,kaedroho/wagtail,nutztherookie/wagtail,zerolab/wagtail,thenewguy/wagtail,nealtodd/wagtail,gasman/wagtail,kaedroho/wagtail,mixxorz/wagtail,kaedroho/wagtail,rsalmaso/wagtail,iansprice/wagtail,rsalmaso/wagtail,nutztherookie/wagtail,takeflight/wagtail,thenewguy/wagtail,wagtail/wagtail,zerolab/wagtail,kaedroho/wagtail,zerolab/wagtail,chrxr/wagtail,takeflight/wagtail,takeflight/wagtail,mixxorz/wagtail,torchbox/wagtail,timorieber/wagtail,Toshakins/wagtail,mixxorz/wagtail,nilnvoid/wagtail,thenewguy/wagtail,zerolab/wagtail,nimasmi/wagtail,FlipperPA/wagtail,nealtodd/wagtail,gasman/wagtail,wagtail/wagtail,iansprice/wagtail,timorieber/wagtail,chrxr/wagtail,nealtodd/wagtail,mixxorz/wagtail,kaedroho/wagtail,wagtail/wagtail,thenewguy/wagtail,rsalmaso/wagtail,timorieber/wagtail,chrxr/wagtail,wagtail/wagtail,jnns/wagtail,iansprice/wagtail
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def create_homepage(apps, schema_editor):
# Get models
ContentType = apps.get_model('contenttypes.ContentType')
Page = apps.get_model('wagtailcore.Page')
Site = apps.get_model('wagtailcore.Site')
HomePage = apps.get_model('home.HomePage')
# Delete the default homepage
Page.objects.get(id=2).delete()
# Create content type for homepage model
homepage_content_type, created = ContentType.objects.get_or_create(
model='homepage', app_label='home')
# Create a new homepage
homepage = HomePage.objects.create(
title="Homepage",
slug='home',
content_type=homepage_content_type,
path='00010001',
depth=2,
numchild=0,
url_path='/home/',
)
# Create a site with the new homepage set as the root
Site.objects.create(
hostname='localhost', root_page=homepage, is_default_site=True)
class Migration(migrations.Migration):
dependencies = [
('home', '0001_initial'),
]
operations = [
migrations.RunPython(create_homepage),
]
Make migrations in project_template home app reversible
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def create_homepage(apps, schema_editor):
# Get models
ContentType = apps.get_model('contenttypes.ContentType')
Page = apps.get_model('wagtailcore.Page')
Site = apps.get_model('wagtailcore.Site')
HomePage = apps.get_model('home.HomePage')
# Delete the default homepage
# If migration is run multiple times, it may have already been deleted
Page.objects.filter(id=2).delete()
# Create content type for homepage model
homepage_content_type, __ = ContentType.objects.get_or_create(
model='homepage', app_label='home')
# Create a new homepage
homepage = HomePage.objects.create(
title="Homepage",
slug='home',
content_type=homepage_content_type,
path='00010001',
depth=2,
numchild=0,
url_path='/home/',
)
# Create a site with the new homepage set as the root
Site.objects.create(
hostname='localhost', root_page=homepage, is_default_site=True)
def remove_homepage(apps, schema_editor):
# Get models
ContentType = apps.get_model('contenttypes.ContentType')
HomePage = apps.get_model('home.HomePage')
# Delete the default homepage
# Page and Site objects CASCADE
HomePage.objects.filter(slug='home', depth=2).delete()
# Delete content type for homepage model
ContentType.objects.filter(model='homepage', app_label='home').delete()
class Migration(migrations.Migration):
dependencies = [
('home', '0001_initial'),
]
operations = [
migrations.RunPython(create_homepage, remove_homepage),
]
|
<commit_before># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def create_homepage(apps, schema_editor):
# Get models
ContentType = apps.get_model('contenttypes.ContentType')
Page = apps.get_model('wagtailcore.Page')
Site = apps.get_model('wagtailcore.Site')
HomePage = apps.get_model('home.HomePage')
# Delete the default homepage
Page.objects.get(id=2).delete()
# Create content type for homepage model
homepage_content_type, created = ContentType.objects.get_or_create(
model='homepage', app_label='home')
# Create a new homepage
homepage = HomePage.objects.create(
title="Homepage",
slug='home',
content_type=homepage_content_type,
path='00010001',
depth=2,
numchild=0,
url_path='/home/',
)
# Create a site with the new homepage set as the root
Site.objects.create(
hostname='localhost', root_page=homepage, is_default_site=True)
class Migration(migrations.Migration):
dependencies = [
('home', '0001_initial'),
]
operations = [
migrations.RunPython(create_homepage),
]
<commit_msg>Make migrations in project_template home app reversible<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def create_homepage(apps, schema_editor):
# Get models
ContentType = apps.get_model('contenttypes.ContentType')
Page = apps.get_model('wagtailcore.Page')
Site = apps.get_model('wagtailcore.Site')
HomePage = apps.get_model('home.HomePage')
# Delete the default homepage
# If migration is run multiple times, it may have already been deleted
Page.objects.filter(id=2).delete()
# Create content type for homepage model
homepage_content_type, __ = ContentType.objects.get_or_create(
model='homepage', app_label='home')
# Create a new homepage
homepage = HomePage.objects.create(
title="Homepage",
slug='home',
content_type=homepage_content_type,
path='00010001',
depth=2,
numchild=0,
url_path='/home/',
)
# Create a site with the new homepage set as the root
Site.objects.create(
hostname='localhost', root_page=homepage, is_default_site=True)
def remove_homepage(apps, schema_editor):
# Get models
ContentType = apps.get_model('contenttypes.ContentType')
HomePage = apps.get_model('home.HomePage')
# Delete the default homepage
# Page and Site objects CASCADE
HomePage.objects.filter(slug='home', depth=2).delete()
# Delete content type for homepage model
ContentType.objects.filter(model='homepage', app_label='home').delete()
class Migration(migrations.Migration):
dependencies = [
('home', '0001_initial'),
]
operations = [
migrations.RunPython(create_homepage, remove_homepage),
]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def create_homepage(apps, schema_editor):
# Get models
ContentType = apps.get_model('contenttypes.ContentType')
Page = apps.get_model('wagtailcore.Page')
Site = apps.get_model('wagtailcore.Site')
HomePage = apps.get_model('home.HomePage')
# Delete the default homepage
Page.objects.get(id=2).delete()
# Create content type for homepage model
homepage_content_type, created = ContentType.objects.get_or_create(
model='homepage', app_label='home')
# Create a new homepage
homepage = HomePage.objects.create(
title="Homepage",
slug='home',
content_type=homepage_content_type,
path='00010001',
depth=2,
numchild=0,
url_path='/home/',
)
# Create a site with the new homepage set as the root
Site.objects.create(
hostname='localhost', root_page=homepage, is_default_site=True)
class Migration(migrations.Migration):
dependencies = [
('home', '0001_initial'),
]
operations = [
migrations.RunPython(create_homepage),
]
Make migrations in project_template home app reversible# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def create_homepage(apps, schema_editor):
# Get models
ContentType = apps.get_model('contenttypes.ContentType')
Page = apps.get_model('wagtailcore.Page')
Site = apps.get_model('wagtailcore.Site')
HomePage = apps.get_model('home.HomePage')
# Delete the default homepage
# If migration is run multiple times, it may have already been deleted
Page.objects.filter(id=2).delete()
# Create content type for homepage model
homepage_content_type, __ = ContentType.objects.get_or_create(
model='homepage', app_label='home')
# Create a new homepage
homepage = HomePage.objects.create(
title="Homepage",
slug='home',
content_type=homepage_content_type,
path='00010001',
depth=2,
numchild=0,
url_path='/home/',
)
# Create a site with the new homepage set as the root
Site.objects.create(
hostname='localhost', root_page=homepage, is_default_site=True)
def remove_homepage(apps, schema_editor):
# Get models
ContentType = apps.get_model('contenttypes.ContentType')
HomePage = apps.get_model('home.HomePage')
# Delete the default homepage
# Page and Site objects CASCADE
HomePage.objects.filter(slug='home', depth=2).delete()
# Delete content type for homepage model
ContentType.objects.filter(model='homepage', app_label='home').delete()
class Migration(migrations.Migration):
dependencies = [
('home', '0001_initial'),
]
operations = [
migrations.RunPython(create_homepage, remove_homepage),
]
|
<commit_before># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def create_homepage(apps, schema_editor):
# Get models
ContentType = apps.get_model('contenttypes.ContentType')
Page = apps.get_model('wagtailcore.Page')
Site = apps.get_model('wagtailcore.Site')
HomePage = apps.get_model('home.HomePage')
# Delete the default homepage
Page.objects.get(id=2).delete()
# Create content type for homepage model
homepage_content_type, created = ContentType.objects.get_or_create(
model='homepage', app_label='home')
# Create a new homepage
homepage = HomePage.objects.create(
title="Homepage",
slug='home',
content_type=homepage_content_type,
path='00010001',
depth=2,
numchild=0,
url_path='/home/',
)
# Create a site with the new homepage set as the root
Site.objects.create(
hostname='localhost', root_page=homepage, is_default_site=True)
class Migration(migrations.Migration):
dependencies = [
('home', '0001_initial'),
]
operations = [
migrations.RunPython(create_homepage),
]
<commit_msg>Make migrations in project_template home app reversible<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def create_homepage(apps, schema_editor):
# Get models
ContentType = apps.get_model('contenttypes.ContentType')
Page = apps.get_model('wagtailcore.Page')
Site = apps.get_model('wagtailcore.Site')
HomePage = apps.get_model('home.HomePage')
# Delete the default homepage
# If migration is run multiple times, it may have already been deleted
Page.objects.filter(id=2).delete()
# Create content type for homepage model
homepage_content_type, __ = ContentType.objects.get_or_create(
model='homepage', app_label='home')
# Create a new homepage
homepage = HomePage.objects.create(
title="Homepage",
slug='home',
content_type=homepage_content_type,
path='00010001',
depth=2,
numchild=0,
url_path='/home/',
)
# Create a site with the new homepage set as the root
Site.objects.create(
hostname='localhost', root_page=homepage, is_default_site=True)
def remove_homepage(apps, schema_editor):
# Get models
ContentType = apps.get_model('contenttypes.ContentType')
HomePage = apps.get_model('home.HomePage')
# Delete the default homepage
# Page and Site objects CASCADE
HomePage.objects.filter(slug='home', depth=2).delete()
# Delete content type for homepage model
ContentType.objects.filter(model='homepage', app_label='home').delete()
class Migration(migrations.Migration):
dependencies = [
('home', '0001_initial'),
]
operations = [
migrations.RunPython(create_homepage, remove_homepage),
]
|
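With remove_homepage wired up as the reverse function, the migration can be unapplied programmatically as well as from the shell. A sketch using call_command, with the app label and target taken from the file above:

from django.core.management import call_command

# Roll the home app back to 0001, which triggers remove_homepage().
call_command('migrate', 'home', '0001_initial')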
631faacaf077c2b4d0d446e42076fd4e4f27ed37
|
djlotrek/tests/test_templatetags.py
|
djlotrek/tests/test_templatetags.py
|
import os
import mock
from django.test import TestCase
from djlotrek.templatetags.djlotrek_tags import absolute_url
from django.test import RequestFactory
class TemplateTagsTestCase(TestCase):
def setUp(self):
pass
def test_absolute_url(self):
"""Our beloved get_host_url utility"""
request_factory = RequestFactory()
request = request_factory.get('/path')
request.META['HTTP_HOST'] = 'localhost'
context = {
'request' : request
}
abs_url = absolute_url(context, '/ciao/')
self.assertEqual(abs_url, 'http://localhost/ciao/')
abs_url = absolute_url(context, 'ciao/')
self.assertEqual(abs_url, 'http://localhost/ciao/')
abs_url = absolute_url(context, 'ciao')
self.assertEqual(abs_url, 'http://localhost/ciao')
abs_url = absolute_url(context, 'ciao/a/tutti')
self.assertEqual(abs_url, 'http://localhost/ciao/a/tutti')
abs_url = absolute_url(context, 'ciao/a/tutti?language=it')
self.assertEqual(abs_url, 'http://localhost/ciao/a/tutti?language=it')
def test_absolute_url_without_request(self):
"""Our beloved get_host_url utility"""
context = {}
abs_url = absolute_url(context, '/ciao/')
self.assertEqual(abs_url, '/ciao/')
|
Add tests for template tags
|
Add tests for template tags
|
Python
|
mit
|
lotrekagency/djlotrek,lotrekagency/djlotrek
|
Add tests for template tags
|
import os
import mock
from django.test import TestCase
from djlotrek.templatetags.djlotrek_tags import absolute_url
from django.test import RequestFactory
class TemplateTagsTestCase(TestCase):
def setUp(self):
pass
def test_absolute_url(self):
"""Our beloved get_host_url utility"""
request_factory = RequestFactory()
request = request_factory.get('/path')
request.META['HTTP_HOST'] = 'localhost'
context = {
'request' : request
}
abs_url = absolute_url(context, '/ciao/')
self.assertEqual(abs_url, 'http://localhost/ciao/')
abs_url = absolute_url(context, 'ciao/')
self.assertEqual(abs_url, 'http://localhost/ciao/')
abs_url = absolute_url(context, 'ciao')
self.assertEqual(abs_url, 'http://localhost/ciao')
abs_url = absolute_url(context, 'ciao/a/tutti')
self.assertEqual(abs_url, 'http://localhost/ciao/a/tutti')
abs_url = absolute_url(context, 'ciao/a/tutti?language=it')
self.assertEqual(abs_url, 'http://localhost/ciao/a/tutti?language=it')
def test_absolute_url_without_request(self):
"""Our beloved get_host_url utility"""
context = {}
abs_url = absolute_url(context, '/ciao/')
self.assertEqual(abs_url, '/ciao/')
|
<commit_before><commit_msg>Add tests for template tags<commit_after>
|
import os
import mock
from django.test import TestCase
from djlotrek.templatetags.djlotrek_tags import absolute_url
from django.test import RequestFactory
class TemplateTagsTestCase(TestCase):
def setUp(self):
pass
def test_absolute_url(self):
"""Our beloved get_host_url utility"""
request_factory = RequestFactory()
request = request_factory.get('/path')
request.META['HTTP_HOST'] = 'localhost'
context = {
'request' : request
}
abs_url = absolute_url(context, '/ciao/')
self.assertEqual(abs_url, 'http://localhost/ciao/')
abs_url = absolute_url(context, 'ciao/')
self.assertEqual(abs_url, 'http://localhost/ciao/')
abs_url = absolute_url(context, 'ciao')
self.assertEqual(abs_url, 'http://localhost/ciao')
abs_url = absolute_url(context, 'ciao/a/tutti')
self.assertEqual(abs_url, 'http://localhost/ciao/a/tutti')
abs_url = absolute_url(context, 'ciao/a/tutti?language=it')
self.assertEqual(abs_url, 'http://localhost/ciao/a/tutti?language=it')
def test_absolute_url_without_request(self):
"""Our beloved get_host_url utility"""
context = {}
abs_url = absolute_url(context, '/ciao/')
self.assertEqual(abs_url, '/ciao/')
|
Add tests for template tagsimport os
import mock
from django.test import TestCase
from djlotrek.templatetags.djlotrek_tags import absolute_url
from django.test import RequestFactory
class TemplateTagsTestCase(TestCase):
def setUp(self):
pass
def test_absolute_url(self):
"""Our beloved get_host_url utility"""
request_factory = RequestFactory()
request = request_factory.get('/path')
request.META['HTTP_HOST'] = 'localhost'
context = {
'request' : request
}
abs_url = absolute_url(context, '/ciao/')
self.assertEqual(abs_url, 'http://localhost/ciao/')
abs_url = absolute_url(context, 'ciao/')
self.assertEqual(abs_url, 'http://localhost/ciao/')
abs_url = absolute_url(context, 'ciao')
self.assertEqual(abs_url, 'http://localhost/ciao')
abs_url = absolute_url(context, 'ciao/a/tutti')
self.assertEqual(abs_url, 'http://localhost/ciao/a/tutti')
abs_url = absolute_url(context, 'ciao/a/tutti?language=it')
self.assertEqual(abs_url, 'http://localhost/ciao/a/tutti?language=it')
def test_absolute_url_without_request(self):
"""Our beloved get_host_url utility"""
context = {}
abs_url = absolute_url(context, '/ciao/')
self.assertEqual(abs_url, '/ciao/')
|
<commit_before><commit_msg>Add tests for template tags<commit_after>import os
import mock
from django.test import TestCase
from djlotrek.templatetags.djlotrek_tags import absolute_url
from django.test import RequestFactory
class TemplateTagsTestCase(TestCase):
def setUp(self):
pass
def test_absolute_url(self):
"""Our beloved get_host_url utility"""
request_factory = RequestFactory()
request = request_factory.get('/path')
request.META['HTTP_HOST'] = 'localhost'
context = {
'request' : request
}
abs_url = absolute_url(context, '/ciao/')
self.assertEqual(abs_url, 'http://localhost/ciao/')
abs_url = absolute_url(context, 'ciao/')
self.assertEqual(abs_url, 'http://localhost/ciao/')
abs_url = absolute_url(context, 'ciao')
self.assertEqual(abs_url, 'http://localhost/ciao')
abs_url = absolute_url(context, 'ciao/a/tutti')
self.assertEqual(abs_url, 'http://localhost/ciao/a/tutti')
abs_url = absolute_url(context, 'ciao/a/tutti?language=it')
self.assertEqual(abs_url, 'http://localhost/ciao/a/tutti?language=it')
def test_absolute_url_without_request(self):
"""Our beloved get_host_url utility"""
context = {}
abs_url = absolute_url(context, '/ciao/')
self.assertEqual(abs_url, '/ciao/')
|
|
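The tag under test is not shown here; judging from the assertions, it joins the request's scheme and host with the given path, and falls back to the raw path when no request is in the context. A hypothetical implementation consistent with those tests (not the actual djlotrek source):

from django import template

register = template.Library()

@register.simple_tag(takes_context=True)
def absolute_url(context, path):
    request = context.get('request')
    if request is None:
        return path
    if not path.startswith('/'):
        path = '/' + path
    return '{0}://{1}{2}'.format(request.scheme, request.get_host(), path)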
f2028ab194fe7c1c1497ee9320ddddbbece6406a
|
nova/common/eventlet_backdoor.py
|
nova/common/eventlet_backdoor.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 Openstack, LLC.
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import gc
import traceback
import eventlet
import eventlet.backdoor
import greenlet
from nova import flags
from nova.openstack.common import cfg
eventlet_backdoor_opts = [
cfg.IntOpt('backdoor_port',
default=None,
help='port for eventlet backdoor to listen')
]
FLAGS = flags.FLAGS
FLAGS.register_opts(eventlet_backdoor_opts)
def dont_use_this():
print "Don't use this, just disconnect instead"
def find_objects(t):
return filter(lambda o: isinstance(o, t), gc.get_objects())
def print_greenthreads():
for i, gt in enumerate(find_objects(greenlet.greenlet)):
print i, gt
traceback.print_stack(gt.gr_frame)
print
backdoor_locals = {
'_': None, # So it doesn't interfere with the global
'exit': dont_use_this, # So we don't exit the entire process
'quit': dont_use_this, # So we don't exit the entire process
'fo': find_objects,
'pgt': print_greenthreads,
}
def initialize_if_enabled():
if FLAGS.backdoor_port is None:
return
eventlet.spawn(eventlet.backdoor.backdoor_server,
eventlet.listen(('localhost', FLAGS.backdoor_port)),
locals=backdoor_locals)
|
Add eventlet backdoor to facilitate troubleshooting.
|
Add eventlet backdoor to facilitate troubleshooting.
This provides a FLAG to turn on Eventlet's builtin backdoor server which
allows you to connect over telnet and receive a Python prompt (which is
immensely helpful for debugging running systems).
Fixes bug 1000366
Change-Id: I779247a0796d34ba2a5478436d85b30ba76c4a01
|
Python
|
apache-2.0
|
n0ano/ganttclient
|
Add eventlet backdoor to facilitate troubleshooting.
This provides a FLAG to turn on Eventlet's builtin backdoor server which
allows you to connect over telnet and receive a Python prompt (which is
immensely helpful for debugging running systems).
Fixes bug 1000366
Change-Id: I779247a0796d34ba2a5478436d85b30ba76c4a01
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 Openstack, LLC.
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import gc
import traceback
import eventlet
import eventlet.backdoor
import greenlet
from nova import flags
from nova.openstack.common import cfg
eventlet_backdoor_opts = [
cfg.IntOpt('backdoor_port',
default=None,
help='port for eventlet backdoor to listen')
]
FLAGS = flags.FLAGS
FLAGS.register_opts(eventlet_backdoor_opts)
def dont_use_this():
print "Don't use this, just disconnect instead"
def find_objects(t):
return filter(lambda o: isinstance(o, t), gc.get_objects())
def print_greenthreads():
for i, gt in enumerate(find_objects(greenlet.greenlet)):
print i, gt
traceback.print_stack(gt.gr_frame)
print
backdoor_locals = {
'_': None, # So it doesn't interfere with the global
'exit': dont_use_this, # So we don't exit the entire process
'quit': dont_use_this, # So we don't exit the entire process
'fo': find_objects,
'pgt': print_greenthreads,
}
def initialize_if_enabled():
if FLAGS.backdoor_port is None:
return
eventlet.spawn(eventlet.backdoor.backdoor_server,
eventlet.listen(('localhost', FLAGS.backdoor_port)),
locals=backdoor_locals)
|
<commit_before><commit_msg>Add eventlet backdoor to facilitate troubleshooting.
This provides a FLAG to turn on Eventlet's builtin backdoor server which
allows you to connect over telnet and receive a Python prompt (which is
immensely helpful for debugging running systems).
Fixes bug 1000366
Change-Id: I779247a0796d34ba2a5478436d85b30ba76c4a01<commit_after>
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 Openstack, LLC.
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import gc
import traceback
import eventlet
import eventlet.backdoor
import greenlet
from nova import flags
from nova.openstack.common import cfg
eventlet_backdoor_opts = [
cfg.IntOpt('backdoor_port',
default=None,
help='port for eventlet backdoor to listen')
]
FLAGS = flags.FLAGS
FLAGS.register_opts(eventlet_backdoor_opts)
def dont_use_this():
print "Don't use this, just disconnect instead"
def find_objects(t):
return filter(lambda o: isinstance(o, t), gc.get_objects())
def print_greenthreads():
for i, gt in enumerate(find_objects(greenlet.greenlet)):
print i, gt
traceback.print_stack(gt.gr_frame)
print
backdoor_locals = {
'_': None, # So it doesn't interfere with the global
'exit': dont_use_this, # So we don't exit the entire process
'quit': dont_use_this, # So we don't exit the entire process
'fo': find_objects,
'pgt': print_greenthreads,
}
def initialize_if_enabled():
if FLAGS.backdoor_port is None:
return
eventlet.spawn(eventlet.backdoor.backdoor_server,
eventlet.listen(('localhost', FLAGS.backdoor_port)),
locals=backdoor_locals)
|
Add eventlet backdoor to facilitate troubleshooting.
This provides a FLAG to turn on Eventlet's builtin backdoor server which
allows you to connect over telnet and receive a Python prompt (which is
immensely helpful for debugging running systems).
Fixes bug 1000366
Change-Id: I779247a0796d34ba2a5478436d85b30ba76c4a01# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 Openstack, LLC.
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import gc
import traceback
import eventlet
import eventlet.backdoor
import greenlet
from nova import flags
from nova.openstack.common import cfg
eventlet_backdoor_opts = [
cfg.IntOpt('backdoor_port',
default=None,
help='port for eventlet backdoor to listen')
]
FLAGS = flags.FLAGS
FLAGS.register_opts(eventlet_backdoor_opts)
def dont_use_this():
print "Don't use this, just disconnect instead"
def find_objects(t):
return filter(lambda o: isinstance(o, t), gc.get_objects())
def print_greenthreads():
for i, gt in enumerate(find_objects(greenlet.greenlet)):
print i, gt
traceback.print_stack(gt.gr_frame)
print
backdoor_locals = {
'_': None, # So it doesn't interfere with the global
'exit': dont_use_this, # So we don't exit the entire process
'quit': dont_use_this, # So we don't exit the entire process
'fo': find_objects,
'pgt': print_greenthreads,
}
def initialize_if_enabled():
if FLAGS.backdoor_port is None:
return
eventlet.spawn(eventlet.backdoor.backdoor_server,
eventlet.listen(('localhost', FLAGS.backdoor_port)),
locals=backdoor_locals)
|
<commit_before><commit_msg>Add eventlet backdoor to facilitate troubleshooting.
This provides a FLAG to turn on Eventlet's builtin backdoor server which
allows you to connect over telnet and receive a Python prompt (which is
immensely helpful for debugging running systems).
Fixes bug 1000366
Change-Id: I779247a0796d34ba2a5478436d85b30ba76c4a01<commit_after># vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 Openstack, LLC.
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import gc
import traceback
import eventlet
import eventlet.backdoor
import greenlet
from nova import flags
from nova.openstack.common import cfg
eventlet_backdoor_opts = [
cfg.IntOpt('backdoor_port',
default=None,
help='port for eventlet backdoor to listen')
]
FLAGS = flags.FLAGS
FLAGS.register_opts(eventlet_backdoor_opts)
def dont_use_this():
print "Don't use this, just disconnect instead"
def find_objects(t):
return filter(lambda o: isinstance(o, t), gc.get_objects())
def print_greenthreads():
for i, gt in enumerate(find_objects(greenlet.greenlet)):
print i, gt
traceback.print_stack(gt.gr_frame)
print
backdoor_locals = {
'_': None, # So it doesn't interfere with the global
'exit': dont_use_this, # So we don't exit the entire process
'quit': dont_use_this, # So we don't exit the entire process
'fo': find_objects,
'pgt': print_greenthreads,
}
def initialize_if_enabled():
if FLAGS.backdoor_port is None:
return
eventlet.spawn(eventlet.backdoor.backdoor_server,
eventlet.listen(('localhost', FLAGS.backdoor_port)),
locals=backdoor_locals)
|
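Once backdoor_port is set, the server listens on localhost and any telnet-capable client gets a Python prompt. A hedged sketch of driving it programmatically; the port value is illustrative and the output read is best-effort:

import socket

# Connect to the backdoor and invoke one of the helpers registered above.
sock = socket.create_connection(('localhost', 4444))
sock.sendall(b'pgt()\n')   # print_greenthreads
print(sock.recv(4096))
sock.close()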
|
d81ba7d656f11e817eb610b1c65a4880fddc9004
|
saylua/modules/arcade/api.py
|
saylua/modules/arcade/api.py
|
from saylua.wrappers import api_login_required
from flask import g, request
from models.db import Game, GameLog
from saylua.utils import int_or_none
import json
# Send a score to the API.
@api_login_required()
def api_send_score(game_id):
try:
gameName = Game(game_id)
except IndexError:
return json.dumps(dict(error='Invalid game!')), 400
finally:
if gameName == "blocks":
# TODO sanity check the game log and other variables sent to catch
# low hanging fruit attempts at cheating.
data = request.get_json()
score = int_or_none(data.get('score')) or 0
GameLog.record_score(g.user.id, game_id, score)
g.user.cloud_coins += score
return json.dumps(dict(cloud_coins=g.user.cloud_coins, star_shards=g.user.star_shards))
return json.dumps(dict(error='Bad request.')), 400
|
from saylua import db
from saylua.wrappers import api_login_required
from flask import g, request
from models.db import Game, GameLog
from saylua.utils import int_or_none
import json
# Send a score to the API.
@api_login_required()
def api_send_score(game_id):
try:
gameName = Game(game_id)
except IndexError:
return json.dumps(dict(error='Invalid game!')), 400
finally:
if gameName == "blocks":
# TODO sanity check the game log and other variables sent to catch
# low hanging fruit attempts at cheating.
data = request.get_json()
score = int_or_none(data.get('score')) or 0
GameLog.record_score(g.user.id, game_id, score)
g.user.cloud_coins += score
db.session.commit()
return json.dumps(dict(cloud_coins=g.user.cloud_coins, star_shards=g.user.star_shards))
return json.dumps(dict(error='Bad request.')), 400
|
Fix getting money from arcade games.
|
Fix getting money from arcade games.
|
Python
|
agpl-3.0
|
saylua/SayluaV2,saylua/SayluaV2,LikeMyBread/Saylua,saylua/SayluaV2,LikeMyBread/Saylua,LikeMyBread/Saylua,LikeMyBread/Saylua
|
from saylua.wrappers import api_login_required
from flask import g, request
from models.db import Game, GameLog
from saylua.utils import int_or_none
import json
# Send a score to the API.
@api_login_required()
def api_send_score(game_id):
try:
gameName = Game(game_id)
except IndexError:
return json.dumps(dict(error='Invalid game!')), 400
finally:
if gameName == "blocks":
# TODO sanity check the game log and other variables sent to catch
# low hanging fruit attempts at cheating.
data = request.get_json()
score = int_or_none(data.get('score')) or 0
GameLog.record_score(g.user.id, game_id, score)
g.user.cloud_coins += score
return json.dumps(dict(cloud_coins=g.user.cloud_coins, star_shards=g.user.star_shards))
return json.dumps(dict(error='Bad request.')), 400
Fix getting money from arcade games.
|
from saylua import db
from saylua.wrappers import api_login_required
from flask import g, request
from models.db import Game, GameLog
from saylua.utils import int_or_none
import json
# Send a score to the API.
@api_login_required()
def api_send_score(game_id):
try:
gameName = Game(game_id)
except IndexError:
return json.dumps(dict(error='Invalid game!')), 400
finally:
if gameName == "blocks":
# TODO sanity check the game log and other variables sent to catch
# low hanging fruit attempts at cheating.
data = request.get_json()
score = int_or_none(data.get('score')) or 0
GameLog.record_score(g.user.id, game_id, score)
g.user.cloud_coins += score
db.session.commit()
return json.dumps(dict(cloud_coins=g.user.cloud_coins, star_shards=g.user.star_shards))
return json.dumps(dict(error='Bad request.')), 400
|
<commit_before>from saylua.wrappers import api_login_required
from flask import g, request
from models.db import Game, GameLog
from saylua.utils import int_or_none
import json
# Send a score to the API.
@api_login_required()
def api_send_score(game_id):
try:
gameName = Game(game_id)
except IndexError:
return json.dumps(dict(error='Invalid game!')), 400
finally:
if gameName == "blocks":
# TODO sanity check the game log and other variables sent to catch
# low hanging fruit attempts at cheating.
data = request.get_json()
score = int_or_none(data.get('score')) or 0
GameLog.record_score(g.user.id, game_id, score)
g.user.cloud_coins += score
return json.dumps(dict(cloud_coins=g.user.cloud_coins, star_shards=g.user.star_shards))
return json.dumps(dict(error='Bad request.')), 400
<commit_msg>Fix getting money from arcade games.<commit_after>
|
from saylua import db
from saylua.wrappers import api_login_required
from flask import g, request
from models.db import Game, GameLog
from saylua.utils import int_or_none
import json
# Send a score to the API.
@api_login_required()
def api_send_score(game_id):
try:
gameName = Game(game_id)
except IndexError:
return json.dumps(dict(error='Invalid game!')), 400
finally:
if gameName == "blocks":
# TODO sanity check the game log and other variables sent to catch
# low hanging fruit attempts at cheating.
data = request.get_json()
score = int_or_none(data.get('score')) or 0
GameLog.record_score(g.user.id, game_id, score)
g.user.cloud_coins += score
db.session.commit()
return json.dumps(dict(cloud_coins=g.user.cloud_coins, star_shards=g.user.star_shards))
return json.dumps(dict(error='Bad request.')), 400
|
from saylua.wrappers import api_login_required
from flask import g, request
from models.db import Game, GameLog
from saylua.utils import int_or_none
import json
# Send a score to the API.
@api_login_required()
def api_send_score(game_id):
try:
gameName = Game(game_id)
except IndexError:
return json.dumps(dict(error='Invalid game!')), 400
finally:
if gameName == "blocks":
# TODO sanity check the game log and other variables sent to catch
# low hanging fruit attempts at cheating.
data = request.get_json()
score = int_or_none(data.get('score')) or 0
GameLog.record_score(g.user.id, game_id, score)
g.user.cloud_coins += score
return json.dumps(dict(cloud_coins=g.user.cloud_coins, star_shards=g.user.star_shards))
return json.dumps(dict(error='Bad request.')), 400
Fix getting money from arcade games.from saylua import db
from saylua.wrappers import api_login_required
from flask import g, request
from models.db import Game, GameLog
from saylua.utils import int_or_none
import json
# Send a score to the API.
@api_login_required()
def api_send_score(game_id):
try:
gameName = Game(game_id)
except IndexError:
return json.dumps(dict(error='Invalid game!')), 400
finally:
if gameName == "blocks":
# TODO sanity check the game log and other variables sent to catch
# low hanging fruit attempts at cheating.
data = request.get_json()
score = int_or_none(data.get('score')) or 0
GameLog.record_score(g.user.id, game_id, score)
g.user.cloud_coins += score
db.session.commit()
return json.dumps(dict(cloud_coins=g.user.cloud_coins, star_shards=g.user.star_shards))
return json.dumps(dict(error='Bad request.')), 400
|
<commit_before>from saylua.wrappers import api_login_required
from flask import g, request
from models.db import Game, GameLog
from saylua.utils import int_or_none
import json
# Send a score to the API.
@api_login_required()
def api_send_score(game_id):
try:
gameName = Game(game_id)
except IndexError:
return json.dumps(dict(error='Invalid game!')), 400
finally:
if gameName == "blocks":
# TODO sanity check the game log and other variables sent to catch
# low hanging fruit attempts at cheating.
data = request.get_json()
score = int_or_none(data.get('score')) or 0
GameLog.record_score(g.user.id, game_id, score)
g.user.cloud_coins += score
return json.dumps(dict(cloud_coins=g.user.cloud_coins, star_shards=g.user.star_shards))
return json.dumps(dict(error='Bad request.')), 400
<commit_msg>Fix getting money from arcade games.<commit_after>from saylua import db
from saylua.wrappers import api_login_required
from flask import g, request
from models.db import Game, GameLog
from saylua.utils import int_or_none
import json
# Send a score to the API.
@api_login_required()
def api_send_score(game_id):
try:
gameName = Game(game_id)
except IndexError:
return json.dumps(dict(error='Invalid game!')), 400
finally:
if gameName == "blocks":
# TODO sanity check the game log and other variables sent to catch
# low hanging fruit attempts at cheating.
data = request.get_json()
score = int_or_none(data.get('score')) or 0
GameLog.record_score(g.user.id, game_id, score)
g.user.cloud_coins += score
db.session.commit()
return json.dumps(dict(cloud_coins=g.user.cloud_coins, star_shards=g.user.star_shards))
return json.dumps(dict(error='Bad request.')), 400
|
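The fix in the record above works because Flask-SQLAlchemy only persists attribute changes when the session is committed: g.user.cloud_coins += score mutates the mapped object in memory, and the added db.session.commit() is what flushes the UPDATE to the database. A minimal sketch of that commit-on-write pattern, assuming a hypothetical Flask-SQLAlchemy setup (User and its columns are illustrative, not taken from the saylua codebase):

from flask import Flask
from flask_sqlalchemy import SQLAlchemy

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///:memory:'
db = SQLAlchemy(app)

class User(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    cloud_coins = db.Column(db.Integer, default=0)

def award_coins(user, amount):
    # Mutating the mapped attribute only changes the in-memory object...
    user.cloud_coins += amount
    # ...the UPDATE reaches the database when the session is committed.
    db.session.commit()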
61e56ad3feecef6fe422db8fb5d7b9b26dc03d6a
|
day3-2.py
|
day3-2.py
|
"""This module checks how many valid triangles are in the input data."""
def main():
"""Run main function."""
with open('data/day3data.txt', 'r') as f:
input = f.readlines()
dataList = [map(int, i.strip('\n').split()) for i in input]
# Transpose the data.
dataList = [list(i) for i in zip(*dataList)]
# Flatten the list.
triList = [item for sublist in dataList for item in sublist]
triangles = 0
for i in range(0, len(triList), 3):
print([triList[i], triList[i + 1], triList[i + 2]])
if isTriangle([triList[i], triList[i + 1], triList[i + 2]]):
triangles += 1
    print('There are {} valid triangles.'.format(triangles))
def isTriangle(input):
"""Check if list of three sides is a triangle."""
if 2 * max(input) < sum(input):
return True
return False
if __name__ == '__main__':
main()
|
Add day 3 part 2.
|
Add day 3 part 2.
|
Python
|
mit
|
SayWhat1/adventofcode2016
|
Add day 3 part 2.
|
"""This module checks how many valid triangles are in the input data."""
def main():
"""Run main function."""
with open('data/day3data.txt', 'r') as f:
input = f.readlines()
dataList = [map(int, i.strip('\n').split()) for i in input]
# Transpose the data.
dataList = [list(i) for i in zip(*dataList)]
# Flatten the list.
triList = [item for sublist in dataList for item in sublist]
triangles = 0
for i in range(0, len(triList), 3):
print([triList[i], triList[i + 1], triList[i + 2]])
if isTriangle([triList[i], triList[i + 1], triList[i + 2]]):
triangles += 1
    print('There are {} valid triangles.'.format(triangles))
def isTriangle(input):
"""Check if list of three sides is a triangle."""
if 2 * max(input) < sum(input):
return True
return False
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add day 3 part 2.<commit_after>
|
"""This module checks how many valid triangles are in the input data."""
def main():
"""Run main function."""
with open('data/day3data.txt', 'r') as f:
input = f.readlines()
dataList = [map(int, i.strip('\n').split()) for i in input]
# Transpose the data.
dataList = [list(i) for i in zip(*dataList)]
# Flatten the list.
triList = [item for sublist in dataList for item in sublist]
triangles = 0
for i in range(0, len(triList), 3):
print([triList[i], triList[i + 1], triList[i + 2]])
if isTriangle([triList[i], triList[i + 1], triList[i + 2]]):
triangles += 1
    print('There are {} valid triangles.'.format(triangles))
def isTriangle(input):
"""Check if list of three sides is a triangle."""
if 2 * max(input) < sum(input):
return True
return False
if __name__ == '__main__':
main()
|
Add day 3 part 2."""This module checks how many valid triangles are in the input data."""
def main():
"""Run main function."""
with open('data/day3data.txt', 'r') as f:
input = f.readlines()
dataList = [map(int, i.strip('\n').split()) for i in input]
# Transpose the data.
dataList = [list(i) for i in zip(*dataList)]
# Flatten the list.
triList = [item for sublist in dataList for item in sublist]
triangles = 0
for i in range(0, len(triList), 3):
print([triList[i], triList[i + 1], triList[i + 2]])
if isTriangle([triList[i], triList[i + 1], triList[i + 2]]):
triangles += 1
    print('There are {} valid triangles.'.format(triangles))
def isTriangle(input):
"""Check if list of three sides is a triangle."""
if 2 * max(input) < sum(input):
return True
return False
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add day 3 part 2.<commit_after>"""This module checks how many valid triangles are in the input data."""
def main():
"""Run main function."""
with open('data/day3data.txt', 'r') as f:
input = f.readlines()
dataList = [map(int, i.strip('\n').split()) for i in input]
# Transpose the data.
dataList = [list(i) for i in zip(*dataList)]
# Flatten the list.
triList = [item for sublist in dataList for item in sublist]
triangles = 0
for i in range(0, len(triList), 3):
print([triList[i], triList[i + 1], triList[i + 2]])
if isTriangle([triList[i], triList[i + 1], triList[i + 2]]):
triangles += 1
    print('There are {} valid triangles.'.format(triangles))
def isTriangle(input):
"""Check if list of three sides is a triangle."""
if 2 * max(input) < sum(input):
return True
return False
if __name__ == '__main__':
main()
|
|
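The isTriangle helper in the record above compresses the triangle inequality into a single comparison: 2 * max(sides) < sum(sides) holds exactly when the largest side is strictly shorter than the sum of the other two, which in turn implies the remaining two inequalities for free. A short illustrative sketch checking that equivalence against the explicit three-comparison form:

from itertools import product

def is_triangle_max(sides):
    # Single-comparison form used in the solution above.
    return 2 * max(sides) < sum(sides)

def is_triangle_explicit(a, b, c):
    # Textbook triangle inequality, all three comparisons spelled out.
    return a + b > c and a + c > b and b + c > a

# Exhaustively compare both forms on small side lengths.
assert all(
    is_triangle_max([a, b, c]) == is_triangle_explicit(a, b, c)
    for a, b, c in product(range(1, 20), repeat=3)
)
print('max-based and explicit checks agree')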
a07e4d08b475e0d921265f9da104f109943901bc
|
simlammps/tests/cuds_test.py
|
simlammps/tests/cuds_test.py
|
"""Tests for running lammps using CUDS and Simulation classes."""
import unittest
from simphony.core.cuba import CUBA
from simphony import CUDS, Simulation
from simphony.engine import EngineInterface
from simphony.testing.utils import create_particles_with_id
from simphony.cuds.particles import Particle, Particles
class LAMMPSCUDSTestCase(unittest.TestCase):
def setUp(self):
self.cuds = self.generate_cuds()
def generate_cuds(self):
pset1 = create_particles_with_id(restrict=[CUBA.VELOCITY])
pset2 = create_particles_with_id(restrict=[CUBA.VELOCITY])
ps1 = Particles('ps1')
ps2 = Particles('ps2')
ps1.add_particles(pset1)
ps2.add_particles(pset2)
c = CUDS()
c.add(ps1)
c.add(ps2)
return c
def test_create_lammps_internal_simulation(self):
self.assertRaisesRegexp(RuntimeError,
'CUBAExtension.BOX_VECTORS',
Simulation,
self.cuds,
'LAMMPS',
EngineInterface.Internal)
def test_create_lammps_fileio_simulation(self):
sim = Simulation(self.cuds, 'LAMMPS', EngineInterface.FileIO)
def test_create_liggghts_internal_simulation(self):
self.assertRaisesRegexp(RuntimeError,
'DEM using the INTERNAL interface is not yet supported',
Simulation,
self.cuds,
'LIGGGHTS',
EngineInterface.Internal)
def test_create_liggghts_fileio_simulation(self):
sim = Simulation(self.cuds, 'LIGGGHTS', EngineInterface.FileIO)
|
Add lammps wrapper tests with cuds
|
Add lammps wrapper tests with cuds
|
Python
|
bsd-2-clause
|
simphony/simphony-lammps-md,simphony/simphony-lammps-md
|
Add lammps wrapper tests with cuds
|
"""Tests for running lammps using CUDS and Simulation classes."""
import unittest
from simphony.core.cuba import CUBA
from simphony import CUDS, Simulation
from simphony.engine import EngineInterface
from simphony.testing.utils import create_particles_with_id
from simphony.cuds.particles import Particle, Particles
class LAMMPSCUDSTestCase(unittest.TestCase):
def setUp(self):
self.cuds = self.generate_cuds()
def generate_cuds(self):
pset1 = create_particles_with_id(restrict=[CUBA.VELOCITY])
pset2 = create_particles_with_id(restrict=[CUBA.VELOCITY])
ps1 = Particles('ps1')
ps2 = Particles('ps2')
ps1.add_particles(pset1)
ps2.add_particles(pset2)
c = CUDS()
c.add(ps1)
c.add(ps2)
return c
def test_create_lammps_internal_simulation(self):
self.assertRaisesRegexp(RuntimeError,
'CUBAExtension.BOX_VECTORS',
Simulation,
self.cuds,
'LAMMPS',
EngineInterface.Internal)
def test_create_lammps_fileio_simulation(self):
sim = Simulation(self.cuds, 'LAMMPS', EngineInterface.FileIO)
def test_create_liggghts_internal_simulation(self):
self.assertRaisesRegexp(RuntimeError,
'DEM using the INTERNAL interface is not yet supported',
Simulation,
self.cuds,
'LIGGGHTS',
EngineInterface.Internal)
def test_create_liggghts_fileio_simulation(self):
sim = Simulation(self.cuds, 'LIGGGHTS', EngineInterface.FileIO)
|
<commit_before><commit_msg>Add lammps wrapper tests with cuds<commit_after>
|
"""Tests for running lammps using CUDS and Simulation classes."""
import unittest
from simphony.core.cuba import CUBA
from simphony import CUDS, Simulation
from simphony.engine import EngineInterface
from simphony.testing.utils import create_particles_with_id
from simphony.cuds.particles import Particle, Particles
class LAMMPSCUDSTestCase(unittest.TestCase):
def setUp(self):
self.cuds = self.generate_cuds()
def generate_cuds(self):
pset1 = create_particles_with_id(restrict=[CUBA.VELOCITY])
pset2 = create_particles_with_id(restrict=[CUBA.VELOCITY])
ps1 = Particles('ps1')
ps2 = Particles('ps2')
ps1.add_particles(pset1)
ps2.add_particles(pset2)
c = CUDS()
c.add(ps1)
c.add(ps2)
return c
def test_create_lammps_internal_simulation(self):
self.assertRaisesRegexp(RuntimeError,
'CUBAExtension.BOX_VECTORS',
Simulation,
self.cuds,
'LAMMPS',
EngineInterface.Internal)
def test_create_lammps_fileio_simulation(self):
sim = Simulation(self.cuds, 'LAMMPS', EngineInterface.FileIO)
def test_create_liggghts_internal_simulation(self):
self.assertRaisesRegexp(RuntimeError,
'DEM using the INTERNAL interface is not yet supported',
Simulation,
self.cuds,
'LIGGGHTS',
EngineInterface.Internal)
def test_create_liggghts_fileio_simulation(self):
sim = Simulation(self.cuds, 'LIGGGHTS', EngineInterface.FileIO)
|
Add lammps wrapper tests with cuds"""Tests for running lammps using CUDS and Simulation classes."""
import unittest
from simphony.core.cuba import CUBA
from simphony import CUDS, Simulation
from simphony.engine import EngineInterface
from simphony.testing.utils import create_particles_with_id
from simphony.cuds.particles import Particle, Particles
class LAMMPSCUDSTestCase(unittest.TestCase):
def setUp(self):
self.cuds = self.generate_cuds()
def generate_cuds(self):
pset1 = create_particles_with_id(restrict=[CUBA.VELOCITY])
pset2 = create_particles_with_id(restrict=[CUBA.VELOCITY])
ps1 = Particles('ps1')
ps2 = Particles('ps2')
ps1.add_particles(pset1)
ps2.add_particles(pset2)
c = CUDS()
c.add(ps1)
c.add(ps2)
return c
def test_create_lammps_internal_simulation(self):
self.assertRaisesRegexp(RuntimeError,
'CUBAExtension.BOX_VECTORS',
Simulation,
self.cuds,
'LAMMPS',
EngineInterface.Internal)
def test_create_lammps_fileio_simulation(self):
sim = Simulation(self.cuds, 'LAMMPS', EngineInterface.FileIO)
def test_create_liggghts_internal_simulation(self):
self.assertRaisesRegexp(RuntimeError,
'DEM using the INTERNAL interface is not yet supported',
Simulation,
self.cuds,
'LIGGGHTS',
EngineInterface.Internal)
def test_create_liggghts_fileio_simulation(self):
sim = Simulation(self.cuds, 'LIGGGHTS', EngineInterface.FileIO)
|
<commit_before><commit_msg>Add lammps wrapper tests with cuds<commit_after>"""Tests for running lammps using CUDS and Simulation classes."""
import unittest
from simphony.core.cuba import CUBA
from simphony import CUDS, Simulation
from simphony.engine import EngineInterface
from simphony.testing.utils import create_particles_with_id
from simphony.cuds.particles import Particle, Particles
class LAMMPSCUDSTestCase(unittest.TestCase):
def setUp(self):
self.cuds = self.generate_cuds()
def generate_cuds(self):
pset1 = create_particles_with_id(restrict=[CUBA.VELOCITY])
pset2 = create_particles_with_id(restrict=[CUBA.VELOCITY])
ps1 = Particles('ps1')
ps2 = Particles('ps2')
ps1.add_particles(pset1)
ps2.add_particles(pset2)
c = CUDS()
c.add(ps1)
c.add(ps2)
return c
def test_create_lammps_internal_simulation(self):
self.assertRaisesRegexp(RuntimeError,
'CUBAExtension.BOX_VECTORS',
Simulation,
self.cuds,
'LAMMPS',
EngineInterface.Internal)
def test_create_lammps_fileio_simulation(self):
sim = Simulation(self.cuds, 'LAMMPS', EngineInterface.FileIO)
def test_create_liggghts_internal_simulation(self):
self.assertRaisesRegexp(RuntimeError,
'DEM using the INTERNAL interface is not yet supported',
Simulation,
self.cuds,
'LIGGGHTS',
EngineInterface.Internal)
def test_create_liggghts_fileio_simulation(self):
sim = Simulation(self.cuds, 'LIGGGHTS', EngineInterface.FileIO)
|
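A small portability note on the test code above: assertRaisesRegexp is the Python 2 spelling; since Python 3.2 the method is called assertRaisesRegex and the old name survives only as a deprecated alias. A minimal standalone sketch of the same assertion style under the modern name (the error message is copied from the record, everything else is illustrative):

import unittest

class RegexAssertionSketch(unittest.TestCase):
    def test_modern_spelling(self):
        # assertRaisesRegex is the Python 3 name for assertRaisesRegexp.
        with self.assertRaisesRegex(RuntimeError, 'not yet supported'):
            raise RuntimeError(
                'DEM using the INTERNAL interface is not yet supported')

if __name__ == '__main__':
    unittest.main()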