| commit (stringlengths 40–40) | old_file (stringlengths 4–118) | new_file (stringlengths 4–118) | old_contents (stringlengths 0–2.94k) | new_contents (stringlengths 1–4.43k) | subject (stringlengths 15–444) | message (stringlengths 16–3.45k) | lang (stringclasses, 1 value) | license (stringclasses, 13 values) | repos (stringlengths 5–43.2k) | prompt (stringlengths 17–4.58k) | response (stringlengths 1–4.43k) | prompt_tagged (stringlengths 58–4.62k) | response_tagged (stringlengths 1–4.43k) | text (stringlengths 132–7.29k) | text_tagged (stringlengths 173–7.33k) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4587ca62906c668d17356b12c96c89a302f6df4f
|
examples/text_diff_reporter_example.py
|
examples/text_diff_reporter_example.py
|
from approvaltests import Approvals
from approvaltests.TextDiffReporter import TextDiffReporter
import os
import unittest
class Test(unittest.TestCase):
    def test(self):
        # This environment variable should be set somewhere outside of the test
        # but is here to make the example clearer.
        os.environ["APPROVALS_TEXT_DIFF_TOOL"] = "meld"
        reporter = TextDiffReporter()
        Approvals.verify("x", reporter)
if __name__ == "__main__":
    unittest.main()
|
Add an example of how to use the TextDiffReporter.
|
Add an example of how to use the TextDiffReporter.
|
Python
|
apache-2.0
|
approvals/ApprovalTests.Python,approvals/ApprovalTests.Python,approvals/ApprovalTests.Python,tdpreece/ApprovalTests.Python
|
Add an example of how to use the TextDiffReporter.
|
from approvaltests import Approvals
from approvaltests.TextDiffReporter import TextDiffReporter
import os
import unittest
class Test(unittest.TestCase):
    def test(self):
        # This environment variable should be set somewhere outside of the test
        # but is here to make the example clearer.
        os.environ["APPROVALS_TEXT_DIFF_TOOL"] = "meld"
        reporter = TextDiffReporter()
        Approvals.verify("x", reporter)
if __name__ == "__main__":
    unittest.main()
|
<commit_before><commit_msg>Add an example of how to use the TextDiffReporter.<commit_after>
|
from approvaltests import Approvals
from approvaltests.TextDiffReporter import TextDiffReporter
import os
import unittest
class Test(unittest.TestCase):
    def test(self):
        # This environment variable should be set somewhere outside of the test
        # but is here to make the example clearer.
        os.environ["APPROVALS_TEXT_DIFF_TOOL"] = "meld"
        reporter = TextDiffReporter()
        Approvals.verify("x", reporter)
if __name__ == "__main__":
    unittest.main()
|
Add an example of how to use the TextDiffReporter.from approvaltests import Approvals
from approvaltests.TextDiffReporter import TextDiffReporter
import os
import unittest
class Test(unittest.TestCase):
    def test(self):
        # This environment variable should be set somewhere outside of the test
        # but is here to make the example clearer.
        os.environ["APPROVALS_TEXT_DIFF_TOOL"] = "meld"
        reporter = TextDiffReporter()
        Approvals.verify("x", reporter)
if __name__ == "__main__":
    unittest.main()
|
<commit_before><commit_msg>Add an example of how to use the TextDiffReporter.<commit_after>from approvaltests import Approvals
from approvaltests.TextDiffReporter import TextDiffReporter
import os
import unittest
class Test(unittest.TestCase):
    def test(self):
        # This environment variable should be set somewhere outside of the test
        # but is here to make the example clearer.
        os.environ["APPROVALS_TEXT_DIFF_TOOL"] = "meld"
        reporter = TextDiffReporter()
        Approvals.verify("x", reporter)
if __name__ == "__main__":
    unittest.main()
|
|
6844f4e3c526ad10df7777c6cef79c32a78b7f23
|
datapipe/denoising/cdf/__init__.py
|
datapipe/denoising/cdf/__init__.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Jérémie DECOCK (http://www.jdhp.org)
# This script is provided under the terms and conditions of the MIT license:
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
PACKAGE_PATH = os.path.split(__file__)[0]
ASTRI_CDF_FILE = os.path.join(PACKAGE_PATH, 'astri_inaf_cdf.json')
FLASHCAM_CDF_FILE = os.path.join(PACKAGE_PATH, 'flashcam_grid_prod3b_north_cdf.json')
LSTCAM_CDF_FILE = os.path.join(PACKAGE_PATH, 'lstcam_grid_prod3b_north_cdf.json')
NECTARCAM_CDF_FILE = os.path.join(PACKAGE_PATH, 'nectarcam_grid_prod3b_north_cdf.json')
__all__ = ['ASTRI_CDF_FILE',
           'FLASHCAM_CDF_FILE',
           'LSTCAM_CDF_FILE',
           'NECTARCAM_CDF_FILE']
|
Simplify the usage of CDF files.
|
Simplify the usage of CDF files.
|
Python
|
mit
|
jdhp-sap/sap-cta-data-pipeline,jdhp-sap/sap-cta-data-pipeline,jdhp-sap/data-pipeline-standalone-scripts,jdhp-sap/data-pipeline-standalone-scripts
|
Simplify the usage of CDF files.
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Jérémie DECOCK (http://www.jdhp.org)
# This script is provided under the terms and conditions of the MIT license:
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
PACKAGE_PATH = os.path.split(__file__)[0]
ASTRI_CDF_FILE = os.path.join(PACKAGE_PATH, 'astri_inaf_cdf.json')
FLASHCAM_CDF_FILE = os.path.join(PACKAGE_PATH, 'flashcam_grid_prod3b_north_cdf.json')
LSTCAM_CDF_FILE = os.path.join(PACKAGE_PATH, 'lstcam_grid_prod3b_north_cdf.json')
NECTARCAM_CDF_FILE = os.path.join(PACKAGE_PATH, 'nectarcam_grid_prod3b_north_cdf.json')
__all__ = ['ASTRI_CDF_FILE',
           'FLASHCAM_CDF_FILE',
           'LSTCAM_CDF_FILE',
           'NECTARCAM_CDF_FILE']
|
<commit_before><commit_msg>Simplify the usage of CDF files.<commit_after>
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Jérémie DECOCK (http://www.jdhp.org)
# This script is provided under the terms and conditions of the MIT license:
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
PACKAGE_PATH = os.path.split(__file__)[0]
ASTRI_CDF_FILE = os.path.join(PACKAGE_PATH, 'astri_inaf_cdf.json')
FLASHCAM_CDF_FILE = os.path.join(PACKAGE_PATH, 'flashcam_grid_prod3b_north_cdf.json')
LSTCAM_CDF_FILE = os.path.join(PACKAGE_PATH, 'lstcam_grid_prod3b_north_cdf.json')
NECTARCAM_CDF_FILE = os.path.join(PACKAGE_PATH, 'nectarcam_grid_prod3b_north_cdf.json')
__all__ = ['ASTRI_CDF_FILE',
           'FLASHCAM_CDF_FILE',
           'LSTCAM_CDF_FILE',
           'NECTARCAM_CDF_FILE']
|
Simplify the usage of CDF files.#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Jérémie DECOCK (http://www.jdhp.org)
# This script is provided under the terms and conditions of the MIT license:
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
PACKAGE_PATH = os.path.split(__file__)[0]
ASTRI_CDF_FILE = os.path.join(PACKAGE_PATH, 'astri_inaf_cdf.json')
FLASHCAM_CDF_FILE = os.path.join(PACKAGE_PATH, 'flashcam_grid_prod3b_north_cdf.json')
LSTCAM_CDF_FILE = os.path.join(PACKAGE_PATH, 'lstcam_grid_prod3b_north_cdf.json')
NECTARCAM_CDF_FILE = os.path.join(PACKAGE_PATH, 'nectarcam_grid_prod3b_north_cdf.json')
__all__ = ['ASTRI_CDF_FILE',
           'FLASHCAM_CDF_FILE',
           'LSTCAM_CDF_FILE',
           'NECTARCAM_CDF_FILE']
|
<commit_before><commit_msg>Simplify the usage of CDF files.<commit_after>#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Jérémie DECOCK (http://www.jdhp.org)
# This script is provided under the terms and conditions of the MIT license:
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
PACKAGE_PATH = os.path.split(__file__)[0]
ASTRI_CDF_FILE = os.path.join(PACKAGE_PATH, 'astri_inaf_cdf.json')
FLASHCAM_CDF_FILE = os.path.join(PACKAGE_PATH, 'flashcam_grid_prod3b_north_cdf.json')
LSTCAM_CDF_FILE = os.path.join(PACKAGE_PATH, 'lstcam_grid_prod3b_north_cdf.json')
NECTARCAM_CDF_FILE = os.path.join(PACKAGE_PATH, 'nectarcam_grid_prod3b_north_cdf.json')
__all__ = ['ASTRI_CDF_FILE',
           'FLASHCAM_CDF_FILE',
           'LSTCAM_CDF_FILE',
           'NECTARCAM_CDF_FILE']
|
|
eac58dcfede3b17163ca625813f55dbaf1d72d38
|
tests/test-icon-layout.py
|
tests/test-icon-layout.py
|
#!/usr/bin/python
import pygtk
pygtk.require('2.0')
from sugar.session.UITestSession import UITestSession
session = UITestSession()
session.start()
import sys
import random
import gtk
import goocanvas
from view.home.IconLayout import IconLayout
from sugar.canvas import IconColor
from sugar.canvas.IconItem import IconItem
from sugar.canvas.CanvasView import CanvasView
window = gtk.Window()
window.connect("destroy", lambda w: gtk.main_quit())
window.show()
canvas = CanvasView()
canvas.show()
window.add(canvas)
canvas_model = goocanvas.CanvasModelSimple()
root = canvas_model.get_root_item()
item = goocanvas.Rect(x=0, y=0, width=1200, height=900,
line_width=0.0, fill_color='#e2e2e2')
root.add_child(item)
icon_layout = IconLayout(1200, 900)
for i in range(0, 20):
    color = IconColor.IconColor()
    icon = IconItem(size=75, color=color,
                    icon_name='stock-buddy')
    root.add_child(icon)
    icon_layout.add_icon(icon)
canvas.set_model(canvas_model)
gtk.main()
|
Add test for icon layout
|
Add test for icon layout
|
Python
|
lgpl-2.1
|
puneetgkaur/sugar-toolkit-gtk3,puneetgkaur/backup_sugar_sugartoolkit,tchx84/sugar-toolkit-gtk3,sugarlabs/sugar-toolkit-gtk3,i5o/sugar-toolkit-gtk3,sugarlabs/sugar-toolkit,gusDuarte/sugar-toolkit-gtk3,ceibal-tatu/sugar-toolkit-gtk3,godiard/sugar-toolkit-gtk3,ceibal-tatu/sugar-toolkit-gtk3,gusDuarte/sugar-toolkit-gtk3,Daksh/sugar-toolkit-gtk3,quozl/sugar-toolkit-gtk3,gusDuarte/sugar-toolkit-gtk3,puneetgkaur/backup_sugar_sugartoolkit,ceibal-tatu/sugar-toolkit,puneetgkaur/sugar-toolkit-gtk3,godiard/sugar-toolkit-gtk3,sugarlabs/sugar-toolkit,tchx84/debian-pkg-sugar-toolkit,samdroid-apps/sugar-toolkit-gtk3,tchx84/debian-pkg-sugar-toolkit,godiard/sugar-toolkit-gtk3,manuq/sugar-toolkit-gtk3,manuq/sugar-toolkit-gtk3,quozl/sugar-toolkit-gtk3,i5o/sugar-toolkit-gtk3,samdroid-apps/sugar-toolkit-gtk3,i5o/sugar-toolkit-gtk3,sugarlabs/sugar-toolkit-gtk3,sugarlabs/sugar-toolkit,gusDuarte/sugar-toolkit-gtk3,tchx84/sugar-toolkit-gtk3,i5o/sugar-toolkit-gtk3,Daksh/sugar-toolkit-gtk3,puneetgkaur/sugar-toolkit-gtk3,Daksh/sugar-toolkit-gtk3,ceibal-tatu/sugar-toolkit,tchx84/sugar-toolkit-gtk3,tchx84/debian-pkg-sugar-toolkit-gtk3,sugarlabs/sugar-toolkit-gtk3,puneetgkaur/backup_sugar_sugartoolkit,samdroid-apps/sugar-toolkit-gtk3,quozl/sugar-toolkit-gtk3,ceibal-tatu/sugar-toolkit,tchx84/debian-pkg-sugar-toolkit,quozl/sugar-toolkit-gtk3,sugarlabs/sugar-toolkit,samdroid-apps/sugar-toolkit-gtk3,ceibal-tatu/sugar-toolkit-gtk3,tchx84/debian-pkg-sugar-toolkit-gtk3,manuq/sugar-toolkit-gtk3,tchx84/debian-pkg-sugar-toolkit-gtk3
|
Add test for icon layout
|
#!/usr/bin/python
import pygtk
pygtk.require('2.0')
from sugar.session.UITestSession import UITestSession
session = UITestSession()
session.start()
import sys
import random
import gtk
import goocanvas
from view.home.IconLayout import IconLayout
from sugar.canvas import IconColor
from sugar.canvas.IconItem import IconItem
from sugar.canvas.CanvasView import CanvasView
window = gtk.Window()
window.connect("destroy", lambda w: gtk.main_quit())
window.show()
canvas = CanvasView()
canvas.show()
window.add(canvas)
canvas_model = goocanvas.CanvasModelSimple()
root = canvas_model.get_root_item()
item = goocanvas.Rect(x=0, y=0, width=1200, height=900,
line_width=0.0, fill_color='#e2e2e2')
root.add_child(item)
icon_layout = IconLayout(1200, 900)
for i in range(0, 20):
    color = IconColor.IconColor()
    icon = IconItem(size=75, color=color,
                    icon_name='stock-buddy')
    root.add_child(icon)
    icon_layout.add_icon(icon)
canvas.set_model(canvas_model)
gtk.main()
|
<commit_before><commit_msg>Add test for icon layout<commit_after>
|
#!/usr/bin/python
import pygtk
pygtk.require('2.0')
from sugar.session.UITestSession import UITestSession
session = UITestSession()
session.start()
import sys
import random
import gtk
import goocanvas
from view.home.IconLayout import IconLayout
from sugar.canvas import IconColor
from sugar.canvas.IconItem import IconItem
from sugar.canvas.CanvasView import CanvasView
window = gtk.Window()
window.connect("destroy", lambda w: gtk.main_quit())
window.show()
canvas = CanvasView()
canvas.show()
window.add(canvas)
canvas_model = goocanvas.CanvasModelSimple()
root = canvas_model.get_root_item()
item = goocanvas.Rect(x=0, y=0, width=1200, height=900,
line_width=0.0, fill_color='#e2e2e2')
root.add_child(item)
icon_layout = IconLayout(1200, 900)
for i in range(0, 20):
    color = IconColor.IconColor()
    icon = IconItem(size=75, color=color,
                    icon_name='stock-buddy')
    root.add_child(icon)
    icon_layout.add_icon(icon)
canvas.set_model(canvas_model)
gtk.main()
|
Add test for icon layout#!/usr/bin/python
import pygtk
pygtk.require('2.0')
from sugar.session.UITestSession import UITestSession
session = UITestSession()
session.start()
import sys
import random
import gtk
import goocanvas
from view.home.IconLayout import IconLayout
from sugar.canvas import IconColor
from sugar.canvas.IconItem import IconItem
from sugar.canvas.CanvasView import CanvasView
window = gtk.Window()
window.connect("destroy", lambda w: gtk.main_quit())
window.show()
canvas = CanvasView()
canvas.show()
window.add(canvas)
canvas_model = goocanvas.CanvasModelSimple()
root = canvas_model.get_root_item()
item = goocanvas.Rect(x=0, y=0, width=1200, height=900,
line_width=0.0, fill_color='#e2e2e2')
root.add_child(item)
icon_layout = IconLayout(1200, 900)
for i in range(0, 20):
    color = IconColor.IconColor()
    icon = IconItem(size=75, color=color,
                    icon_name='stock-buddy')
    root.add_child(icon)
    icon_layout.add_icon(icon)
canvas.set_model(canvas_model)
gtk.main()
|
<commit_before><commit_msg>Add test for icon layout<commit_after>#!/usr/bin/python
import pygtk
pygtk.require('2.0')
from sugar.session.UITestSession import UITestSession
session = UITestSession()
session.start()
import sys
import random
import gtk
import goocanvas
from view.home.IconLayout import IconLayout
from sugar.canvas import IconColor
from sugar.canvas.IconItem import IconItem
from sugar.canvas.CanvasView import CanvasView
window = gtk.Window()
window.connect("destroy", lambda w: gtk.main_quit())
window.show()
canvas = CanvasView()
canvas.show()
window.add(canvas)
canvas_model = goocanvas.CanvasModelSimple()
root = canvas_model.get_root_item()
item = goocanvas.Rect(x=0, y=0, width=1200, height=900,
line_width=0.0, fill_color='#e2e2e2')
root.add_child(item)
icon_layout = IconLayout(1200, 900)
for i in range(0, 20):
    color = IconColor.IconColor()
    icon = IconItem(size=75, color=color,
                    icon_name='stock-buddy')
    root.add_child(icon)
    icon_layout.add_icon(icon)
canvas.set_model(canvas_model)
gtk.main()
|
|
3bfc26205ea6ebc36a54991472e8c495430af0d3
|
test/test_git_utilities.py
|
test/test_git_utilities.py
|
"""Test group class"""
import unittest
from clowder.utility.git_utilities import (
git_ref_type,
git_truncate_ref
)
class GroupTest(unittest.TestCase):
    """group test subclass"""
    def setUp(self):
        self.branch_ref = 'refs/heads/master'
        self.tag_ref = 'refs/tags/v1.0'
        self.sha_ref = '7083e8840e1bb972b7664cfa20bbd7a25f004018'
        self.unknown_ref = 'unknown'
    def test_git_ref_type_branch(self):
        """Test git_ref_type() function for branch ref"""
        self.assertEqual(git_ref_type(self.branch_ref), 'branch')
    def test_git_ref_type_sha(self):
        """Test git_ref_type() function for sha ref"""
        self.assertEqual(git_ref_type(self.sha_ref), 'sha')
    def test_git_ref_type_tag(self):
        """Test git_ref_type() function for tag ref"""
        self.assertEqual(git_ref_type(self.tag_ref), 'tag')
    def test_git_ref_type_unknown(self):
        """Test git_ref_type() function for unknown ref type"""
        self.assertEqual(git_ref_type(self.unknown_ref), 'unknown')
    def test_git_truncate_ref_branch(self):
        """Test git_truncate_ref() function for branch ref"""
        self.assertEqual(git_truncate_ref(self.branch_ref), 'master')
    def test_git_truncate_ref_sha(self):
        """Test git_truncate_ref() function for sha ref"""
        self.assertEqual(git_truncate_ref(self.sha_ref), self.sha_ref)
    def test_git_truncate_ref_tag(self):
        """Test git_truncate_ref() function for tag ref"""
        self.assertEqual(git_truncate_ref(self.tag_ref), 'v1.0')
if __name__ == '__main__':
    unittest.main()
|
Add unit tests for git_utilities
|
Add unit tests for git_utilities
|
Python
|
mit
|
JrGoodle/clowder,JrGoodle/clowder,JrGoodle/clowder
|
Add unit tests for git_utilities
|
"""Test group class"""
import unittest
from clowder.utility.git_utilities import (
git_ref_type,
git_truncate_ref
)
class GroupTest(unittest.TestCase):
    """group test subclass"""
    def setUp(self):
        self.branch_ref = 'refs/heads/master'
        self.tag_ref = 'refs/tags/v1.0'
        self.sha_ref = '7083e8840e1bb972b7664cfa20bbd7a25f004018'
        self.unknown_ref = 'unknown'
    def test_git_ref_type_branch(self):
        """Test git_ref_type() function for branch ref"""
        self.assertEqual(git_ref_type(self.branch_ref), 'branch')
    def test_git_ref_type_sha(self):
        """Test git_ref_type() function for sha ref"""
        self.assertEqual(git_ref_type(self.sha_ref), 'sha')
    def test_git_ref_type_tag(self):
        """Test git_ref_type() function for tag ref"""
        self.assertEqual(git_ref_type(self.tag_ref), 'tag')
    def test_git_ref_type_unknown(self):
        """Test git_ref_type() function for unknown ref type"""
        self.assertEqual(git_ref_type(self.unknown_ref), 'unknown')
    def test_git_truncate_ref_branch(self):
        """Test git_truncate_ref() function for branch ref"""
        self.assertEqual(git_truncate_ref(self.branch_ref), 'master')
    def test_git_truncate_ref_sha(self):
        """Test git_truncate_ref() function for sha ref"""
        self.assertEqual(git_truncate_ref(self.sha_ref), self.sha_ref)
    def test_git_truncate_ref_tag(self):
        """Test git_truncate_ref() function for tag ref"""
        self.assertEqual(git_truncate_ref(self.tag_ref), 'v1.0')
if __name__ == '__main__':
    unittest.main()
|
<commit_before><commit_msg>Add unit tests for git_utilities<commit_after>
|
"""Test group class"""
import unittest
from clowder.utility.git_utilities import (
git_ref_type,
git_truncate_ref
)
class GroupTest(unittest.TestCase):
    """group test subclass"""
    def setUp(self):
        self.branch_ref = 'refs/heads/master'
        self.tag_ref = 'refs/tags/v1.0'
        self.sha_ref = '7083e8840e1bb972b7664cfa20bbd7a25f004018'
        self.unknown_ref = 'unknown'
    def test_git_ref_type_branch(self):
        """Test git_ref_type() function for branch ref"""
        self.assertEqual(git_ref_type(self.branch_ref), 'branch')
    def test_git_ref_type_sha(self):
        """Test git_ref_type() function for sha ref"""
        self.assertEqual(git_ref_type(self.sha_ref), 'sha')
    def test_git_ref_type_tag(self):
        """Test git_ref_type() function for tag ref"""
        self.assertEqual(git_ref_type(self.tag_ref), 'tag')
    def test_git_ref_type_unknown(self):
        """Test git_ref_type() function for unknown ref type"""
        self.assertEqual(git_ref_type(self.unknown_ref), 'unknown')
    def test_git_truncate_ref_branch(self):
        """Test git_truncate_ref() function for branch ref"""
        self.assertEqual(git_truncate_ref(self.branch_ref), 'master')
    def test_git_truncate_ref_sha(self):
        """Test git_truncate_ref() function for sha ref"""
        self.assertEqual(git_truncate_ref(self.sha_ref), self.sha_ref)
    def test_git_truncate_ref_tag(self):
        """Test git_truncate_ref() function for tag ref"""
        self.assertEqual(git_truncate_ref(self.tag_ref), 'v1.0')
if __name__ == '__main__':
    unittest.main()
|
Add unit tests for git_utilities"""Test group class"""
import unittest
from clowder.utility.git_utilities import (
git_ref_type,
git_truncate_ref
)
class GroupTest(unittest.TestCase):
    """group test subclass"""
    def setUp(self):
        self.branch_ref = 'refs/heads/master'
        self.tag_ref = 'refs/tags/v1.0'
        self.sha_ref = '7083e8840e1bb972b7664cfa20bbd7a25f004018'
        self.unknown_ref = 'unknown'
    def test_git_ref_type_branch(self):
        """Test git_ref_type() function for branch ref"""
        self.assertEqual(git_ref_type(self.branch_ref), 'branch')
    def test_git_ref_type_sha(self):
        """Test git_ref_type() function for sha ref"""
        self.assertEqual(git_ref_type(self.sha_ref), 'sha')
    def test_git_ref_type_tag(self):
        """Test git_ref_type() function for tag ref"""
        self.assertEqual(git_ref_type(self.tag_ref), 'tag')
    def test_git_ref_type_unknown(self):
        """Test git_ref_type() function for unknown ref type"""
        self.assertEqual(git_ref_type(self.unknown_ref), 'unknown')
    def test_git_truncate_ref_branch(self):
        """Test git_truncate_ref() function for branch ref"""
        self.assertEqual(git_truncate_ref(self.branch_ref), 'master')
    def test_git_truncate_ref_sha(self):
        """Test git_truncate_ref() function for sha ref"""
        self.assertEqual(git_truncate_ref(self.sha_ref), self.sha_ref)
    def test_git_truncate_ref_tag(self):
        """Test git_truncate_ref() function for tag ref"""
        self.assertEqual(git_truncate_ref(self.tag_ref), 'v1.0')
if __name__ == '__main__':
    unittest.main()
|
<commit_before><commit_msg>Add unit tests for git_utilities<commit_after>"""Test group class"""
import unittest
from clowder.utility.git_utilities import (
git_ref_type,
git_truncate_ref
)
class GroupTest(unittest.TestCase):
    """group test subclass"""
    def setUp(self):
        self.branch_ref = 'refs/heads/master'
        self.tag_ref = 'refs/tags/v1.0'
        self.sha_ref = '7083e8840e1bb972b7664cfa20bbd7a25f004018'
        self.unknown_ref = 'unknown'
    def test_git_ref_type_branch(self):
        """Test git_ref_type() function for branch ref"""
        self.assertEqual(git_ref_type(self.branch_ref), 'branch')
    def test_git_ref_type_sha(self):
        """Test git_ref_type() function for sha ref"""
        self.assertEqual(git_ref_type(self.sha_ref), 'sha')
    def test_git_ref_type_tag(self):
        """Test git_ref_type() function for tag ref"""
        self.assertEqual(git_ref_type(self.tag_ref), 'tag')
    def test_git_ref_type_unknown(self):
        """Test git_ref_type() function for unknown ref type"""
        self.assertEqual(git_ref_type(self.unknown_ref), 'unknown')
    def test_git_truncate_ref_branch(self):
        """Test git_truncate_ref() function for branch ref"""
        self.assertEqual(git_truncate_ref(self.branch_ref), 'master')
    def test_git_truncate_ref_sha(self):
        """Test git_truncate_ref() function for sha ref"""
        self.assertEqual(git_truncate_ref(self.sha_ref), self.sha_ref)
    def test_git_truncate_ref_tag(self):
        """Test git_truncate_ref() function for tag ref"""
        self.assertEqual(git_truncate_ref(self.tag_ref), 'v1.0')
if __name__ == '__main__':
    unittest.main()
|
|
ab158de23187fd0ca838d7f89c2be81cab0cb0a2
|
tools/compare_resample.py
|
tools/compare_resample.py
|
# Script to plot original wav file against wav file resampled at 16 kHz
# Licensed under Apache v2 (see LICENSE)
from __future__ import division
import sys
import os
import glob
import numpy as np
import matplotlib.pyplot as plt
from numpy.random import randint
from scipy.signal import resample
# Problems doing this import
# May need to move this file to top level directory
from opensauce.helpers import wavread, round_half_away_from_zero
def main(wav_dir, fs_rs):
"""Compare original data vs resampled data for all wav files in wav_dir,
where resampling frequency is given in Hz by fs_rs
"""
# Find all .wav files in test/data directory
wav_files = glob.glob(os.path.join(wav_dir, '*.wav'))
for wav_file in wav_files:
print('Processing wav file {}'.format(wav_file))
# y is data points, fs is sampling frequency
y, fs = wavread(wav_file)
# ns is number of samples
ns = len(y)
period = 1.0 / fs
# Time points corresponding to samples
t = np.arange(0, ns*period, period)
# Resample to 16 kHz
period_rs = 1.0 / fs_rs
# Number of points in resample
ns_rs = round_half_away_from_zero(ns * fs_rs / fs)
# Do resample
y_rs, t_rs = resample(y, ns_rs, t)
# Number of points to plot
n = 1000
# Start plotting from this data point
s = randint(10000, 35015-n)
plt.figure()
plt.plot(t, y, 'b-', markersize=1)
plt.plot(t_rs, y_rs, 'ro', markersize=3)
plt.xlim(s*period, (s+n)*period)
plt.title(os.path.basename(wav_file))
plt.xlabel('Time (s)')
plt.ylabel('Amplitude')
plt.savefig(os.path.splitext(os.path.basename(wav_file))[0] + '.pdf')
if __name__ == '__main__':
main(sys.argv[1], int(sys.argv[2]))
|
Add script to check resampling of wav files
|
Add script to check resampling of wav files
|
Python
|
apache-2.0
|
voicesauce/opensauce-python,voicesauce/opensauce-python,voicesauce/opensauce-python
|
Add script to check resampling of wav files
|
# Script to plot original wav file against wav file resampled at 16 kHz
# Licensed under Apache v2 (see LICENSE)
from __future__ import division
import sys
import os
import glob
import numpy as np
import matplotlib.pyplot as plt
from numpy.random import randint
from scipy.signal import resample
# Problems doing this import
# May need to move this file to top level directory
from opensauce.helpers import wavread, round_half_away_from_zero
def main(wav_dir, fs_rs):
"""Compare original data vs resampled data for all wav files in wav_dir,
where resampling frequency is given in Hz by fs_rs
"""
# Find all .wav files in test/data directory
wav_files = glob.glob(os.path.join(wav_dir, '*.wav'))
for wav_file in wav_files:
print('Processing wav file {}'.format(wav_file))
# y is data points, fs is sampling frequency
y, fs = wavread(wav_file)
# ns is number of samples
ns = len(y)
period = 1.0 / fs
# Time points corresponding to samples
t = np.arange(0, ns*period, period)
# Resample to 16 kHz
period_rs = 1.0 / fs_rs
# Number of points in resample
ns_rs = round_half_away_from_zero(ns * fs_rs / fs)
# Do resample
y_rs, t_rs = resample(y, ns_rs, t)
# Number of points to plot
n = 1000
# Start plotting from this data point
s = randint(10000, 35015-n)
plt.figure()
plt.plot(t, y, 'b-', markersize=1)
plt.plot(t_rs, y_rs, 'ro', markersize=3)
plt.xlim(s*period, (s+n)*period)
plt.title(os.path.basename(wav_file))
plt.xlabel('Time (s)')
plt.ylabel('Amplitude')
plt.savefig(os.path.splitext(os.path.basename(wav_file))[0] + '.pdf')
if __name__ == '__main__':
main(sys.argv[1], int(sys.argv[2]))
|
<commit_before><commit_msg>Add script to check resampling of wav files<commit_after>
|
# Script to plot original wav file against wav file resampled at 16 kHz
# Licensed under Apache v2 (see LICENSE)
from __future__ import division
import sys
import os
import glob
import numpy as np
import matplotlib.pyplot as plt
from numpy.random import randint
from scipy.signal import resample
# Problems doing this import
# May need to move this file to top level directory
from opensauce.helpers import wavread, round_half_away_from_zero
def main(wav_dir, fs_rs):
"""Compare original data vs resampled data for all wav files in wav_dir,
where resampling frequency is given in Hz by fs_rs
"""
# Find all .wav files in test/data directory
wav_files = glob.glob(os.path.join(wav_dir, '*.wav'))
for wav_file in wav_files:
print('Processing wav file {}'.format(wav_file))
# y is data points, fs is sampling frequency
y, fs = wavread(wav_file)
# ns is number of samples
ns = len(y)
period = 1.0 / fs
# Time points corresponding to samples
t = np.arange(0, ns*period, period)
# Resample to 16 kHz
period_rs = 1.0 / fs_rs
# Number of points in resample
ns_rs = round_half_away_from_zero(ns * fs_rs / fs)
# Do resample
y_rs, t_rs = resample(y, ns_rs, t)
# Number of points to plot
n = 1000
# Start plotting from this data point
s = randint(10000, 35015-n)
plt.figure()
plt.plot(t, y, 'b-', markersize=1)
plt.plot(t_rs, y_rs, 'ro', markersize=3)
plt.xlim(s*period, (s+n)*period)
plt.title(os.path.basename(wav_file))
plt.xlabel('Time (s)')
plt.ylabel('Amplitude')
plt.savefig(os.path.splitext(os.path.basename(wav_file))[0] + '.pdf')
if __name__ == '__main__':
main(sys.argv[1], int(sys.argv[2]))
|
Add script to check resampling of wav files# Script to plot original wav file against wav file resampled at 16 kHz
# Licensed under Apache v2 (see LICENSE)
from __future__ import division
import sys
import os
import glob
import numpy as np
import matplotlib.pyplot as plt
from numpy.random import randint
from scipy.signal import resample
# Problems doing this import
# May need to move this file to top level directory
from opensauce.helpers import wavread, round_half_away_from_zero
def main(wav_dir, fs_rs):
"""Compare original data vs resampled data for all wav files in wav_dir,
where resampling frequency is given in Hz by fs_rs
"""
# Find all .wav files in test/data directory
wav_files = glob.glob(os.path.join(wav_dir, '*.wav'))
for wav_file in wav_files:
print('Processing wav file {}'.format(wav_file))
# y is data points, fs is sampling frequency
y, fs = wavread(wav_file)
# ns is number of samples
ns = len(y)
period = 1.0 / fs
# Time points corresponding to samples
t = np.arange(0, ns*period, period)
# Resample to 16 kHz
period_rs = 1.0 / fs_rs
# Number of points in resample
ns_rs = round_half_away_from_zero(ns * fs_rs / fs)
# Do resample
y_rs, t_rs = resample(y, ns_rs, t)
# Number of points to plot
n = 1000
# Start plotting from this data point
s = randint(10000, 35015-n)
plt.figure()
plt.plot(t, y, 'b-', markersize=1)
plt.plot(t_rs, y_rs, 'ro', markersize=3)
plt.xlim(s*period, (s+n)*period)
plt.title(os.path.basename(wav_file))
plt.xlabel('Time (s)')
plt.ylabel('Amplitude')
plt.savefig(os.path.splitext(os.path.basename(wav_file))[0] + '.pdf')
if __name__ == '__main__':
main(sys.argv[1], int(sys.argv[2]))
|
<commit_before><commit_msg>Add script to check resampling of wav files<commit_after># Script to plot original wav file against wav file resampled at 16 kHz
# Licensed under Apache v2 (see LICENSE)
from __future__ import division
import sys
import os
import glob
import numpy as np
import matplotlib.pyplot as plt
from numpy.random import randint
from scipy.signal import resample
# Problems doing this import
# May need to move this file to top level directory
from opensauce.helpers import wavread, round_half_away_from_zero
def main(wav_dir, fs_rs):
"""Compare original data vs resampled data for all wav files in wav_dir,
where resampling frequency is given in Hz by fs_rs
"""
# Find all .wav files in test/data directory
wav_files = glob.glob(os.path.join(wav_dir, '*.wav'))
for wav_file in wav_files:
print('Processing wav file {}'.format(wav_file))
# y is data points, fs is sampling frequency
y, fs = wavread(wav_file)
# ns is number of samples
ns = len(y)
period = 1.0 / fs
# Time points corresponding to samples
t = np.arange(0, ns*period, period)
# Resample to 16 kHz
period_rs = 1.0 / fs_rs
# Number of points in resample
ns_rs = round_half_away_from_zero(ns * fs_rs / fs)
# Do resample
y_rs, t_rs = resample(y, ns_rs, t)
# Number of points to plot
n = 1000
# Start plotting from this data point
s = randint(10000, 35015-n)
plt.figure()
plt.plot(t, y, 'b-', markersize=1)
plt.plot(t_rs, y_rs, 'ro', markersize=3)
plt.xlim(s*period, (s+n)*period)
plt.title(os.path.basename(wav_file))
plt.xlabel('Time (s)')
plt.ylabel('Amplitude')
plt.savefig(os.path.splitext(os.path.basename(wav_file))[0] + '.pdf')
if __name__ == '__main__':
main(sys.argv[1], int(sys.argv[2]))
|
|
10b163de791495f89367be979a724cce95f4915d
|
logger/pattern.py
|
logger/pattern.py
|
#!/usr/bin/env python3
"""Patterns and parsers for various purposes."""
__all__ = ["FormatSpecifierParser"]
import re
class FormatSpecifierParser:
"""Parser for the format specifiers used in the Interpolater."""
parse_format_spec = re.compile(
r"""
\A
(?:
(?P<fill>.)?
(?P<align>[<>=^])
)?
(?P<sign>[-+ ])?
(?P<alt>\#)?
(?P<zeropad>0)?
(?P<width>(?!0)\d+)?
(?P<sep>,)?
(?:\.(?P<precision>0|(?!0)\d+))?
(?P<type>[eEfFgGn%])?
\Z
""",
re.VERBOSE | re.DOTALL)
def __init__(self, format_spec, *, is_digit):
"""Parse a format specifier and store attributes to self."""
match = self.parse_format_spec.match(format_spec)
if match is None:
raise ValueError("Invalid format specifier: {!r}".format(format_spec))
match_dict = match.groupdict()
self.fill = match_dict["fill"]
self.align = match_dict["align"] or (is_digit and ">" or "<")
self.zeropad = (match_dict["zeropad"] is not None)
if self.zeropad:
if self.fill is not None and self.fill != "0":
raise ValueError("Fill character conflicts with format specifier"
": {!r}".format(format_spec))
if self.align is not None and self.align != ">":
raise ValueError("Alignment conflicts with format specifier"
": {!r}".format(format_spec))
self.fill = "0"
self.align = ">"
self.fill = self.fill or " "
self.width = int(match_dict["width"] or 0)
if self.align == "=" and not is_digit:
raise ValueError("'=' not allowed in string format specifier")
self.sign = match_dict["sign"]
if self.sign is not None and not is_digit:
raise ValueError("Sign not allowed in string format specifier")
self.alt = match_dict["alt"]
if self.alt is not None and not is_digit:
raise ValueError("Alternate form (#) not allowed in string format specifier")
self.width = match_dict["width"]
self.sep = match_dict["sep"]
self.precision = match_dict["precision"]
self.type = match_dict["type"]
|
Add first draft of FormatSpecifierParser
|
Add first draft of FormatSpecifierParser
|
Python
|
bsd-2-clause
|
Vgr255/logging
|
Add first draft of FormatSpecifierParser
|
#!/usr/bin/env python3
"""Patterns and parsers for various purposes."""
__all__ = ["FormatSpecifierParser"]
import re
class FormatSpecifierParser:
"""Parser for the format specifiers used in the Interpolater."""
parse_format_spec = re.compile(
r"""
\A
(?:
(?P<fill>.)?
(?P<align>[<>=^])
)?
(?P<sign>[-+ ])?
(?P<alt>\#)?
(?P<zeropad>0)?
(?P<width>(?!0)\d+)?
(?P<sep>,)?
(?:\.(?P<precision>0|(?!0)\d+))?
(?P<type>[eEfFgGn%])?
\Z
""",
re.VERBOSE | re.DOTALL)
def __init__(self, format_spec, *, is_digit):
"""Parse a format specifier and store attributes to self."""
match = self.parse_format_spec.match(format_spec)
if match is None:
raise ValueError("Invalid format specifier: {!r}".format(format_spec))
match_dict = match.groupdict()
self.fill = match_dict["fill"]
self.align = match_dict["align"] or (is_digit and ">" or "<")
self.zeropad = (match_dict["zeropad"] is not None)
if self.zeropad:
if self.fill is not None and self.fill != "0":
raise ValueError("Fill character conflicts with format specifier"
": {!r}".format(format_spec))
if self.align is not None and self.align != ">":
raise ValueError("Alignment conflicts with format specifier"
": {!r}".format(format_spec))
self.fill = "0"
self.align = ">"
self.fill = self.fill or " "
self.width = int(match_dict["width"] or 0)
if self.align == "=" and not is_digit:
raise ValueError("'=' not allowed in string format specifier")
self.sign = match_dict["sign"]
if self.sign is not None and not is_digit:
raise ValueError("Sign not allowed in string format specifier")
self.alt = match_dict["alt"]
if self.alt is not None and not is_digit:
raise ValueError("Alternate form (#) not allowed in string format specifier")
self.width = match_dict["width"]
self.sep = match_dict["sep"]
self.precision = match_dict["precision"]
self.type = match_dict["type"]
|
<commit_before><commit_msg>Add first draft of FormatSpecifierParser<commit_after>
|
#!/usr/bin/env python3
"""Patterns and parsers for various purposes."""
__all__ = ["FormatSpecifierParser"]
import re
class FormatSpecifierParser:
"""Parser for the format specifiers used in the Interpolater."""
parse_format_spec = re.compile(
r"""
\A
(?:
(?P<fill>.)?
(?P<align>[<>=^])
)?
(?P<sign>[-+ ])?
(?P<alt>\#)?
(?P<zeropad>0)?
(?P<width>(?!0)\d+)?
(?P<sep>,)?
(?:\.(?P<precision>0|(?!0)\d+))?
(?P<type>[eEfFgGn%])?
\Z
""",
re.VERBOSE | re.DOTALL)
def __init__(self, format_spec, *, is_digit):
"""Parse a format specifier and store attributes to self."""
match = self.parse_format_spec.match(format_spec)
if match is None:
raise ValueError("Invalid format specifier: {!r}".format(format_spec))
match_dict = match.groupdict()
self.fill = match_dict["fill"]
self.align = match_dict["align"] or (is_digit and ">" or "<")
self.zeropad = (match_dict["zeropad"] is not None)
if self.zeropad:
if self.fill is not None and self.fill != "0":
raise ValueError("Fill character conflicts with format specifier"
": {!r}".format(format_spec))
if self.align is not None and self.align != ">":
raise ValueError("Alignment conflicts with format specifier"
": {!r}".format(format_spec))
self.fill = "0"
self.align = ">"
self.fill = self.fill or " "
self.width = int(match_dict["width"] or 0)
if self.align == "=" and not is_digit:
raise ValueError("'=' not allowed in string format specifier")
self.sign = match_dict["sign"]
if self.sign is not None and not is_digit:
raise ValueError("Sign not allowed in string format specifier")
self.alt = match_dict["alt"]
if self.alt is not None and not is_digit:
raise ValueError("Alternate form (#) not allowed in string format specifier")
self.width = match_dict["width"]
self.sep = match_dict["sep"]
self.precision = match_dict["precision"]
self.type = match_dict["type"]
|
Add first draft of FormatSpecifierParser#!/usr/bin/env python3
"""Patterns and parsers for various purposes."""
__all__ = ["FormatSpecifierParser"]
import re
class FormatSpecifierParser:
"""Parser for the format specifiers used in the Interpolater."""
parse_format_spec = re.compile(
r"""
\A
(?:
(?P<fill>.)?
(?P<align>[<>=^])
)?
(?P<sign>[-+ ])?
(?P<alt>\#)?
(?P<zeropad>0)?
(?P<width>(?!0)\d+)?
(?P<sep>,)?
(?:\.(?P<precision>0|(?!0)\d+))?
(?P<type>[eEfFgGn%])?
\Z
""",
re.VERBOSE | re.DOTALL)
def __init__(self, format_spec, *, is_digit):
"""Parse a format specifier and store attributes to self."""
match = self.parse_format_spec.match(format_spec)
if match is None:
raise ValueError("Invalid format specifier: {!r}".format(format_spec))
match_dict = match.groupdict()
self.fill = match_dict["fill"]
self.align = match_dict["align"] or (is_digit and ">" or "<")
self.zeropad = (match_dict["zeropad"] is not None)
if self.zeropad:
if self.fill is not None and self.fill != "0":
raise ValueError("Fill character conflicts with format specifier"
": {!r}".format(format_spec))
if self.align is not None and self.align != ">":
raise ValueError("Alignment conflicts with format specifier"
": {!r}".format(format_spec))
self.fill = "0"
self.align = ">"
self.fill = self.fill or " "
self.width = int(match_dict["width"] or 0)
if self.align == "=" and not is_digit:
raise ValueError("'=' not allowed in string format specifier")
self.sign = match_dict["sign"]
if self.sign is not None and not is_digit:
raise ValueError("Sign not allowed in string format specifier")
self.alt = match_dict["alt"]
if self.alt is not None and not is_digit:
raise ValueError("Alternate form (#) not allowed in string format specifier")
self.width = match_dict["width"]
self.sep = match_dict["sep"]
self.precision = match_dict["precision"]
self.type = match_dict["type"]
|
<commit_before><commit_msg>Add first draft of FormatSpecifierParser<commit_after>#!/usr/bin/env python3
"""Patterns and parsers for various purposes."""
__all__ = ["FormatSpecifierParser"]
import re
class FormatSpecifierParser:
"""Parser for the format specifiers used in the Interpolater."""
parse_format_spec = re.compile(
r"""
\A
(?:
(?P<fill>.)?
(?P<align>[<>=^])
)?
(?P<sign>[-+ ])?
(?P<alt>\#)?
(?P<zeropad>0)?
(?P<width>(?!0)\d+)?
(?P<sep>,)?
(?:\.(?P<precision>0|(?!0)\d+))?
(?P<type>[eEfFgGn%])?
\Z
""",
re.VERBOSE | re.DOTALL)
def __init__(self, format_spec, *, is_digit):
"""Parse a format specifier and store attributes to self."""
match = self.parse_format_spec.match(format_spec)
if match is None:
raise ValueError("Invalid format specifier: {!r}".format(format_spec))
match_dict = match.groupdict()
self.fill = match_dict["fill"]
self.align = match_dict["align"] or (is_digit and ">" or "<")
self.zeropad = (match_dict["zeropad"] is not None)
if self.zeropad:
if self.fill is not None and self.fill != "0":
raise ValueError("Fill character conflicts with format specifier"
": {!r}".format(format_spec))
if self.align is not None and self.align != ">":
raise ValueError("Alignment conflicts with format specifier"
": {!r}".format(format_spec))
self.fill = "0"
self.align = ">"
self.fill = self.fill or " "
self.width = int(match_dict["width"] or 0)
if self.align == "=" and not is_digit:
raise ValueError("'=' not allowed in string format specifier")
self.sign = match_dict["sign"]
if self.sign is not None and not is_digit:
raise ValueError("Sign not allowed in string format specifier")
self.alt = match_dict["alt"]
if self.alt is not None and not is_digit:
raise ValueError("Alternate form (#) not allowed in string format specifier")
self.width = match_dict["width"]
self.sep = match_dict["sep"]
self.precision = match_dict["precision"]
self.type = match_dict["type"]
|
|
5ca652d1f04a629c78ee3345c79dbded898a83dd
|
tests/test_cli.py
|
tests/test_cli.py
|
from click.testing import CliRunner
from webstore_deployer.deployer import init
def test_init():
    runner = CliRunner()
    result = runner.invoke(init, ['testing_client_ID'])
    assert result.exit_code == 0
    assert result.output.find('testing_client_ID') != -1
    assert result.output.find('testing_client_IDnic') == -1
|
Add basic test for init
|
Add basic test for init
|
Python
|
mit
|
melkamar/webstore-manager,melkamar/webstore-manager
|
Add basic test for init
|
from click.testing import CliRunner
from webstore_deployer.deployer import init
def test_init():
    runner = CliRunner()
    result = runner.invoke(init, ['testing_client_ID'])
    assert result.exit_code == 0
    assert result.output.find('testing_client_ID') != -1
    assert result.output.find('testing_client_IDnic') == -1
|
<commit_before><commit_msg>Add basic test for init<commit_after>
|
from click.testing import CliRunner
from webstore_deployer.deployer import init
def test_init():
    runner = CliRunner()
    result = runner.invoke(init, ['testing_client_ID'])
    assert result.exit_code == 0
    assert result.output.find('testing_client_ID') != -1
    assert result.output.find('testing_client_IDnic') == -1
|
Add basic test for initfrom click.testing import CliRunner
from webstore_deployer.deployer import init
def test_init():
    runner = CliRunner()
    result = runner.invoke(init, ['testing_client_ID'])
    assert result.exit_code == 0
    assert result.output.find('testing_client_ID') != -1
    assert result.output.find('testing_client_IDnic') == -1
|
<commit_before><commit_msg>Add basic test for init<commit_after>from click.testing import CliRunner
from webstore_deployer.deployer import init
def test_init():
    runner = CliRunner()
    result = runner.invoke(init, ['testing_client_ID'])
    assert result.exit_code == 0
    assert result.output.find('testing_client_ID') != -1
    assert result.output.find('testing_client_IDnic') == -1
|
|
e07c2f1c27e3d10e0a660e1434e66d8b26ad0810
|
openedx/core/djangoapps/schedules/migrations/0009_schedule_copy_column_values.py
|
openedx/core/djangoapps/schedules/migrations/0009_schedule_copy_column_values.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.26 on 2019-11-24 20:52
from __future__ import unicode_literals
from django.db import migrations, transaction
def copy_column_value_forwards(apps, schema_editor):
    """
    Copy the start field into start_date field
    This table has around 25 million rows, we'll follow non-atomic migrations.
    https://docs.djangoproject.com/en/2.2/howto/writing-migrations/#non-atomic-migrations
    """
    Schedule = apps.get_model('schedules', 'Schedule')
    while Schedule.objects.filter(start_date__isnull=True).exists():
        with transaction.atomic():
            for row in Schedule.objects.filter(start_date__isnull=True)[:1000]:
                row.start_date = row.start
                row.save()
class Migration(migrations.Migration):
    atomic = False
    dependencies = [
        ('schedules', '0008_add_new_start_date_field'),
    ]
    operations = [
        migrations.RunPython(
            copy_column_value_forwards,
            reverse_code=migrations.RunPython.noop,  # Allow reverse migrations, but make it a no-op.
        )
    ]
|
Copy values from old to new field in Schedules Model
|
Copy values from old to new field in Schedules Model
|
Python
|
agpl-3.0
|
appsembler/edx-platform,msegado/edx-platform,EDUlib/edx-platform,arbrandes/edx-platform,eduNEXT/edunext-platform,stvstnfrd/edx-platform,EDUlib/edx-platform,edx-solutions/edx-platform,edx/edx-platform,appsembler/edx-platform,edx-solutions/edx-platform,appsembler/edx-platform,EDUlib/edx-platform,msegado/edx-platform,appsembler/edx-platform,edx/edx-platform,cpennington/edx-platform,stvstnfrd/edx-platform,msegado/edx-platform,eduNEXT/edx-platform,stvstnfrd/edx-platform,mitocw/edx-platform,angelapper/edx-platform,angelapper/edx-platform,eduNEXT/edx-platform,EDUlib/edx-platform,cpennington/edx-platform,arbrandes/edx-platform,arbrandes/edx-platform,edx-solutions/edx-platform,angelapper/edx-platform,cpennington/edx-platform,eduNEXT/edunext-platform,cpennington/edx-platform,eduNEXT/edunext-platform,eduNEXT/edx-platform,angelapper/edx-platform,mitocw/edx-platform,eduNEXT/edx-platform,edx-solutions/edx-platform,mitocw/edx-platform,stvstnfrd/edx-platform,mitocw/edx-platform,arbrandes/edx-platform,eduNEXT/edunext-platform,edx/edx-platform,edx/edx-platform,msegado/edx-platform,msegado/edx-platform
|
Copy values from old to new field in Schedules Model
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.26 on 2019-11-24 20:52
from __future__ import unicode_literals
from django.db import migrations, transaction
def copy_column_value_forwards(apps, schema_editor):
    """
    Copy the start field into start_date field
    This table has around 25 million rows, we'll follow non-atomic migrations.
    https://docs.djangoproject.com/en/2.2/howto/writing-migrations/#non-atomic-migrations
    """
    Schedule = apps.get_model('schedules', 'Schedule')
    while Schedule.objects.filter(start_date__isnull=True).exists():
        with transaction.atomic():
            for row in Schedule.objects.filter(start_date__isnull=True)[:1000]:
                row.start_date = row.start
                row.save()
class Migration(migrations.Migration):
    atomic = False
    dependencies = [
        ('schedules', '0008_add_new_start_date_field'),
    ]
    operations = [
        migrations.RunPython(
            copy_column_value_forwards,
            reverse_code=migrations.RunPython.noop,  # Allow reverse migrations, but make it a no-op.
        )
    ]
|
<commit_before><commit_msg>Copy values from old to new field in Schedules Model<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.26 on 2019-11-24 20:52
from __future__ import unicode_literals
from django.db import migrations, transaction
def copy_column_value_forwards(apps, schema_editor):
    """
    Copy the start field into start_date field
    This table has around 25 million rows, we'll follow non-atomic migrations.
    https://docs.djangoproject.com/en/2.2/howto/writing-migrations/#non-atomic-migrations
    """
    Schedule = apps.get_model('schedules', 'Schedule')
    while Schedule.objects.filter(start_date__isnull=True).exists():
        with transaction.atomic():
            for row in Schedule.objects.filter(start_date__isnull=True)[:1000]:
                row.start_date = row.start
                row.save()
class Migration(migrations.Migration):
    atomic = False
    dependencies = [
        ('schedules', '0008_add_new_start_date_field'),
    ]
    operations = [
        migrations.RunPython(
            copy_column_value_forwards,
            reverse_code=migrations.RunPython.noop,  # Allow reverse migrations, but make it a no-op.
        )
    ]
|
Copy values from old to new field in Schedules Model# -*- coding: utf-8 -*-
# Generated by Django 1.11.26 on 2019-11-24 20:52
from __future__ import unicode_literals
from django.db import migrations, transaction
def copy_column_value_forwards(apps, schema_editor):
    """
    Copy the start field into start_date field
    This table has around 25 million rows, we'll follow non-atomic migrations.
    https://docs.djangoproject.com/en/2.2/howto/writing-migrations/#non-atomic-migrations
    """
    Schedule = apps.get_model('schedules', 'Schedule')
    while Schedule.objects.filter(start_date__isnull=True).exists():
        with transaction.atomic():
            for row in Schedule.objects.filter(start_date__isnull=True)[:1000]:
                row.start_date = row.start
                row.save()
class Migration(migrations.Migration):
    atomic = False
    dependencies = [
        ('schedules', '0008_add_new_start_date_field'),
    ]
    operations = [
        migrations.RunPython(
            copy_column_value_forwards,
            reverse_code=migrations.RunPython.noop,  # Allow reverse migrations, but make it a no-op.
        )
    ]
|
<commit_before><commit_msg>Copy values from old to new field in Schedules Model<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.26 on 2019-11-24 20:52
from __future__ import unicode_literals
from django.db import migrations, transaction
def copy_column_value_forwards(apps, schema_editor):
    """
    Copy the start field into start_date field
    This table has around 25 million rows, we'll follow non-atomic migrations.
    https://docs.djangoproject.com/en/2.2/howto/writing-migrations/#non-atomic-migrations
    """
    Schedule = apps.get_model('schedules', 'Schedule')
    while Schedule.objects.filter(start_date__isnull=True).exists():
        with transaction.atomic():
            for row in Schedule.objects.filter(start_date__isnull=True)[:1000]:
                row.start_date = row.start
                row.save()
class Migration(migrations.Migration):
    atomic = False
    dependencies = [
        ('schedules', '0008_add_new_start_date_field'),
    ]
    operations = [
        migrations.RunPython(
            copy_column_value_forwards,
            reverse_code=migrations.RunPython.noop,  # Allow reverse migrations, but make it a no-op.
        )
    ]
|
|
0d7e851f4acb609b55c162b772e41293b7f00b17
|
syntacticframes_project/syntacticframes/migrations/0002_auto_20140929_1135.py
|
syntacticframes_project/syntacticframes/migrations/0002_auto_20140929_1135.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    dependencies = [
        ('syntacticframes', '0001_initial'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='levinclass',
            options={'ordering': ['number']},
        ),
    ]
|
Add migration for Levin class order
|
Add migration for Levin class order
|
Python
|
mit
|
aymara/verbenet-editor,aymara/verbenet-editor,aymara/verbenet-editor
|
Add migration for Levin class order
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    dependencies = [
        ('syntacticframes', '0001_initial'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='levinclass',
            options={'ordering': ['number']},
        ),
    ]
|
<commit_before><commit_msg>Add migration for Levin class order<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    dependencies = [
        ('syntacticframes', '0001_initial'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='levinclass',
            options={'ordering': ['number']},
        ),
    ]
|
Add migration for Levin class order# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    dependencies = [
        ('syntacticframes', '0001_initial'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='levinclass',
            options={'ordering': ['number']},
        ),
    ]
|
<commit_before><commit_msg>Add migration for Levin class order<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    dependencies = [
        ('syntacticframes', '0001_initial'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='levinclass',
            options={'ordering': ['number']},
        ),
    ]
|
|
9e5ec6fc67c863dea2de26ce742f0940ad43562c
|
Communication/benchPrototypeAPI.py
|
Communication/benchPrototypeAPI.py
|
import sys
sys.path.append('/home/bat/Python-Arduino-Proto-API-v2/arduino')
import time
locations = ['/dev/ttyACM0','/dev/ttyACM1','/dev/ttyACM2','/dev/ttyACM3','/dev/ttyACM4','/dev/ttyACM5','/dev/ttyUSB0','/dev/ttyUSB1','/dev/ttyUSB2','/dev/ttyUSB3','/dev/ttyS0','/dev/ttyS1','/dev/ttyS2','/dev/ttyS3']
from firmata import Arduino, OUTPUT
for device in locations:
try:
print "Trying...",device
board = Arduino(device)
break
except:
print "Failed to connect on ", device
'''
pin = 0
#declare output pins as a list/tuple
board.output([pin])
t0 = time.time()
n = 0
t = time.time()
while t-t0<10:
value = board.analogRead(pin)
n = n+1
print "n: ",n, value
t = time.time()
b.close()
from pyfirmata import util, Arduino
t0 = time.time()
n = 0
t = time.time()
while t-t0<10:
it= util.Iterator(board)
it.start()
board.analog[0].enable_reporting()
value = board.analog[0].read()
n = n+1
print "n: ",n, value
#board.pass_time(1)
t = time.time()
board.exit()
'''
board.pin_mode(0, OUTPUT)
t0 = time.time()
n = 0
t = time.time()
while t-t0<10:
board.parse()
value = board.analog_read(0) # Reading from analog pin #0
n = n+1
print "n: ",n,
|
Add a script to benchmark python to arduino libraries
|
Add a script to benchmark python to arduino libraries
|
Python
|
mit
|
baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite
|
Add a script to benchmark python to arduino libraries
|
import sys
sys.path.append('/home/bat/Python-Arduino-Proto-API-v2/arduino')
import time
locations = ['/dev/ttyACM0','/dev/ttyACM1','/dev/ttyACM2','/dev/ttyACM3','/dev/ttyACM4','/dev/ttyACM5','/dev/ttyUSB0','/dev/ttyUSB1','/dev/ttyUSB2','/dev/ttyUSB3','/dev/ttyS0','/dev/ttyS1','/dev/ttyS2','/dev/ttyS3']
from firmata import Arduino, OUTPUT
for device in locations:
try:
print "Trying...",device
board = Arduino(device)
break
except:
print "Failed to connect on ", device
'''
pin = 0
#declare output pins as a list/tuple
board.output([pin])
t0 = time.time()
n = 0
t = time.time()
while t-t0<10:
value = board.analogRead(pin)
n = n+1
print "n: ",n, value
t = time.time()
b.close()
from pyfirmata import util, Arduino
t0 = time.time()
n = 0
t = time.time()
while t-t0<10:
it= util.Iterator(board)
it.start()
board.analog[0].enable_reporting()
value = board.analog[0].read()
n = n+1
print "n: ",n, value
#board.pass_time(1)
t = time.time()
board.exit()
'''
board.pin_mode(0, OUTPUT)
t0 = time.time()
n = 0
t = time.time()
while t-t0<10:
board.parse()
value = board.analog_read(0) # Reading from analog pin #0
n = n+1
print "n: ",n,
|
<commit_before><commit_msg>Add a script to benchmark python to arduino libraries<commit_after>
|
import sys
sys.path.append('/home/bat/Python-Arduino-Proto-API-v2/arduino')
import time
locations = ['/dev/ttyACM0','/dev/ttyACM1','/dev/ttyACM2','/dev/ttyACM3','/dev/ttyACM4','/dev/ttyACM5','/dev/ttyUSB0','/dev/ttyUSB1','/dev/ttyUSB2','/dev/ttyUSB3','/dev/ttyS0','/dev/ttyS1','/dev/ttyS2','/dev/ttyS3']
from firmata import Arduino, OUTPUT
for device in locations:
try:
print "Trying...",device
board = Arduino(device)
break
except:
print "Failed to connect on ", device
'''
pin = 0
#declare output pins as a list/tuple
board.output([pin])
t0 = time.time()
n = 0
t = time.time()
while t-t0<10:
value = board.analogRead(pin)
n = n+1
print "n: ",n, value
t = time.time()
b.close()
from pyfirmata import util, Arduino
t0 = time.time()
n = 0
t = time.time()
while t-t0<10:
it= util.Iterator(board)
it.start()
board.analog[0].enable_reporting()
value = board.analog[0].read()
n = n+1
print "n: ",n, value
#board.pass_time(1)
t = time.time()
board.exit()
'''
board.pin_mode(0, OUTPUT)
t0 = time.time()
n = 0
t = time.time()
while t-t0<10:
board.parse()
value = board.analog_read(0) # Reading from analog pin #0
n = n+1
print "n: ",n,
|
Add a script to benchmark python to arduino librariesimport sys
sys.path.append('/home/bat/Python-Arduino-Proto-API-v2/arduino')
import time
locations = ['/dev/ttyACM0','/dev/ttyACM1','/dev/ttyACM2','/dev/ttyACM3','/dev/ttyACM4','/dev/ttyACM5','/dev/ttyUSB0','/dev/ttyUSB1','/dev/ttyUSB2','/dev/ttyUSB3','/dev/ttyS0','/dev/ttyS1','/dev/ttyS2','/dev/ttyS3']
from firmata import Arduino, OUTPUT
for device in locations:
try:
print "Trying...",device
board = Arduino(device)
break
except:
print "Failed to connect on ", device
'''
pin = 0
#declare output pins as a list/tuple
board.output([pin])
t0 = time.time()
n = 0
t = time.time()
while t-t0<10:
value = board.analogRead(pin)
n = n+1
print "n: ",n, value
t = time.time()
b.close()
from pyfirmata import util, Arduino
t0 = time.time()
n = 0
t = time.time()
while t-t0<10:
it= util.Iterator(board)
it.start()
board.analog[0].enable_reporting()
value = board.analog[0].read()
n = n+1
print "n: ",n, value
#board.pass_time(1)
t = time.time()
board.exit()
'''
board.pin_mode(0, OUTPUT)
t0 = time.time()
n = 0
t = time.time()
while t-t0<10:
board.parse()
value = board.analog_read(0) # Reading from analog pin #0
n = n+1
print "n: ",n,
|
<commit_before><commit_msg>Add a script to benchmark python to arduino libraries<commit_after>import sys
sys.path.append('/home/bat/Python-Arduino-Proto-API-v2/arduino')
import time
locations = ['/dev/ttyACM0','/dev/ttyACM1','/dev/ttyACM2','/dev/ttyACM3','/dev/ttyACM4','/dev/ttyACM5','/dev/ttyUSB0','/dev/ttyUSB1','/dev/ttyUSB2','/dev/ttyUSB3','/dev/ttyS0','/dev/ttyS1','/dev/ttyS2','/dev/ttyS3']
from firmata import Arduino, OUTPUT
for device in locations:
try:
print "Trying...",device
board = Arduino(device)
break
except:
print "Failed to connect on ", device
'''
pin = 0
#declare output pins as a list/tuple
board.output([pin])
t0 = time.time()
n = 0
t = time.time()
while t-t0<10:
value = board.analogRead(pin)
n = n+1
print "n: ",n, value
t = time.time()
b.close()
from pyfirmata import util, Arduino
t0 = time.time()
n = 0
t = time.time()
while t-t0<10:
it= util.Iterator(board)
it.start()
board.analog[0].enable_reporting()
value = board.analog[0].read()
n = n+1
print "n: ",n, value
#board.pass_time(1)
t = time.time()
board.exit()
'''
board.pin_mode(0, OUTPUT)
t0 = time.time()
n = 0
t = time.time()
while t-t0<10:
board.parse()
value = board.analog_read(0) # Reading from analog pin #0
n = n+1
print "n: ",n,
|
|
b8dd154c6f5d808d4b8916266fa43ca54c37851e
|
pybloom/hashfilter.py
|
pybloom/hashfilter.py
|
import time
class HashFilter(object):
'''
Plain Hash Filter for testing purposes
'''
def __init__(self, expiration):
self.expiration = expiration
self.unique_items = {}
def add(self, key):
if key in self.unique_items:
timestamp = time.time()
self.unique_items[key] = timestamp + self.expiration
return True
else:
timestamp = time.time()
self.unique_items[key] = timestamp + self.expiration
return False
def __contains__(self, key):
timestamp = time.time()
if key in self.unique_items:
if timestamp < self.unique_items[key]:
return True
else:
del self.unique_items[key]
return False
|
Add plain hash filter for testing
|
Add plain hash filter for testing
|
Python
|
mit
|
Parsely/python-bloomfilter
|
Add plain hash filter for testing
|
import time
class HashFilter(object):
'''
Plain Hash Filter for testing purposes
'''
def __init__(self, expiration):
self.expiration = expiration
self.unique_items = {}
def add(self, key):
if key in self.unique_items:
timestamp = time.time()
self.unique_items[key] = timestamp + self.expiration
return True
else:
timestamp = time.time()
self.unique_items[key] = timestamp + self.expiration
return False
def __contains__(self, key):
timestamp = time.time()
if key in self.unique_items:
if timestamp < self.unique_items[key]:
return True
else:
del self.unique_items[key]
return False
|
<commit_before><commit_msg>Add plain hash filter for testing<commit_after>
|
import time
class HashFilter(object):
'''
Plain Hash Filter for testing purposes
'''
def __init__(self, expiration):
self.expiration = expiration
self.unique_items = {}
def add(self, key):
if key in self.unique_items:
timestamp = time.time()
self.unique_items[key] = timestamp + self.expiration
return True
else:
timestamp = time.time()
self.unique_items[key] = timestamp + self.expiration
return False
def __contains__(self, key):
timestamp = time.time()
if key in self.unique_items:
if timestamp < self.unique_items[key]:
return True
else:
del self.unique_items[key]
return False
|
Add plain hash filter for testingimport time
class HashFilter(object):
'''
Plain Hash Filter for testing purposes
'''
def __init__(self, expiration):
self.expiration = expiration
self.unique_items = {}
def add(self, key):
if key in self.unique_items:
timestamp = time.time()
self.unique_items[key] = timestamp + self.expiration
return True
else:
timestamp = time.time()
self.unique_items[key] = timestamp + self.expiration
return False
def __contains__(self, key):
timestamp = time.time()
if key in self.unique_items:
if timestamp < self.unique_items[key]:
return True
else:
del self.unique_items[key]
return False
|
<commit_before><commit_msg>Add plain hash filter for testing<commit_after>import time
class HashFilter(object):
'''
Plain Hash Filter for testing purposes
'''
def __init__(self, expiration):
self.expiration = expiration
self.unique_items = {}
def add(self, key):
if key in self.unique_items:
timestamp = time.time()
self.unique_items[key] = timestamp + self.expiration
return True
else:
timestamp = time.time()
self.unique_items[key] = timestamp + self.expiration
return False
def __contains__(self, key):
timestamp = time.time()
if key in self.unique_items:
if timestamp < self.unique_items[key]:
return True
else:
del self.unique_items[key]
return False
|
|
ad7c88acd0ff74f4dbd2d751d07c63abbbb9db44
|
script_example.py
|
script_example.py
|
## This is an example of how to set up a script to run against a database, should you need to do so.
## It should have everything you need to get started
from uber.config import c
from uber.models import Attendee, initialize_db, Session
with Session() as session:
initialize_db()
session = Session().session
|
Add example script This will give future-me a jumping off point for writing scripts to run against the DB for special cases.
|
Add example script
This will give future-me a jumping off point for writing scripts to run against the DB for special cases.
|
Python
|
agpl-3.0
|
magfest/ubersystem,magfest/ubersystem,magfest/ubersystem,magfest/ubersystem
|
Add example script
This will give future-me a jumping off point for writing scripts to run against the DB for special cases.
|
## This is an example of how to set up a script to run against a database, should you need to do so.
## It should have everything you need to get started
from uber.config import c
from uber.models import Attendee, initialize_db, Session
with Session() as session:
initialize_db()
session = Session().session
|
<commit_before><commit_msg>Add example script
This will give future-me a jumping off point for writing scripts to run against the DB for special cases.<commit_after>
|
## This is an example of how to set up a script to run against a database, should you need to do so.
## It should have everything you need to get started
from uber.config import c
from uber.models import Attendee, initialize_db, Session
with Session() as session:
initialize_db()
session = Session().session
|
Add example script
This will give future-me a jumping off point for writing scripts to run against the DB for special cases.## This is an example of how to set up a script to run against a database, should you need to do so.
## It should have everything you need to get started
from uber.config import c
from uber.models import Attendee, initialize_db, Session
with Session() as session:
initialize_db()
session = Session().session
|
<commit_before><commit_msg>Add example script
This will give future-me a jumping off point for writing scripts to run against the DB for special cases.<commit_after>## This is an example of how to set up a script to run against a database, should you need to do so.
## It should have everything you need to get started
from uber.config import c
from uber.models import Attendee, initialize_db, Session
with Session() as session:
initialize_db()
session = Session().session
|
|
b8d824b5355bcabd6a1bdcf7d8af39076ce75bb6
|
examples/workspace_renumber.py
|
examples/workspace_renumber.py
|
#!/usr/bin/env python3
import i3ipc
# make connection to i3 ipc
i3 = i3ipc.Connection()
# check if workspaces are all in order
def workspaces_ordered(i3conn):
last_workspace_number = 0
for i in sorted(i3conn.get_workspaces(), key=lambda x: x['num']):
if i['num'] != last_workspace_number+1:
return False
last_workspace_number += 1
return True
# find all the workspaces that are out of order and
# the least possible valid workspace number that is unassigned
def find_disordered(i3conn):
last_workspace_number = 0
disordered = []
least_number = None
for i in sorted(i3conn.get_workspaces(), key=lambda x: x['num']):
if i['num'] != last_workspace_number+1:
disordered.append(i['num'])
if least_number is None:
least_number = last_workspace_number + 1
last_workspace_number += 1
return (disordered, least_number)
# renumber all the workspaces that appear out of order from the others
def fix_ordering(i3conn):
if workspaces_ordered(i3conn):
return
else:
workspaces = i3conn.get_tree().workspaces()
disordered_workspaces,least_number = find_disordered(i3conn)
containers = list(filter(lambda x: x.num in disordered_workspaces, workspaces))
for c in containers:
for i in c.leaves():
i.command("move container to workspace %s" % least_number)
least_number += 1
return
# callback for when workspace focus changes
def on_workspace_focus(i3conn, e):
fix_ordering(i3conn)
if __name__ == '__main__':
i3.on('workspace::focus', on_workspace_focus)
i3.main()
|
Add script that makes sure workspace numbers are always in consecutive order by moving windows on out of order workspaces to the appropriate workspace
|
Add script that makes sure workspace numbers are always in consecutive
order by moving windows on out of order workspaces to the appropriate
workspace
|
Python
|
bsd-3-clause
|
acrisci/i3ipc-python
|
Add script that makes sure workspace numbers are always in consecutive
order by moving windows on out of order workspaces to the appropriate
workspace
|
#!/usr/bin/env python3
import i3ipc
# make connection to i3 ipc
i3 = i3ipc.Connection()
# check if workspaces are all in order
def workspaces_ordered(i3conn):
last_workspace_number = 0
for i in sorted(i3conn.get_workspaces(), key=lambda x: x['num']):
if i['num'] != last_workspace_number+1:
return False
last_workspace_number += 1
return True
# find all the workspaces that are out of order and
# the least possible valid workspace number that is unassigned
def find_disordered(i3conn):
last_workspace_number = 0
disordered = []
least_number = None
for i in sorted(i3conn.get_workspaces(), key=lambda x: x['num']):
if i['num'] != last_workspace_number+1:
disordered.append(i['num'])
if least_number is None:
least_number = last_workspace_number + 1
last_workspace_number += 1
return (disordered, least_number)
# renumber all the workspaces that appear out of order from the others
def fix_ordering(i3conn):
if workspaces_ordered(i3conn):
return
else:
workspaces = i3conn.get_tree().workspaces()
disordered_workspaces,least_number = find_disordered(i3conn)
containers = list(filter(lambda x: x.num in disordered_workspaces, workspaces))
for c in containers:
for i in c.leaves():
i.command("move container to workspace %s" % least_number)
least_number += 1
return
# callback for when workspace focus changes
def on_workspace_focus(i3conn, e):
fix_ordering(i3conn)
if __name__ == '__main__':
i3.on('workspace::focus', on_workspace_focus)
i3.main()
|
<commit_before><commit_msg>Add script that makes sure workspace numbers are always in consecutive
order by moving windows on out of order workspaces to the appropriate
workspace<commit_after>
|
#!/usr/bin/env python3
import i3ipc
# make connection to i3 ipc
i3 = i3ipc.Connection()
# check if workspaces are all in order
def workspaces_ordered(i3conn):
last_workspace_number = 0
for i in sorted(i3conn.get_workspaces(), key=lambda x: x['num']):
if i['num'] != last_workspace_number+1:
return False
last_workspace_number += 1
return True
# find all the workspaces that are out of order and
# the least possible valid workspace number that is unassigned
def find_disordered(i3conn):
last_workspace_number = 0
disordered = []
least_number = None
for i in sorted(i3conn.get_workspaces(), key=lambda x: x['num']):
if i['num'] != last_workspace_number+1:
disordered.append(i['num'])
if least_number is None:
least_number = last_workspace_number + 1
last_workspace_number += 1
return (disordered, least_number)
# renumber all the workspaces that appear out of order from the others
def fix_ordering(i3conn):
if workspaces_ordered(i3conn):
return
else:
workspaces = i3conn.get_tree().workspaces()
disordered_workspaces,least_number = find_disordered(i3conn)
containers = list(filter(lambda x: x.num in disordered_workspaces, workspaces))
for c in containers:
for i in c.leaves():
i.command("move container to workspace %s" % least_number)
least_number += 1
return
# callback for when workspace focus changes
def on_workspace_focus(i3conn, e):
fix_ordering(i3conn)
if __name__ == '__main__':
i3.on('workspace::focus', on_workspace_focus)
i3.main()
|
Add script that makes sure workspace numbers are always in consecutive
order by moving windows on out of order workspaces to the appropriate
workspace#!/usr/bin/env python3
import i3ipc
# make connection to i3 ipc
i3 = i3ipc.Connection()
# check if workspaces are all in order
def workspaces_ordered(i3conn):
last_workspace_number = 0
for i in sorted(i3conn.get_workspaces(), key=lambda x: x['num']):
if i['num'] != last_workspace_number+1:
return False
last_workspace_number += 1
return True
# find all the workspaces that are out of order and
# the least possible valid workspace number that is unassigned
def find_disordered(i3conn):
last_workspace_number = 0
disordered = []
least_number = None
for i in sorted(i3conn.get_workspaces(), key=lambda x: x['num']):
if i['num'] != last_workspace_number+1:
disordered.append(i['num'])
if least_number is None:
least_number = last_workspace_number + 1
last_workspace_number += 1
return (disordered, least_number)
# renumber all the workspaces that appear out of order from the others
def fix_ordering(i3conn):
if workspaces_ordered(i3conn):
return
else:
workspaces = i3conn.get_tree().workspaces()
disordered_workspaces,least_number = find_disordered(i3conn)
containers = list(filter(lambda x: x.num in disordered_workspaces, workspaces))
for c in containers:
for i in c.leaves():
i.command("move container to workspace %s" % least_number)
least_number += 1
return
# callback for when workspace focus changes
def on_workspace_focus(i3conn, e):
fix_ordering(i3conn)
if __name__ == '__main__':
i3.on('workspace::focus', on_workspace_focus)
i3.main()
|
<commit_before><commit_msg>Add script that makes sure workspace numbers are always in consecutive
order by moving windows on out of order workspaces to the appropriate
workspace<commit_after>#!/usr/bin/env python3
import i3ipc
# make connection to i3 ipc
i3 = i3ipc.Connection()
# check if workspaces are all in order
def workspaces_ordered(i3conn):
last_workspace_number = 0
for i in sorted(i3conn.get_workspaces(), key=lambda x: x['num']):
if i['num'] != last_workspace_number+1:
return False
last_workspace_number += 1
return True
# find all the workspaces that are out of order and
# the least possible valid workspace number that is unassigned
def find_disordered(i3conn):
last_workspace_number = 0
disordered = []
least_number = None
for i in sorted(i3conn.get_workspaces(), key=lambda x: x['num']):
if i['num'] != last_workspace_number+1:
disordered.append(i['num'])
if least_number is None:
least_number = last_workspace_number + 1
last_workspace_number += 1
return (disordered, least_number)
# renumber all the workspaces that appear out of order from the others
def fix_ordering(i3conn):
if workspaces_ordered(i3conn):
return
else:
workspaces = i3conn.get_tree().workspaces()
disordered_workspaces,least_number = find_disordered(i3conn)
containers = list(filter(lambda x: x.num in disordered_workspaces, workspaces))
for c in containers:
for i in c.leaves():
i.command("move container to workspace %s" % least_number)
least_number += 1
return
# callback for when workspace focus changes
def on_workspace_focus(i3conn, e):
fix_ordering(i3conn)
if __name__ == '__main__':
i3.on('workspace::focus', on_workspace_focus)
i3.main()
|
|
bf16ff8622049a29e2013266d7bb2ac7f5c3a84d
|
designate/backend/impl_powerdns/migrate_repo/versions/017_records_drop_duped_index.py
|
designate/backend/impl_powerdns/migrate_repo/versions/017_records_drop_duped_index.py
|
# Copyright 2015 NetEase, Inc.
#
# Author: Zhang Gengyuan <stanzgy@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData, Index, Table
meta = MetaData()
def upgrade(migrate_engine):
meta.bind = migrate_engine
records_table = Table('records', meta, autoload=True)
name_idx = Index('rec_name_index',
records_table.c.name)
name_idx.drop()
def downgrade(migrate_engine):
meta.bind = migrate_engine
records_table = Table('records', meta, autoload=True)
name_idx = Index('rec_name_index',
records_table.c.name)
name_idx.create()
|
Remove duplicated index on table 'records' of pDNS backend
|
Remove duplicated index on table 'records' of pDNS backend
Remove duplicated index `rec_name_index` on table 'records' of pDNS backend.
Change-Id: I9e8723b464522a588f0e0ef6ff261b71609b0726
Closes-Bug: 1446980
|
Python
|
apache-2.0
|
grahamhayes/designate,tonyli71/designate,cneill/designate-testing,tonyli71/designate,kiall/designate-py3,ionrock/designate,kiall/designate-py3,openstack/designate,kiall/designate-py3,cneill/designate,muraliselva10/designate,cneill/designate-testing,kiall/designate-py3,muraliselva10/designate,muraliselva10/designate,openstack/designate,grahamhayes/designate,ramsateesh/designate,ionrock/designate,grahamhayes/designate,kiall/designate-py3,ramsateesh/designate,openstack/designate,cneill/designate,cneill/designate,ramsateesh/designate,cneill/designate,ionrock/designate,cneill/designate,cneill/designate-testing,tonyli71/designate
|
Remove duplicated index on table 'records' of pDNS backend
Remove duplicated index `rec_name_index` on table 'records' of pDNS backend.
Change-Id: I9e8723b464522a588f0e0ef6ff261b71609b0726
Closes-Bug: 1446980
|
# Copyright 2015 NetEase, Inc.
#
# Author: Zhang Gengyuan <stanzgy@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData, Index, Table
meta = MetaData()
def upgrade(migrate_engine):
meta.bind = migrate_engine
records_table = Table('records', meta, autoload=True)
name_idx = Index('rec_name_index',
records_table.c.name)
name_idx.drop()
def downgrade(migrate_engine):
meta.bind = migrate_engine
records_table = Table('records', meta, autoload=True)
name_idx = Index('rec_name_index',
records_table.c.name)
name_idx.create()
|
<commit_before><commit_msg>Remove duplicated index on table 'records' of pDNS backend
Remove duplicated index `rec_name_index` on table 'records' of pDNS backend.
Change-Id: I9e8723b464522a588f0e0ef6ff261b71609b0726
Closes-Bug: 1446980<commit_after>
|
# Copyright 2015 NetEase, Inc.
#
# Author: Zhang Gengyuan <stanzgy@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData, Index, Table
meta = MetaData()
def upgrade(migrate_engine):
meta.bind = migrate_engine
records_table = Table('records', meta, autoload=True)
name_idx = Index('rec_name_index',
records_table.c.name)
name_idx.drop()
def downgrade(migrate_engine):
meta.bind = migrate_engine
records_table = Table('records', meta, autoload=True)
name_idx = Index('rec_name_index',
records_table.c.name)
name_idx.create()
|
Remove duplicated index on table 'records' of pDNS backend
Remove duplicated index `rec_name_index` on table 'records' of pDNS backend.
Change-Id: I9e8723b464522a588f0e0ef6ff261b71609b0726
Closes-Bug: 1446980# Copyright 2015 NetEase, Inc.
#
# Author: Zhang Gengyuan <stanzgy@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData, Index, Table
meta = MetaData()
def upgrade(migrate_engine):
meta.bind = migrate_engine
records_table = Table('records', meta, autoload=True)
name_idx = Index('rec_name_index',
records_table.c.name)
name_idx.drop()
def downgrade(migrate_engine):
meta.bind = migrate_engine
records_table = Table('records', meta, autoload=True)
name_idx = Index('rec_name_index',
records_table.c.name)
name_idx.create()
|
<commit_before><commit_msg>Remove duplicated index on table 'records' of pDNS backend
Remove duplicated index `rec_name_index` on table 'records' of pDNS backend.
Change-Id: I9e8723b464522a588f0e0ef6ff261b71609b0726
Closes-Bug: 1446980<commit_after># Copyright 2015 NetEase, Inc.
#
# Author: Zhang Gengyuan <stanzgy@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData, Index, Table
meta = MetaData()
def upgrade(migrate_engine):
meta.bind = migrate_engine
records_table = Table('records', meta, autoload=True)
name_idx = Index('rec_name_index',
records_table.c.name)
name_idx.drop()
def downgrade(migrate_engine):
meta.bind = migrate_engine
records_table = Table('records', meta, autoload=True)
name_idx = Index('rec_name_index',
records_table.c.name)
name_idx.create()
|
|
6156181b6635a7dd6cdd8f699a2e5570cb42e89c
|
website/tests/views/test_pathway.py
|
website/tests/views/test_pathway.py
|
from database import db
from models import Pathway, Gene, GeneList, GeneListEntry
from view_testing import ViewTest
def create_pathways():
# not necessarily true ;)
genes = [Gene(name=name) for name in 'KRAS NRAS AKAP13 NF1 BCR'.split()]
significant_genes = [Gene(name=name) for name in 'TP53 AKT1 TXN GPI AKT3 FN TSC1'.split()]
pathways = [
Pathway(
description='TP53 Regulates Transcription of DNA Repair Genes',
reactome=6796648,
genes=genes + significant_genes
),
Pathway(
description='A pathway with more than 5 significant genes but less than 10 at all',
reactome=679,
genes=significant_genes
),
Pathway(
description='Ras protein signal transduction',
gene_ontology=33277,
genes=genes
),
]
db.session.add_all(pathways)
return locals()
class TestPathwaysView(ViewTest):
def test_details(self):
create_pathways()
response = self.client.get('/pathways/details/?gene_ontology_id=33277')
pathway = response.json
assert pathway['description'] == 'Ras protein signal transduction'
assert len(pathway['genes']) == 5
def test_with_significant_genes(self):
created = create_pathways()
gene_list = GeneList(
name='ClinVar',
entries=[
GeneListEntry(gene=gene, fdr=0.001, p=0.001)
for gene in created['significant_genes']
])
db.session.add(gene_list)
db.session.commit()
response = self.client.get('/pathways/significant_data/%s' % gene_list.id)
# only one pathway has more than 10 genes with at least 5 of them significant
assert response.json['total'] == 1
pathway = response.json['rows'].pop()
expected_values = {
'reactome': 6796648,
'description': 'TP53 Regulates Transcription of DNA Repair Genes',
'significant_genes_count': 7,
'gene_count': 12,
'ratio': 7 / 12,
}
for key, value in expected_values.items():
assert pathway[key] == value
|
Add tests for pathways details and significant pathways
|
Add tests for pathways details and significant pathways
|
Python
|
lgpl-2.1
|
reimandlab/Visualistion-Framework-for-Genome-Mutations,reimandlab/Visualistion-Framework-for-Genome-Mutations,reimandlab/Visualisation-Framework-for-Genome-Mutations,reimandlab/Visualistion-Framework-for-Genome-Mutations,reimandlab/Visualisation-Framework-for-Genome-Mutations,reimandlab/ActiveDriverDB,reimandlab/Visualisation-Framework-for-Genome-Mutations,reimandlab/Visualistion-Framework-for-Genome-Mutations,reimandlab/Visualisation-Framework-for-Genome-Mutations,reimandlab/ActiveDriverDB,reimandlab/Visualisation-Framework-for-Genome-Mutations,reimandlab/ActiveDriverDB,reimandlab/ActiveDriverDB
|
Add tests for pathways details and significant pathways
|
from database import db
from models import Pathway, Gene, GeneList, GeneListEntry
from view_testing import ViewTest
def create_pathways():
# not necessarily true ;)
genes = [Gene(name=name) for name in 'KRAS NRAS AKAP13 NF1 BCR'.split()]
significant_genes = [Gene(name=name) for name in 'TP53 AKT1 TXN GPI AKT3 FN TSC1'.split()]
pathways = [
Pathway(
description='TP53 Regulates Transcription of DNA Repair Genes',
reactome=6796648,
genes=genes + significant_genes
),
Pathway(
description='A pathway with more than 5 significant genes but less than 10 at all',
reactome=679,
genes=significant_genes
),
Pathway(
description='Ras protein signal transduction',
gene_ontology=33277,
genes=genes
),
]
db.session.add_all(pathways)
return locals()
class TestPathwaysView(ViewTest):
def test_details(self):
create_pathways()
response = self.client.get('/pathways/details/?gene_ontology_id=33277')
pathway = response.json
assert pathway['description'] == 'Ras protein signal transduction'
assert len(pathway['genes']) == 5
def test_with_significant_genes(self):
created = create_pathways()
gene_list = GeneList(
name='ClinVar',
entries=[
GeneListEntry(gene=gene, fdr=0.001, p=0.001)
for gene in created['significant_genes']
])
db.session.add(gene_list)
db.session.commit()
response = self.client.get('/pathways/significant_data/%s' % gene_list.id)
# only one pathway has more than 10 genes with at least 5 of them significant
assert response.json['total'] == 1
pathway = response.json['rows'].pop()
expected_values = {
'reactome': 6796648,
'description': 'TP53 Regulates Transcription of DNA Repair Genes',
'significant_genes_count': 7,
'gene_count': 12,
'ratio': 7 / 12,
}
for key, value in expected_values.items():
assert pathway[key] == value
|
<commit_before><commit_msg>Add tests for pathways details and significant pathways<commit_after>
|
from database import db
from models import Pathway, Gene, GeneList, GeneListEntry
from view_testing import ViewTest
def create_pathways():
# not necessarily true ;)
genes = [Gene(name=name) for name in 'KRAS NRAS AKAP13 NF1 BCR'.split()]
significant_genes = [Gene(name=name) for name in 'TP53 AKT1 TXN GPI AKT3 FN TSC1'.split()]
pathways = [
Pathway(
description='TP53 Regulates Transcription of DNA Repair Genes',
reactome=6796648,
genes=genes + significant_genes
),
Pathway(
description='A pathway with more than 5 significant genes but less than 10 at all',
reactome=679,
genes=significant_genes
),
Pathway(
description='Ras protein signal transduction',
gene_ontology=33277,
genes=genes
),
]
db.session.add_all(pathways)
return locals()
class TestPathwaysView(ViewTest):
def test_details(self):
create_pathways()
response = self.client.get('/pathways/details/?gene_ontology_id=33277')
pathway = response.json
assert pathway['description'] == 'Ras protein signal transduction'
assert len(pathway['genes']) == 5
def test_with_significant_genes(self):
created = create_pathways()
gene_list = GeneList(
name='ClinVar',
entries=[
GeneListEntry(gene=gene, fdr=0.001, p=0.001)
for gene in created['significant_genes']
])
db.session.add(gene_list)
db.session.commit()
response = self.client.get('/pathways/significant_data/%s' % gene_list.id)
# only one pathway has more than 10 genes with at least 5 of them significant
assert response.json['total'] == 1
pathway = response.json['rows'].pop()
expected_values = {
'reactome': 6796648,
'description': 'TP53 Regulates Transcription of DNA Repair Genes',
'significant_genes_count': 7,
'gene_count': 12,
'ratio': 7 / 12,
}
for key, value in expected_values.items():
assert pathway[key] == value
|
Add tests for pathways details and significant pathwaysfrom database import db
from models import Pathway, Gene, GeneList, GeneListEntry
from view_testing import ViewTest
def create_pathways():
# not necessarily true ;)
genes = [Gene(name=name) for name in 'KRAS NRAS AKAP13 NF1 BCR'.split()]
significant_genes = [Gene(name=name) for name in 'TP53 AKT1 TXN GPI AKT3 FN TSC1'.split()]
pathways = [
Pathway(
description='TP53 Regulates Transcription of DNA Repair Genes',
reactome=6796648,
genes=genes + significant_genes
),
Pathway(
description='A pathway with more than 5 significant genes but less than 10 at all',
reactome=679,
genes=significant_genes
),
Pathway(
description='Ras protein signal transduction',
gene_ontology=33277,
genes=genes
),
]
db.session.add_all(pathways)
return locals()
class TestPathwaysView(ViewTest):
def test_details(self):
create_pathways()
response = self.client.get('/pathways/details/?gene_ontology_id=33277')
pathway = response.json
assert pathway['description'] == 'Ras protein signal transduction'
assert len(pathway['genes']) == 5
def test_with_significant_genes(self):
created = create_pathways()
gene_list = GeneList(
name='ClinVar',
entries=[
GeneListEntry(gene=gene, fdr=0.001, p=0.001)
for gene in created['significant_genes']
])
db.session.add(gene_list)
db.session.commit()
response = self.client.get('/pathways/significant_data/%s' % gene_list.id)
# only one pathway has more than 10 genes with at least 5 of them significant
assert response.json['total'] == 1
pathway = response.json['rows'].pop()
expected_values = {
'reactome': 6796648,
'description': 'TP53 Regulates Transcription of DNA Repair Genes',
'significant_genes_count': 7,
'gene_count': 12,
'ratio': 7 / 12,
}
for key, value in expected_values.items():
assert pathway[key] == value
|
<commit_before><commit_msg>Add tests for pathways details and significant pathways<commit_after>from database import db
from models import Pathway, Gene, GeneList, GeneListEntry
from view_testing import ViewTest
def create_pathways():
# not necessarily true ;)
genes = [Gene(name=name) for name in 'KRAS NRAS AKAP13 NF1 BCR'.split()]
significant_genes = [Gene(name=name) for name in 'TP53 AKT1 TXN GPI AKT3 FN TSC1'.split()]
pathways = [
Pathway(
description='TP53 Regulates Transcription of DNA Repair Genes',
reactome=6796648,
genes=genes + significant_genes
),
Pathway(
description='A pathway with more than 5 significant genes but less than 10 at all',
reactome=679,
genes=significant_genes
),
Pathway(
description='Ras protein signal transduction',
gene_ontology=33277,
genes=genes
),
]
db.session.add_all(pathways)
return locals()
class TestPathwaysView(ViewTest):
def test_details(self):
create_pathways()
response = self.client.get('/pathways/details/?gene_ontology_id=33277')
pathway = response.json
assert pathway['description'] == 'Ras protein signal transduction'
assert len(pathway['genes']) == 5
def test_with_significant_genes(self):
created = create_pathways()
gene_list = GeneList(
name='ClinVar',
entries=[
GeneListEntry(gene=gene, fdr=0.001, p=0.001)
for gene in created['significant_genes']
])
db.session.add(gene_list)
db.session.commit()
response = self.client.get('/pathways/significant_data/%s' % gene_list.id)
# only one pathway has more than 10 genes with at least 5 of them significant
assert response.json['total'] == 1
pathway = response.json['rows'].pop()
expected_values = {
'reactome': 6796648,
'description': 'TP53 Regulates Transcription of DNA Repair Genes',
'significant_genes_count': 7,
'gene_count': 12,
'ratio': 7 / 12,
}
for key, value in expected_values.items():
assert pathway[key] == value
|
|
63cd90c4179f0b68c1af0a174e30b25906e6319e
|
taiga/projects/management/commands/change_project_slug.py
|
taiga/projects/management/commands/change_project_slug.py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2016 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2016 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2016 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2016 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.core.management.base import BaseCommand
from django.core.management.base import CommandError
from django.test.utils import override_settings
from taiga.base.utils.slug import slugify_uniquely
from taiga.projects.models import Project
from taiga.projects.history.models import HistoryEntry
from taiga.timeline.management.commands.rebuild_timeline import generate_timeline
class Command(BaseCommand):
help = 'Change the project slug from a new one'
def add_arguments(self, parser):
parser.add_argument('current_slug', help="The current project slug")
parser.add_argument('new_slug', help="The new project slug")
@override_settings(DEBUG=False)
def handle(self, *args, **options):
current_slug = options["current_slug"]
new_slug = options["new_slug"]
try:
project = Project.objects.get(slug=current_slug)
except Project.DoesNotExist:
raise CommandError("There is no project with the slug '{}'".format(current_slug))
slug = slugify_uniquely(new_slug, Project)
if slug != new_slug:
raise CommandError("Invalid new slug, maybe you can try with '{}'".format(slug))
# Change slug
self.stdout.write(self.style.SUCCESS("-> Change slug to '{}'.".format(slug)))
project.slug = slug
project.save()
# Reset diff cache in history entries
self.stdout.write(self.style.SUCCESS("-> Reset value_diff cache for history entries."))
HistoryEntry.objects.filter(project=project).update(values_diff_cache=None)
# Regenerate timeline
self.stdout.write(self.style.SUCCESS("-> Regenerate timeline entries."))
generate_timeline(None, None, project.id)
|
Create command to change project slug
|
Create command to change project slug
|
Python
|
agpl-3.0
|
taigaio/taiga-back,taigaio/taiga-back,dayatz/taiga-back,dayatz/taiga-back,taigaio/taiga-back,dayatz/taiga-back
|
Create command to change project slug
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2016 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2016 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2016 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2016 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.core.management.base import BaseCommand
from django.core.management.base import CommandError
from django.test.utils import override_settings
from taiga.base.utils.slug import slugify_uniquely
from taiga.projects.models import Project
from taiga.projects.history.models import HistoryEntry
from taiga.timeline.management.commands.rebuild_timeline import generate_timeline
class Command(BaseCommand):
help = 'Change the project slug from a new one'
def add_arguments(self, parser):
parser.add_argument('current_slug', help="The current project slug")
parser.add_argument('new_slug', help="The new project slug")
@override_settings(DEBUG=False)
def handle(self, *args, **options):
current_slug = options["current_slug"]
new_slug = options["new_slug"]
try:
project = Project.objects.get(slug=current_slug)
except Project.DoesNotExist:
raise CommandError("There is no project with the slug '{}'".format(current_slug))
slug = slugify_uniquely(new_slug, Project)
if slug != new_slug:
raise CommandError("Invalid new slug, maybe you can try with '{}'".format(slug))
# Change slug
self.stdout.write(self.style.SUCCESS("-> Change slug to '{}'.".format(slug)))
project.slug = slug
project.save()
# Reset diff cache in history entries
self.stdout.write(self.style.SUCCESS("-> Reset value_diff cache for history entries."))
HistoryEntry.objects.filter(project=project).update(values_diff_cache=None)
# Regenerate timeline
self.stdout.write(self.style.SUCCESS("-> Regenerate timeline entries."))
generate_timeline(None, None, project.id)
|
<commit_before><commit_msg>Create command to change project slug<commit_after>
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2016 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2016 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2016 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2016 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.core.management.base import BaseCommand
from django.core.management.base import CommandError
from django.test.utils import override_settings
from taiga.base.utils.slug import slugify_uniquely
from taiga.projects.models import Project
from taiga.projects.history.models import HistoryEntry
from taiga.timeline.management.commands.rebuild_timeline import generate_timeline
class Command(BaseCommand):
help = 'Change the project slug from a new one'
def add_arguments(self, parser):
parser.add_argument('current_slug', help="The current project slug")
parser.add_argument('new_slug', help="The new project slug")
@override_settings(DEBUG=False)
def handle(self, *args, **options):
current_slug = options["current_slug"]
new_slug = options["new_slug"]
try:
project = Project.objects.get(slug=current_slug)
except Project.DoesNotExist:
raise CommandError("There is no project with the slug '{}'".format(current_slug))
slug = slugify_uniquely(new_slug, Project)
if slug != new_slug:
raise CommandError("Invalid new slug, maybe you can try with '{}'".format(slug))
# Change slug
self.stdout.write(self.style.SUCCESS("-> Change slug to '{}'.".format(slug)))
project.slug = slug
project.save()
# Reset diff cache in history entries
self.stdout.write(self.style.SUCCESS("-> Reset value_diff cache for history entries."))
HistoryEntry.objects.filter(project=project).update(values_diff_cache=None)
# Regenerate timeline
self.stdout.write(self.style.SUCCESS("-> Regenerate timeline entries."))
generate_timeline(None, None, project.id)
|
Create command to change project slug# -*- coding: utf-8 -*-
# Copyright (C) 2014-2016 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2016 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2016 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2016 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.core.management.base import BaseCommand
from django.core.management.base import CommandError
from django.test.utils import override_settings
from taiga.base.utils.slug import slugify_uniquely
from taiga.projects.models import Project
from taiga.projects.history.models import HistoryEntry
from taiga.timeline.management.commands.rebuild_timeline import generate_timeline
class Command(BaseCommand):
help = 'Change the project slug from a new one'
def add_arguments(self, parser):
parser.add_argument('current_slug', help="The current project slug")
parser.add_argument('new_slug', help="The new project slug")
@override_settings(DEBUG=False)
def handle(self, *args, **options):
current_slug = options["current_slug"]
new_slug = options["new_slug"]
try:
project = Project.objects.get(slug=current_slug)
except Project.DoesNotExist:
raise CommandError("There is no project with the slug '{}'".format(current_slug))
slug = slugify_uniquely(new_slug, Project)
if slug != new_slug:
raise CommandError("Invalid new slug, maybe you can try with '{}'".format(slug))
# Change slug
self.stdout.write(self.style.SUCCESS("-> Change slug to '{}'.".format(slug)))
project.slug = slug
project.save()
# Reset diff cache in history entries
self.stdout.write(self.style.SUCCESS("-> Reset value_diff cache for history entries."))
HistoryEntry.objects.filter(project=project).update(values_diff_cache=None)
# Regenerate timeline
self.stdout.write(self.style.SUCCESS("-> Regenerate timeline entries."))
generate_timeline(None, None, project.id)
|
<commit_before><commit_msg>Create command to change project slug<commit_after># -*- coding: utf-8 -*-
# Copyright (C) 2014-2016 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2016 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2016 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2016 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.core.management.base import BaseCommand
from django.core.management.base import CommandError
from django.test.utils import override_settings
from taiga.base.utils.slug import slugify_uniquely
from taiga.projects.models import Project
from taiga.projects.history.models import HistoryEntry
from taiga.timeline.management.commands.rebuild_timeline import generate_timeline
class Command(BaseCommand):
help = 'Change the project slug from a new one'
def add_arguments(self, parser):
parser.add_argument('current_slug', help="The current project slug")
parser.add_argument('new_slug', help="The new project slug")
@override_settings(DEBUG=False)
def handle(self, *args, **options):
current_slug = options["current_slug"]
new_slug = options["new_slug"]
try:
project = Project.objects.get(slug=current_slug)
except Project.DoesNotExist:
raise CommandError("There is no project with the slug '{}'".format(current_slug))
slug = slugify_uniquely(new_slug, Project)
if slug != new_slug:
raise CommandError("Invalid new slug, maybe you can try with '{}'".format(slug))
# Change slug
self.stdout.write(self.style.SUCCESS("-> Change slug to '{}'.".format(slug)))
project.slug = slug
project.save()
# Reset diff cache in history entries
self.stdout.write(self.style.SUCCESS("-> Reset value_diff cache for history entries."))
HistoryEntry.objects.filter(project=project).update(values_diff_cache=None)
# Regenerate timeline
self.stdout.write(self.style.SUCCESS("-> Regenerate timeline entries."))
generate_timeline(None, None, project.id)
|
|
512f98053fbc8df5ac08312e950ef3042fa5a864
|
reverse_complement.py
|
reverse_complement.py
|
import sys
from argparse import ArgumentParser, FileType
from Bio.Seq import Seq
def reverse_complement(seq):
return str(Seq(seq.strip()).reverse_complement())
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("infile", nargs="?", type=FileType('r'), default=sys.stdin)
args = parser.parse_args()
for seq in args.infile:
print reverse_complement(seq)
|
Add script to reverse complement a set of sequences.
|
Add script to reverse complement a set of sequences.
|
Python
|
mit
|
roryk/junkdrawer,roryk/junkdrawer
|
Add script to reverse complement a set of sequences.
|
import sys
from argparse import ArgumentParser, FileType
from Bio.Seq import Seq
def reverse_complement(seq):
return str(Seq(seq.strip()).reverse_complement())
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("infile", nargs="?", type=FileType('r'), default=sys.stdin)
args = parser.parse_args()
for seq in args.infile:
print reverse_complement(seq)
|
<commit_before><commit_msg>Add script to reverse complement a set of sequences.<commit_after>
|
import sys
from argparse import ArgumentParser, FileType
from Bio.Seq import Seq
def reverse_complement(seq):
return str(Seq(seq.strip()).reverse_complement())
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("infile", nargs="?", type=FileType('r'), default=sys.stdin)
args = parser.parse_args()
for seq in args.infile:
print reverse_complement(seq)
|
Add script to reverse complement a set of sequences.import sys
from argparse import ArgumentParser, FileType
from Bio.Seq import Seq
def reverse_complement(seq):
return str(Seq(seq.strip()).reverse_complement())
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("infile", nargs="?", type=FileType('r'), default=sys.stdin)
args = parser.parse_args()
for seq in args.infile:
print reverse_complement(seq)
|
<commit_before><commit_msg>Add script to reverse complement a set of sequences.<commit_after>import sys
from argparse import ArgumentParser, FileType
from Bio.Seq import Seq
def reverse_complement(seq):
return str(Seq(seq.strip()).reverse_complement())
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("infile", nargs="?", type=FileType('r'), default=sys.stdin)
args = parser.parse_args()
for seq in args.infile:
print reverse_complement(seq)
|
|
e7bceb26566265c19009a5be68e3fe744a38176b
|
TWLight/applications/migrations/0022_auto_20181209_1315.py
|
TWLight/applications/migrations/0022_auto_20181209_1315.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2018-12-09 13:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('applications', '0021_application_hidden'),
]
operations = [
migrations.AlterField(
model_name='application',
name='status',
field=models.IntegerField(choices=[(0, 'Pending'), (1, 'Under discussion'), (2, 'Approved'), (3, 'Not approved'), (4, 'Sent to partner'), (5, 'Invalid')], default=0),
),
]
|
Add migration for previous commit
|
Add migration for previous commit
|
Python
|
mit
|
WikipediaLibrary/TWLight,WikipediaLibrary/TWLight,WikipediaLibrary/TWLight,WikipediaLibrary/TWLight,WikipediaLibrary/TWLight
|
Add migration for previous commit
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2018-12-09 13:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('applications', '0021_application_hidden'),
]
operations = [
migrations.AlterField(
model_name='application',
name='status',
field=models.IntegerField(choices=[(0, 'Pending'), (1, 'Under discussion'), (2, 'Approved'), (3, 'Not approved'), (4, 'Sent to partner'), (5, 'Invalid')], default=0),
),
]
|
<commit_before><commit_msg>Add migration for previous commit<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2018-12-09 13:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('applications', '0021_application_hidden'),
]
operations = [
migrations.AlterField(
model_name='application',
name='status',
field=models.IntegerField(choices=[(0, 'Pending'), (1, 'Under discussion'), (2, 'Approved'), (3, 'Not approved'), (4, 'Sent to partner'), (5, 'Invalid')], default=0),
),
]
|
Add migration for previous commit# -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2018-12-09 13:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('applications', '0021_application_hidden'),
]
operations = [
migrations.AlterField(
model_name='application',
name='status',
field=models.IntegerField(choices=[(0, 'Pending'), (1, 'Under discussion'), (2, 'Approved'), (3, 'Not approved'), (4, 'Sent to partner'), (5, 'Invalid')], default=0),
),
]
|
<commit_before><commit_msg>Add migration for previous commit<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2018-12-09 13:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('applications', '0021_application_hidden'),
]
operations = [
migrations.AlterField(
model_name='application',
name='status',
field=models.IntegerField(choices=[(0, 'Pending'), (1, 'Under discussion'), (2, 'Approved'), (3, 'Not approved'), (4, 'Sent to partner'), (5, 'Invalid')], default=0),
),
]
|
|
93ba1d34e51a9b959b63e77eae78845dd86203ed
|
camera-capture-test.py
|
camera-capture-test.py
|
#! /usr/bin/env python
# -*- coding:utf-8 -*-
#
# Application to capture images synchronously
# from two AVT Manta cameras with the Vimba SDK
#
# External dependencies
import collections, cv2, time
import Vimba
# Number of images saved
image_count = 0
# Frame per second counter
counter = 0
fps_counter = 0
fps_buffer = collections.deque( 10*[0], 10 )
# Vimba initialization
print( 'Vimba initialization...' )
vimba = Vimba.VmbDriver()
vimba.Startup()
# Camera connection
print( 'Camera connection...' )
camera = Vimba.VmbCamera( vimba )
camera.Connect( '50-0503323406' )
# Start image acquisition
print( 'Start acquisition...' )
camera.CaptureStart()
# Live display
while True :
# Initialize the clock for counting the number of frames per second
time_start = time.clock()
# Capture an image
camera.CaptureFrame()
# Resize image for display
image_final = cv2.resize( camera.image, None, fx=0.3, fy=0.3 )
# Display the image (scaled down)
cv2.imshow( "Camera", image_final )
# Keyboard interruption
key = cv2.waitKey(1) & 0xFF
# Escape key
if key == 27 :
# Exit live display
break
# Space key
elif key == 32 :
# Save images to disk
image_count += 1
print( 'Save image {} to disk...'.format( image_count ) )
cv2.imwrite( 'camera-{:0>2}.png'.format(image_count), camera.image )
# Frames per second counter
fps_buffer.pop()
fps_buffer.appendleft( time.clock() - time_start )
fps_counter = 10.0 / sum( fps_buffer )
counter += 1
if counter == 20 :
print( '{:.2f} FPS'.format( fps_counter ) )
counter = 0
# Cleanup OpenCV
cv2.destroyAllWindows()
# Stop image acquisition
print( 'Stop acquisition...' )
camera.CaptureStop()
# Camera disconnection
print( 'Camera disconnection...' )
camera.Disconnect()
# Vimba shutdown
print( 'Vimba shutdown...' )
vimba.Shutdown()
|
Test to use the new Vimba module.
|
Test to use the new Vimba module.
|
Python
|
mit
|
microy/VisionToolkit,microy/StereoVision,microy/VisionToolkit,microy/PyStereoVisionToolkit,microy/PyStereoVisionToolkit,microy/StereoVision
|
Test to use the new Vimba module.
|
#! /usr/bin/env python
# -*- coding:utf-8 -*-
#
# Application to capture images synchronously
# from an AVT Manta camera with the Vimba SDK
#
# External dependencies
import collections, cv2, time
import Vimba
# Number of images saved
image_count = 0
# Frame per second counter
counter = 0
fps_counter = 0
fps_buffer = collections.deque( 10*[0], 10 )
# Vimba initialization
print( 'Vimba initialization...' )
vimba = Vimba.VmbDriver()
vimba.Startup()
# Camera connection
print( 'Camera connection...' )
camera = Vimba.VmbCamera( vimba )
camera.Connect( '50-0503323406' )
# Start image acquisition
print( 'Start acquisition...' )
camera.CaptureStart()
# Live display
while True :
# Initialize the clock for counting the number of frames per second
time_start = time.clock()
# Capture an image
camera.CaptureFrame()
# Resize image for display
image_final = cv2.resize( camera.image, None, fx=0.3, fy=0.3 )
# Display the image (scaled down)
cv2.imshow( "Camera", image_final )
# Keyboard interruption
key = cv2.waitKey(1) & 0xFF
# Escape key
if key == 27 :
# Exit live display
break
# Space key
elif key == 32 :
# Save images to disk
image_count += 1
print( 'Save image {} to disk...'.format( image_count ) )
cv2.imwrite( 'camera-{:0>2}.png'.format(image_count), camera.image )
# Frames per second counter
fps_buffer.pop()
fps_buffer.appendleft( time.clock() - time_start )
fps_counter = 10.0 / sum( fps_buffer )
counter += 1
if counter == 20 :
print( '{:.2f} FPS'.format( fps_counter ) )
counter = 0
# Cleanup OpenCV
cv2.destroyAllWindows()
# Stop image acquisition
print( 'Stop acquisition...' )
camera.CaptureStop()
# Camera disconnection
print( 'Camera disconnection...' )
camera.Disconnect()
# Vimba shutdown
print( 'Vimba shutdown...' )
vimba.Shutdown()
|
<commit_before><commit_msg>Test to use the new Vimba module.<commit_after>
|
#! /usr/bin/env python
# -*- coding:utf-8 -*-
#
# Application to capture images synchronously
# from an AVT Manta camera with the Vimba SDK
#
# External dependencies
import collections, cv2, time
import Vimba
# Number of images saved
image_count = 0
# Frame per second counter
counter = 0
fps_counter = 0
fps_buffer = collections.deque( 10*[0], 10 )
# Vimba initialization
print( 'Vimba initialization...' )
vimba = Vimba.VmbDriver()
vimba.Startup()
# Camera connection
print( 'Camera connection...' )
camera = Vimba.VmbCamera( vimba )
camera.Connect( '50-0503323406' )
# Start image acquisition
print( 'Start acquisition...' )
camera.CaptureStart()
# Live display
while True :
# Initialize the clock for counting the number of frames per second
time_start = time.clock()
# Capture an image
camera.CaptureFrame()
# Resize image for display
image_final = cv2.resize( camera.image, None, fx=0.3, fy=0.3 )
# Display the image (scaled down)
cv2.imshow( "Camera", image_final )
# Keyboard interruption
key = cv2.waitKey(1) & 0xFF
# Escape key
if key == 27 :
# Exit live display
break
# Space key
elif key == 32 :
# Save images to disk
image_count += 1
print( 'Save image {} to disk...'.format( image_count ) )
cv2.imwrite( 'camera-{:0>2}.png'.format(image_count), camera.image )
# Frames per second counter
fps_buffer.pop()
fps_buffer.appendleft( time.clock() - time_start )
fps_counter = 10.0 / sum( fps_buffer )
counter += 1
if counter == 20 :
print( '{:.2f} FPS'.format( fps_counter ) )
counter = 0
# Cleanup OpenCV
cv2.destroyAllWindows()
# Stop image acquisition
print( 'Stop acquisition...' )
camera.CaptureStop()
# Camera disconnection
print( 'Camera disconnection...' )
camera.Disconnect()
# Vimba shutdown
print( 'Vimba shutdown...' )
vimba.Shutdown()
|
Test to use the new Vimba module.#! /usr/bin/env python
# -*- coding:utf-8 -*-
#
# Application to capture images synchronously
# from an AVT Manta camera with the Vimba SDK
#
# External dependencies
import collections, cv2, time
import Vimba
# Number of images saved
image_count = 0
# Frame per second counter
counter = 0
fps_counter = 0
fps_buffer = collections.deque( 10*[0], 10 )
# Vimba initialization
print( 'Vimba initialization...' )
vimba = Vimba.VmbDriver()
vimba.Startup()
# Camera connection
print( 'Camera connection...' )
camera = Vimba.VmbCamera( vimba )
camera.Connect( '50-0503323406' )
# Start image acquisition
print( 'Start acquisition...' )
camera.CaptureStart()
# Live display
while True :
# Initialize the clock for counting the number of frames per second
time_start = time.clock()
# Capture an image
camera.CaptureFrame()
# Resize image for display
image_final = cv2.resize( camera.image, None, fx=0.3, fy=0.3 )
# Display the image (scaled down)
cv2.imshow( "Camera", image_final )
# Keyboard interruption
key = cv2.waitKey(1) & 0xFF
# Escape key
if key == 27 :
# Exit live display
break
# Space key
elif key == 32 :
# Save images to disk
image_count += 1
print( 'Save image {} to disk...'.format( image_count ) )
cv2.imwrite( 'camera-{:0>2}.png'.format(image_count), camera.image )
# Frames per second counter
fps_buffer.pop()
fps_buffer.appendleft( time.clock() - time_start )
fps_counter = 10.0 / sum( fps_buffer )
counter += 1
if counter == 20 :
print( '{:.2f} FPS'.format( fps_counter ) )
counter = 0
# Cleanup OpenCV
cv2.destroyAllWindows()
# Stop image acquisition
print( 'Stop acquisition...' )
camera.CaptureStop()
# Camera disconnection
print( 'Camera disconnection...' )
camera.Disconnect()
# Vimba shutdown
print( 'Vimba shutdown...' )
vimba.Shutdown()
|
<commit_before><commit_msg>Test to use the new Vimba module.<commit_after>#! /usr/bin/env python
# -*- coding:utf-8 -*-
#
# Application to capture images synchronously
# from an AVT Manta camera with the Vimba SDK
#
# External dependencies
import collections, cv2, time
import Vimba
# Number of images saved
image_count = 0
# Frame per second counter
counter = 0
fps_counter = 0
fps_buffer = collections.deque( 10*[0], 10 )
# Vimba initialization
print( 'Vimba initialization...' )
vimba = Vimba.VmbDriver()
vimba.Startup()
# Camera connection
print( 'Camera connection...' )
camera = Vimba.VmbCamera( vimba )
camera.Connect( '50-0503323406' )
# Start image acquisition
print( 'Start acquisition...' )
camera.CaptureStart()
# Live display
while True :
# Initialize the clock for counting the number of frames per second
time_start = time.clock()
# Capture an image
camera.CaptureFrame()
# Resize image for display
image_final = cv2.resize( camera.image, None, fx=0.3, fy=0.3 )
# Display the image (scaled down)
cv2.imshow( "Camera", image_final )
# Keyboard interruption
key = cv2.waitKey(1) & 0xFF
# Escape key
if key == 27 :
# Exit live display
break
# Space key
elif key == 32 :
# Save images to disk
image_count += 1
print( 'Save image {} to disk...'.format( image_count ) )
cv2.imwrite( 'camera-{:0>2}.png'.format(image_count), camera.image )
# Frames per second counter
fps_buffer.pop()
fps_buffer.appendleft( time.clock() - time_start )
fps_counter = 10.0 / sum( fps_buffer )
counter += 1
if counter == 20 :
print( '{:.2f} FPS'.format( fps_counter ) )
counter = 0
# Cleanup OpenCV
cv2.destroyAllWindows()
# Stop image acquisition
print( 'Stop acquisition...' )
camera.CaptureStop()
# Camera disconnection
print( 'Camera disconnection...' )
camera.Disconnect()
# Vimba shutdown
print( 'Vimba shutdown...' )
vimba.Shutdown()
|
|
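The rolling frame-rate estimate in the capture loop above keeps the ten most recent frame durations in a deque and divides the window length by their sum. A minimal standalone sketch of that arithmetic, using made-up frame times rather than real capture timings, is:

import collections

# Ten most recent frame durations in seconds, newest at the left (same shape as the capture loop above).
fps_buffer = collections.deque(10 * [0.0], 10)
for frame_time in [0.04, 0.05] * 5:
    fps_buffer.pop()                    # drop the oldest sample
    fps_buffer.appendleft(frame_time)   # push the newest sample
fps = 10.0 / sum(fps_buffer)
print('{:.2f} FPS'.format(fps))         # ~22.22 FPS for ~45 ms frames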
03a4dde5c9abebec27ac4ce38b2bfefd5c661274
|
classify_body_parts.py
|
classify_body_parts.py
|
"""Find known body parts in sentences with predicted label 'Lichaamsdeel'.
Extended body parts are saved to new text files.
Usage: python classify_body_parts.py <json file with body part mapping> <dir
with input texts> <dir for output texts>
"""
import os
import codecs
import argparse
import json
from count_labels import load_data
def extend_body_part(text_id, file_name, out_file, word2cat):
# load data set
X_data, Y_data = load_data(file_name)
Y = [s.split('_') for s in Y_data]
num_body_parts = 0
num_added = 0
with codecs.open(out_file, 'wb', 'utf-8') as f:
for i, labelset in enumerate(Y):
ls = labelset
if 'Lichaamsdeel' in labelset:
num_body_parts += 1
words = X_data[i].split()
added = False
for word in words:
w = word.lower()
if w in word2cat.keys():
ls.append(word2cat[w].capitalize())
added = True
if added:
num_added += 1
ls = sorted(list(set(ls)))
f.write(u'{}\t{}\n'.format(X_data[i].decode('utf-8'),
'_'.join(ls)))
print '{}\t{}\t{}'.format(text_id, num_body_parts, num_added)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('file', help='json file containing the body part '
'mapping.')
parser.add_argument('input_dir', help='the directory where the input text '
'files can be found.')
parser.add_argument('output_dir', help='the directory where the output '
'files should be written.')
args = parser.parse_args()
file_name = args.file
input_dir = args.input_dir
output_dir = args.output_dir
if not os.path.exists(output_dir):
os.makedirs(output_dir)
mapping = {}
# read body part mapping
with codecs.open(file_name, 'rb', 'utf8') as f:
mapping = json.load(f, encoding='utf8')
# reverse body part mapping
word2cat = {}
for la, ws in mapping.iteritems():
for word in ws:
if not word2cat.get(word):
word2cat[word] = la
else:
print 'ignored: {} ({})'.format(word, la)
print 'Text\t#Body parts\t#replaced'
# process texts
text_files = [t for t in os.listdir(input_dir) if t.endswith('.txt')]
for text_file in text_files:
text_id = text_file.replace('.txt', '')
in_file = os.path.join(input_dir, text_file)
out_file = os.path.join(output_dir, text_file)
extend_body_part(text_id, in_file, out_file, word2cat)
|
Add script to expand body parts
|
Add script to expand body parts
The script looks for words indicating body parts in sentences with
predicted label Lichaamsdeel and adds the appropriate body part terms to
the labels per sentence. The results are written to text files in a new
directory.
|
Python
|
apache-2.0
|
NLeSC/embodied-emotions-scripts,NLeSC/embodied-emotions-scripts
|
Add script to expand body parts
The script looks for words indicating body parts in sentences with
predicted label Lichaamsdeel and adds the appropriate body part terms to
the labels per sentence. The results are written to text files in a new
directory.
|
"""Find known body parts in sentences with predicted label 'Lichaamsdeel'.
Extended body parts are saved to new text files.
Usage: python classify_body_parts.py <json file with body part mapping> <dir
with input texts> <dir for output texts>
"""
import os
import codecs
import argparse
import json
from count_labels import load_data
def extend_body_part(text_id, file_name, out_file, word2cat):
# load data set
X_data, Y_data = load_data(file_name)
Y = [s.split('_') for s in Y_data]
num_body_parts = 0
num_added = 0
with codecs.open(out_file, 'wb', 'utf-8') as f:
for i, labelset in enumerate(Y):
ls = labelset
if 'Lichaamsdeel' in labelset:
num_body_parts += 1
words = X_data[i].split()
added = False
for word in words:
w = word.lower()
if w in word2cat.keys():
ls.append(word2cat[w].capitalize())
added = True
if added:
num_added += 1
ls = sorted(list(set(ls)))
f.write(u'{}\t{}\n'.format(X_data[i].decode('utf-8'),
'_'.join(ls)))
print '{}\t{}\t{}'.format(text_id, num_body_parts, num_added)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('file', help='json file containing the body part '
'mapping.')
parser.add_argument('input_dir', help='the directory where the input text '
'files can be found.')
parser.add_argument('output_dir', help='the directory where the output '
'files should be written.')
args = parser.parse_args()
file_name = args.file
input_dir = args.input_dir
output_dir = args.output_dir
if not os.path.exists(output_dir):
os.makedirs(output_dir)
mapping = {}
# read body part mapping
with codecs.open(file_name, 'rb', 'utf8') as f:
mapping = json.load(f, encoding='utf8')
# reverse body part mapping
word2cat = {}
for la, ws in mapping.iteritems():
for word in ws:
if not word2cat.get(word):
word2cat[word] = la
else:
print 'ignored: {} ({})'.format(word, la)
print 'Text\t#Body parts\t#replaced'
# process texts
text_files = [t for t in os.listdir(input_dir) if t.endswith('.txt')]
for text_file in text_files:
text_id = text_file.replace('.txt', '')
in_file = os.path.join(input_dir, text_file)
out_file = os.path.join(output_dir, text_file)
extend_body_part(text_id, in_file, out_file, word2cat)
|
<commit_before><commit_msg>Add script to expand body parts
The script looks for words indicating body parts in sentences with
predicted label Lichaamsdeel and adds the appropriate body part terms to
the labels per sentence. The results are written to text files in a new
directory.<commit_after>
|
"""Find known body parts in sentences with predicted label 'Lichaamsdeel'.
Extended body parts are saved to new text files.
Usage: python classify_body_parts.py <json file with body part mapping> <dir
with input texts> <dir for output texts>
"""
import os
import codecs
import argparse
import json
from count_labels import load_data
def extend_body_part(text_id, file_name, out_file, word2cat):
# load data set
X_data, Y_data = load_data(file_name)
Y = [s.split('_') for s in Y_data]
num_body_parts = 0
num_added = 0
with codecs.open(out_file, 'wb', 'utf-8') as f:
for i, labelset in enumerate(Y):
ls = labelset
if 'Lichaamsdeel' in labelset:
num_body_parts += 1
words = X_data[i].split()
added = False
for word in words:
w = word.lower()
if w in word2cat.keys():
ls.append(word2cat[w].capitalize())
added = True
if added:
num_added += 1
ls = sorted(list(set(ls)))
f.write(u'{}\t{}\n'.format(X_data[i].decode('utf-8'),
'_'.join(ls)))
print '{}\t{}\t{}'.format(text_id, num_body_parts, num_added)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('file', help='json file containing the body part '
'mapping.')
parser.add_argument('input_dir', help='the directory where the input text '
'files can be found.')
parser.add_argument('output_dir', help='the directory where the output '
'files should be written.')
args = parser.parse_args()
file_name = args.file
input_dir = args.input_dir
output_dir = args.output_dir
if not os.path.exists(output_dir):
os.makedirs(output_dir)
mapping = {}
# read body part mapping
with codecs.open(file_name, 'rb', 'utf8') as f:
mapping = json.load(f, encoding='utf8')
# reverse body part mapping
word2cat = {}
for la, ws in mapping.iteritems():
for word in ws:
if not word2cat.get(word):
word2cat[word] = la
else:
print 'ignored: {} ({})'.format(word, la)
print 'Text\t#Body parts\t#replaced'
# process texts
text_files = [t for t in os.listdir(input_dir) if t.endswith('.txt')]
for text_file in text_files:
text_id = text_file.replace('.txt', '')
in_file = os.path.join(input_dir, text_file)
out_file = os.path.join(output_dir, text_file)
extend_body_part(text_id, in_file, out_file, word2cat)
|
Add script to expand body parts
The script looks for words indicating body parts in sentences with
predicted label Lichaamsdeel and adds the appropriate body part terms to
the labels per sentence. The results are written to text files in a new
directory."""Find known body parts in sentences with predicted label 'Lichaamsdeel'.
Extended body parts are saved to new text files.
Usage: python classify_body_parts.py <json file with body part mapping> <dir
with input texts> <dir for output texts>
"""
import os
import codecs
import argparse
import json
from count_labels import load_data
def extend_body_part(text_id, file_name, out_file, word2cat):
# load data set
X_data, Y_data = load_data(file_name)
Y = [s.split('_') for s in Y_data]
num_body_parts = 0
num_added = 0
with codecs.open(out_file, 'wb', 'utf-8') as f:
for i, labelset in enumerate(Y):
ls = labelset
if 'Lichaamsdeel' in labelset:
num_body_parts += 1
words = X_data[i].split()
added = False
for word in words:
w = word.lower()
if w in word2cat.keys():
ls.append(word2cat[w].capitalize())
added = True
if added:
num_added += 1
ls = sorted(list(set(ls)))
f.write(u'{}\t{}\n'.format(X_data[i].decode('utf-8'),
'_'.join(ls)))
print '{}\t{}\t{}'.format(text_id, num_body_parts, num_added)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('file', help='json file containing the body part '
'mapping.')
parser.add_argument('input_dir', help='the directory where the input text '
'files can be found.')
parser.add_argument('output_dir', help='the directory where the output '
'files should be written.')
args = parser.parse_args()
file_name = args.file
input_dir = args.input_dir
output_dir = args.output_dir
if not os.path.exists(output_dir):
os.makedirs(output_dir)
mapping = {}
# read body part mapping
with codecs.open(file_name, 'rb', 'utf8') as f:
mapping = json.load(f, encoding='utf8')
# reverse body part mapping
word2cat = {}
for la, ws in mapping.iteritems():
for word in ws:
if not word2cat.get(word):
word2cat[word] = la
else:
print 'ignored: {} ({})'.format(word, la)
print 'Text\t#Body parts\t#replaced'
# process texts
text_files = [t for t in os.listdir(input_dir) if t.endswith('.txt')]
for text_file in text_files:
text_id = text_file.replace('.txt', '')
in_file = os.path.join(input_dir, text_file)
out_file = os.path.join(output_dir, text_file)
extend_body_part(text_id, in_file, out_file, word2cat)
|
<commit_before><commit_msg>Add script to expand body parts
The script looks for words indicating body parts in sentences with
predicted label Lichaamsdeel and adds the appropriate body part terms to
the labels per sentence. The results are written to text files in a new
directory.<commit_after>"""Find known body parts in sentences with predicted label 'Lichaamsdeel'.
Extended body parts are saved to new text files.
Usage: python classify_body_parts.py <json file with body part mapping> <dir
with input texts> <dir for output texts>
"""
import os
import codecs
import argparse
import json
from count_labels import load_data
def extend_body_part(text_id, file_name, out_file, word2cat):
# load data set
X_data, Y_data = load_data(file_name)
Y = [s.split('_') for s in Y_data]
num_body_parts = 0
num_added = 0
with codecs.open(out_file, 'wb', 'utf-8') as f:
for i, labelset in enumerate(Y):
ls = labelset
if 'Lichaamsdeel' in labelset:
num_body_parts += 1
words = X_data[i].split()
added = False
for word in words:
w = word.lower()
if w in word2cat.keys():
ls.append(word2cat[w].capitalize())
added = True
if added:
num_added += 1
ls = sorted(list(set(ls)))
f.write(u'{}\t{}\n'.format(X_data[i].decode('utf-8'),
'_'.join(ls)))
print '{}\t{}\t{}'.format(text_id, num_body_parts, num_added)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('file', help='json file containing the body part '
'mapping.')
parser.add_argument('input_dir', help='the directory where the input text '
'files can be found.')
parser.add_argument('output_dir', help='the directory where the output '
'files should be written.')
args = parser.parse_args()
file_name = args.file
input_dir = args.input_dir
output_dir = args.output_dir
if not os.path.exists(output_dir):
os.makedirs(output_dir)
mapping = {}
# read body part mapping
with codecs.open(file_name, 'rb', 'utf8') as f:
mapping = json.load(f, encoding='utf8')
# reverse body part mapping
word2cat = {}
for la, ws in mapping.iteritems():
for word in ws:
if not word2cat.get(word):
word2cat[word] = la
else:
print 'ignored: {} ({})'.format(word, la)
print 'Text\t#Body parts\t#replaced'
# process texts
text_files = [t for t in os.listdir(input_dir) if t.endswith('.txt')]
for text_file in text_files:
text_id = text_file.replace('.txt', '')
in_file = os.path.join(input_dir, text_file)
out_file = os.path.join(output_dir, text_file)
extend_body_part(text_id, in_file, out_file, word2cat)
|
|
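The body part mapping consumed by the script above is a JSON object mapping a category label to the words that indicate it; the record does not show the actual file, so the categories and Dutch words below are only illustrative. The inversion into word2cat works like this:

import json

# Hypothetical mapping in the shape the script expects: category -> list of words.
mapping = json.loads('{"hoofd": ["hoofd", "gezicht"], "hart": ["hart"]}')

# Invert it as the script does: the first category seen for a word wins, later ones are ignored.
word2cat = {}
for la, ws in mapping.items():
    for word in ws:
        word2cat.setdefault(word, la)

# The capitalized category is what gets appended to a sentence's label set.
print(word2cat['gezicht'].capitalize())  # -> 'Hoofd'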
fca3c0d7ce33dde19191f6c193344ee888b9c97a
|
geotrek/land/tests/test_models.py
|
geotrek/land/tests/test_models.py
|
from django.test import TestCase
from geotrek.land.models import LandType, PhysicalType
class TestModelLand(TestCase):
def test_physicaltype_value_no_structure(self):
pt = PhysicalType.objects.create(name='PhysicalType_1', structure=None)
self.assertEqual(str(pt), 'PhysicalType_1')
def test_physicaltype_value_default_structure(self):
pt = PhysicalType.objects.create(name='PhysicalType_1')
self.assertEqual(str(pt), 'PhysicalType_1 (%s)' % pt.structure)
def test_landtype_value_no_structure(self):
pt = LandType.objects.create(name='LandType_1', structure=None)
self.assertEqual(str(pt), 'LandType_1')
def test_landtype_value_default_structure(self):
pt = LandType.objects.create(name='LandType_1')
self.assertEqual(str(pt), 'LandType_1 (%s)' % pt.structure)
|
Add tests model land values
|
Add tests model land values
|
Python
|
bsd-2-clause
|
makinacorpus/Geotrek,GeotrekCE/Geotrek-admin,GeotrekCE/Geotrek-admin,makinacorpus/Geotrek,makinacorpus/Geotrek,GeotrekCE/Geotrek-admin,GeotrekCE/Geotrek-admin,makinacorpus/Geotrek
|
Add tests model land values
|
from django.test import TestCase
from geotrek.land.models import LandType, PhysicalType
class TestModelLand(TestCase):
def test_physicaltype_value_no_structure(self):
pt = PhysicalType.objects.create(name='PhysicalType_1', structure=None)
self.assertEqual(str(pt), 'PhysicalType_1')
def test_physicaltype_value_default_structure(self):
pt = PhysicalType.objects.create(name='PhysicalType_1')
self.assertEqual(str(pt), 'PhysicalType_1 (%s)' % pt.structure)
def test_landtype_value_no_structure(self):
pt = LandType.objects.create(name='LandType_1', structure=None)
self.assertEqual(str(pt), 'LandType_1')
def test_landtype_value_default_structure(self):
pt = LandType.objects.create(name='LandType_1')
self.assertEqual(str(pt), 'LandType_1 (%s)' % pt.structure)
|
<commit_before><commit_msg>Add tests model land values<commit_after>
|
from django.test import TestCase
from geotrek.land.models import LandType, PhysicalType
class TestModelLand(TestCase):
def test_physicaltype_value_no_structure(self):
pt = PhysicalType.objects.create(name='PhysicalType_1', structure=None)
self.assertEqual(str(pt), 'PhysicalType_1')
def test_physicaltype_value_default_structure(self):
pt = PhysicalType.objects.create(name='PhysicalType_1')
self.assertEqual(str(pt), 'PhysicalType_1 (%s)' % pt.structure)
def test_landtype_value_no_structure(self):
pt = LandType.objects.create(name='LandType_1', structure=None)
self.assertEqual(str(pt), 'LandType_1')
def test_landtype_value_default_structure(self):
pt = LandType.objects.create(name='LandType_1')
self.assertEqual(str(pt), 'LandType_1 (%s)' % pt.structure)
|
Add tests model land valuesfrom django.test import TestCase
from geotrek.land.models import LandType, PhysicalType
class TestModelLand(TestCase):
def test_physicaltype_value_no_structure(self):
pt = PhysicalType.objects.create(name='PhysicalType_1', structure=None)
self.assertEqual(str(pt), 'PhysicalType_1')
def test_physicaltype_value_default_structure(self):
pt = PhysicalType.objects.create(name='PhysicalType_1')
self.assertEqual(str(pt), 'PhysicalType_1 (%s)' % pt.structure)
def test_landtype_value_no_structure(self):
pt = LandType.objects.create(name='LandType_1', structure=None)
self.assertEqual(str(pt), 'LandType_1')
def test_landtype_value_default_structure(self):
pt = LandType.objects.create(name='LandType_1')
self.assertEqual(str(pt), 'LandType_1 (%s)' % pt.structure)
|
<commit_before><commit_msg>Add tests model land values<commit_after>from django.test import TestCase
from geotrek.land.models import LandType, PhysicalType
class TestModelLand(TestCase):
def test_physicaltype_value_no_structure(self):
pt = PhysicalType.objects.create(name='PhysicalType_1', structure=None)
self.assertEqual(str(pt), 'PhysicalType_1')
def test_physicaltype_value_default_structure(self):
pt = PhysicalType.objects.create(name='PhysicalType_1')
self.assertEqual(str(pt), 'PhysicalType_1 (%s)' % pt.structure)
def test_landtype_value_no_structure(self):
pt = LandType.objects.create(name='LandType_1', structure=None)
self.assertEqual(str(pt), 'LandType_1')
def test_landtype_value_default_structure(self):
pt = LandType.objects.create(name='LandType_1')
self.assertEqual(str(pt), 'LandType_1 (%s)' % pt.structure)
|
|
f18e08afb49724c01d84e36779eb6662631c5485
|
hoomd/md/pytest/test_integrate.py
|
hoomd/md/pytest/test_integrate.py
|
import pytest
import hoomd
import hoomd.md as md
@pytest.fixture
def make_simulation(simulation_factory, two_particle_snapshot_factory):
def sim_factory(particle_types=['A'], dimensions=3, d=1, L=20):
return simulation_factory(
two_particle_snapshot_factory(particle_types, dimensions, d, L))
return sim_factory
@pytest.fixture
def integrator_elements():
nlist = md.nlist.Cell()
lj = md.pair.LJ(nlist=nlist, default_r_cut=2.5)
gauss = md.pair.Gauss(nlist, default_r_cut=3.0)
lj.params[("A", "A")] = {"epsilon": 1.0, "sigma": 1.0}
gauss.params[("A", "A")] = {"epsilon": 1.0, "sigma": 1.0}
return {
"methods": [md.methods.NVE(hoomd.filter.All())],
"forces": [lj, gauss],
"constraints": [md.constrain.Distance()]
}
def test_attaching(make_simulation, integrator_elements):
sim = make_simulation()
integrator = hoomd.md.Integrator(0.005, **integrator_elements)
sim.operations.integrator = integrator
sim.run(0)
assert integrator._attached
assert integrator._forces._synced
assert integrator._methods._synced
assert integrator._constraints._synced
def test_detaching(make_simulation, integrator_elements):
sim = make_simulation()
integrator = hoomd.md.Integrator(0.005, **integrator_elements)
sim.operations.integrator = integrator
sim.run(0)
sim.operations._unschedule()
assert not integrator._attached
assert not integrator._forces._synced
assert not integrator._methods._synced
assert not integrator._constraints._synced
|
Add basic integrator attaching/detaching tests
|
Add basic integrator attaching/detaching tests
|
Python
|
bsd-3-clause
|
joaander/hoomd-blue,joaander/hoomd-blue,joaander/hoomd-blue,joaander/hoomd-blue,joaander/hoomd-blue,joaander/hoomd-blue
|
Add basic integrator attaching/detaching tests
|
import pytest
import hoomd
import hoomd.md as md
@pytest.fixture
def make_simulation(simulation_factory, two_particle_snapshot_factory):
def sim_factory(particle_types=['A'], dimensions=3, d=1, L=20):
return simulation_factory(
two_particle_snapshot_factory(particle_types, dimensions, d, L))
return sim_factory
@pytest.fixture
def integrator_elements():
nlist = md.nlist.Cell()
lj = md.pair.LJ(nlist=nlist, default_r_cut=2.5)
gauss = md.pair.Gauss(nlist, default_r_cut=3.0)
lj.params[("A", "A")] = {"epsilon": 1.0, "sigma": 1.0}
gauss.params[("A", "A")] = {"epsilon": 1.0, "sigma": 1.0}
return {
"methods": [md.methods.NVE(hoomd.filter.All())],
"forces": [lj, gauss],
"constraints": [md.constrain.Distance()]
}
def test_attaching(make_simulation, integrator_elements):
sim = make_simulation()
integrator = hoomd.md.Integrator(0.005, **integrator_elements)
sim.operations.integrator = integrator
sim.run(0)
assert integrator._attached
assert integrator._forces._synced
assert integrator._methods._synced
assert integrator._constraints._synced
def test_detaching(make_simulation, integrator_elements):
sim = make_simulation()
integrator = hoomd.md.Integrator(0.005, **integrator_elements)
sim.operations.integrator = integrator
sim.run(0)
sim.operations._unschedule()
assert not integrator._attached
assert not integrator._forces._synced
assert not integrator._methods._synced
assert not integrator._constraints._synced
|
<commit_before><commit_msg>Add basic integrator attaching/detaching tests<commit_after>
|
import pytest
import hoomd
import hoomd.md as md
@pytest.fixture
def make_simulation(simulation_factory, two_particle_snapshot_factory):
def sim_factory(particle_types=['A'], dimensions=3, d=1, L=20):
return simulation_factory(
two_particle_snapshot_factory(particle_types, dimensions, d, L))
return sim_factory
@pytest.fixture
def integrator_elements():
nlist = md.nlist.Cell()
lj = md.pair.LJ(nlist=nlist, default_r_cut=2.5)
gauss = md.pair.Gauss(nlist, default_r_cut=3.0)
lj.params[("A", "A")] = {"epsilon": 1.0, "sigma": 1.0}
gauss.params[("A", "A")] = {"epsilon": 1.0, "sigma": 1.0}
return {
"methods": [md.methods.NVE(hoomd.filter.All())],
"forces": [lj, gauss],
"constraints": [md.constrain.Distance()]
}
def test_attaching(make_simulation, integrator_elements):
sim = make_simulation()
integrator = hoomd.md.Integrator(0.005, **integrator_elements)
sim.operations.integrator = integrator
sim.run(0)
assert integrator._attached
assert integrator._forces._synced
assert integrator._methods._synced
assert integrator._constraints._synced
def test_detaching(make_simulation, integrator_elements):
sim = make_simulation()
integrator = hoomd.md.Integrator(0.005, **integrator_elements)
sim.operations.integrator = integrator
sim.run(0)
sim.operations._unschedule()
assert not integrator._attached
assert not integrator._forces._synced
assert not integrator._methods._synced
assert not integrator._constraints._synced
|
Add basic integrator attaching/detaching testsimport pytest
import hoomd
import hoomd.md as md
@pytest.fixture
def make_simulation(simulation_factory, two_particle_snapshot_factory):
def sim_factory(particle_types=['A'], dimensions=3, d=1, L=20):
return simulation_factory(
two_particle_snapshot_factory(particle_types, dimensions, d, L))
return sim_factory
@pytest.fixture
def integrator_elements():
nlist = md.nlist.Cell()
lj = md.pair.LJ(nlist=nlist, default_r_cut=2.5)
gauss = md.pair.Gauss(nlist, default_r_cut=3.0)
lj.params[("A", "A")] = {"epsilon": 1.0, "sigma": 1.0}
gauss.params[("A", "A")] = {"epsilon": 1.0, "sigma": 1.0}
return {
"methods": [md.methods.NVE(hoomd.filter.All())],
"forces": [lj, gauss],
"constraints": [md.constrain.Distance()]
}
def test_attaching(make_simulation, integrator_elements):
sim = make_simulation()
integrator = hoomd.md.Integrator(0.005, **integrator_elements)
sim.operations.integrator = integrator
sim.run(0)
assert integrator._attached
assert integrator._forces._synced
assert integrator._methods._synced
assert integrator._constraints._synced
def test_detaching(make_simulation, integrator_elements):
sim = make_simulation()
integrator = hoomd.md.Integrator(0.005, **integrator_elements)
sim.operations.integrator = integrator
sim.run(0)
sim.operations._unschedule()
assert not integrator._attached
assert not integrator._forces._synced
assert not integrator._methods._synced
assert not integrator._constraints._synced
|
<commit_before><commit_msg>Add basic integrator attaching/detaching tests<commit_after>import pytest
import hoomd
import hoomd.md as md
@pytest.fixture
def make_simulation(simulation_factory, two_particle_snapshot_factory):
def sim_factory(particle_types=['A'], dimensions=3, d=1, L=20):
return simulation_factory(
two_particle_snapshot_factory(particle_types, dimensions, d, L))
return sim_factory
@pytest.fixture
def integrator_elements():
nlist = md.nlist.Cell()
lj = md.pair.LJ(nlist=nlist, default_r_cut=2.5)
gauss = md.pair.Gauss(nlist, default_r_cut=3.0)
lj.params[("A", "A")] = {"epsilon": 1.0, "sigma": 1.0}
gauss.params[("A", "A")] = {"epsilon": 1.0, "sigma": 1.0}
return {
"methods": [md.methods.NVE(hoomd.filter.All())],
"forces": [lj, gauss],
"constraints": [md.constrain.Distance()]
}
def test_attaching(make_simulation, integrator_elements):
sim = make_simulation()
integrator = hoomd.md.Integrator(0.005, **integrator_elements)
sim.operations.integrator = integrator
sim.run(0)
assert integrator._attached
assert integrator._forces._synced
assert integrator._methods._synced
assert integrator._constraints._synced
def test_detaching(make_simulation, integrator_elements):
sim = make_simulation()
integrator = hoomd.md.Integrator(0.005, **integrator_elements)
sim.operations.integrator = integrator
sim.run(0)
sim.operations._unschedule()
assert not integrator._attached
assert not integrator._forces._synced
assert not integrator._methods._synced
assert not integrator._constraints._synced
|
|
cc358ae9d62e2a0fd6ebca99c410d0c2ce517d87
|
aospy/test/test_timedate.py
|
aospy/test/test_timedate.py
|
#!/usr/bin/env python
"""Test suite for aospy.utils module."""
import sys
import unittest
import numpy as np
import xarray as xr
from datetime import datetime
from aospy.timedate import TimeManager
class AospyTimeManagerTestCase(unittest.TestCase):
def setUp(self):
# self.tm = TimeManager()
pass
def tearDown(self):
pass
class TestTimeManager(AospyTimeManagerTestCase):
def test_to_datetime_bool(self):
self.assertEqual(TimeManager.to_datetime(True), True)
self.assertEqual(TimeManager.to_datetime(False), False)
def test_to_datetime_datetime(self):
self.assertEqual(TimeManager.to_datetime(datetime(2000, 1, 1)),
datetime(2000, 1, 1))
def test_to_datetime_year_only(self):
self.assertEqual(TimeManager.to_datetime(2000), datetime(2000, 1, 1))
def test_to_datetime_str(self):
self.assertEqual(TimeManager.to_datetime('2000-01-01'),
datetime(2000, 1, 1))
def test_str_to_datetime(self):
self.assertEqual(TimeManager.str_to_datetime('2000-01-01'),
datetime(2000, 1, 1))
def test_apply_year_offset(self):
self.assertEqual(TimeManager.apply_year_offset(datetime(1678, 1, 1)),
datetime(1678, 1, 1))
self.assertEqual(TimeManager.apply_year_offset(datetime(1, 1, 1)),
datetime(1900, 1, 1))
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestTimeManager)
unittest.TextTestRunner(verbosity=2).run(suite)
|
TEST Added tests of TimeManager
|
TEST Added tests of TimeManager
|
Python
|
apache-2.0
|
spencerahill/aospy,spencerkclark/aospy
|
TEST Added tests of TimeManager
|
#!/usr/bin/env python
"""Test suite for aospy.utils module."""
import sys
import unittest
import numpy as np
import xarray as xr
from datetime import datetime
from aospy.timedate import TimeManager
class AospyTimeManagerTestCase(unittest.TestCase):
def setUp(self):
# self.tm = TimeManager()
pass
def tearDown(self):
pass
class TestTimeManager(AospyTimeManagerTestCase):
def test_to_datetime_bool(self):
self.assertEqual(TimeManager.to_datetime(True), True)
self.assertEqual(TimeManager.to_datetime(False), False)
def test_to_datetime_datetime(self):
self.assertEqual(TimeManager.to_datetime(datetime(2000, 1, 1)),
datetime(2000, 1, 1))
def test_to_datetime_year_only(self):
self.assertEqual(TimeManager.to_datetime(2000), datetime(2000, 1, 1))
def test_to_datetime_str(self):
self.assertEqual(TimeManager.to_datetime('2000-01-01'),
datetime(2000, 1, 1))
def test_str_to_datetime(self):
self.assertEqual(TimeManager.str_to_datetime('2000-01-01'),
datetime(2000, 1, 1))
def test_apply_year_offset(self):
self.assertEqual(TimeManager.apply_year_offset(datetime(1678, 1, 1)),
datetime(1678, 1, 1))
self.assertEqual(TimeManager.apply_year_offset(datetime(1, 1, 1)),
datetime(1900, 1, 1))
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestTimeManager)
unittest.TextTestRunner(verbosity=2).run(suite)
|
<commit_before><commit_msg>TEST Added tests of TimeManager<commit_after>
|
#!/usr/bin/env python
"""Test suite for aospy.utils module."""
import sys
import unittest
import numpy as np
import xarray as xr
from datetime import datetime
from aospy.timedate import TimeManager
class AospyTimeManagerTestCase(unittest.TestCase):
def setUp(self):
# self.tm = TimeManager()
pass
def tearDown(self):
pass
class TestTimeManager(AospyTimeManagerTestCase):
def test_to_datetime_bool(self):
self.assertEqual(TimeManager.to_datetime(True), True)
self.assertEqual(TimeManager.to_datetime(False), False)
def test_to_datetime_datetime(self):
self.assertEqual(TimeManager.to_datetime(datetime(2000, 1, 1)),
datetime(2000, 1, 1))
def test_to_datetime_year_only(self):
self.assertEqual(TimeManager.to_datetime(2000), datetime(2000, 1, 1))
def test_to_datetime_str(self):
self.assertEqual(TimeManager.to_datetime('2000-01-01'),
datetime(2000, 1, 1))
def test_str_to_datetime(self):
self.assertEqual(TimeManager.str_to_datetime('2000-01-01'),
datetime(2000, 1, 1))
def test_apply_year_offset(self):
self.assertEqual(TimeManager.apply_year_offset(datetime(1678, 1, 1)),
datetime(1678, 1, 1))
self.assertEqual(TimeManager.apply_year_offset(datetime(1, 1, 1)),
datetime(1900, 1, 1))
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestTimeManager)
unittest.TextTestRunner(verbosity=2).run(suite)
|
TEST Added tests of TimeManager#!/usr/bin/env python
"""Test suite for aospy.utils module."""
import sys
import unittest
import numpy as np
import xarray as xr
from datetime import datetime
from aospy.timedate import TimeManager
class AospyTimeManagerTestCase(unittest.TestCase):
def setUp(self):
# self.tm = TimeManager()
pass
def tearDown(self):
pass
class TestTimeManager(AospyTimeManagerTestCase):
def test_to_datetime_bool(self):
self.assertEqual(TimeManager.to_datetime(True), True)
self.assertEqual(TimeManager.to_datetime(False), False)
def test_to_datetime_datetime(self):
self.assertEqual(TimeManager.to_datetime(datetime(2000, 1, 1)),
datetime(2000, 1, 1))
def test_to_datetime_year_only(self):
self.assertEqual(TimeManager.to_datetime(2000), datetime(2000, 1, 1))
def test_to_datetime_str(self):
self.assertEqual(TimeManager.to_datetime('2000-01-01'),
datetime(2000, 1, 1))
def test_str_to_datetime(self):
self.assertEqual(TimeManager.str_to_datetime('2000-01-01'),
datetime(2000, 1, 1))
def test_apply_year_offset(self):
self.assertEqual(TimeManager.apply_year_offset(datetime(1678, 1, 1)),
datetime(1678, 1, 1))
self.assertEqual(TimeManager.apply_year_offset(datetime(1, 1, 1)),
datetime(1900, 1, 1))
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestTimeManager)
unittest.TextTestRunner(verbosity=2).run(suite)
|
<commit_before><commit_msg>TEST Added tests of TimeManager<commit_after>#!/usr/bin/env python
"""Test suite for aospy.utils module."""
import sys
import unittest
import numpy as np
import xarray as xr
from datetime import datetime
from aospy.timedate import TimeManager
class AospyTimeManagerTestCase(unittest.TestCase):
def setUp(self):
# self.tm = TimeManager()
pass
def tearDown(self):
pass
class TestTimeManager(AospyTimeManagerTestCase):
def test_to_datetime_bool(self):
self.assertEqual(TimeManager.to_datetime(True), True)
self.assertEqual(TimeManager.to_datetime(False), False)
def test_to_datetime_datetime(self):
self.assertEqual(TimeManager.to_datetime(datetime(2000, 1, 1)),
datetime(2000, 1, 1))
def test_to_datetime_year_only(self):
self.assertEqual(TimeManager.to_datetime(2000), datetime(2000, 1, 1))
def test_to_datetime_str(self):
self.assertEqual(TimeManager.to_datetime('2000-01-01'),
datetime(2000, 1, 1))
def test_str_to_datetime(self):
self.assertEqual(TimeManager.str_to_datetime('2000-01-01'),
datetime(2000, 1, 1))
def test_apply_year_offset(self):
self.assertEqual(TimeManager.apply_year_offset(datetime(1678, 1, 1)),
datetime(1678, 1, 1))
self.assertEqual(TimeManager.apply_year_offset(datetime(1, 1, 1)),
datetime(1900, 1, 1))
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestTimeManager)
unittest.TextTestRunner(verbosity=2).run(suite)
|
|
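Judging only from the assertions above, apply_year_offset leaves dates from 1678 onwards untouched and shifts year 1 to 1900; a standalone sketch that reproduces those two cases (an assumption, not the actual aospy implementation) is:

from datetime import datetime

def apply_year_offset_sketch(date, min_year=1678, offset=1899):
    # Dates before the representable range are assumed to be shifted forward by 1899 years.
    if date.year < min_year:
        return date.replace(year=date.year + offset)
    return date

print(apply_year_offset_sketch(datetime(1678, 1, 1)))  # 1678-01-01 00:00:00, unchanged
print(apply_year_offset_sketch(datetime(1, 1, 1)))     # 1900-01-01 00:00:00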
ba09a243a37db3ef8b587f92d258a60047a4c2c7
|
python/rds_desired_instance_type-triggered.py
|
python/rds_desired_instance_type-triggered.py
|
#
# This file made available under CC0 1.0 Universal (https://creativecommons.org/publicdomain/zero/1.0/legalcode)
#
# Ensure all RDS DB Instances are of a Given Type
# Description: Checks that all RDS DB instances are of the type specified
#
# Trigger Type: Change Triggered
# Scope of Changes: RDS::DBInstance
# Required Parameter: DBInstance
# Example Value: db.t2.small
#
# See https://aws.amazon.com/ec2/instance-types/ for more instance types
import boto3
import json
def is_applicable(config_item, event):
status = config_item['configurationItemStatus']
event_left_scope = event['eventLeftScope']
test = ((status in ['OK', 'ResourceDiscovered']) and
event_left_scope == False)
return test
def evaluate_compliance(config_item, rule_parameters):
if (config_item['resourceType'] != 'AWS::RDS::DBInstance'):
return 'NOT_APPLICABLE'
elif (config_item['configuration']['dBInstanceClass'] in
rule_parameters['DBInstance']):
return 'COMPLIANT'
else:
return 'NON_COMPLIANT'
def lambda_handler(event, context):
invoking_event = json.loads(event['invokingEvent'])
rule_parameters = json.loads(event['ruleParameters'])
compliance_value = 'NOT_APPLICABLE'
if is_applicable(invoking_event['configurationItem'], event):
compliance_value = evaluate_compliance(
invoking_event['configurationItem'], rule_parameters)
config = boto3.client('config')
response = config.put_evaluations(
Evaluations=[
{
'ComplianceResourceType': invoking_event['configurationItem']['resourceType'],
'ComplianceResourceId': invoking_event['configurationItem']['resourceId'],
'ComplianceType': compliance_value,
'OrderingTimestamp': invoking_event['configurationItem']['configurationItemCaptureTime']
},
],
ResultToken=event['resultToken'])
|
Add RDS instance Type Config Rule.
|
Add RDS instance Type Config Rule.
Requirements: comma separated DBInstance Class named "DBInstance" as Key on Rule parameters.
|
Python
|
cc0-1.0
|
awslabs/aws-config-rules,awslabs/aws-config-rules,awslabs/aws-config-rules
|
Add RDS instance Type Config Rule.
Requirements: comma separated DBInstance Class named "DBInstance" as Key on Rule parameters.
|
#
# This file made available under CC0 1.0 Universal (https://creativecommons.org/publicdomain/zero/1.0/legalcode)
#
# Ensure all RDS DB Instances are of a Given Type
# Description: Checks that all RDS DB instances are of the type specified
#
# Trigger Type: Change Triggered
# Scope of Changes: RDS::DBInstance
# Required Parameter: DBInstance
# Example Value: db.t2.small
#
# See https://aws.amazon.com/ec2/instance-types/ for more instance types
import boto3
import json
def is_applicable(config_item, event):
status = config_item['configurationItemStatus']
event_left_scope = event['eventLeftScope']
test = ((status in ['OK', 'ResourceDiscovered']) and
event_left_scope == False)
return test
def evaluate_compliance(config_item, rule_parameters):
if (config_item['resourceType'] != 'AWS::RDS::DBInstance'):
return 'NOT_APPLICABLE'
elif (config_item['configuration']['dBInstanceClass'] in
rule_parameters['DBInstance']):
return 'COMPLIANT'
else:
return 'NON_COMPLIANT'
def lambda_handler(event, context):
invoking_event = json.loads(event['invokingEvent'])
rule_parameters = json.loads(event['ruleParameters'])
compliance_value = 'NOT_APPLICABLE'
if is_applicable(invoking_event['configurationItem'], event):
compliance_value = evaluate_compliance(
invoking_event['configurationItem'], rule_parameters)
config = boto3.client('config')
response = config.put_evaluations(
Evaluations=[
{
'ComplianceResourceType': invoking_event['configurationItem']['resourceType'],
'ComplianceResourceId': invoking_event['configurationItem']['resourceId'],
'ComplianceType': compliance_value,
'OrderingTimestamp': invoking_event['configurationItem']['configurationItemCaptureTime']
},
],
ResultToken=event['resultToken'])
|
<commit_before><commit_msg>Add RDS instance Type Config Rule.
Requirements: comma separated DBInstance Class named "DBInstance" as Key on Rule parameters.<commit_after>
|
#
# This file made available under CC0 1.0 Universal (https://creativecommons.org/publicdomain/zero/1.0/legalcode)
#
# Ensure all RDS DB Instances are of a Given Type
# Description: Checks that all RDS DB instances are of the type specified
#
# Trigger Type: Change Triggered
# Scope of Changes: RDS::DBInstance
# Required Parameter: DBInstance
# Example Value: db.t2.small
#
# See https://aws.amazon.com/ec2/instance-types/ for more instance types
import boto3
import json
def is_applicable(config_item, event):
status = config_item['configurationItemStatus']
event_left_scope = event['eventLeftScope']
test = ((status in ['OK', 'ResourceDiscovered']) and
event_left_scope == False)
return test
def evaluate_compliance(config_item, rule_parameters):
if (config_item['resourceType'] != 'AWS::RDS::DBInstance'):
return 'NOT_APPLICABLE'
elif (config_item['configuration']['dBInstanceClass'] in
rule_parameters['DBInstance']):
return 'COMPLIANT'
else:
return 'NON_COMPLIANT'
def lambda_handler(event, context):
invoking_event = json.loads(event['invokingEvent'])
rule_parameters = json.loads(event['ruleParameters'])
compliance_value = 'NOT_APPLICABLE'
if is_applicable(invoking_event['configurationItem'], event):
compliance_value = evaluate_compliance(
invoking_event['configurationItem'], rule_parameters)
config = boto3.client('config')
response = config.put_evaluations(
Evaluations=[
{
'ComplianceResourceType': invoking_event['configurationItem']['resourceType'],
'ComplianceResourceId': invoking_event['configurationItem']['resourceId'],
'ComplianceType': compliance_value,
'OrderingTimestamp': invoking_event['configurationItem']['configurationItemCaptureTime']
},
],
ResultToken=event['resultToken'])
|
Add RDS instance Type Config Rule.
Requirements: comma separated DBInstance Class named "DBInstance" as Key on Rule parameters.#
# This file made available under CC0 1.0 Universal (https://creativecommons.org/publicdomain/zero/1.0/legalcode)
#
# Ensure all RDS DB Instances are of a Given Type
# Description: Checks that all RDS DB instances are of the type specified
#
# Trigger Type: Change Triggered
# Scope of Changes: RDS::DBInstance
# Required Parameter: DBInstance
# Example Value: db.t2.small
#
# See https://aws.amazon.com/ec2/instance-types/ for more instance types
import boto3
import json
def is_applicable(config_item, event):
status = config_item['configurationItemStatus']
event_left_scope = event['eventLeftScope']
test = ((status in ['OK', 'ResourceDiscovered']) and
event_left_scope == False)
return test
def evaluate_compliance(config_item, rule_parameters):
if (config_item['resourceType'] != 'AWS::RDS::DBInstance'):
return 'NOT_APPLICABLE'
elif (config_item['configuration']['dBInstanceClass'] in
rule_parameters['DBInstance']):
return 'COMPLIANT'
else:
return 'NON_COMPLIANT'
def lambda_handler(event, context):
invoking_event = json.loads(event['invokingEvent'])
rule_parameters = json.loads(event['ruleParameters'])
compliance_value = 'NOT_APPLICABLE'
if is_applicable(invoking_event['configurationItem'], event):
compliance_value = evaluate_compliance(
invoking_event['configurationItem'], rule_parameters)
config = boto3.client('config')
response = config.put_evaluations(
Evaluations=[
{
'ComplianceResourceType': invoking_event['configurationItem']['resourceType'],
'ComplianceResourceId': invoking_event['configurationItem']['resourceId'],
'ComplianceType': compliance_value,
'OrderingTimestamp': invoking_event['configurationItem']['configurationItemCaptureTime']
},
],
ResultToken=event['resultToken'])
|
<commit_before><commit_msg>Add RDS instance Type Config Rule.
Requirements: comma separated DBInstance Class named "DBInstance" as Key on Rule parameters.<commit_after>#
# This file made available under CC0 1.0 Universal (https://creativecommons.org/publicdomain/zero/1.0/legalcode)
#
# Ensure all RDS DB Instances are of a Given Type
# Description: Checks that all RDS DB instances are of the type specified
#
# Trigger Type: Change Triggered
# Scope of Changes: RDS::DBInstance
# Required Parameter: DBInstance
# Example Value: db.t2.small
#
# See https://aws.amazon.com/ec2/instance-types/ for more instance types
import boto3
import json
def is_applicable(config_item, event):
status = config_item['configurationItemStatus']
event_left_scope = event['eventLeftScope']
test = ((status in ['OK', 'ResourceDiscovered']) and
event_left_scope == False)
return test
def evaluate_compliance(config_item, rule_parameters):
if (config_item['resourceType'] != 'AWS::RDS::DBInstance'):
return 'NOT_APPLICABLE'
elif (config_item['configuration']['dBInstanceClass'] in
rule_parameters['DBInstance']):
return 'COMPLIANT'
else:
return 'NON_COMPLIANT'
def lambda_handler(event, context):
invoking_event = json.loads(event['invokingEvent'])
rule_parameters = json.loads(event['ruleParameters'])
compliance_value = 'NOT_APPLICABLE'
if is_applicable(invoking_event['configurationItem'], event):
compliance_value = evaluate_compliance(
invoking_event['configurationItem'], rule_parameters)
config = boto3.client('config')
response = config.put_evaluations(
Evaluations=[
{
'ComplianceResourceType': invoking_event['configurationItem']['resourceType'],
'ComplianceResourceId': invoking_event['configurationItem']['resourceId'],
'ComplianceType': compliance_value,
'OrderingTimestamp': invoking_event['configurationItem']['configurationItemCaptureTime']
},
],
ResultToken=event['resultToken'])
|
|
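The rule above reads its required DBInstance parameter from the ruleParameters JSON string, and the compliance test is a plain membership check of the instance class against that string, so several classes can be allowed by listing them comma-separated. A small sketch with made-up values:

import json

# Hypothetical rule parameters as AWS Config passes them (a JSON-encoded string).
rule_parameters = json.loads('{"DBInstance": "db.t2.small,db.t2.medium"}')

# A trimmed-down configuration item for one RDS instance.
config_item = {
    'resourceType': 'AWS::RDS::DBInstance',
    'configuration': {'dBInstanceClass': 'db.t2.small'},
}

# Same check as evaluate_compliance: is the class contained in the parameter value?
compliant = config_item['configuration']['dBInstanceClass'] in rule_parameters['DBInstance']
print('COMPLIANT' if compliant else 'NON_COMPLIANT')  # -> COMPLIANT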
b463737daffac786354bee8c49bd11dbed3a8064
|
examples/arlo-adjustbrightness.py
|
examples/arlo-adjustbrightness.py
|
from Arlo import Arlo
USERNAME = 'user@example.com'
PASSWORD = 'supersecretpassword'
try:
# Instantiating the Arlo object automatically calls Login(), which returns an oAuth token that gets cached.
# Subsequent successful calls to login will update the oAuth token.
arlo = Arlo(USERNAME, PASSWORD)
# At this point you're logged into Arlo.
# Get the list of devices and filter on device type to only get the basestation.
# This will return an array which includes all of the basestation's associated metadata.
basestations = arlo.GetDevices('basestation')
# Get the list of devices and filter on device type to only get the camera.
# This will return an array which includes all of the camera's associated metadata.
cameras = arlo.GetDevices('camera')
# Set camera brightness to 0%.
#arlo.AdjustBrightness(basestations[0], cameras[0], -2)
# Set camera brightness to 25%.
#arlo.AdjustBrightness(basestations[0], cameras[0], -1)
# Set camera brightness to 50%.
arlo.AdjustBrightness(basestations[0], cameras[0], 0)
# Set camera brightness to 75%.
#arlo.AdjustBrightness(basestations[0], cameras[0], 1)
# Set camera brightness to 100%.
#arlo.AdjustBrightness(basestations[0], cameras[0], 2)
except Exception as e:
print(e)
|
Add example script for adjusting live camera feed brightness.
|
Add example script for adjusting live camera feed brightness.
|
Python
|
apache-2.0
|
jeffreydwalter/arlo
|
Add example script for adjusting live camera feed brightness.
|
from Arlo import Arlo
USERNAME = 'user@example.com'
PASSWORD = 'supersecretpassword'
try:
# Instantiating the Arlo object automatically calls Login(), which returns an oAuth token that gets cached.
# Subsequent successful calls to login will update the oAuth token.
arlo = Arlo(USERNAME, PASSWORD)
# At this point you're logged into Arlo.
# Get the list of devices and filter on device type to only get the basestation.
# This will return an array which includes all of the basestation's associated metadata.
basestations = arlo.GetDevices('basestation')
# Get the list of devices and filter on device type to only get the camera.
# This will return an array which includes all of the camera's associated metadata.
cameras = arlo.GetDevices('camera')
# Set camera brightness to 0%.
#arlo.AdjustBrightness(basestations[0], cameras[0], -2)
# Set camera brightness to 25%.
#arlo.AdjustBrightness(basestations[0], cameras[0], -1)
# Set camera brightness to 50%.
arlo.AdjustBrightness(basestations[0], cameras[0], 0)
# Set camera brightness to 75%.
#arlo.AdjustBrightness(basestations[0], cameras[0], 1)
# Set camera brightness to 100%.
#arlo.AdjustBrightness(basestations[0], cameras[0], 2)
except Exception as e:
print(e)
|
<commit_before><commit_msg>Add example script for adjusting live camera feed brightness.<commit_after>
|
from Arlo import Arlo
USERNAME = 'user@example.com'
PASSWORD = 'supersecretpassword'
try:
# Instantiating the Arlo object automatically calls Login(), which returns an oAuth token that gets cached.
# Subsequent successful calls to login will update the oAuth token.
arlo = Arlo(USERNAME, PASSWORD)
# At this point you're logged into Arlo.
# Get the list of devices and filter on device type to only get the basestation.
# This will return an array which includes all of the basestation's associated metadata.
basestations = arlo.GetDevices('basestation')
# Get the list of devices and filter on device type to only get the camera.
# This will return an array which includes all of the camera's associated metadata.
cameras = arlo.GetDevices('camera')
# Set camera brightness to 0%.
#arlo.AdjustBrightness(basestations[0], cameras[0], -2)
# Set camera brightness to 25%.
#arlo.AdjustBrightness(basestations[0], cameras[0], -1)
# Set camera brightness to 50%.
arlo.AdjustBrightness(basestations[0], cameras[0], 0)
# Set camera brightness to 75%.
#arlo.AdjustBrightness(basestations[0], cameras[0], 1)
# Set camera brightness to 100%.
#arlo.AdjustBrightness(basestations[0], cameras[0], 2)
except Exception as e:
print(e)
|
Add example script for adjusting live camera feed brightness.from Arlo import Arlo
USERNAME = 'user@example.com'
PASSWORD = 'supersecretpassword'
try:
# Instantiating the Arlo object automatically calls Login(), which returns an oAuth token that gets cached.
# Subsequent successful calls to login will update the oAuth token.
arlo = Arlo(USERNAME, PASSWORD)
# At this point you're logged into Arlo.
# Get the list of devices and filter on device type to only get the basestation.
# This will return an array which includes all of the basestation's associated metadata.
basestations = arlo.GetDevices('basestation')
# Get the list of devices and filter on device type to only get the camera.
# This will return an array which includes all of the camera's associated metadata.
cameras = arlo.GetDevices('camera')
# Set camera brightness to 0%.
#arlo.AdjustBrightness(basestations[0], cameras[0], -2)
# Set camera brightness to 25%.
#arlo.AdjustBrightness(basestations[0], cameras[0], -1)
# Set camera brightness to 50%.
arlo.AdjustBrightness(basestations[0], cameras[0], 0)
# Set camera brightness to 75%.
#arlo.AdjustBrightness(basestations[0], cameras[0], 1)
# Set camera brightness to 100%.
#arlo.AdjustBrightness(basestations[0], cameras[0], 2)
except Exception as e:
print(e)
|
<commit_before><commit_msg>Add example script for adjusting live camera feed brightness.<commit_after>from Arlo import Arlo
USERNAME = 'user@example.com'
PASSWORD = 'supersecretpassword'
try:
# Instantiating the Arlo object automatically calls Login(), which returns an oAuth token that gets cached.
# Subsequent successful calls to login will update the oAuth token.
arlo = Arlo(USERNAME, PASSWORD)
# At this point you're logged into Arlo.
# Get the list of devices and filter on device type to only get the basestation.
# This will return an array which includes all of the basestation's associated metadata.
basestations = arlo.GetDevices('basestation')
# Get the list of devices and filter on device type to only get the camera.
# This will return an array which includes all of the camera's associated metadata.
cameras = arlo.GetDevices('camera')
# Set camera brightness to 0%.
#arlo.AdjustBrightness(basestations[0], cameras[0], -2)
# Set camera brightness to 25%.
#arlo.AdjustBrightness(basestations[0], cameras[0], -1)
# Set camera brightness to 50%.
arlo.AdjustBrightness(basestations[0], cameras[0], 0)
# Set camera brightness to 75%.
#arlo.AdjustBrightness(basestations[0], cameras[0], 1)
# Set camera brightness to 100%.
#arlo.AdjustBrightness(basestations[0], cameras[0], 2)
except Exception as e:
print(e)
|
|
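The brightness argument used above is an integer step from -2 to 2 (25% per step) rather than a percentage; a small helper that translates a percentage into that scale, offered as an illustration rather than part of the Arlo API, could look like:

def percent_to_arlo_brightness(percent):
    """Map 0-100% onto the -2..2 steps that AdjustBrightness expects."""
    if not 0 <= percent <= 100:
        raise ValueError('percent must be between 0 and 100')
    return int(round(percent / 25.0)) - 2

print(percent_to_arlo_brightness(0))    # -2
print(percent_to_arlo_brightness(50))   # 0
print(percent_to_arlo_brightness(100))  # 2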
4084fba3d438da4a4133dfafec985d8dd4191a3a
|
examples/demo/basic/scatter_1d.py
|
examples/demo/basic/scatter_1d.py
|
"""
Scatter plot with auxiliary 1d plots
Shows a scatter plot of a set of random points,
with auxiliary 1d plots of the data.
"""
# Major library imports
from numpy import sort
from numpy.random import random, randint
# Enthought library imports
from enable.api import Component, ComponentEditor
from traits.api import HasTraits, Instance
from traitsui.api import Item, Group, View
# Chaco imports
from chaco.api import ArrayPlotData, Plot
from chaco.tools.api import PanTool, ZoomTool
#===============================================================================
# # Create the Chaco plot.
#===============================================================================
def _create_plot_component():
# Create some data
numpts = 50
x = sort(random(numpts))
y = random(numpts)
# Create a plot data object and give it this data
pd = ArrayPlotData()
pd.set_data("index", x)
pd.set_data("value", y)
# Create the plot
plot = Plot(pd, use_backbuffer=True, auto_grid=False)
plot.plot_1d(
'index',
type='line_scatter_1d',
orientation='h',
color='lightgrey',
line_style='dot',
)
plot.plot_1d(
'index',
type='scatter_1d',
orientation='h',
marker='plus',
marker_alignment='bottom'
)
plot.plot_1d(
'value',
type='line_scatter_1d',
orientation='v',
color='lightgrey',
line_style='dot',
)
plot.plot_1d(
'value',
type='scatter_1d',
orientation='v',
marker='plus',
marker_alignment='left'
)
plot.plot(("index", "value"),
type="scatter",
marker="square",
index_sort="ascending",
color="orange",
marker_size=3, #randint(1,5, numpts),
bgcolor="white",
use_backbuffer=True)
# Tweak some of the plot properties
plot.title = "1D Scatter Plots"
plot.line_width = 0.5
plot.padding = 50
# Attach some tools to the plot
plot.tools.append(PanTool(plot, constrain_key="shift"))
zoom = ZoomTool(component=plot, tool_mode="box", always_on=False)
plot.overlays.append(zoom)
return plot
#===============================================================================
# Attributes to use for the plot view.
size = (650, 650)
title = "1D scatter plots"
bg_color="lightgray"
#===============================================================================
# # Demo class that is used by the demo.py application.
#===============================================================================
class Demo(HasTraits):
plot = Instance(Component)
traits_view = View(
Group(
Item('plot', editor=ComponentEditor(size=size,
bgcolor=bg_color),
show_label=False),
orientation = "vertical"),
resizable=True, title=title
)
def _plot_default(self):
return _create_plot_component()
demo = Demo()
if __name__ == "__main__":
demo.configure_traits()
#--EOF---
|
Add example showing 1d scatter plots.
|
Add example showing 1d scatter plots.
|
Python
|
bsd-3-clause
|
tommy-u/chaco,tommy-u/chaco,tommy-u/chaco
|
Add example showing 1d scatter plots.
|
"""
Scatter plot with auxiliary 1d plots
Shows a scatter plot of a set of random points,
with auxiliary 1d plots of the data.
"""
# Major library imports
from numpy import sort
from numpy.random import random, randint
# Enthought library imports
from enable.api import Component, ComponentEditor
from traits.api import HasTraits, Instance
from traitsui.api import Item, Group, View
# Chaco imports
from chaco.api import ArrayPlotData, Plot
from chaco.tools.api import PanTool, ZoomTool
#===============================================================================
# # Create the Chaco plot.
#===============================================================================
def _create_plot_component():
# Create some data
numpts = 50
x = sort(random(numpts))
y = random(numpts)
# Create a plot data object and give it this data
pd = ArrayPlotData()
pd.set_data("index", x)
pd.set_data("value", y)
# Create the plot
plot = Plot(pd, use_backbuffer=True, auto_grid=False)
plot.plot_1d(
'index',
type='line_scatter_1d',
orientation='h',
color='lightgrey',
line_style='dot',
)
plot.plot_1d(
'index',
type='scatter_1d',
orientation='h',
marker='plus',
marker_alignment='bottom'
)
plot.plot_1d(
'value',
type='line_scatter_1d',
orientation='v',
color='lightgrey',
line_style='dot',
)
plot.plot_1d(
'value',
type='scatter_1d',
orientation='v',
marker='plus',
marker_alignment='left'
)
plot.plot(("index", "value"),
type="scatter",
marker="square",
index_sort="ascending",
color="orange",
marker_size=3, #randint(1,5, numpts),
bgcolor="white",
use_backbuffer=True)
# Tweak some of the plot properties
plot.title = "1D Scatter Plots"
plot.line_width = 0.5
plot.padding = 50
# Attach some tools to the plot
plot.tools.append(PanTool(plot, constrain_key="shift"))
zoom = ZoomTool(component=plot, tool_mode="box", always_on=False)
plot.overlays.append(zoom)
return plot
#===============================================================================
# Attributes to use for the plot view.
size = (650, 650)
title = "1D scatter plots"
bg_color="lightgray"
#===============================================================================
# # Demo class that is used by the demo.py application.
#===============================================================================
class Demo(HasTraits):
plot = Instance(Component)
traits_view = View(
Group(
Item('plot', editor=ComponentEditor(size=size,
bgcolor=bg_color),
show_label=False),
orientation = "vertical"),
resizable=True, title=title
)
def _plot_default(self):
return _create_plot_component()
demo = Demo()
if __name__ == "__main__":
demo.configure_traits()
#--EOF---
|
<commit_before><commit_msg>Add example showing 1d scatter plots.<commit_after>
|
"""
Scatter plot with auxiliary 1d plots
Shows a scatter plot of a set of random points,
with auxiliary 1d plots of the data.
"""
# Major library imports
from numpy import sort
from numpy.random import random, randint
# Enthought library imports
from enable.api import Component, ComponentEditor
from traits.api import HasTraits, Instance
from traitsui.api import Item, Group, View
# Chaco imports
from chaco.api import ArrayPlotData, Plot
from chaco.tools.api import PanTool, ZoomTool
#===============================================================================
# # Create the Chaco plot.
#===============================================================================
def _create_plot_component():
# Create some data
numpts = 50
x = sort(random(numpts))
y = random(numpts)
# Create a plot data object and give it this data
pd = ArrayPlotData()
pd.set_data("index", x)
pd.set_data("value", y)
# Create the plot
plot = Plot(pd, use_backbuffer=True, auto_grid=False)
plot.plot_1d(
'index',
type='line_scatter_1d',
orientation='h',
color='lightgrey',
line_style='dot',
)
plot.plot_1d(
'index',
type='scatter_1d',
orientation='h',
marker='plus',
marker_alignment='bottom'
)
plot.plot_1d(
'value',
type='line_scatter_1d',
orientation='v',
color='lightgrey',
line_style='dot',
)
plot.plot_1d(
'value',
type='scatter_1d',
orientation='v',
marker='plus',
marker_alignment='left'
)
plot.plot(("index", "value"),
type="scatter",
marker="square",
index_sort="ascending",
color="orange",
marker_size=3, #randint(1,5, numpts),
bgcolor="white",
use_backbuffer=True)
# Tweak some of the plot properties
plot.title = "1D Scatter Plots"
plot.line_width = 0.5
plot.padding = 50
# Attach some tools to the plot
plot.tools.append(PanTool(plot, constrain_key="shift"))
zoom = ZoomTool(component=plot, tool_mode="box", always_on=False)
plot.overlays.append(zoom)
return plot
#===============================================================================
# Attributes to use for the plot view.
size = (650, 650)
title = "1D scatter plots"
bg_color="lightgray"
#===============================================================================
# # Demo class that is used by the demo.py application.
#===============================================================================
class Demo(HasTraits):
plot = Instance(Component)
traits_view = View(
Group(
Item('plot', editor=ComponentEditor(size=size,
bgcolor=bg_color),
show_label=False),
orientation = "vertical"),
resizable=True, title=title
)
def _plot_default(self):
return _create_plot_component()
demo = Demo()
if __name__ == "__main__":
demo.configure_traits()
#--EOF---
|
Add example showing 1d scatter plots."""
Scatter plot with auxiliary 1d plots
Shows a scatter plot of a set of random points,
with auxiliary 1d plots of the data.
"""
# Major library imports
from numpy import sort
from numpy.random import random, randint
# Enthought library imports
from enable.api import Component, ComponentEditor
from traits.api import HasTraits, Instance
from traitsui.api import Item, Group, View
# Chaco imports
from chaco.api import ArrayPlotData, Plot
from chaco.tools.api import PanTool, ZoomTool
#===============================================================================
# # Create the Chaco plot.
#===============================================================================
def _create_plot_component():
# Create some data
numpts = 50
x = sort(random(numpts))
y = random(numpts)
# Create a plot data object and give it this data
pd = ArrayPlotData()
pd.set_data("index", x)
pd.set_data("value", y)
# Create the plot
plot = Plot(pd, use_backbuffer=True, auto_grid=False)
plot.plot_1d(
'index',
type='line_scatter_1d',
orientation='h',
color='lightgrey',
line_style='dot',
)
plot.plot_1d(
'index',
type='scatter_1d',
orientation='h',
marker='plus',
marker_alignment='bottom'
)
plot.plot_1d(
'value',
type='line_scatter_1d',
orientation='v',
color='lightgrey',
line_style='dot',
)
plot.plot_1d(
'value',
type='scatter_1d',
orientation='v',
marker='plus',
marker_alignment='left'
)
plot.plot(("index", "value"),
type="scatter",
marker="square",
index_sort="ascending",
color="orange",
marker_size=3, #randint(1,5, numpts),
bgcolor="white",
use_backbuffer=True)
# Tweak some of the plot properties
plot.title = "1D Scatter Plots"
plot.line_width = 0.5
plot.padding = 50
# Attach some tools to the plot
plot.tools.append(PanTool(plot, constrain_key="shift"))
zoom = ZoomTool(component=plot, tool_mode="box", always_on=False)
plot.overlays.append(zoom)
return plot
#===============================================================================
# Attributes to use for the plot view.
size = (650, 650)
title = "1D scatter plots"
bg_color="lightgray"
#===============================================================================
# # Demo class that is used by the demo.py application.
#===============================================================================
class Demo(HasTraits):
plot = Instance(Component)
traits_view = View(
Group(
Item('plot', editor=ComponentEditor(size=size,
bgcolor=bg_color),
show_label=False),
orientation = "vertical"),
resizable=True, title=title
)
def _plot_default(self):
return _create_plot_component()
demo = Demo()
if __name__ == "__main__":
demo.configure_traits()
#--EOF---
|
<commit_before><commit_msg>Add example showing 1d scatter plots.<commit_after>"""
Scatter plot with auxiliary 1d plots
Shows a scatter plot of a set of random points,
with auxiliary 1d plots of the data.
"""
# Major library imports
from numpy import sort
from numpy.random import random, randint
# Enthought library imports
from enable.api import Component, ComponentEditor
from traits.api import HasTraits, Instance
from traitsui.api import Item, Group, View
# Chaco imports
from chaco.api import ArrayPlotData, Plot
from chaco.tools.api import PanTool, ZoomTool
#===============================================================================
# # Create the Chaco plot.
#===============================================================================
def _create_plot_component():
# Create some data
numpts = 50
x = sort(random(numpts))
y = random(numpts)
# Create a plot data object and give it this data
pd = ArrayPlotData()
pd.set_data("index", x)
pd.set_data("value", y)
# Create the plot
plot = Plot(pd, use_backbuffer=True, auto_grid=False)
plot.plot_1d(
'index',
type='line_scatter_1d',
orientation='h',
color='lightgrey',
line_style='dot',
)
plot.plot_1d(
'index',
type='scatter_1d',
orientation='h',
marker='plus',
marker_alignment='bottom'
)
plot.plot_1d(
'value',
type='line_scatter_1d',
orientation='v',
color='lightgrey',
line_style='dot',
)
plot.plot_1d(
'value',
type='scatter_1d',
orientation='v',
marker='plus',
marker_alignment='left'
)
plot.plot(("index", "value"),
type="scatter",
marker="square",
index_sort="ascending",
color="orange",
marker_size=3, #randint(1,5, numpts),
bgcolor="white",
use_backbuffer=True)
# Tweak some of the plot properties
plot.title = "1D Scatter Plots"
plot.line_width = 0.5
plot.padding = 50
# Attach some tools to the plot
plot.tools.append(PanTool(plot, constrain_key="shift"))
zoom = ZoomTool(component=plot, tool_mode="box", always_on=False)
plot.overlays.append(zoom)
return plot
#===============================================================================
# Attributes to use for the plot view.
size = (650, 650)
title = "1D scatter plots"
bg_color="lightgray"
#===============================================================================
# # Demo class that is used by the demo.py application.
#===============================================================================
class Demo(HasTraits):
plot = Instance(Component)
traits_view = View(
Group(
Item('plot', editor=ComponentEditor(size=size,
bgcolor=bg_color),
show_label=False),
orientation = "vertical"),
resizable=True, title=title
)
def _plot_default(self):
return _create_plot_component()
demo = Demo()
if __name__ == "__main__":
demo.configure_traits()
#--EOF---
|
|
4b6a9d9a7aa45271d2f5e001f06a2fde115bf677
|
migrations/versions/2d257d93329_remove_booksales_tables.py
|
migrations/versions/2d257d93329_remove_booksales_tables.py
|
"""Remove booksales tables
Revision ID: 2d257d93329
Revises: 3ee07cd6b53
Create Date: 2016-04-09 13:04:55.562530
"""
# revision identifiers, used by Alembic.
revision = '2d257d93329'
down_revision = '3ee07cd6b53'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('booksales_sale_group')
op.drop_table('booksales_book')
op.drop_table('booksales_sale')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('booksales_sale',
sa.Column('id', mysql.INTEGER(display_width=11), nullable=False),
sa.Column('created', mysql.DATETIME(), nullable=True),
sa.Column('modified', mysql.DATETIME(), nullable=True),
sa.Column('student_number', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.Column('price', mysql.FLOAT(), nullable=True),
sa.Column('payment', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.PrimaryKeyConstraint('id'),
mysql_default_charset='latin1',
mysql_engine='InnoDB'
)
op.create_table('booksales_book',
sa.Column('id', mysql.INTEGER(display_width=11), nullable=False),
sa.Column('created', mysql.DATETIME(), nullable=True),
sa.Column('modified', mysql.DATETIME(), nullable=True),
sa.Column('title', mysql.TEXT(), nullable=True),
sa.Column('price', mysql.FLOAT(), nullable=True),
sa.Column('isbn', mysql.TEXT(), nullable=True),
sa.Column('stock', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.PrimaryKeyConstraint('id'),
mysql_default_charset='latin1',
mysql_engine='InnoDB'
)
op.create_table('booksales_sale_group',
sa.Column('book_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.Column('sale_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['book_id'], ['booksales_book.id'], name='booksales_sale_group_ibfk_1'),
sa.ForeignKeyConstraint(['sale_id'], ['booksales_sale.id'], name='booksales_sale_group_ibfk_2'),
mysql_default_charset='latin1',
mysql_engine='InnoDB'
)
### end Alembic commands ###
|
Add a migration to remove booksales tables
|
Add a migration to remove booksales tables
|
Python
|
mit
|
viaict/viaduct,viaict/viaduct,viaict/viaduct,viaict/viaduct,viaict/viaduct
|
Add a migration to remove booksales tables
|
"""Remove booksales tables
Revision ID: 2d257d93329
Revises: 3ee07cd6b53
Create Date: 2016-04-09 13:04:55.562530
"""
# revision identifiers, used by Alembic.
revision = '2d257d93329'
down_revision = '3ee07cd6b53'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('booksales_sale_group')
op.drop_table('booksales_book')
op.drop_table('booksales_sale')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('booksales_sale',
sa.Column('id', mysql.INTEGER(display_width=11), nullable=False),
sa.Column('created', mysql.DATETIME(), nullable=True),
sa.Column('modified', mysql.DATETIME(), nullable=True),
sa.Column('student_number', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.Column('price', mysql.FLOAT(), nullable=True),
sa.Column('payment', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.PrimaryKeyConstraint('id'),
mysql_default_charset='latin1',
mysql_engine='InnoDB'
)
op.create_table('booksales_book',
sa.Column('id', mysql.INTEGER(display_width=11), nullable=False),
sa.Column('created', mysql.DATETIME(), nullable=True),
sa.Column('modified', mysql.DATETIME(), nullable=True),
sa.Column('title', mysql.TEXT(), nullable=True),
sa.Column('price', mysql.FLOAT(), nullable=True),
sa.Column('isbn', mysql.TEXT(), nullable=True),
sa.Column('stock', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.PrimaryKeyConstraint('id'),
mysql_default_charset='latin1',
mysql_engine='InnoDB'
)
op.create_table('booksales_sale_group',
sa.Column('book_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.Column('sale_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['book_id'], ['booksales_book.id'], name='booksales_sale_group_ibfk_1'),
sa.ForeignKeyConstraint(['sale_id'], ['booksales_sale.id'], name='booksales_sale_group_ibfk_2'),
mysql_default_charset='latin1',
mysql_engine='InnoDB'
)
### end Alembic commands ###
|
<commit_before><commit_msg>Add a migration to remove booksales tables<commit_after>
|
"""Remove booksales tables
Revision ID: 2d257d93329
Revises: 3ee07cd6b53
Create Date: 2016-04-09 13:04:55.562530
"""
# revision identifiers, used by Alembic.
revision = '2d257d93329'
down_revision = '3ee07cd6b53'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('booksales_sale_group')
op.drop_table('booksales_book')
op.drop_table('booksales_sale')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('booksales_sale',
sa.Column('id', mysql.INTEGER(display_width=11), nullable=False),
sa.Column('created', mysql.DATETIME(), nullable=True),
sa.Column('modified', mysql.DATETIME(), nullable=True),
sa.Column('student_number', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.Column('price', mysql.FLOAT(), nullable=True),
sa.Column('payment', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.PrimaryKeyConstraint('id'),
mysql_default_charset='latin1',
mysql_engine='InnoDB'
)
op.create_table('booksales_book',
sa.Column('id', mysql.INTEGER(display_width=11), nullable=False),
sa.Column('created', mysql.DATETIME(), nullable=True),
sa.Column('modified', mysql.DATETIME(), nullable=True),
sa.Column('title', mysql.TEXT(), nullable=True),
sa.Column('price', mysql.FLOAT(), nullable=True),
sa.Column('isbn', mysql.TEXT(), nullable=True),
sa.Column('stock', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.PrimaryKeyConstraint('id'),
mysql_default_charset='latin1',
mysql_engine='InnoDB'
)
op.create_table('booksales_sale_group',
sa.Column('book_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.Column('sale_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['book_id'], ['booksales_book.id'], name='booksales_sale_group_ibfk_1'),
sa.ForeignKeyConstraint(['sale_id'], ['booksales_sale.id'], name='booksales_sale_group_ibfk_2'),
mysql_default_charset='latin1',
mysql_engine='InnoDB'
)
### end Alembic commands ###
|
Add a migration to remove booksales tables"""Remove booksales tables
Revision ID: 2d257d93329
Revises: 3ee07cd6b53
Create Date: 2016-04-09 13:04:55.562530
"""
# revision identifiers, used by Alembic.
revision = '2d257d93329'
down_revision = '3ee07cd6b53'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('booksales_sale_group')
op.drop_table('booksales_book')
op.drop_table('booksales_sale')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('booksales_sale',
sa.Column('id', mysql.INTEGER(display_width=11), nullable=False),
sa.Column('created', mysql.DATETIME(), nullable=True),
sa.Column('modified', mysql.DATETIME(), nullable=True),
sa.Column('student_number', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.Column('price', mysql.FLOAT(), nullable=True),
sa.Column('payment', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.PrimaryKeyConstraint('id'),
mysql_default_charset='latin1',
mysql_engine='InnoDB'
)
op.create_table('booksales_book',
sa.Column('id', mysql.INTEGER(display_width=11), nullable=False),
sa.Column('created', mysql.DATETIME(), nullable=True),
sa.Column('modified', mysql.DATETIME(), nullable=True),
sa.Column('title', mysql.TEXT(), nullable=True),
sa.Column('price', mysql.FLOAT(), nullable=True),
sa.Column('isbn', mysql.TEXT(), nullable=True),
sa.Column('stock', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.PrimaryKeyConstraint('id'),
mysql_default_charset='latin1',
mysql_engine='InnoDB'
)
op.create_table('booksales_sale_group',
sa.Column('book_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.Column('sale_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['book_id'], ['booksales_book.id'], name='booksales_sale_group_ibfk_1'),
sa.ForeignKeyConstraint(['sale_id'], ['booksales_sale.id'], name='booksales_sale_group_ibfk_2'),
mysql_default_charset='latin1',
mysql_engine='InnoDB'
)
### end Alembic commands ###
|
<commit_before><commit_msg>Add a migration to remove booksales tables<commit_after>"""Remove booksales tables
Revision ID: 2d257d93329
Revises: 3ee07cd6b53
Create Date: 2016-04-09 13:04:55.562530
"""
# revision identifiers, used by Alembic.
revision = '2d257d93329'
down_revision = '3ee07cd6b53'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('booksales_sale_group')
op.drop_table('booksales_book')
op.drop_table('booksales_sale')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('booksales_sale',
sa.Column('id', mysql.INTEGER(display_width=11), nullable=False),
sa.Column('created', mysql.DATETIME(), nullable=True),
sa.Column('modified', mysql.DATETIME(), nullable=True),
sa.Column('student_number', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.Column('price', mysql.FLOAT(), nullable=True),
sa.Column('payment', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.PrimaryKeyConstraint('id'),
mysql_default_charset='latin1',
mysql_engine='InnoDB'
)
op.create_table('booksales_book',
sa.Column('id', mysql.INTEGER(display_width=11), nullable=False),
sa.Column('created', mysql.DATETIME(), nullable=True),
sa.Column('modified', mysql.DATETIME(), nullable=True),
sa.Column('title', mysql.TEXT(), nullable=True),
sa.Column('price', mysql.FLOAT(), nullable=True),
sa.Column('isbn', mysql.TEXT(), nullable=True),
sa.Column('stock', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.PrimaryKeyConstraint('id'),
mysql_default_charset='latin1',
mysql_engine='InnoDB'
)
op.create_table('booksales_sale_group',
sa.Column('book_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.Column('sale_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['book_id'], ['booksales_book.id'], name='booksales_sale_group_ibfk_1'),
sa.ForeignKeyConstraint(['sale_id'], ['booksales_sale.id'], name='booksales_sale_group_ibfk_2'),
mysql_default_charset='latin1',
mysql_engine='InnoDB'
)
### end Alembic commands ###
|
|
c0f410237bf97585de1e42a37e54e30775c40d33
|
installscanners/installscanners.py
|
installscanners/installscanners.py
|
import subprocess
SCANNER_INSTALLATION_INSTRUCTIONS = {
"safesql":"github.com/stripe/safesql",
"gas":"github.com/GoASTScanner/gas"
}
GO_CMD = "go"
class InstallGOScanners:
'''Install the scanners as needed
'''
def install_scanner(self, scannerName):
try:
scanner_installation_message = subprocess.check_output([GO_CMD,
"get", SCANNER_INSTALLATION_INSTRUCTIONS[scannerName]])
print(scanner_installation_message.decode("utf-8"))
print("\nINFO: {0} installed successfully!".format(scannerName))
return 1
except subprocess.CalledProcessError as err:
print("\n\nERROR: {0} installation failed with error {1}".format(scannerName, str(err)))
return 0
|
Install scanners that are not installed based on user selection
|
Install scanners that are not installed based on user selection
|
Python
|
mit
|
gaurabb/pygosec
|
Install scanners that are not installed based on user selection
|
import subprocess
SCANNER_INSTALLATION_INSTRUCTIONS = {
"safesql":"github.com/stripe/safesql",
"gas":"github.com/GoASTScanner/gas"
}
GO_CMD = "go"
class InstallGOScanners:
'''Install the scanners as needed
'''
def install_scanner(self, scannerName):
try:
scanner_installation_message = subprocess.check_output([GO_CMD,
"get", SCANNER_INSTALLATION_INSTRUCTIONS[scannerName]])
print(scanner_installation_message.decode("utf-8"))
print("\nINFO: {0} installed successfully!".format(scannerName))
return 1
except subprocess.CalledProcessError as err:
print("\n\nERROR: {0} installation failed with error {1}".format(scannerName, str(err)))
return 0
|
<commit_before><commit_msg>Install scanners that are not installed based on user selection<commit_after>
|
import subprocess
SCANNER_INSTALLATION_INSTRUCTIONS = {
"safesql":"github.com/stripe/safesql",
"gas":"github.com/GoASTScanner/gas"
}
GO_CMD = "go"
class InstallGOScanners:
'''Install the scanners as needed
'''
def install_scanner(self, scannerName):
try:
scanner_installation_message = subprocess.check_output([GO_CMD,
"get", SCANNER_INSTALLATION_INSTRUCTIONS[scannerName]])
print(scanner_installation_message.decode("utf-8"))
print("\nINFO: {0} installed successfully!".format(scannerName))
return 1
except subprocess.CalledProcessError as err:
print("\n\nERROR: {0} installation failed with error {1}".format(scannerName, str(err)))
return 0
|
Install scanners that are not installed based on user selection
import subprocess
SCANNER_INSTALLATION_INSTRUCTIONS = {
"safesql":"github.com/stripe/safesql",
"gas":"github.com/GoASTScanner/gas"
}
GO_CMD = "go"
class InstallGOScanners:
'''Install the scanners as needed
'''
def install_scanner(self, scannerName):
try:
scanner_installation_message = subprocess.check_output([GO_CMD,
"get", SCANNER_INSTALLATION_INSTRUCTIONS[scannerName]])
print(scanner_installation_message.decode("utf-8"))
print("\nINFO: {0} installed successfully!".format(scannerName))
return 1
except subprocess.CalledProcessError as err:
print("\n\nERROR: {0} installation failed with error {1}".format(scannerName, str(err)))
return 0
|
<commit_before><commit_msg>Install scanners that are not installed based on user selection<commit_after>
import subprocess
SCANNER_INSTALLATION_INSTRUCTIONS = {
"safesql":"github.com/stripe/safesql",
"gas":"github.com/GoASTScanner/gas"
}
GO_CMD = "go"
class InstallGOScanners:
'''Install the scanners as needed
'''
def install_scanner(self, scannerName):
try:
scanner_installation_message = subprocess.check_output([GO_CMD,
"get", SCANNER_INSTALLATION_INSTRUCTIONS[scannerName]])
print(scanner_installation_message.decode("utf-8"))
print("\nINFO: {0} installed successfully!".format(scannerName))
return 1
except subprocess.CalledProcessError as err:
print("\n\nERROR: {0} installation failed with error {1}".format(scannerName, str(err)))
return 0
|
|
002c5c2523a1f5da665a58c5b574d743c2b25218
|
python/testData/inspections/PyStringFormatInspection/IndexElementWithPackedDictLiteralInsideDictLiteralArg.py
|
python/testData/inspections/PyStringFormatInspection/IndexElementWithPackedDictLiteralInsideDictLiteralArg.py
|
# "{d[1]}".format(d={2: a, **{1: b}})
<warning descr="Too few arguments for format string">"{d[3]}"</warning>.format(d={2: a, **{1: b}})
|
"{d[1]}".format(d={2: a, **{1: b}})
<warning descr="Too few arguments for format string">"{d[3]}"</warning>.format(d={2: a, **{1: b}})
|
Fix test: uncomment test commented by mistake
|
Fix test: uncomment test commented by mistake
|
Python
|
apache-2.0
|
vvv1559/intellij-community,allotria/intellij-community,semonte/intellij-community,da1z/intellij-community,suncycheng/intellij-community,asedunov/intellij-community,suncycheng/intellij-community,ThiagoGarciaAlves/intellij-community,ThiagoGarciaAlves/intellij-community,apixandru/intellij-community,FHannes/intellij-community,suncycheng/intellij-community,mglukhikh/intellij-community,allotria/intellij-community,asedunov/intellij-community,xfournet/intellij-community,ThiagoGarciaAlves/intellij-community,xfournet/intellij-community,FHannes/intellij-community,da1z/intellij-community,allotria/intellij-community,FHannes/intellij-community,semonte/intellij-community,ThiagoGarciaAlves/intellij-community,semonte/intellij-community,xfournet/intellij-community,semonte/intellij-community,xfournet/intellij-community,mglukhikh/intellij-community,apixandru/intellij-community,ibinti/intellij-community,xfournet/intellij-community,mglukhikh/intellij-community,allotria/intellij-community,apixandru/intellij-community,vvv1559/intellij-community,ThiagoGarciaAlves/intellij-community,mglukhikh/intellij-community,vvv1559/intellij-community,da1z/intellij-community,da1z/intellij-community,vvv1559/intellij-community,ThiagoGarciaAlves/intellij-community,mglukhikh/intellij-community,ibinti/intellij-community,apixandru/intellij-community,semonte/intellij-community,semonte/intellij-community,asedunov/intellij-community,suncycheng/intellij-community,xfournet/intellij-community,vvv1559/intellij-community,vvv1559/intellij-community,xfournet/intellij-community,mglukhikh/intellij-community,xfournet/intellij-community,da1z/intellij-community,FHannes/intellij-community,apixandru/intellij-community,semonte/intellij-community,semonte/intellij-community,FHannes/intellij-community,ibinti/intellij-community,apixandru/intellij-community,ibinti/intellij-community,da1z/intellij-community,suncycheng/intellij-community,asedunov/intellij-community,allotria/intellij-community,da1z/intellij-community,FHannes/intellij-community,semonte/intellij-community,FHannes/intellij-community,ThiagoGarciaAlves/intellij-community,suncycheng/intellij-community,xfournet/intellij-community,suncycheng/intellij-community,asedunov/intellij-community,ThiagoGarciaAlves/intellij-community,xfournet/intellij-community,semonte/intellij-community,da1z/intellij-community,ibinti/intellij-community,mglukhikh/intellij-community,semonte/intellij-community,da1z/intellij-community,apixandru/intellij-community,apixandru/intellij-community,allotria/intellij-community,apixandru/intellij-community,da1z/intellij-community,suncycheng/intellij-community,apixandru/intellij-community,allotria/intellij-community,mglukhikh/intellij-community,ThiagoGarciaAlves/intellij-community,da1z/intellij-community,mglukhikh/intellij-community,vvv1559/intellij-community,suncycheng/intellij-community,asedunov/intellij-community,FHannes/intellij-community,ibinti/intellij-community,ThiagoGarciaAlves/intellij-community,ThiagoGarciaAlves/intellij-community,allotria/intellij-community,suncycheng/intellij-community,FHannes/intellij-community,apixandru/intellij-community,allotria/intellij-community,vvv1559/intellij-community,da1z/intellij-community,ibinti/intellij-community,suncycheng/intellij-community,apixandru/intellij-community,vvv1559/intellij-community,da1z/intellij-community,FHannes/intellij-community,apixandru/intellij-community,allotria/intellij-community,xfournet/intellij-community,ibinti/intellij-community,suncycheng/intellij-community,asedunov/intellij-community,semonte/intellij-community,FHannes/i
ntellij-community,ibinti/intellij-community,xfournet/intellij-community,asedunov/intellij-community,xfournet/intellij-community,FHannes/intellij-community,asedunov/intellij-community,semonte/intellij-community,FHannes/intellij-community,mglukhikh/intellij-community,vvv1559/intellij-community,asedunov/intellij-community,vvv1559/intellij-community,mglukhikh/intellij-community,allotria/intellij-community,mglukhikh/intellij-community,vvv1559/intellij-community,ibinti/intellij-community,ibinti/intellij-community,allotria/intellij-community,ibinti/intellij-community,asedunov/intellij-community,ibinti/intellij-community,allotria/intellij-community,ThiagoGarciaAlves/intellij-community,mglukhikh/intellij-community,asedunov/intellij-community,apixandru/intellij-community,vvv1559/intellij-community,asedunov/intellij-community
|
# "{d[1]}".format(d={2: a, **{1: b}})
<warning descr="Too few arguments for format string">"{d[3]}"</warning>.format(d={2: a, **{1: b}})Fix test: uncomment test commented by mistake
|
"{d[1]}".format(d={2: a, **{1: b}})
<warning descr="Too few arguments for format string">"{d[3]}"</warning>.format(d={2: a, **{1: b}})
|
<commit_before># "{d[1]}".format(d={2: a, **{1: b}})
<warning descr="Too few arguments for format string">"{d[3]}"</warning>.format(d={2: a, **{1: b}})<commit_msg>Fix test: uncomment test commented by mistake<commit_after>
|
"{d[1]}".format(d={2: a, **{1: b}})
<warning descr="Too few arguments for format string">"{d[3]}"</warning>.format(d={2: a, **{1: b}})
|
# "{d[1]}".format(d={2: a, **{1: b}})
<warning descr="Too few arguments for format string">"{d[3]}"</warning>.format(d={2: a, **{1: b}})Fix test: uncomment test commented by mistake"{d[1]}".format(d={2: a, **{1: b}})
<warning descr="Too few arguments for format string">"{d[3]}"</warning>.format(d={2: a, **{1: b}})
|
<commit_before># "{d[1]}".format(d={2: a, **{1: b}})
<warning descr="Too few arguments for format string">"{d[3]}"</warning>.format(d={2: a, **{1: b}})<commit_msg>Fix test: uncomment test commented by mistake<commit_after>"{d[1]}".format(d={2: a, **{1: b}})
<warning descr="Too few arguments for format string">"{d[3]}"</warning>.format(d={2: a, **{1: b}})
|
d07d19a2d88762d9483dad07a432329759e51e67
|
stack/database.py
|
stack/database.py
|
from troposphere import (
rds,
Ref,
AWS_STACK_NAME,
)
from .template import template
from .vpc import (
container_a_subnet,
container_b_subnet,
)
db_subnet_group = rds.DBSubnetGroup(
"DatabaseSubnetGroup",
template=template,
DBSubnetGroupDescription="Subnets available for the RDS DB Instance",
SubnetIds=[Ref(container_a_subnet), Ref(container_b_subnet)],
)
db_instance = rds.DBInstance(
"PostgreSQL",
template=template,
DBInstanceIdentifier=Ref(AWS_STACK_NAME),
Engine="postgres",
EngineVersion="9.4.5",
MultiAZ=True,
StorageType="gp2",
DBSubnetGroupName=Ref(db_subnet_group),
BackupRetentionPeriod="7",
)
|
Add a multi AZ PostgreSQL `RDS` instance bound to container subnets
|
Add a multi AZ PostgreSQL `RDS` instance bound to container subnets
|
Python
|
mit
|
caktus/aws-web-stacks,tobiasmcnulty/aws-container-basics
|
Add a multi AZ PostgreSQL `RDS` instance bound to container subnets
|
from troposphere import (
rds,
Ref,
AWS_STACK_NAME,
)
from .template import template
from .vpc import (
container_a_subnet,
container_b_subnet,
)
db_subnet_group = rds.DBSubnetGroup(
"DatabaseSubnetGroup",
template=template,
DBSubnetGroupDescription="Subnets available for the RDS DB Instance",
SubnetIds=[Ref(container_a_subnet), Ref(container_b_subnet)],
)
db_instance = rds.DBInstance(
"PostgreSQL",
template=template,
DBInstanceIdentifier=Ref(AWS_STACK_NAME),
Engine="postgres",
EngineVersion="9.4.5",
MultiAZ=True,
StorageType="gp2",
DBSubnetGroupName=Ref(db_subnet_group),
BackupRetentionPeriod="7",
)
|
<commit_before><commit_msg>Add a multi AZ PostgreSQL `RDS` instance bound to container subnets<commit_after>
|
from troposphere import (
rds,
Ref,
AWS_STACK_NAME,
)
from .template import template
from .vpc import (
container_a_subnet,
container_b_subnet,
)
db_subnet_group = rds.DBSubnetGroup(
"DatabaseSubnetGroup",
template=template,
DBSubnetGroupDescription="Subnets available for the RDS DB Instance",
SubnetIds=[Ref(container_a_subnet), Ref(container_b_subnet)],
)
db_instance = rds.DBInstance(
"PostgreSQL",
template=template,
DBInstanceIdentifier=Ref(AWS_STACK_NAME),
Engine="postgres",
EngineVersion="9.4.5",
MultiAZ=True,
StorageType="gp2",
DBSubnetGroupName=Ref(db_subnet_group),
BackupRetentionPeriod="7",
)
|
Add a multi AZ PostgreSQL `RDS` instance bound to container subnetsfrom troposphere import (
rds,
Ref,
AWS_STACK_NAME,
)
from .template import template
from .vpc import (
container_a_subnet,
container_b_subnet,
)
db_subnet_group = rds.DBSubnetGroup(
"DatabaseSubnetGroup",
template=template,
DBSubnetGroupDescription="Subnets available for the RDS DB Instance",
SubnetIds=[Ref(container_a_subnet), Ref(container_b_subnet)],
)
db_instance = rds.DBInstance(
"PostgreSQL",
template=template,
DBInstanceIdentifier=Ref(AWS_STACK_NAME),
Engine="postgres",
EngineVersion="9.4.5",
MultiAZ=True,
StorageType="gp2",
DBSubnetGroupName=Ref(db_subnet_group),
BackupRetentionPeriod="7",
)
|
<commit_before><commit_msg>Add a multi AZ PostgreSQL `RDS` instance bound to container subnets<commit_after>from troposphere import (
rds,
Ref,
AWS_STACK_NAME,
)
from .template import template
from .vpc import (
container_a_subnet,
container_b_subnet,
)
db_subnet_group = rds.DBSubnetGroup(
"DatabaseSubnetGroup",
template=template,
DBSubnetGroupDescription="Subnets available for the RDS DB Instance",
SubnetIds=[Ref(container_a_subnet), Ref(container_b_subnet)],
)
db_instance = rds.DBInstance(
"PostgreSQL",
template=template,
DBInstanceIdentifier=Ref(AWS_STACK_NAME),
Engine="postgres",
EngineVersion="9.4.5",
MultiAZ=True,
StorageType="gp2",
DBSubnetGroupName=Ref(db_subnet_group),
BackupRetentionPeriod="7",
)
|
|
19cdcd64c2172e4ef5877e2f331b03671091c1e2
|
tests/test_connect.py
|
tests/test_connect.py
|
import pytest
from pypuppetdb.errors import UnsupportedVersionError
from pypuppetdb import connect
pytestmark = pytest.mark.unit
def test_connect_unknown_api_version():
with pytest.raises(UnsupportedVersionError):
connect(api_version=1)
|
Add a basic test for connect().
|
tests: Add a basic test for connect().
|
Python
|
apache-2.0
|
voxpupuli/pypuppetdb,puppet-community/pypuppetdb,vicinus/pypuppetdb,dforste/pypuppetdb,amwilson/pypuppetdb,jcastillocano/pypuppetdb,jcastillocano/pypuppetdb,jorik041/pypuppetdb
|
tests: Add a basic test for connect().
|
import pytest
from pypuppetdb.errors import UnsupportedVersionError
from pypuppetdb import connect
pytestmark = pytest.mark.unit
def test_connect_unknown_api_version():
with pytest.raises(UnsupportedVersionError):
connect(api_version=1)
|
<commit_before><commit_msg>tests: Add a basic test for connect().<commit_after>
|
import pytest
from pypuppetdb.errors import UnsupportedVersionError
from pypuppetdb import connect
pytestmark = pytest.mark.unit
def test_connect_unknown_api_version():
with pytest.raises(UnsupportedVersionError):
connect(api_version=1)
|
tests: Add a basic test for connect().import pytest
from pypuppetdb.errors import UnsupportedVersionError
from pypuppetdb import connect
pytestmark = pytest.mark.unit
def test_connect_unknown_api_version():
with pytest.raises(UnsupportedVersionError):
connect(api_version=1)
|
<commit_before><commit_msg>tests: Add a basic test for connect().<commit_after>import pytest
from pypuppetdb.errors import UnsupportedVersionError
from pypuppetdb import connect
pytestmark = pytest.mark.unit
def test_connect_unknown_api_version():
with pytest.raises(UnsupportedVersionError):
connect(api_version=1)
|
|
3cefb683ee51fb9821a7a47013ca18db5429b4a9
|
tests/test_jupyter.py
|
tests/test_jupyter.py
|
import mock
import pytest
try:
import ipywidgets
except ImportError:
ipywidgets = None
@pytest.mark.skipif(ipywidgets is None, reason='ipywidgets is not installed')
class TestExperimentWidget(object):
@pytest.fixture
def exp(self):
from dallinger.experiment import Experiment
return Experiment()
def test_experiment_initializes_widget(self, exp):
assert exp.widget is not None
def test_experiment_updates_widget_status(self, exp):
exp.update_status(u'Testing')
assert exp.widget.status == u'Testing'
assert 'Testing' in exp.widget.children[0].value
def test_experiment_displays_widget(self, exp):
with mock.patch('IPython.display.display') as display:
exp._ipython_display_()
assert display.called_once_with(exp.widget)
def test_widget_children_no_config(self, exp):
assert exp.widget.children[1].children[0].value == 'Not loaded.'
def test_widget_children_with_config(self, active_config, exp):
assert exp.widget.children[1].children[0].value != 'Not loaded.'
|
Add tests for jupyter widget
|
Add tests for jupyter widget
|
Python
|
mit
|
Dallinger/Dallinger,jcpeterson/Dallinger,jcpeterson/Dallinger,jcpeterson/Dallinger,jcpeterson/Dallinger,Dallinger/Dallinger,Dallinger/Dallinger,Dallinger/Dallinger,jcpeterson/Dallinger,Dallinger/Dallinger
|
Add tests for jupyter widget
|
import mock
import pytest
try:
import ipywidgets
except ImportError:
ipywidgets = None
@pytest.mark.skipif(ipywidgets is None, reason='ipywidgets is not installed')
class TestExperimentWidget(object):
@pytest.fixture
def exp(self):
from dallinger.experiment import Experiment
return Experiment()
def test_experiment_initializes_widget(self, exp):
assert exp.widget is not None
def test_experiment_updates_widget_status(self, exp):
exp.update_status(u'Testing')
assert exp.widget.status == u'Testing'
assert 'Testing' in exp.widget.children[0].value
def test_experiment_displays_widget(self, exp):
with mock.patch('IPython.display.display') as display:
exp._ipython_display_()
assert display.called_once_with(exp.widget)
def test_widget_children_no_config(self, exp):
assert exp.widget.children[1].children[0].value == 'Not loaded.'
def test_widget_children_with_config(self, active_config, exp):
assert exp.widget.children[1].children[0].value != 'Not loaded.'
|
<commit_before><commit_msg>Add tests for jupyter widget<commit_after>
|
import mock
import pytest
try:
import ipywidgets
except ImportError:
ipywidgets = None
@pytest.mark.skipif(ipywidgets is None, reason='ipywidgets is not installed')
class TestExperimentWidget(object):
@pytest.fixture
def exp(self):
from dallinger.experiment import Experiment
return Experiment()
def test_experiment_initializes_widget(self, exp):
assert exp.widget is not None
def test_experiment_updates_widget_status(self, exp):
exp.update_status(u'Testing')
assert exp.widget.status == u'Testing'
assert 'Testing' in exp.widget.children[0].value
def test_experiment_displays_widget(self, exp):
with mock.patch('IPython.display.display') as display:
exp._ipython_display_()
assert display.called_once_with(exp.widget)
def test_widget_children_no_config(self, exp):
assert exp.widget.children[1].children[0].value == 'Not loaded.'
def test_widget_children_with_config(self, active_config, exp):
assert exp.widget.children[1].children[0].value != 'Not loaded.'
|
Add tests for jupyter widgetimport mock
import pytest
try:
import ipywidgets
except ImportError:
ipywidgets = None
@pytest.mark.skipif(ipywidgets is None, reason='ipywidgets is not installed')
class TestExperimentWidget(object):
@pytest.fixture
def exp(self):
from dallinger.experiment import Experiment
return Experiment()
def test_experiment_initializes_widget(self, exp):
assert exp.widget is not None
def test_experiment_updates_widget_status(self, exp):
exp.update_status(u'Testing')
assert exp.widget.status == u'Testing'
assert 'Testing' in exp.widget.children[0].value
def test_experiment_displays_widget(self, exp):
with mock.patch('IPython.display.display') as display:
exp._ipython_display_()
assert display.called_once_with(exp.widget)
def test_widget_children_no_config(self, exp):
assert exp.widget.children[1].children[0].value == 'Not loaded.'
def test_widget_children_with_config(self, active_config, exp):
assert exp.widget.children[1].children[0].value != 'Not loaded.'
|
<commit_before><commit_msg>Add tests for jupyter widget<commit_after>import mock
import pytest
try:
import ipywidgets
except ImportError:
ipywidgets = None
@pytest.mark.skipif(ipywidgets is None, reason='ipywidgets is not installed')
class TestExperimentWidget(object):
@pytest.fixture
def exp(self):
from dallinger.experiment import Experiment
return Experiment()
def test_experiment_initializes_widget(self, exp):
assert exp.widget is not None
def test_experiment_updates_widget_status(self, exp):
exp.update_status(u'Testing')
assert exp.widget.status == u'Testing'
assert 'Testing' in exp.widget.children[0].value
def test_experiment_displays_widget(self, exp):
with mock.patch('IPython.display.display') as display:
exp._ipython_display_()
assert display.called_once_with(exp.widget)
def test_widget_children_no_config(self, exp):
assert exp.widget.children[1].children[0].value == 'Not loaded.'
def test_widget_children_with_config(self, active_config, exp):
assert exp.widget.children[1].children[0].value != 'Not loaded.'
|
|
e6ddd127f906f383fedcdecd9b0eaefb55aba54c
|
tests/conftest.py
|
tests/conftest.py
|
from pytest import Item
def pytest_assertion_pass(item: Item,
lineno: int,
orig: str,
expl: str) -> None:
'''
Log every assertion, in case we later need to audit a run.
Excessive in most cases.
Use with pytest -rP.
'''
# Not bothering with make-style output that you can feed into a Vim
# quickfix list and iterate over.
print('given', item.name + ':' + str(lineno), str(orig)) # no repr()!)
print('actual', item.name + ':' + str(lineno),
# Get rid of full-diff, -vv for full diff, etc.
# TODO: Make work when multiline output.
'\n'.join(str(expl).splitlines()[:-2]))
|
Add optional actual asserted values logging
|
Add optional actual asserted values logging
|
Python
|
isc
|
pilona/RPN,pilona/RPN
|
Add optional actual asserted values logging
|
from pytest import Item
def pytest_assertion_pass(item: Item,
lineno: int,
orig: str,
expl: str) -> None:
'''
Log every assertion, in case we later need to audit a run.
Excessive in most cases.
Use with pytest -rP.
'''
# Not bothering with make-style output that you can feed into a Vim
# quickfix list and iterate over.
print('given', item.name + ':' + str(lineno), str(orig)) # no repr()!)
print('actual', item.name + ':' + str(lineno),
# Get rid of full-diff, -vv for full diff, etc.
# TODO: Make work when multiline output.
'\n'.join(str(expl).splitlines()[:-2]))
|
<commit_before><commit_msg>Add optional actual asserted values logging<commit_after>
|
from pytest import Item
def pytest_assertion_pass(item: Item,
lineno: int,
orig: str,
expl: str) -> None:
'''
Log every assertion, in case we later need to audit a run.
Excessive in most cases.
Use with pytest -rP.
'''
# Not bothering with make-style output that you can feed into a Vim
# quickfix list and iterate over.
print('given', item.name + ':' + str(lineno), str(orig)) # no repr()!)
print('actual', item.name + ':' + str(lineno),
# Get rid of full-diff, -vv for full diff, etc.
# TODO: Make work when multiline output.
'\n'.join(str(expl).splitlines()[:-2]))
|
Add optional actual asserted values loggingfrom pytest import Item
def pytest_assertion_pass(item: Item,
lineno: int,
orig: str,
expl: str) -> None:
'''
Log every assertion, in case we later need to audit a run.
Excessive in most cases.
Use with pytest -rP.
'''
# Not bothering with make-style output that you can feed into a Vim
# quickfix list and iterate over.
print('given', item.name + ':' + str(lineno), str(orig)) # no repr()!)
print('actual', item.name + ':' + str(lineno),
# Get rid of full-diff, -vv for full diff, etc.
# TODO: Make work when multiline output.
'\n'.join(str(expl).splitlines()[:-2]))
|
<commit_before><commit_msg>Add optional actual asserted values logging<commit_after>from pytest import Item
def pytest_assertion_pass(item: Item,
lineno: int,
orig: str,
expl: str) -> None:
'''
Log every assertion, in case we later need to audit a run.
Excessive in most cases.
Use with pytest -rP.
'''
# Not bothering with make-style output that you can feed into a Vim
# quickfix list and iterate over.
print('given', item.name + ':' + str(lineno), str(orig)) # no repr()!)
print('actual', item.name + ':' + str(lineno),
# Get rid of full-diff, -vv for full diff, etc.
# TODO: Make work when multiline output.
'\n'.join(str(expl).splitlines()[:-2]))
|
|
8458efcd6acec59971271d3b0391732022c66412
|
metpy/plots/tests/test_skewt.py
|
metpy/plots/tests/test_skewt.py
|
import tempfile
import numpy as np
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
from metpy.plots.skewt import * # noqa
# TODO: Need at some point to do image-based comparison, but that's a lot to
# bite off right now
class TestSkewT(object):
def test_api(self):
'Test the SkewT api'
fig = Figure(figsize=(9, 9))
skew = SkewT(fig)
# Plot the data using normal plotting functions, in this case using
# log scaling in Y, as dictated by the typical meteorological plot
p = np.linspace(1000, 100, 10)
t = np.linspace(20, -20, 10)
u = np.linspace(-10, 10, 10)
skew.plot(p, t, 'r')
skew.plot_barbs(p, u, u)
# Add the relevant special lines
skew.plot_dry_adiabats()
skew.plot_moist_adiabats()
skew.plot_mixing_lines()
with tempfile.NamedTemporaryFile() as f:
FigureCanvasAgg(fig).print_png(f.name)
|
Add API test for SkewT.
|
Add API test for SkewT.
This does no checking for the output being correct, but at least checks
that the code doesn't fire an exception on normal use.
|
Python
|
bsd-3-clause
|
Unidata/MetPy,ahaberlie/MetPy,ahill818/MetPy,dopplershift/MetPy,jrleeman/MetPy,ShawnMurd/MetPy,deeplycloudy/MetPy,dopplershift/MetPy,ahaberlie/MetPy,jrleeman/MetPy,Unidata/MetPy
|
Add API test for SkewT.
This does no checking for the output being correct, but at least checks
that the code doesn't fire an exception on normal use.
|
import tempfile
import numpy as np
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
from metpy.plots.skewt import * # noqa
# TODO: Need at some point to do image-based comparison, but that's a lot to
# bite off right now
class TestSkewT(object):
def test_api(self):
'Test the SkewT api'
fig = Figure(figsize=(9, 9))
skew = SkewT(fig)
# Plot the data using normal plotting functions, in this case using
# log scaling in Y, as dictated by the typical meteorological plot
p = np.linspace(1000, 100, 10)
t = np.linspace(20, -20, 10)
u = np.linspace(-10, 10, 10)
skew.plot(p, t, 'r')
skew.plot_barbs(p, u, u)
# Add the relevant special lines
skew.plot_dry_adiabats()
skew.plot_moist_adiabats()
skew.plot_mixing_lines()
with tempfile.NamedTemporaryFile() as f:
FigureCanvasAgg(fig).print_png(f.name)
|
<commit_before><commit_msg>Add API test for SkewT.
This does no checking for the output being correct, but at least checks
that the code doesn't fire an exception on normal use.<commit_after>
|
import tempfile
import numpy as np
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
from metpy.plots.skewt import * # noqa
# TODO: Need at some point to do image-based comparison, but that's a lot to
# bite off right now
class TestSkewT(object):
def test_api(self):
'Test the SkewT api'
fig = Figure(figsize=(9, 9))
skew = SkewT(fig)
# Plot the data using normal plotting functions, in this case using
# log scaling in Y, as dictated by the typical meteorological plot
p = np.linspace(1000, 100, 10)
t = np.linspace(20, -20, 10)
u = np.linspace(-10, 10, 10)
skew.plot(p, t, 'r')
skew.plot_barbs(p, u, u)
# Add the relevant special lines
skew.plot_dry_adiabats()
skew.plot_moist_adiabats()
skew.plot_mixing_lines()
with tempfile.NamedTemporaryFile() as f:
FigureCanvasAgg(fig).print_png(f.name)
|
Add API test for SkewT.
This does no checking for the output being correct, but at least checks
that the code doesn't fire an exception on normal use.import tempfile
import numpy as np
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
from metpy.plots.skewt import * # noqa
# TODO: Need at some point to do image-based comparison, but that's a lot to
# bite off right now
class TestSkewT(object):
def test_api(self):
'Test the SkewT api'
fig = Figure(figsize=(9, 9))
skew = SkewT(fig)
# Plot the data using normal plotting functions, in this case using
# log scaling in Y, as dictated by the typical meteorological plot
p = np.linspace(1000, 100, 10)
t = np.linspace(20, -20, 10)
u = np.linspace(-10, 10, 10)
skew.plot(p, t, 'r')
skew.plot_barbs(p, u, u)
# Add the relevant special lines
skew.plot_dry_adiabats()
skew.plot_moist_adiabats()
skew.plot_mixing_lines()
with tempfile.NamedTemporaryFile() as f:
FigureCanvasAgg(fig).print_png(f.name)
|
<commit_before><commit_msg>Add API test for SkewT.
This does no checking for the output being correct, but at least checks
that the code doesn't fire an exception on normal use.<commit_after>import tempfile
import numpy as np
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
from metpy.plots.skewt import * # noqa
# TODO: Need at some point to do image-based comparison, but that's a lot to
# bite off right now
class TestSkewT(object):
def test_api(self):
'Test the SkewT api'
fig = Figure(figsize=(9, 9))
skew = SkewT(fig)
# Plot the data using normal plotting functions, in this case using
# log scaling in Y, as dictated by the typical meteorological plot
p = np.linspace(1000, 100, 10)
t = np.linspace(20, -20, 10)
u = np.linspace(-10, 10, 10)
skew.plot(p, t, 'r')
skew.plot_barbs(p, u, u)
# Add the relevant special lines
skew.plot_dry_adiabats()
skew.plot_moist_adiabats()
skew.plot_mixing_lines()
with tempfile.NamedTemporaryFile() as f:
FigureCanvasAgg(fig).print_png(f.name)
|
|
bfcab239099e2e2ad28dde3cab886e0081664868
|
unlock/admin/hello.py
|
unlock/admin/hello.py
|
from flask import Flask
app = Flask(__name__)
@app.route("/")
def hello():
return "Hello World!"
@app.route("/hitmehard")
def ttol():
import pyaudio
import unlock
# yous a do your business here...
return str(dir(unlock))
if __name__ == "__main__":
app.run()
|
Put together a basic flask for Dante. The start of the admin page.
|
Put together a basic flask for Dante. The start of the admin page.
|
Python
|
bsd-3-clause
|
NeuralProsthesisLab/unlock,NeuralProsthesisLab/unlock,NeuralProsthesisLab/unlock,NeuralProsthesisLab/unlock,NeuralProsthesisLab/unlock,NeuralProsthesisLab/unlock,NeuralProsthesisLab/unlock
|
Put together a basic flask for Dante. The start of the admin page.
|
from flask import Flask
app = Flask(__name__)
@app.route("/")
def hello():
return "Hello World!"
@app.route("/hitmehard")
def ttol():
import pyaudio
import unlock
# yous a do your business here...
return str(dir(unlock))
if __name__ == "__main__":
app.run()
|
<commit_before><commit_msg>Put together a basic flask for Dante. The start of the admin page.<commit_after>
|
from flask import Flask
app = Flask(__name__)
@app.route("/")
def hello():
return "Hello World!"
@app.route("/hitmehard")
def ttol():
import pyaudio
import unlock
# yous a do your business here...
return str(dir(unlock))
if __name__ == "__main__":
app.run()
|
Put together a basic flask for Dante. The start of the admin page.from flask import Flask
app = Flask(__name__)
@app.route("/")
def hello():
return "Hello World!"
@app.route("/hitmehard")
def ttol():
import pyaudio
import unlock
# yous a do your business here...
return str(dir(unlock))
if __name__ == "__main__":
app.run()
|
<commit_before><commit_msg>Put together a basic flask for Dante. The start of the admin page.<commit_after>from flask import Flask
app = Flask(__name__)
@app.route("/")
def hello():
return "Hello World!"
@app.route("/hitmehard")
def ttol():
import pyaudio
import unlock
# yous a do your business here...
return str(dir(unlock))
if __name__ == "__main__":
app.run()
|
|
eccf9607fae71c4fe955f1b71b1090882b6bae82
|
sage-equations.py
|
sage-equations.py
|
#!/usr/bin/python
#
# Write equations needed to feed into sage in alpha-numeric form
#
# Cahlen Humphreys (2015)
fo = open("anf.txt", "wb")
ptext = "01100010100101110000101011100011"
ctext = "01101000110010010100101001111001"
key = "00110100110111111001011000011100"
ptextl = list(ptext)
ctextl = list(ctext)
keyl = list(key)
ctextl.reverse()
ptextls = ""
ctextls = ""
keyls = ""
for i in range(0,32):
ptextls+="L" + str(i) + "+" + str(ptext[i]) + ","
ctextls+="L" + str(191-i) + "+" + str(ctext[31-i]) + ","
keyls+="K" + str(i) + "+" + str(keyl[i]) + ","
fo.write("%s%s%s"%(ptextls,ctextls,keyls))
print ctextls;
for i in range(0,160):
eq1 = str("L" + str(32+i)) + str("+" + "K" + str(i % 64)) + str("+L"+str(i)) + str("+L" + str(i+16)) + str("+L" + str(i+9)) + str("+L" + str(i+1)) + str("+L" + str(i+31) + "*" "L" + str(i+20))+str("+B" + str(i)) + str("+L" + str(i+26) + "*L" + str(i+20)) + str("+L" + str(i+26) + "*L" + str(i+1)) + str("+L" + str(i+20) + "*L" + str(i+9)) + str("+L" + str(i+9) + "*L" + str(i+1)) + str("+B" + str(i) + "*L" + str(i+9)) + str("+B" + str(i) + "*L" + str(i+20)) + str("+A" + str(i) + "*L" + str(i+9)) + str("+A" + str(i) + "*L" + str(i+20))
eq2 = str("A" + str(i) + "+L" + str(i+31) + "*L" + str(i+26))
eq3 = str("B" + str(i) + "+L" + str(i+31) + "*L" + str(i+1))
fo.write("%s,%s,%s,"%(eq1,eq2,eq3))
fo.close();
|
Write equations for sage to convert to CNF
|
Write equations for sage to convert to CNF
|
Python
|
mit
|
cahlen/keeloq-python
|
Write equations for sage to convert to CNF
|
#!/usr/bin/python
#
# Write equations needed to feed into sage in alpha-numeric form
#
# Cahlen Humphreys (2015)
fo = open("anf.txt", "wb")
ptext = "01100010100101110000101011100011"
ctext = "01101000110010010100101001111001"
key = "00110100110111111001011000011100"
ptextl = list(ptext)
ctextl = list(ctext)
keyl = list(key)
ctextl.reverse()
ptextls = ""
ctextls = ""
keyls = ""
for i in range(0,32):
ptextls+="L" + str(i) + "+" + str(ptext[i]) + ","
ctextls+="L" + str(191-i) + "+" + str(ctext[31-i]) + ","
keyls+="K" + str(i) + "+" + str(keyl[i]) + ","
fo.write("%s%s%s"%(ptextls,ctextls,keyls))
print ctextls;
for i in range(0,160):
eq1 = str("L" + str(32+i)) + str("+" + "K" + str(i % 64)) + str("+L"+str(i)) + str("+L" + str(i+16)) + str("+L" + str(i+9)) + str("+L" + str(i+1)) + str("+L" + str(i+31) + "*" "L" + str(i+20))+str("+B" + str(i)) + str("+L" + str(i+26) + "*L" + str(i+20)) + str("+L" + str(i+26) + "*L" + str(i+1)) + str("+L" + str(i+20) + "*L" + str(i+9)) + str("+L" + str(i+9) + "*L" + str(i+1)) + str("+B" + str(i) + "*L" + str(i+9)) + str("+B" + str(i) + "*L" + str(i+20)) + str("+A" + str(i) + "*L" + str(i+9)) + str("+A" + str(i) + "*L" + str(i+20))
eq2 = str("A" + str(i) + "+L" + str(i+31) + "*L" + str(i+26))
eq3 = str("B" + str(i) + "+L" + str(i+31) + "*L" + str(i+1))
fo.write("%s,%s,%s,"%(eq1,eq2,eq3))
fo.close();
|
<commit_before><commit_msg>Write equations for sage to convert to CNF<commit_after>
|
#!/usr/bin/python
#
# Write equations needed to feed into sage in alpha-numeric form
#
# Cahlen Humphreys (2015)
fo = open("anf.txt", "wb")
ptext = "01100010100101110000101011100011"
ctext = "01101000110010010100101001111001"
key = "00110100110111111001011000011100"
ptextl = list(ptext)
ctextl = list(ctext)
keyl = list(key)
ctextl.reverse()
ptextls = ""
ctextls = ""
keyls = ""
for i in range(0,32):
ptextls+="L" + str(i) + "+" + str(ptext[i]) + ","
ctextls+="L" + str(191-i) + "+" + str(ctext[31-i]) + ","
keyls+="K" + str(i) + "+" + str(keyl[i]) + ","
fo.write("%s%s%s"%(ptextls,ctextls,keyls))
print ctextls;
for i in range(0,160):
eq1 = str("L" + str(32+i)) + str("+" + "K" + str(i % 64)) + str("+L"+str(i)) + str("+L" + str(i+16)) + str("+L" + str(i+9)) + str("+L" + str(i+1)) + str("+L" + str(i+31) + "*" "L" + str(i+20))+str("+B" + str(i)) + str("+L" + str(i+26) + "*L" + str(i+20)) + str("+L" + str(i+26) + "*L" + str(i+1)) + str("+L" + str(i+20) + "*L" + str(i+9)) + str("+L" + str(i+9) + "*L" + str(i+1)) + str("+B" + str(i) + "*L" + str(i+9)) + str("+B" + str(i) + "*L" + str(i+20)) + str("+A" + str(i) + "*L" + str(i+9)) + str("+A" + str(i) + "*L" + str(i+20))
eq2 = str("A" + str(i) + "+L" + str(i+31) + "*L" + str(i+26))
eq3 = str("B" + str(i) + "+L" + str(i+31) + "*L" + str(i+1))
fo.write("%s,%s,%s,"%(eq1,eq2,eq3))
fo.close();
|
Write equations for sage to convert to CNF#!/usr/bin/python
#
# Write equations needed to feed into sage in alpha-numeric form
#
# Cahlen Humphreys (2015)
fo = open("anf.txt", "wb")
ptext = "01100010100101110000101011100011"
ctext = "01101000110010010100101001111001"
key = "00110100110111111001011000011100"
ptextl = list(ptext)
ctextl = list(ctext)
keyl = list(key)
ctextl.reverse()
ptextls = ""
ctextls = ""
keyls = ""
for i in range(0,32):
ptextls+="L" + str(i) + "+" + str(ptext[i]) + ","
ctextls+="L" + str(191-i) + "+" + str(ctext[31-i]) + ","
keyls+="K" + str(i) + "+" + str(keyl[i]) + ","
fo.write("%s%s%s"%(ptextls,ctextls,keyls))
print ctextls;
for i in range(0,160):
eq1 = str("L" + str(32+i)) + str("+" + "K" + str(i % 64)) + str("+L"+str(i)) + str("+L" + str(i+16)) + str("+L" + str(i+9)) + str("+L" + str(i+1)) + str("+L" + str(i+31) + "*" "L" + str(i+20))+str("+B" + str(i)) + str("+L" + str(i+26) + "*L" + str(i+20)) + str("+L" + str(i+26) + "*L" + str(i+1)) + str("+L" + str(i+20) + "*L" + str(i+9)) + str("+L" + str(i+9) + "*L" + str(i+1)) + str("+B" + str(i) + "*L" + str(i+9)) + str("+B" + str(i) + "*L" + str(i+20)) + str("+A" + str(i) + "*L" + str(i+9)) + str("+A" + str(i) + "*L" + str(i+20))
eq2 = str("A" + str(i) + "+L" + str(i+31) + "*L" + str(i+26))
eq3 = str("B" + str(i) + "+L" + str(i+31) + "*L" + str(i+1))
fo.write("%s,%s,%s,"%(eq1,eq2,eq3))
fo.close();
|
<commit_before><commit_msg>Write equations for sage to convert to CNF<commit_after>#!/usr/bin/python
#
# Write equations needed to feed into sage in alpha-numeric form
#
# Cahlen Humphreys (2015)
fo = open("anf.txt", "wb")
ptext = "01100010100101110000101011100011"
ctext = "01101000110010010100101001111001"
key = "00110100110111111001011000011100"
ptextl = list(ptext)
ctextl = list(ctext)
keyl = list(key)
ctextl.reverse()
ptextls = ""
ctextls = ""
keyls = ""
for i in range(0,32):
ptextls+="L" + str(i) + "+" + str(ptext[i]) + ","
ctextls+="L" + str(191-i) + "+" + str(ctext[31-i]) + ","
keyls+="K" + str(i) + "+" + str(keyl[i]) + ","
fo.write("%s%s%s"%(ptextls,ctextls,keyls))
print ctextls;
for i in range(0,160):
eq1 = str("L" + str(32+i)) + str("+" + "K" + str(i % 64)) + str("+L"+str(i)) + str("+L" + str(i+16)) + str("+L" + str(i+9)) + str("+L" + str(i+1)) + str("+L" + str(i+31) + "*" "L" + str(i+20))+str("+B" + str(i)) + str("+L" + str(i+26) + "*L" + str(i+20)) + str("+L" + str(i+26) + "*L" + str(i+1)) + str("+L" + str(i+20) + "*L" + str(i+9)) + str("+L" + str(i+9) + "*L" + str(i+1)) + str("+B" + str(i) + "*L" + str(i+9)) + str("+B" + str(i) + "*L" + str(i+20)) + str("+A" + str(i) + "*L" + str(i+9)) + str("+A" + str(i) + "*L" + str(i+20))
eq2 = str("A" + str(i) + "+L" + str(i+31) + "*L" + str(i+26))
eq3 = str("B" + str(i) + "+L" + str(i+31) + "*L" + str(i+1))
fo.write("%s,%s,%s,"%(eq1,eq2,eq3))
fo.close();
|
|
3e50ff3074f1734db165ad9a601aa8177f97a068
|
linkedin_scraper/spiders/search.py
|
linkedin_scraper/spiders/search.py
|
import scrapy
class SearchSpider(scrapy.Spider):
name = 'search'
allowed_domains = ['linkedin.com']
start_urls = ['http://linkedin.com/']
def parse(self, response):
pass
|
Add SearchSpider generated by `scrapy genspider` command
|
Add SearchSpider generated by `scrapy genspider` command
|
Python
|
mit
|
nihn/linkedin-scraper,nihn/linkedin-scraper
|
Add SearchSpider generated by `scrapy genspider` command
|
import scrapy
class SearchSpider(scrapy.Spider):
name = 'search'
allowed_domains = ['linkedin.com']
start_urls = ['http://linkedin.com/']
def parse(self, response):
pass
|
<commit_before><commit_msg>Add SearchSpider generated by `scrapy genspider` command<commit_after>
|
import scrapy
class SearchSpider(scrapy.Spider):
name = 'search'
allowed_domains = ['linkedin.com']
start_urls = ['http://linkedin.com/']
def parse(self, response):
pass
|
Add SearchSpider generated by `scrapy genspider` commandimport scrapy
class SearchSpider(scrapy.Spider):
name = 'search'
allowed_domains = ['linkedin.com']
start_urls = ['http://linkedin.com/']
def parse(self, response):
pass
|
<commit_before><commit_msg>Add SearchSpider generated by `scrapy genspider` command<commit_after>import scrapy
class SearchSpider(scrapy.Spider):
name = 'search'
allowed_domains = ['linkedin.com']
start_urls = ['http://linkedin.com/']
def parse(self, response):
pass
|
|
602676fbdde3fc7bdf8203159a96215cc91dc340
|
src/ig.py
|
src/ig.py
|
from scipy import stats
from math import log2
import numpy as np
def majority_value(binary_targets):
mode = stats.mode(binary_targets)
return tuple(map(lambda x: x[0], mode))
def entropy(binary_targets):
size, _ = binary_targets.shape
_, mode_size = majority_value(binary_targets)
majority_proportion = float(mode_size) / float(size)
minority_proportion = 1. - majority_proportion
return - majority_proportion * log2(majority_proportion) \
- minority_proportion * log2(minority_proportion)
def information_gain(examples, binary_targets, attribute):
size_targets, _ = binary_targets.shape
ed = entropy(binary_targets)
column = examples[:,attribute]
mask1 = column == 1
positive_proportion = float(np.count_nonzero(mask1)) / size_targets
negative_proportion = 1. - positive_proportion
return ed - positive_proportion * entropy(binary_targets[mask1]) \
- negative_proportion * entropy(binary_targets[~mask1])
def choose_best_decision_attribute(examples, attributes, binary_targets):
best_attribute = None
best_gain = float("-inf")
for attribute in np.ndindex(attributes.shape):
if attributes[attribute] == 0:
continue
ig = information_gain(examples, binary_targets, attribute)
if ig > best_gain:
best_attribute = attribute
best_gain = ig
return best_attribute
|
Implement information gain related functions (untested).
|
Implement information gain related functions (untested).
|
Python
|
mit
|
MLNotWar/decision-trees-algorithm,MLNotWar/decision-trees-algorithm
|
Implement information gain related functions (untested).
|
from scipy import stats
from math import log2
import numpy as np
def majority_value(binary_targets):
mode = stats.mode(binary_targets)
return tuple(map(lambda x: x[0], mode))
def entropy(binary_targets):
size, _ = binary_targets.shape
_, mode_size = majority_value(binary_targets)
majority_proportion = float(mode_size) / float(size)
minority_proportion = 1. - majority_proportion
return - majority_proportion * log2(majority_proportion) \
- minority_proportion * log2(minority_proportion)
def information_gain(examples, binary_targets, attribute):
size_targets, _ = binary_targets.shape
ed = entropy(binary_targets)
column = examples[:,attribute]
mask1 = column == 1
positive_proportion = float(np.count_nonzero(mask1)) / size_targets
negative_proportion = 1. - positive_proportion
return ed - positive_proportion * entropy(binary_targets[mask1]) \
- negative_proportion * entropy(binary_targets[~mask1])
def choose_best_decision_attribute(examples, attributes, binary_targets):
best_attribute = None
best_gain = float("-inf")
for attribute in np.ndindex(attributes.shape):
if attributes[attribute] == 0:
continue
ig = information_gain(examples, binary_targets, attribute)
if ig > best_gain:
best_attribute = attribute
best_gain = ig
return best_attribute
|
<commit_before><commit_msg>Implement information gain related functions (untested).<commit_after>
|
from scipy import stats
from math import log2
import numpy as np
def majority_value(binary_targets):
mode = stats.mode(binary_targets)
return tuple(map(lambda x: x[0], mode))
def entropy(binary_targets):
size, _ = binary_targets.shape
_, mode_size = majority_value(binary_targets)
majority_proportion = float(mode_size) / float(size)
minority_proportion = 1. - majority_proportion
return - majority_proportion * log2(majority_proportion) \
- minority_proportion * log2(minority_proportion)
def information_gain(examples, binary_targets, attribute):
size_targets, _ = binary_targets.shape
ed = entropy(binary_targets)
column = examples[:,attribute]
mask1 = column == 1
positive_proportion = float(np.count_nonzero(mask1)) / size_targets
negative_proportion = 1. - positive_proportion
return ed - positive_proportion * entropy(binary_targets[mask1]) \
- negative_proportion * entropy(binary_targets[~mask1])
def choose_best_decision_attribute(examples, attributes, binary_targets):
best_attribute = None
best_gain = float("-inf")
for attribute in np.ndindex(attributes.shape):
if attributes[attribute] == 0:
continue
ig = information_gain(examples, binary_targets, attribute)
if ig > best_gain:
best_attribute = attribute
best_gain = ig
return best_attribute
|
Implement information gain related functions (untested).from scipy import stats
from math import log2
import numpy as np
def majority_value(binary_targets):
mode = stats.mode(binary_targets)
return tuple(map(lambda x: x[0], mode))
def entropy(binary_targets):
size, _ = binary_targets.shape
_, mode_size = majority_value(binary_targets)
majority_proportion = float(mode_size) / float(size)
minority_proportion = 1. - majority_proportion
return - majority_proportion * log2(majority_proportion) \
- minority_proportion * log2(minority_proportion)
def information_gain(examples, binary_targets, attribute):
size_targets, _ = binary_targets.shape
ed = entropy(binary_targets)
column = examples[:,attribute]
mask1 = column == 1
positive_proportion = float(np.count_nonzero(mask1)) / size_targets
negative_proportion = 1. - positive_proportion
return ed - positive_proportion * entropy(binary_targets[mask1]) \
- negative_proportion * entropy(binary_targets[~mask1])
def choose_best_decision_attribute(examples, attributes, binary_targets):
best_attribute = None
best_gain = float("-inf")
for attribute in np.ndindex(attributes.shape):
if attributes[attribute] == 0:
continue
ig = information_gain(examples, binary_targets, attribute)
if ig > best_gain:
best_attribute = attribute
best_gain = ig
return best_attribute
|
<commit_before><commit_msg>Implement information gain related functions (untested).<commit_after>from scipy import stats
from math import log2
import numpy as np
def majority_value(binary_targets):
mode = stats.mode(binary_targets)
return tuple(map(lambda x: x[0], mode))
def entropy(binary_targets):
size, _ = binary_targets.shape
_, mode_size = majority_value(binary_targets)
majority_proportion = float(mode_size) / float(size)
minority_proportion = 1. - majority_proportion
return - majority_proportion * log2(majority_proportion) \
- minority_proportion * log2(minority_proportion)
def information_gain(examples, binary_targets, attribute):
size_targets, _ = binary_targets.shape
ed = entropy(binary_targets)
column = examples[:,attribute]
mask1 = column == 1
positive_proportion = float(np.count_nonzero(mask1)) / size_targets
negative_proportion = 1. - positive_proportion
return ed - positive_proportion * entropy(binary_targets[mask1]) \
- negative_proportion * entropy(binary_targets[~mask1])
def choose_best_decision_attribute(examples, attributes, binary_targets):
best_attribute = None
best_gain = float("-inf")
for attribute in np.ndindex(attributes.shape):
if attributes[attribute] == 0:
continue
ig = information_gain(examples, binary_targets, attribute)
if ig > best_gain:
best_attribute = attribute
best_gain = ig
return best_attribute
|
|
e5b112169653322eaf6d7e8a7816753f919bf0f9
|
skia/PRESUBMIT.py
|
skia/PRESUBMIT.py
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for skia/
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
"""
def GetPreferredTrySlaves(project, change):
return [
'linux_gpu',
'win_gpu',
'mac_gpu',
'mac_gpu_retina',
'linux_layout_rel',
]
|
Add extra bots to the default set of try bots for changes in skia/
|
skia: Add extra bots to the default set of try bots for changes in skia/
This adds linux_layout_rel, win_gpu, linux_gpu, mac_gpu, and
mac_gpu_retina to the default set of bots to run on patches that touch
skia/ when doing "git cl try".
This has no effect on the commit queue bot set which is maintained
elsewhere by the infra team.
BUG=
Review URL: https://codereview.chromium.org/34493005
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@230471 0039d316-1c4b-4281-b951-d872f2087c98
|
Python
|
bsd-3-clause
|
Jonekee/chromium.src,anirudhSK/chromium,crosswalk-project/chromium-crosswalk-efl,markYoungH/chromium.src,dednal/chromium.src,dednal/chromium.src,M4sse/chromium.src,hgl888/chromium-crosswalk-efl,axinging/chromium-crosswalk,Fireblend/chromium-crosswalk,hgl888/chromium-crosswalk,krieger-od/nwjs_chromium.src,Fireblend/chromium-crosswalk,Jonekee/chromium.src,dednal/chromium.src,ondra-novak/chromium.src,mohamed--abdel-maksoud/chromium.src,mohamed--abdel-maksoud/chromium.src,patrickm/chromium.src,chuan9/chromium-crosswalk,krieger-od/nwjs_chromium.src,chuan9/chromium-crosswalk,hgl888/chromium-crosswalk,dednal/chromium.src,ltilve/chromium,chuan9/chromium-crosswalk,Just-D/chromium-1,Jonekee/chromium.src,krieger-od/nwjs_chromium.src,TheTypoMaster/chromium-crosswalk,ChromiumWebApps/chromium,ondra-novak/chromium.src,krieger-od/nwjs_chromium.src,jaruba/chromium.src,ltilve/chromium,fujunwei/chromium-crosswalk,Pluto-tv/chromium-crosswalk,hgl888/chromium-crosswalk-efl,TheTypoMaster/chromium-crosswalk,anirudhSK/chromium,axinging/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,bright-sparks/chromium-spacewalk,Just-D/chromium-1,Chilledheart/chromium,ChromiumWebApps/chromium,ondra-novak/chromium.src,Just-D/chromium-1,Fireblend/chromium-crosswalk,markYoungH/chromium.src,TheTypoMaster/chromium-crosswalk,bright-sparks/chromium-spacewalk,dednal/chromium.src,fujunwei/chromium-crosswalk,Jonekee/chromium.src,dushu1203/chromium.src,bright-sparks/chromium-spacewalk,axinging/chromium-crosswalk,krieger-od/nwjs_chromium.src,anirudhSK/chromium,dushu1203/chromium.src,patrickm/chromium.src,crosswalk-project/chromium-crosswalk-efl,Just-D/chromium-1,Pluto-tv/chromium-crosswalk,hgl888/chromium-crosswalk-efl,ltilve/chromium,Pluto-tv/chromium-crosswalk,jaruba/chromium.src,dednal/chromium.src,TheTypoMaster/chromium-crosswalk,hgl888/chromium-crosswalk,dushu1203/chromium.src,mohamed--abdel-maksoud/chromium.src,fujunwei/chromium-crosswalk,hgl888/chromium-crosswalk,fujunwei/chromium-crosswalk,jaruba/chromium.src,littlstar/chromium.src,mohamed--abdel-maksoud/chromium.src,patrickm/chromium.src,Pluto-tv/chromium-crosswalk,hgl888/chromium-crosswalk-efl,littlstar/chromium.src,Fireblend/chromium-crosswalk,ondra-novak/chromium.src,dushu1203/chromium.src,crosswalk-project/chromium-crosswalk-efl,Pluto-tv/chromium-crosswalk,dednal/chromium.src,chuan9/chromium-crosswalk,M4sse/chromium.src,bright-sparks/chromium-spacewalk,krieger-od/nwjs_chromium.src,bright-sparks/chromium-spacewalk,Jonekee/chromium.src,mohamed--abdel-maksoud/chromium.src,Chilledheart/chromium,hgl888/chromium-crosswalk,jaruba/chromium.src,fujunwei/chromium-crosswalk,markYoungH/chromium.src,PeterWangIntel/chromium-crosswalk,littlstar/chromium.src,chuan9/chromium-crosswalk,chuan9/chromium-crosswalk,patrickm/chromium.src,krieger-od/nwjs_chromium.src,ChromiumWebApps/chromium,patrickm/chromium.src,M4sse/chromium.src,krieger-od/nwjs_chromium.src,ltilve/chromium,hgl888/chromium-crosswalk-efl,chuan9/chromium-crosswalk,markYoungH/chromium.src,hgl888/chromium-crosswalk,ondra-novak/chromium.src,anirudhSK/chromium,Just-D/chromium-1,axinging/chromium-crosswalk,jaruba/chromium.src,axinging/chromium-crosswalk,jaruba/chromium.src,patrickm/chromium.src,dushu1203/chromium.src,crosswalk-project/chromium-crosswalk-efl,hgl888/chromium-crosswalk-efl,hgl888/chromium-crosswalk,dushu1203/chromium.src,krieger-od/nwjs_chromium.src,littlstar/chromium.src,anirudhSK/chromium,ChromiumWebApps/chromium,mohamed--abdel-maksoud/chromium.src,ltilve/chromium,ChromiumWebApps/chromium,axinging/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,hgl888/chromium-crosswalk-efl,axinging/chromium-crosswalk,ondra-novak/chromium.src,krieger-od/nwjs_chromium.src,axinging/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,Fireblend/chromium-crosswalk,axinging/chromium-crosswalk,anirudhSK/chromium,PeterWangIntel/chromium-crosswalk,littlstar/chromium.src,Jonekee/chromium.src,markYoungH/chromium.src,M4sse/chromium.src,axinging/chromium-crosswalk,krieger-od/nwjs_chromium.src,M4sse/chromium.src,anirudhSK/chromium,chuan9/chromium-crosswalk,Fireblend/chromium-crosswalk,Chilledheart/chromium,PeterWangIntel/chromium-crosswalk,markYoungH/chromium.src,mohamed--abdel-maksoud/chromium.src,jaruba/chromium.src,Jonekee/chromium.src,ChromiumWebApps/chromium,ChromiumWebApps/chromium,ChromiumWebApps/chromium,ondra-novak/chromium.src,patrickm/chromium.src,anirudhSK/chromium,fujunwei/chromium-crosswalk,hgl888/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,TheTypoMaster/chromium-crosswalk,Jonekee/chromium.src,crosswalk-project/chromium-crosswalk-efl,PeterWangIntel/chromium-crosswalk,littlstar/chromium.src,Jonekee/chromium.src,dushu1203/chromium.src,hgl888/chromium-crosswalk-efl,Just-D/chromium-1,mohamed--abdel-maksoud/chromium.src,TheTypoMaster/chromium-crosswalk,Pluto-tv/chromium-crosswalk,Pluto-tv/chromium-crosswalk,markYoungH/chromium.src,Chilledheart/chromium,mohamed--abdel-maksoud/chromium.src,axinging/chromium-crosswalk,dednal/chromium.src,PeterWangIntel/chromium-crosswalk,hgl888/chromium-crosswalk,ChromiumWebApps/chromium,ondra-novak/chromium.src,hgl888/chromium-crosswalk-efl,ChromiumWebApps/chromium,Jonekee/chromium.src,dushu1203/chromium.src,TheTypoMaster/chromium-crosswalk,Chilledheart/chromium,Chilledheart/chromium,jaruba/chromium.src,bright-sparks/chromium-spacewalk,M4sse/chromium.src,TheTypoMaster/chromium-crosswalk,ondra-novak/chromium.src,anirudhSK/chromium,ltilve/chromium,Fireblend/chromium-crosswalk,markYoungH/chromium.src,dushu1203/chromium.src,fujunwei/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,Chilledheart/chromium,M4sse/chromium.src,jaruba/chromium.src,anirudhSK/chromium,dednal/chromium.src,M4sse/chromium.src,bright-sparks/chromium-spacewalk,fujunwei/chromium-crosswalk,ChromiumWebApps/chromium,markYoungH/chromium.src,jaruba/chromium.src,PeterWangIntel/chromium-crosswalk,hgl888/chromium-crosswalk-efl,markYoungH/chromium.src,anirudhSK/chromium,M4sse/chromium.src,dednal/chromium.src,M4sse/chromium.src,markYoungH/chromium.src,littlstar/chromium.src,mohamed--abdel-maksoud/chromium.src,PeterWangIntel/chromium-crosswalk,littlstar/chromium.src,Just-D/chromium-1,Chilledheart/chromium,bright-sparks/chromium-spacewalk,Fireblend/chromium-crosswalk,ltilve/chromium,crosswalk-project/chromium-crosswalk-efl,Pluto-tv/chromium-crosswalk,ltilve/chromium,Fireblend/chromium-crosswalk,Pluto-tv/chromium-crosswalk,jaruba/chromium.src,chuan9/chromium-crosswalk,Just-D/chromium-1,Just-D/chromium-1,patrickm/chromium.src,fujunwei/chromium-crosswalk,M4sse/chromium.src,anirudhSK/chromium,crosswalk-project/chromium-crosswalk-efl,bright-sparks/chromium-spacewalk,patrickm/chromium.src,Jonekee/chromium.src,ltilve/chromium,Chilledheart/chromium,dushu1203/chromium.src,dushu1203/chromium.src,dednal/chromium.src,ChromiumWebApps/chromium
|
skia: Add extra bots to the default set of try bots for changes in skia/
This adds linux_layout_rel, win_gpu, linux_gpu, mac_gpu, and
mac_gpu_retina to the default set of bots to run on patches that touch
skia/ when doing "git cl try".
This has no effect on the commit queue bot set which is maintained
elsewhere by the infra team.
BUG=
Review URL: https://codereview.chromium.org/34493005
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@230471 0039d316-1c4b-4281-b951-d872f2087c98
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for skia/
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
"""
def GetPreferredTrySlaves(project, change):
return [
'linux_gpu',
'win_gpu',
'mac_gpu',
'mac_gpu_retina',
'linux_layout_rel',
]
|
<commit_before><commit_msg>skia: Add extra bots to the default set of try bots for changes in skia/
This adds linux_layout_rel, win_gpu, linux_gpu, mac_gpu, and
mac_gpu_retina to the default set of bots to run on patches that touch
skia/ when doing "git cl try".
This has no effect on the commit queue bot set which is maintained
elsewhere by the infra team.
BUG=
Review URL: https://codereview.chromium.org/34493005
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@230471 0039d316-1c4b-4281-b951-d872f2087c98<commit_after>
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for skia/
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
"""
def GetPreferredTrySlaves(project, change):
return [
'linux_gpu',
'win_gpu',
'mac_gpu',
'mac_gpu_retina',
'linux_layout_rel',
]
|
skia: Add extra bots to the default set of try bots for changes in skia/
This adds linux_layout_rel, win_gpu, linux_gpu, mac_gpu, and
mac_gpu_retina to the default set of bots to run on patches that touch
skia/ when doing "git cl try".
This has no effect on the commit queue bot set which is maintained
elsewhere by the infra team.
BUG=
Review URL: https://codereview.chromium.org/34493005
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@230471 0039d316-1c4b-4281-b951-d872f2087c98# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for skia/
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
"""
def GetPreferredTrySlaves(project, change):
return [
'linux_gpu',
'win_gpu',
'mac_gpu',
'mac_gpu_retina',
'linux_layout_rel',
]
|
<commit_before><commit_msg>skia: Add extra bots to the default set of try bots for changes in skia/
This adds linux_layout_rel, win_gpu, linux_gpu, mac_gpu, and
mac_gpu_retina to the default set of bots to run on patches that touch
skia/ when doing "git cl try".
This has no effect on the commit queue bot set which is maintained
elsewhere by the infra team.
BUG=
Review URL: https://codereview.chromium.org/34493005
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@230471 0039d316-1c4b-4281-b951-d872f2087c98<commit_after># Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for skia/
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
"""
def GetPreferredTrySlaves(project, change):
return [
'linux_gpu',
'win_gpu',
'mac_gpu',
'mac_gpu_retina',
'linux_layout_rel',
]
|
|
edf9101eae52ce0a3f87b8f233e3f8ba1dcbbee0
|
src/helper_sql.py
|
src/helper_sql.py
|
import shared
def sqlQuery(sqlStatement, *args):
shared.sqlLock.acquire()
shared.sqlSubmitQueue.put(sqlStatement)
if args == ():
shared.sqlSubmitQueue.put('')
else:
shared.sqlSubmitQueue.put(args)
queryreturn = shared.sqlReturnQueue.get()
shared.sqlLock.release()
return queryreturn
def sqlExecute(sqlStatement, *args):
shared.sqlLock.acquire()
shared.sqlSubmitQueue.put(sqlStatement)
if args == ():
shared.sqlSubmitQueue.put('')
else:
shared.sqlSubmitQueue.put(args)
shared.sqlReturnQueue.get()
shared.sqlSubmitQueue.put('commit')
shared.sqlLock.release()
def sqlStoredProcedure(procName):
shared.sqlLock.acquire()
shared.sqlSubmitQueue.put(procName)
shared.sqlLock.release()
|
Move duplicated sql code into helper
|
Move duplicated sql code into helper
|
Python
|
mit
|
Erkan-Yilmaz/PyBitmessage,debguy0x/PyBitmessage,domob1812/PyBitmessage,tigerbunny/bitmessage,hb9kns/PyBitmessage,timothyparez/PyBitmessage,bmng-dev/PyBitmessage,tigerbunny/bitmessage,mailchuck/PyBitmessage,lightrabbit/PyBitmessage,gnu3ra/PyBitmessage,hb9kns/PyBitmessage,xeddmc/PyBitmessage,rzr/PyBitmessage,torifier/PyBitmessage,bmng-dev/PyBitmessage,timothyparez/PyBitmessage,Atheros1/PyBitmessage,kyucrane/PyBitmessage,krytarowski/PyBitmessage,torifier/PyBitmessage,debguy0x/PyBitmessage,torifier/PyBitmessage,hb9kns/PyBitmessage,gnu3ra/PyBitmessage,xeddmc/PyBitmessage,Erkan-Yilmaz/PyBitmessage,metamarcdw/PyBitmessage-I2P,lightrabbit/PyBitmessage,kyucrane/PyBitmessage,timothyparez/PyBitmessage,debguy0x/PyBitmessage,bmng-dev/PyBitmessage,rzr/PyBitmessage,Atheros1/PyBitmessage,krytarowski/PyBitmessage,lightrabbit/PyBitmessage,Atheros1/PyBitmessage,mailchuck/PyBitmessage,hb9kns/PyBitmessage,JosephGoulden/PyBitmessageF2F,metamarcdw/PyBitmessage-I2P,Atheros1/PyBitmessage,debguy0x/PyBitmessage,torifier/PyBitmessage,domob1812/PyBitmessage,lightrabbit/PyBitmessage,domob1812/PyBitmessage,mailchuck/PyBitmessage,timothyparez/PyBitmessage,tigerbunny/bitmessage,JosephGoulden/PyBitmessageF2F,mailchuck/PyBitmessage
|
Move duplicated sql code into helper
|
import shared
def sqlQuery(sqlStatement, *args):
shared.sqlLock.acquire()
shared.sqlSubmitQueue.put(sqlStatement)
if args == ():
shared.sqlSubmitQueue.put('')
else:
shared.sqlSubmitQueue.put(args)
queryreturn = shared.sqlReturnQueue.get()
shared.sqlLock.release()
return queryreturn
def sqlExecute(sqlStatement, *args):
shared.sqlLock.acquire()
shared.sqlSubmitQueue.put(sqlStatement)
if args == ():
shared.sqlSubmitQueue.put('')
else:
shared.sqlSubmitQueue.put(args)
shared.sqlReturnQueue.get()
shared.sqlSubmitQueue.put('commit')
shared.sqlLock.release()
def sqlStoredProcedure(procName):
shared.sqlLock.acquire()
shared.sqlSubmitQueue.put(procName)
shared.sqlLock.release()
|
<commit_before><commit_msg>Move duplicated sql code into helper<commit_after>
|
import shared
def sqlQuery(sqlStatement, *args):
shared.sqlLock.acquire()
shared.sqlSubmitQueue.put(sqlStatement)
if args == ():
shared.sqlSubmitQueue.put('')
else:
shared.sqlSubmitQueue.put(args)
queryreturn = shared.sqlReturnQueue.get()
shared.sqlLock.release()
return queryreturn
def sqlExecute(sqlStatement, *args):
shared.sqlLock.acquire()
shared.sqlSubmitQueue.put(sqlStatement)
if args == ():
shared.sqlSubmitQueue.put('')
else:
shared.sqlSubmitQueue.put(args)
shared.sqlReturnQueue.get()
shared.sqlSubmitQueue.put('commit')
shared.sqlLock.release()
def sqlStoredProcedure(procName):
shared.sqlLock.acquire()
shared.sqlSubmitQueue.put(procName)
shared.sqlLock.release()
|
Move duplicated sql code into helperimport shared
def sqlQuery(sqlStatement, *args):
shared.sqlLock.acquire()
shared.sqlSubmitQueue.put(sqlStatement)
if args == ():
shared.sqlSubmitQueue.put('')
else:
shared.sqlSubmitQueue.put(args)
queryreturn = shared.sqlReturnQueue.get()
shared.sqlLock.release()
return queryreturn
def sqlExecute(sqlStatement, *args):
shared.sqlLock.acquire()
shared.sqlSubmitQueue.put(sqlStatement)
if args == ():
shared.sqlSubmitQueue.put('')
else:
shared.sqlSubmitQueue.put(args)
shared.sqlReturnQueue.get()
shared.sqlSubmitQueue.put('commit')
shared.sqlLock.release()
def sqlStoredProcedure(procName):
shared.sqlLock.acquire()
shared.sqlSubmitQueue.put(procName)
shared.sqlLock.release()
|
<commit_before><commit_msg>Move duplicated sql code into helper<commit_after>import shared
def sqlQuery(sqlStatement, *args):
shared.sqlLock.acquire()
shared.sqlSubmitQueue.put(sqlStatement)
if args == ():
shared.sqlSubmitQueue.put('')
else:
shared.sqlSubmitQueue.put(args)
queryreturn = shared.sqlReturnQueue.get()
shared.sqlLock.release()
return queryreturn
def sqlExecute(sqlStatement, *args):
shared.sqlLock.acquire()
shared.sqlSubmitQueue.put(sqlStatement)
if args == ():
shared.sqlSubmitQueue.put('')
else:
shared.sqlSubmitQueue.put(args)
shared.sqlReturnQueue.get()
shared.sqlSubmitQueue.put('commit')
shared.sqlLock.release()
def sqlStoredProcedure(procName):
shared.sqlLock.acquire()
shared.sqlSubmitQueue.put(procName)
shared.sqlLock.release()
|
|
74acd01d0997924944fdf4c9d7833c08b42c7004
|
phylogenetics/exttools/taxonomy.py
|
phylogenetics/exttools/taxonomy.py
|
"""Ping the NCBI Taxonomy web service and retrieve data about a sequence.
Stupidly simple module... probably needs more intelligent/safe parsing of data.
"""
import re
import requests
def query(keyword, type="name", **kwargs):
"""Send a query to retrieve taxonomic data about a set of sequences.
"""
main_url = "https://www.ncbi.nlm.nih.gov/Taxonomy/Browser/wwwtax.cgi?"
# Construct arguments for query.
kwargs[type] = keyword
end_url = "&".join([key+"="+kwargs[key] for key in kwargs])
total_url = main_url + end_url
data = requests.get(total_url)
# Strip out the HTML with the taxonomic lineage
regex = re.compile('<[aA][^<]+</[aA]>')
lineage = regex.findall(data.text)
# Get taxonomic tags
##### Result looks like a list of: #####
# <a ALT="superkingdom"
# href="/Taxonomy/Browser/wwwtax.cgi?mode=Undef&id=2759&lvl=3&keep=1&srchmode=1&unlock"
# TITLE="superkingdom">Eukaryota
#</a>
taxonomy = {}
for level in lineage:
# Get the classification
try:
classification = re.compile('TITLE="([A-Za-z\s]+)"').search(level).group(1)
# Get the named classification
try:
label = re.compile('>([\w ]+)<').search(level).group(1)
except AttributeError:
raise Exception("Found classification, but cannot find ")
# Add classification to taxonomy dictionary
taxonomy[classification] = label
# If the tag pulled from last REGEX is not a classification (but
# another <a> tag), skip it.
except AttributeError:
pass
return taxonomy
|
Add module for pulling taxonomic data from BLAST
|
Add module for pulling taxonomic data from BLAST
|
Python
|
unlicense
|
Zsailer/phylogenetics,Zsailer/phylo_tools_2
|
Add module for pulling taxonomic data from BLAST
|
"""Ping the NCBI Taxonomy web service and retrieve data about a sequence.
Stupidly simple module... probably needs more intelligent/safe parsing of data.
"""
import re
import requests
def query(keyword, type="name", **kwargs):
"""Send a query to retrieve taxonomic data about a set of sequences.
"""
main_url = "https://www.ncbi.nlm.nih.gov/Taxonomy/Browser/wwwtax.cgi?"
# Construct arguments for query.
kwargs[type] = keyword
end_url = "&".join([key+"="+kwargs[key] for key in kwargs])
total_url = main_url + end_url
data = requests.get(total_url)
# Strip out the HTML with the taxonomic lineage
regex = re.compile('<[aA][^<]+</[aA]>')
lineage = regex.findall(data.text)
# Get taxonomic tags
##### Result looks like a list of: #####
# <a ALT="superkingdom"
# href="/Taxonomy/Browser/wwwtax.cgi?mode=Undef&id=2759&lvl=3&keep=1&srchmode=1&unlock"
# TITLE="superkingdom">Eukaryota
#</a>
taxonomy = {}
for level in lineage:
# Get the classification
try:
classification = re.compile('TITLE="([A-Za-z\s]+)"').search(level).group(1)
# Get the named classification
try:
label = re.compile('>([\w ]+)<').search(level).group(1)
except AttributeError:
raise Exception("Found classification, but cannot find ")
# Add classification to taxonomy dictionary
taxonomy[classification] = label
# If the tag pulled from last REGEX is not a classification (but
# another <a> tag), skip it.
except AttributeError:
pass
return taxonomy
|
<commit_before><commit_msg>Add module for pulling taxonomic data from BLAST<commit_after>
|
"""Ping the NCBI Taxonomy web service and retrieve data about a sequence.
Stupidly simple module... probably needs more intelligent/safe parsing of data.
"""
import re
import requests
def query(keyword, type="name", **kwargs):
"""Send a query to retrieve taxonomic data about a set of sequences.
"""
main_url = "https://www.ncbi.nlm.nih.gov/Taxonomy/Browser/wwwtax.cgi?"
# Construct arguments for query.
kwargs[type] = keyword
end_url = "&".join([key+"="+kwargs[key] for key in kwargs])
total_url = main_url + end_url
data = requests.get(total_url)
# Strip out the HTML with the taxonomic lineage
regex = re.compile('<[aA][^<]+</[aA]>')
lineage = regex.findall(data.text)
# Get taxonomic tags
##### Result looks like a list of: #####
# <a ALT="superkingdom"
# href="/Taxonomy/Browser/wwwtax.cgi?mode=Undef&id=2759&lvl=3&keep=1&srchmode=1&unlock"
# TITLE="superkingdom">Eukaryota
#</a>
taxonomy = {}
for level in lineage:
# Get the classification
try:
classification = re.compile('TITLE="([A-Za-z\s]+)"').search(level).group(1)
# Get the named classification
try:
label = re.compile('>([\w ]+)<').search(level).group(1)
except AttributeError:
raise Exception("Found classification, but cannot find ")
# Add classification to taxonomy dictionary
taxonomy[classification] = label
# If the tag pulled from last REGEX is not a classification (but
# another <a> tag), skip it.
except AttributeError:
pass
return taxonomy
|
Add module for pulling taxonomic data from BLAST"""Ping the NCBI Taxonomy web service and retrieve data about a sequence.
Stupidly simple module... probably needs more intelligent/safe parsing of data.
"""
import re
import requests
def query(keyword, type="name", **kwargs):
"""Send a query to retrieve taxonomic data about a set of sequences.
"""
main_url = "https://www.ncbi.nlm.nih.gov/Taxonomy/Browser/wwwtax.cgi?"
# Construct arguments for query.
kwargs[type] = keyword
end_url = "&".join([key+"="+kwargs[key] for key in kwargs])
total_url = main_url + end_url
data = requests.get(total_url)
# Strip out the HTML with the taxonomic lineage
regex = re.compile('<[aA][^<]+</[aA]>')
lineage = regex.findall(data.text)
# Get taxonomic tags
##### Result looks like a list of: #####
# <a ALT="superkingdom"
# href="/Taxonomy/Browser/wwwtax.cgi?mode=Undef&id=2759&lvl=3&keep=1&srchmode=1&unlock"
# TITLE="superkingdom">Eukaryota
#</a>
taxonomy = {}
for level in lineage:
# Get the classification
try:
classification = re.compile('TITLE="([A-Za-z\s]+)"').search(level).group(1)
# Get the named classification
try:
label = re.compile('>([\w ]+)<').search(level).group(1)
except AttributeError:
raise Exception("Found classification, but cannot find ")
# Add classification to taxonomy dictionary
taxonomy[classification] = label
# If the tag pulled from last REGEX is not a classification (but
# another <a> tag), skip it.
except AttributeError:
pass
return taxonomy
|
<commit_before><commit_msg>Add module for pulling taxonomic data from BLAST<commit_after>"""Ping the NCBI Taxonomy web service and retrieve data about a sequence.
Stupidly simple module... probably needs more intelligent/safe parsing of data.
"""
import re
import requests
def query(keyword, type="name", **kwargs):
"""Send a query to retrieve taxonomic data about a set of sequences.
"""
main_url = "https://www.ncbi.nlm.nih.gov/Taxonomy/Browser/wwwtax.cgi?"
# Construct arguments for query.
kwargs[type] = keyword
end_url = "&".join([key+"="+kwargs[key] for key in kwargs])
total_url = main_url + end_url
data = requests.get(total_url)
# Strip out the HTML with the taxonomic lineage
regex = re.compile('<[aA][^<]+</[aA]>')
lineage = regex.findall(data.text)
# Get taxonomic tags
##### Result looks like a list of: #####
# <a ALT="superkingdom"
# href="/Taxonomy/Browser/wwwtax.cgi?mode=Undef&id=2759&lvl=3&keep=1&srchmode=1&unlock"
# TITLE="superkingdom">Eukaryota
#</a>
taxonomy = {}
for level in lineage:
# Get the classification
try:
classification = re.compile('TITLE="([A-Za-z\s]+)"').search(level).group(1)
# Get the named classification
try:
label = re.compile('>([\w ]+)<').search(level).group(1)
except AttributeError:
raise Exception("Found classification, but cannot find ")
# Add classification to taxonomy dictionary
taxonomy[classification] = label
# If the tag pulled from last REGEX is not a classification (but
# another <a> tag), skip it.
except AttributeError:
pass
return taxonomy
|
|
191e041239378583040596f8fe08b37e95ebd31c
|
osf/migrations/0117_set_is_root.py
|
osf/migrations/0117_set_is_root.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-07-11 18:05
from __future__ import unicode_literals
from django.db import migrations
def set_is_root(state, *args, **kwargs):
OsfStorageFolder = state.get_model('osf', 'osfstoragefolder')
OsfStorageFolder.objects.filter(nodesettings__isnull=False, is_root__isnull=True).update(is_root=True)
def unset_is_root(state, *args, **kwargs):
state.get_model('osf', 'osfstoragefolder').objects.filter(is_root=True).update(is_root=None)
class Migration(migrations.Migration):
dependencies = [
('osf', '0116_merge_20180706_0901'),
]
operations = [
migrations.RunPython(set_is_root, unset_is_root)
]
|
Set is_root=True for old folders set as root_node for existing NodeSettings
|
Set is_root=True for old folders set as root_node for existing NodeSettings
[#PLAT-578]
|
Python
|
apache-2.0
|
mfraezz/osf.io,felliott/osf.io,saradbowman/osf.io,mattclark/osf.io,brianjgeiger/osf.io,pattisdr/osf.io,mattclark/osf.io,felliott/osf.io,HalcyonChimera/osf.io,erinspace/osf.io,Johnetordoff/osf.io,caseyrollins/osf.io,felliott/osf.io,brianjgeiger/osf.io,mfraezz/osf.io,adlius/osf.io,cslzchen/osf.io,caseyrollins/osf.io,baylee-d/osf.io,Johnetordoff/osf.io,CenterForOpenScience/osf.io,CenterForOpenScience/osf.io,mfraezz/osf.io,cslzchen/osf.io,adlius/osf.io,HalcyonChimera/osf.io,HalcyonChimera/osf.io,cslzchen/osf.io,adlius/osf.io,mfraezz/osf.io,baylee-d/osf.io,aaxelb/osf.io,aaxelb/osf.io,brianjgeiger/osf.io,erinspace/osf.io,saradbowman/osf.io,pattisdr/osf.io,CenterForOpenScience/osf.io,Johnetordoff/osf.io,aaxelb/osf.io,erinspace/osf.io,caseyrollins/osf.io,felliott/osf.io,adlius/osf.io,brianjgeiger/osf.io,pattisdr/osf.io,CenterForOpenScience/osf.io,HalcyonChimera/osf.io,Johnetordoff/osf.io,aaxelb/osf.io,baylee-d/osf.io,cslzchen/osf.io,mattclark/osf.io
|
Set is_root=True for old folders set as root_node for existing NodeSettings
[#PLAT-578]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-07-11 18:05
from __future__ import unicode_literals
from django.db import migrations
def set_is_root(state, *args, **kwargs):
OsfStorageFolder = state.get_model('osf', 'osfstoragefolder')
OsfStorageFolder.objects.filter(nodesettings__isnull=False, is_root__isnull=True).update(is_root=True)
def unset_is_root(state, *args, **kwargs):
state.get_model('osf', 'osfstoragefolder').objects.filter(is_root=True).update(is_root=None)
class Migration(migrations.Migration):
dependencies = [
('osf', '0116_merge_20180706_0901'),
]
operations = [
migrations.RunPython(set_is_root, unset_is_root)
]
|
<commit_before><commit_msg>Set is_root=True for old folders set as root_node for existing NodeSettings
[#PLAT-578]<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-07-11 18:05
from __future__ import unicode_literals
from django.db import migrations
def set_is_root(state, *args, **kwargs):
OsfStorageFolder = state.get_model('osf', 'osfstoragefolder')
OsfStorageFolder.objects.filter(nodesettings__isnull=False, is_root__isnull=True).update(is_root=True)
def unset_is_root(state, *args, **kwargs):
state.get_model('osf', 'osfstoragefolder').objects.filter(is_root=True).update(is_root=None)
class Migration(migrations.Migration):
dependencies = [
('osf', '0116_merge_20180706_0901'),
]
operations = [
migrations.RunPython(set_is_root, unset_is_root)
]
|
Set is_root=True for old folders set as root_node for existing NodeSettings
[#PLAT-578]# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-07-11 18:05
from __future__ import unicode_literals
from django.db import migrations
def set_is_root(state, *args, **kwargs):
OsfStorageFolder = state.get_model('osf', 'osfstoragefolder')
OsfStorageFolder.objects.filter(nodesettings__isnull=False, is_root__isnull=True).update(is_root=True)
def unset_is_root(state, *args, **kwargs):
state.get_model('osf', 'osfstoragefolder').objects.filter(is_root=True).update(is_root=None)
class Migration(migrations.Migration):
dependencies = [
('osf', '0116_merge_20180706_0901'),
]
operations = [
migrations.RunPython(set_is_root, unset_is_root)
]
|
<commit_before><commit_msg>Set is_root=True for old folders set as root_node for existing NodeSettings
[#PLAT-578]<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-07-11 18:05
from __future__ import unicode_literals
from django.db import migrations
def set_is_root(state, *args, **kwargs):
OsfStorageFolder = state.get_model('osf', 'osfstoragefolder')
OsfStorageFolder.objects.filter(nodesettings__isnull=False, is_root__isnull=True).update(is_root=True)
def unset_is_root(state, *args, **kwargs):
state.get_model('osf', 'osfstoragefolder').objects.filter(is_root=True).update(is_root=None)
class Migration(migrations.Migration):
dependencies = [
('osf', '0116_merge_20180706_0901'),
]
operations = [
migrations.RunPython(set_is_root, unset_is_root)
]
|
|
71c47c8374cf6c5f53cdfbb71763f165bcd6c013
|
oneflow/base/tests/__init__.py
|
oneflow/base/tests/__init__.py
|
# -*- coding: utf-8 -*-
import redis
from mongoengine.connection import connect, disconnect
from django.conf import settings
TEST_REDIS = redis.StrictRedis(host=settings.REDIS_TEST_HOST,
port=settings.REDIS_TEST_PORT,
db=settings.REDIS_TEST_DB)
def connect_mongodb_testsuite():
disconnect()
connect('{0}_testsuite'.format(settings.MONGODB_NAME))
__all__ = ('TEST_REDIS', 'connect_mongodb_testsuite', )
|
# -*- coding: utf-8 -*-
import redis
from mongoengine.connection import connect, disconnect
from django.conf import settings
TEST_REDIS = redis.StrictRedis(host=settings.REDIS_TEST_HOST,
port=settings.REDIS_TEST_PORT,
db=settings.REDIS_TEST_DB)
def connect_mongodb_testsuite():
disconnect()
connect('{0}_testsuite'.format(settings.MONGODB_NAME),
tz_aware=settings.USE_TZ)
__all__ = ('TEST_REDIS', 'connect_mongodb_testsuite', )
|
Make the test MongoDB database TZ aware like the production one, else some date comparisons fail, whereas they succeed in production.
|
Make the test MongoDB database TZ aware like the production one, else some date comparisons fail, whereas they succeed in production.
|
Python
|
agpl-3.0
|
1flow/1flow,WillianPaiva/1flow,1flow/1flow,WillianPaiva/1flow,1flow/1flow,WillianPaiva/1flow,WillianPaiva/1flow,1flow/1flow,WillianPaiva/1flow,1flow/1flow
|
# -*- coding: utf-8 -*-
import redis
from mongoengine.connection import connect, disconnect
from django.conf import settings
TEST_REDIS = redis.StrictRedis(host=settings.REDIS_TEST_HOST,
port=settings.REDIS_TEST_PORT,
db=settings.REDIS_TEST_DB)
def connect_mongodb_testsuite():
disconnect()
connect('{0}_testsuite'.format(settings.MONGODB_NAME))
__all__ = ('TEST_REDIS', 'connect_mongodb_testsuite', )
Make the test MongoDB database TZ aware like the production one, else some date comparisons fail, whereas they succeed in production.
|
# -*- coding: utf-8 -*-
import redis
from mongoengine.connection import connect, disconnect
from django.conf import settings
TEST_REDIS = redis.StrictRedis(host=settings.REDIS_TEST_HOST,
port=settings.REDIS_TEST_PORT,
db=settings.REDIS_TEST_DB)
def connect_mongodb_testsuite():
disconnect()
connect('{0}_testsuite'.format(settings.MONGODB_NAME),
tz_aware=settings.USE_TZ)
__all__ = ('TEST_REDIS', 'connect_mongodb_testsuite', )
|
<commit_before># -*- coding: utf-8 -*-
import redis
from mongoengine.connection import connect, disconnect
from django.conf import settings
TEST_REDIS = redis.StrictRedis(host=settings.REDIS_TEST_HOST,
port=settings.REDIS_TEST_PORT,
db=settings.REDIS_TEST_DB)
def connect_mongodb_testsuite():
disconnect()
connect('{0}_testsuite'.format(settings.MONGODB_NAME))
__all__ = ('TEST_REDIS', 'connect_mongodb_testsuite', )
<commit_msg>Make the test MongoDB database TZ aware like the production one, else some date comparisons fail, whereas they succeed in production.<commit_after>
|
# -*- coding: utf-8 -*-
import redis
from mongoengine.connection import connect, disconnect
from django.conf import settings
TEST_REDIS = redis.StrictRedis(host=settings.REDIS_TEST_HOST,
port=settings.REDIS_TEST_PORT,
db=settings.REDIS_TEST_DB)
def connect_mongodb_testsuite():
disconnect()
connect('{0}_testsuite'.format(settings.MONGODB_NAME),
tz_aware=settings.USE_TZ)
__all__ = ('TEST_REDIS', 'connect_mongodb_testsuite', )
|
# -*- coding: utf-8 -*-
import redis
from mongoengine.connection import connect, disconnect
from django.conf import settings
TEST_REDIS = redis.StrictRedis(host=settings.REDIS_TEST_HOST,
port=settings.REDIS_TEST_PORT,
db=settings.REDIS_TEST_DB)
def connect_mongodb_testsuite():
disconnect()
connect('{0}_testsuite'.format(settings.MONGODB_NAME))
__all__ = ('TEST_REDIS', 'connect_mongodb_testsuite', )
Make the test MongoDB database TZ aware like the production one, else some date comparisons fail, whereas they succeed in production.# -*- coding: utf-8 -*-
import redis
from mongoengine.connection import connect, disconnect
from django.conf import settings
TEST_REDIS = redis.StrictRedis(host=settings.REDIS_TEST_HOST,
port=settings.REDIS_TEST_PORT,
db=settings.REDIS_TEST_DB)
def connect_mongodb_testsuite():
disconnect()
connect('{0}_testsuite'.format(settings.MONGODB_NAME),
tz_aware=settings.USE_TZ)
__all__ = ('TEST_REDIS', 'connect_mongodb_testsuite', )
|
<commit_before># -*- coding: utf-8 -*-
import redis
from mongoengine.connection import connect, disconnect
from django.conf import settings
TEST_REDIS = redis.StrictRedis(host=settings.REDIS_TEST_HOST,
port=settings.REDIS_TEST_PORT,
db=settings.REDIS_TEST_DB)
def connect_mongodb_testsuite():
disconnect()
connect('{0}_testsuite'.format(settings.MONGODB_NAME))
__all__ = ('TEST_REDIS', 'connect_mongodb_testsuite', )
<commit_msg>Make the test MongoDB database TZ aware like the production one, else some date comparisons fail, whereas they succeed in production.<commit_after># -*- coding: utf-8 -*-
import redis
from mongoengine.connection import connect, disconnect
from django.conf import settings
TEST_REDIS = redis.StrictRedis(host=settings.REDIS_TEST_HOST,
port=settings.REDIS_TEST_PORT,
db=settings.REDIS_TEST_DB)
def connect_mongodb_testsuite():
disconnect()
connect('{0}_testsuite'.format(settings.MONGODB_NAME),
tz_aware=settings.USE_TZ)
__all__ = ('TEST_REDIS', 'connect_mongodb_testsuite', )
|
23c368e149d1cdb49f67a92d020f5eabc8527dd0
|
base.py
|
base.py
|
import abc
from PyQt5 import QtWidgets
class BaseWidgetView(QtWidgets.QWidget):
__metaclass__ = abc.ABCMeta
def notify(self, kwargs):
u"""
Name based notification with callable() validation
"""
func_name = kwargs.pop('func')
func = getattr(self, func_name)
if not func:
raise AttributeError('Unknown function to notify '
'{}'.format(func_name))
if not callable(func):
raise RuntimeError('{} is not a function'.format(func_name))
func(**kwargs.pop('data'))
@classmethod
@abc.abstractmethod
def as_view(cls, parent, *args, **kwargs):
raise NotImplementedError()
@abc.abstractmethod
def setup_ui(self):
raise NotImplementedError()
class BaseController(metaclass=abc.ABCMeta):
@classmethod
@abc.abstractmethod
def get_instance(cls, *args, **kwargs):
raise NotImplementedError()
@abc.abstractmethod
def pre_switch(self):
raise NotImplementedError()
@abc.abstractmethod
def start(self):
raise NotImplementedError()
|
Refactor of controllers and views into heritage from abstract classes
|
Refactor of controllers and views into heritage from abstract classes
|
Python
|
mit
|
PereBal/advanced-algorithms
|
Refactor of controllers and views into heritage from abstract classes
|
import abc
from PyQt5 import QtWidgets
class BaseWidgetView(QtWidgets.QWidget):
__metaclass__ = abc.ABCMeta
def notify(self, kwargs):
u"""
Name based notification with callable() validation
"""
func_name = kwargs.pop('func')
func = getattr(self, func_name)
if not func:
raise AttributeError('Unknown function to notify '
'{}'.format(func_name))
if not callable(func):
raise RuntimeError('{} is not a function'.format(func_name))
func(**kwargs.pop('data'))
@classmethod
@abc.abstractmethod
def as_view(cls, parent, *args, **kwargs):
raise NotImplementedError()
@abc.abstractmethod
def setup_ui(self):
raise NotImplementedError()
class BaseController(metaclass=abc.ABCMeta):
@classmethod
@abc.abstractmethod
def get_instance(cls, *args, **kwargs):
raise NotImplementedError()
@abc.abstractmethod
def pre_switch(self):
raise NotImplementedError()
@abc.abstractmethod
def start(self):
raise NotImplementedError()
|
<commit_before><commit_msg>Refactor of controllers and views into heritage from abstract classes<commit_after>
|
import abc
from PyQt5 import QtWidgets
class BaseWidgetView(QtWidgets.QWidget):
__metaclass__ = abc.ABCMeta
def notify(self, kwargs):
u"""
Name based notification with callable() validation
"""
func_name = kwargs.pop('func')
func = getattr(self, func_name)
if not func:
raise AttributeError('Unknown function to notify '
'{}'.format(func_name))
if not callable(func):
raise RuntimeError('{} is not a function'.format(func_name))
func(**kwargs.pop('data'))
@classmethod
@abc.abstractmethod
def as_view(cls, parent, *args, **kwargs):
raise NotImplementedError()
@abc.abstractmethod
def setup_ui(self):
raise NotImplementedError()
class BaseController(metaclass=abc.ABCMeta):
@classmethod
@abc.abstractmethod
def get_instance(cls, *args, **kwargs):
raise NotImplementedError()
@abc.abstractmethod
def pre_switch(self):
raise NotImplementedError()
@abc.abstractmethod
def start(self):
raise NotImplementedError()
|
Refactor of controllers and views into heritage from abstract classesimport abc
from PyQt5 import QtWidgets
class BaseWidgetView(QtWidgets.QWidget):
__metaclass__ = abc.ABCMeta
def notify(self, kwargs):
u"""
Name based notification with callable() validation
"""
func_name = kwargs.pop('func')
func = getattr(self, func_name)
if not func:
raise AttributeError('Unknown function to notify '
'{}'.format(func_name))
if not callable(func):
raise RuntimeError('{} is not a function'.format(func_name))
func(**kwargs.pop('data'))
@classmethod
@abc.abstractmethod
def as_view(cls, parent, *args, **kwargs):
raise NotImplementedError()
@abc.abstractmethod
def setup_ui(self):
raise NotImplementedError()
class BaseController(metaclass=abc.ABCMeta):
@classmethod
@abc.abstractmethod
def get_instance(cls, *args, **kwargs):
raise NotImplementedError()
@abc.abstractmethod
def pre_switch(self):
raise NotImplementedError()
@abc.abstractmethod
def start(self):
raise NotImplementedError()
|
<commit_before><commit_msg>Refactor of controllers and views into heritage from abstract classes<commit_after>import abc
from PyQt5 import QtWidgets
class BaseWidgetView(QtWidgets.QWidget):
__metaclass__ = abc.ABCMeta
def notify(self, kwargs):
u"""
Name based notification with callable() validation
"""
func_name = kwargs.pop('func')
func = getattr(self, func_name)
if not func:
raise AttributeError('Unknown function to notify '
'{}'.format(func_name))
if not callable(func):
raise RuntimeError('{} is not a function'.format(func_name))
func(**kwargs.pop('data'))
@classmethod
@abc.abstractmethod
def as_view(cls, parent, *args, **kwargs):
raise NotImplementedError()
@abc.abstractmethod
def setup_ui(self):
raise NotImplementedError()
class BaseController(metaclass=abc.ABCMeta):
@classmethod
@abc.abstractmethod
def get_instance(cls, *args, **kwargs):
raise NotImplementedError()
@abc.abstractmethod
def pre_switch(self):
raise NotImplementedError()
@abc.abstractmethod
def start(self):
raise NotImplementedError()
|
|
cf5a47dde195152a3f414e20e075694c7bf5e70e
|
examples/concurrent.py
|
examples/concurrent.py
|
try:
import _path
except NameError:
pass
import spyral
import sys
SIZE = (600, 600)
BG_COLOR = (0, 0, 0)
class StupidSprite(spyral.Sprite, spyral.Actor):
def __init__(self, camera):
spyral.Sprite.__init__(self, camera)
spyral.Actor.__init__(self)
self.image = spyral.Image(size=(10, 10))
self.image.fill((255, 255, 255))
self.pos = (0, 0)
self.anchor = 'center'
def main(self, dt):
right = spyral.Animation('x', spyral.animator.Linear(0, 600), duration = 1.0)
down = spyral.Animation('y', spyral.animator.Linear(0, 600), duration = 1.0)
left = spyral.Animation('x', spyral.animator.Linear(600, 0), duration = 1.0)
up = spyral.Animation('y', spyral.animator.Linear(600, 0), duration = 1.0)
while True:
self.run_animation(right)
self.run_animation(down)
self.run_animation(left)
self.run_animation(up)
class Game(spyral.Scene):
def __init__(self):
spyral.Scene.__init__(self)
self.clock.max_ups = 60.
self.camera = self.parent_camera.make_child(SIZE)
self.initialized = False
def add_new_box():
StupidSprite(self.camera)
add_new_box()
self.register('system.quit', sys.exit)
self.register('input.keyboard.down', add_new_box)
def on_enter(self):
if self.initialized:
return
self.initialized = True
bg = spyral.Image(size=SIZE)
bg.fill(BG_COLOR)
self.camera.set_background(bg)
def render(self):
self.camera.draw()
if __name__ == "__main__":
spyral.init()
spyral.director.init(SIZE)
spyral.director.push(Game())
spyral.director.run()
|
Add a simple actor example.
|
Add a simple actor example.
Signed-off-by: Robert Deaton <eb00a885478926d5d594195591fb94a03acb1062@udel.edu>
|
Python
|
lgpl-2.1
|
platipy/spyral
|
Add a simple actor example.
Signed-off-by: Robert Deaton <eb00a885478926d5d594195591fb94a03acb1062@udel.edu>
|
try:
import _path
except NameError:
pass
import spyral
import sys
SIZE = (600, 600)
BG_COLOR = (0, 0, 0)
class StupidSprite(spyral.Sprite, spyral.Actor):
def __init__(self, camera):
spyral.Sprite.__init__(self, camera)
spyral.Actor.__init__(self)
self.image = spyral.Image(size=(10, 10))
self.image.fill((255, 255, 255))
self.pos = (0, 0)
self.anchor = 'center'
def main(self, dt):
right = spyral.Animation('x', spyral.animator.Linear(0, 600), duration = 1.0)
down = spyral.Animation('y', spyral.animator.Linear(0, 600), duration = 1.0)
left = spyral.Animation('x', spyral.animator.Linear(600, 0), duration = 1.0)
up = spyral.Animation('y', spyral.animator.Linear(600, 0), duration = 1.0)
while True:
self.run_animation(right)
self.run_animation(down)
self.run_animation(left)
self.run_animation(up)
class Game(spyral.Scene):
def __init__(self):
spyral.Scene.__init__(self)
self.clock.max_ups = 60.
self.camera = self.parent_camera.make_child(SIZE)
self.initialized = False
def add_new_box():
StupidSprite(self.camera)
add_new_box()
self.register('system.quit', sys.exit)
self.register('input.keyboard.down', add_new_box)
def on_enter(self):
if self.initialized:
return
self.initialized = True
bg = spyral.Image(size=SIZE)
bg.fill(BG_COLOR)
self.camera.set_background(bg)
def render(self):
self.camera.draw()
if __name__ == "__main__":
spyral.init()
spyral.director.init(SIZE)
spyral.director.push(Game())
spyral.director.run()
|
<commit_before><commit_msg>Add a simple actor example.
Signed-off-by: Robert Deaton <eb00a885478926d5d594195591fb94a03acb1062@udel.edu><commit_after>
|
try:
import _path
except NameError:
pass
import spyral
import sys
SIZE = (600, 600)
BG_COLOR = (0, 0, 0)
class StupidSprite(spyral.Sprite, spyral.Actor):
def __init__(self, camera):
spyral.Sprite.__init__(self, camera)
spyral.Actor.__init__(self)
self.image = spyral.Image(size=(10, 10))
self.image.fill((255, 255, 255))
self.pos = (0, 0)
self.anchor = 'center'
def main(self, dt):
right = spyral.Animation('x', spyral.animator.Linear(0, 600), duration = 1.0)
down = spyral.Animation('y', spyral.animator.Linear(0, 600), duration = 1.0)
left = spyral.Animation('x', spyral.animator.Linear(600, 0), duration = 1.0)
up = spyral.Animation('y', spyral.animator.Linear(600, 0), duration = 1.0)
while True:
self.run_animation(right)
self.run_animation(down)
self.run_animation(left)
self.run_animation(up)
class Game(spyral.Scene):
def __init__(self):
spyral.Scene.__init__(self)
self.clock.max_ups = 60.
self.camera = self.parent_camera.make_child(SIZE)
self.initialized = False
def add_new_box():
StupidSprite(self.camera)
add_new_box()
self.register('system.quit', sys.exit)
self.register('input.keyboard.down', add_new_box)
def on_enter(self):
if self.initialized:
return
self.initialized = True
bg = spyral.Image(size=SIZE)
bg.fill(BG_COLOR)
self.camera.set_background(bg)
def render(self):
self.camera.draw()
if __name__ == "__main__":
spyral.init()
spyral.director.init(SIZE)
spyral.director.push(Game())
spyral.director.run()
|
Add a simple actor example.
Signed-off-by: Robert Deaton <eb00a885478926d5d594195591fb94a03acb1062@udel.edu>
try:
import _path
except NameError:
pass
import spyral
import sys
SIZE = (600, 600)
BG_COLOR = (0, 0, 0)
class StupidSprite(spyral.Sprite, spyral.Actor):
def __init__(self, camera):
spyral.Sprite.__init__(self, camera)
spyral.Actor.__init__(self)
self.image = spyral.Image(size=(10, 10))
self.image.fill((255, 255, 255))
self.pos = (0, 0)
self.anchor = 'center'
def main(self, dt):
right = spyral.Animation('x', spyral.animator.Linear(0, 600), duration = 1.0)
down = spyral.Animation('y', spyral.animator.Linear(0, 600), duration = 1.0)
left = spyral.Animation('x', spyral.animator.Linear(600, 0), duration = 1.0)
up = spyral.Animation('y', spyral.animator.Linear(600, 0), duration = 1.0)
while True:
self.run_animation(right)
self.run_animation(down)
self.run_animation(left)
self.run_animation(up)
class Game(spyral.Scene):
def __init__(self):
spyral.Scene.__init__(self)
self.clock.max_ups = 60.
self.camera = self.parent_camera.make_child(SIZE)
self.initialized = False
def add_new_box():
StupidSprite(self.camera)
add_new_box()
self.register('system.quit', sys.exit)
self.register('input.keyboard.down', add_new_box)
def on_enter(self):
if self.initialized:
return
self.initialized = True
bg = spyral.Image(size=SIZE)
bg.fill(BG_COLOR)
self.camera.set_background(bg)
def render(self):
self.camera.draw()
if __name__ == "__main__":
spyral.init()
spyral.director.init(SIZE)
spyral.director.push(Game())
spyral.director.run()
|
<commit_before><commit_msg>Add a simple actor example.
Signed-off-by: Robert Deaton <eb00a885478926d5d594195591fb94a03acb1062@udel.edu><commit_after>try:
import _path
except NameError:
pass
import spyral
import sys
SIZE = (600, 600)
BG_COLOR = (0, 0, 0)
class StupidSprite(spyral.Sprite, spyral.Actor):
def __init__(self, camera):
spyral.Sprite.__init__(self, camera)
spyral.Actor.__init__(self)
self.image = spyral.Image(size=(10, 10))
self.image.fill((255, 255, 255))
self.pos = (0, 0)
self.anchor = 'center'
def main(self, dt):
right = spyral.Animation('x', spyral.animator.Linear(0, 600), duration = 1.0)
down = spyral.Animation('y', spyral.animator.Linear(0, 600), duration = 1.0)
left = spyral.Animation('x', spyral.animator.Linear(600, 0), duration = 1.0)
up = spyral.Animation('y', spyral.animator.Linear(600, 0), duration = 1.0)
while True:
self.run_animation(right)
self.run_animation(down)
self.run_animation(left)
self.run_animation(up)
class Game(spyral.Scene):
def __init__(self):
spyral.Scene.__init__(self)
self.clock.max_ups = 60.
self.camera = self.parent_camera.make_child(SIZE)
self.initialized = False
def add_new_box():
StupidSprite(self.camera)
add_new_box()
self.register('system.quit', sys.exit)
self.register('input.keyboard.down', add_new_box)
def on_enter(self):
if self.initialized:
return
self.initialized = True
bg = spyral.Image(size=SIZE)
bg.fill(BG_COLOR)
self.camera.set_background(bg)
def render(self):
self.camera.draw()
if __name__ == "__main__":
spyral.init()
spyral.director.init(SIZE)
spyral.director.push(Game())
spyral.director.run()
|
|
6c7f0006fd6324c544eda48fb9d7dd1316d39e7a
|
samples/forms.py
|
samples/forms.py
|
import datetime #for checking renewal date range.
from django import forms
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from .models import (Patient, AdmissionNote, FluVaccine,
CollectionType, CollectedSample,
Symptom, ObservedSymptom,
)
class AdmissionNoteForm(forms.ModelForm):
class Meta:
model = AdmissionNote
fields = [
'id_gal_origin',
]
class PatientForm(forms.ModelForm):
class Meta:
model = Patient
fields = [
'name',
]
class FluVaccineForm(forms.ModelForm):
class Meta:
model = FluVaccine
fields = [
'was_applied',
]
|
import datetime #for checking renewal date range.
from django import forms
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from .models import (Patient, AdmissionNote, FluVaccine,
CollectionType, CollectedSample,
Symptom, ObservedSymptom,
)
class AdmissionNoteForm(forms.ModelForm):
class Meta:
model = AdmissionNote
fields = [
'id_gal_origin',
]
class PatientForm(forms.ModelForm):
class Meta:
model = Patient
fields = [
'name',
]
class FluVaccineForm(forms.ModelForm):
class Meta:
model = FluVaccine
fields = [
'was_applied',
]
widgets = {
'was_applied': forms.RadioSelect(
choices=(
(True, "Sim"), (False, "Não"), (None, "Ignorado"),
),
),
}
|
Change Vaccine was_applied widget to RadioSelect
|
:rocket: Change Vaccine was_applied widget to RadioSelect
|
Python
|
mit
|
gems-uff/labsys,gcrsaldanha/fiocruz,gcrsaldanha/fiocruz,gems-uff/labsys,gems-uff/labsys
|
import datetime #for checking renewal date range.
from django import forms
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from .models import (Patient, AdmissionNote, FluVaccine,
CollectionType, CollectedSample,
Symptom, ObservedSymptom,
)
class AdmissionNoteForm(forms.ModelForm):
class Meta:
model = AdmissionNote
fields = [
'id_gal_origin',
]
class PatientForm(forms.ModelForm):
class Meta:
model = Patient
fields = [
'name',
]
class FluVaccineForm(forms.ModelForm):
class Meta:
model = FluVaccine
fields = [
'was_applied',
]
:rocket: Change Vaccine was_applied widget to RadioSelect
|
import datetime #for checking renewal date range.
from django import forms
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from .models import (Patient, AdmissionNote, FluVaccine,
CollectionType, CollectedSample,
Symptom, ObservedSymptom,
)
class AdmissionNoteForm(forms.ModelForm):
class Meta:
model = AdmissionNote
fields = [
'id_gal_origin',
]
class PatientForm(forms.ModelForm):
class Meta:
model = Patient
fields = [
'name',
]
class FluVaccineForm(forms.ModelForm):
class Meta:
model = FluVaccine
fields = [
'was_applied',
]
widgets = {
'was_applied': forms.RadioSelect(
choices=(
(True, "Sim"), (False, "Não"), (None, "Ignorado"),
),
),
}
|
<commit_before>import datetime #for checking renewal date range.
from django import forms
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from .models import (Patient, AdmissionNote, FluVaccine,
CollectionType, CollectedSample,
Symptom, ObservedSymptom,
)
class AdmissionNoteForm(forms.ModelForm):
class Meta:
model = AdmissionNote
fields = [
'id_gal_origin',
]
class PatientForm(forms.ModelForm):
class Meta:
model = Patient
fields = [
'name',
]
class FluVaccineForm(forms.ModelForm):
class Meta:
model = FluVaccine
fields = [
'was_applied',
]
<commit_msg>:rocket: Change Vaccine was_applied widget to RadioSelect<commit_after>
|
import datetime #for checking renewal date range.
from django import forms
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from .models import (Patient, AdmissionNote, FluVaccine,
CollectionType, CollectedSample,
Symptom, ObservedSymptom,
)
class AdmissionNoteForm(forms.ModelForm):
class Meta:
model = AdmissionNote
fields = [
'id_gal_origin',
]
class PatientForm(forms.ModelForm):
class Meta:
model = Patient
fields = [
'name',
]
class FluVaccineForm(forms.ModelForm):
class Meta:
model = FluVaccine
fields = [
'was_applied',
]
widgets = {
'was_applied': forms.RadioSelect(
choices=(
(True, "Sim"), (False, "Não"), (None, "Ignorado"),
),
),
}
|
import datetime #for checking renewal date range.
from django import forms
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from .models import (Patient, AdmissionNote, FluVaccine,
CollectionType, CollectedSample,
Symptom, ObservedSymptom,
)
class AdmissionNoteForm(forms.ModelForm):
class Meta:
model = AdmissionNote
fields = [
'id_gal_origin',
]
class PatientForm(forms.ModelForm):
class Meta:
model = Patient
fields = [
'name',
]
class FluVaccineForm(forms.ModelForm):
class Meta:
model = FluVaccine
fields = [
'was_applied',
]
:rocket: Change Vaccine was_applied widget to RadioSelect
import datetime #for checking renewal date range.
from django import forms
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from .models import (Patient, AdmissionNote, FluVaccine,
CollectionType, CollectedSample,
Symptom, ObservedSymptom,
)
class AdmissionNoteForm(forms.ModelForm):
class Meta:
model = AdmissionNote
fields = [
'id_gal_origin',
]
class PatientForm(forms.ModelForm):
class Meta:
model = Patient
fields = [
'name',
]
class FluVaccineForm(forms.ModelForm):
class Meta:
model = FluVaccine
fields = [
'was_applied',
]
widgets = {
'was_applied': forms.RadioSelect(
choices=(
(True, "Sim"), (False, "Não"), (None, "Ignorado"),
),
),
}
|
<commit_before>import datetime #for checking renewal date range.
from django import forms
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from .models import (Patient, AdmissionNote, FluVaccine,
CollectionType, CollectedSample,
Symptom, ObservedSymptom,
)
class AdmissionNoteForm(forms.ModelForm):
class Meta:
model = AdmissionNote
fields = [
'id_gal_origin',
]
class PatientForm(forms.ModelForm):
class Meta:
model = Patient
fields = [
'name',
]
class FluVaccineForm(forms.ModelForm):
class Meta:
model = FluVaccine
fields = [
'was_applied',
]
<commit_msg>:rocket: Change Vaccine was_applied widget to RadioSelect<commit_after>import datetime #for checking renewal date range.
from django import forms
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from .models import (Patient, AdmissionNote, FluVaccine,
CollectionType, CollectedSample,
Symptom, ObservedSymptom,
)
class AdmissionNoteForm(forms.ModelForm):
class Meta:
model = AdmissionNote
fields = [
'id_gal_origin',
]
class PatientForm(forms.ModelForm):
class Meta:
model = Patient
fields = [
'name',
]
class FluVaccineForm(forms.ModelForm):
class Meta:
model = FluVaccine
fields = [
'was_applied',
]
widgets = {
'was_applied': forms.RadioSelect(
choices=(
(True, "Sim"), (False, "Não"), (None, "Ignorado"),
),
),
}
|
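A hedged aside on the RadioSelect change above: in a Django ModelForm, Meta.widgets only swaps the rendering widget, while validation still comes from the model field. The standalone sketch below is not code from the repo (the form name and the nullable-boolean field type are assumptions), but it shows the same three-way radio choices on a plain form:

# Hedged sketch, not part of the commit above; names and field type are assumed.
from django import forms

RADIO_CHOICES = ((True, "Sim"), (False, "Não"), (None, "Ignorado"))

class FluVaccineSketchForm(forms.Form):
    # NullBooleanField accepts True/False/None, matching the three radio options.
    was_applied = forms.NullBooleanField(
        required=False,
        widget=forms.RadioSelect(choices=RADIO_CHOICES),
    )

# The widget keeps the labels; rendering the bound field emits three radio inputs.
print(FluVaccineSketchForm.base_fields["was_applied"].widget.choices)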
6523578278f93a549b37ca448d7e5140337c1e8d
|
contentcuration/contentcuration/migrations/0103_auto_20190905_0408.py
|
contentcuration/contentcuration/migrations/0103_auto_20190905_0408.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-09-05 04:08
from __future__ import unicode_literals
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
dependencies = [
('contentcuration', '0102_auto_20190904_1627'),
]
operations = [
migrations.AlterField(
model_name='fileformat',
name='extension',
field=models.CharField(choices=[(b'mp4', b'MP4 Video'), (b'vtt', b'VTT Subtitle'), (b'mp3', b'MP3 Audio'), (b'pdf', b'PDF Document'), (b'jpg', b'JPG Image'), (b'jpeg', b'JPEG Image'), (b'png', b'PNG Image'), (b'gif', b'GIF Image'), (b'json', b'JSON'), (b'svg', b'SVG Image'), (b'perseus', b'Perseus Exercise'), (b'graphie', b'Graphie Exercise'), (b'zip', b'HTML5 Zip'), (b'h5p', b'H5P'), (b'epub', b'ePub Document')], max_length=40, primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='formatpreset',
name='id',
field=models.CharField(choices=[(b'high_res_video', b'High Resolution'), (b'low_res_video', b'Low Resolution'), (b'video_thumbnail', b'Thumbnail'), (b'video_subtitle', b'Subtitle'), (b'video_dependency', b'Video (dependency)'), (b'audio', b'Audio'), (b'audio_thumbnail', b'Thumbnail'), (b'document', b'Document'), (b'epub', b'ePub Document'), (b'document_thumbnail', b'Thumbnail'), (b'exercise', b'Exercise'), (b'exercise_thumbnail', b'Thumbnail'), (b'exercise_image', b'Exercise Image'), (b'exercise_graphie', b'Exercise Graphie'), (b'channel_thumbnail', b'Channel Thumbnail'), (b'topic_thumbnail', b'Thumbnail'), (b'html5_zip', b'HTML5 Zip'), (b'html5_dependency', b'HTML5 Dependency (Zip format)'), (b'html5_thumbnail', b'HTML5 Thumbnail'), (b'h5p', b'H5P Zip'), (b'h5p_thumbnail', b'H5P Thumbnail'), (b'slideshow_image', b'Slideshow Image'), (b'slideshow_thumbnail', b'Slideshow Thumbnail'), (b'slideshow_manifest', b'Slideshow Manifest')], max_length=150, primary_key=True, serialize=False),
),
]
|
Add migration for le-utils update.
|
Add migration for le-utils update.
|
Python
|
mit
|
DXCanas/content-curation,DXCanas/content-curation,DXCanas/content-curation,DXCanas/content-curation
|
Add migration for le-utils update.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-09-05 04:08
from __future__ import unicode_literals
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
dependencies = [
('contentcuration', '0102_auto_20190904_1627'),
]
operations = [
migrations.AlterField(
model_name='fileformat',
name='extension',
field=models.CharField(choices=[(b'mp4', b'MP4 Video'), (b'vtt', b'VTT Subtitle'), (b'mp3', b'MP3 Audio'), (b'pdf', b'PDF Document'), (b'jpg', b'JPG Image'), (b'jpeg', b'JPEG Image'), (b'png', b'PNG Image'), (b'gif', b'GIF Image'), (b'json', b'JSON'), (b'svg', b'SVG Image'), (b'perseus', b'Perseus Exercise'), (b'graphie', b'Graphie Exercise'), (b'zip', b'HTML5 Zip'), (b'h5p', b'H5P'), (b'epub', b'ePub Document')], max_length=40, primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='formatpreset',
name='id',
field=models.CharField(choices=[(b'high_res_video', b'High Resolution'), (b'low_res_video', b'Low Resolution'), (b'video_thumbnail', b'Thumbnail'), (b'video_subtitle', b'Subtitle'), (b'video_dependency', b'Video (dependency)'), (b'audio', b'Audio'), (b'audio_thumbnail', b'Thumbnail'), (b'document', b'Document'), (b'epub', b'ePub Document'), (b'document_thumbnail', b'Thumbnail'), (b'exercise', b'Exercise'), (b'exercise_thumbnail', b'Thumbnail'), (b'exercise_image', b'Exercise Image'), (b'exercise_graphie', b'Exercise Graphie'), (b'channel_thumbnail', b'Channel Thumbnail'), (b'topic_thumbnail', b'Thumbnail'), (b'html5_zip', b'HTML5 Zip'), (b'html5_dependency', b'HTML5 Dependency (Zip format)'), (b'html5_thumbnail', b'HTML5 Thumbnail'), (b'h5p', b'H5P Zip'), (b'h5p_thumbnail', b'H5P Thumbnail'), (b'slideshow_image', b'Slideshow Image'), (b'slideshow_thumbnail', b'Slideshow Thumbnail'), (b'slideshow_manifest', b'Slideshow Manifest')], max_length=150, primary_key=True, serialize=False),
),
]
|
<commit_before><commit_msg>Add migration for le-utils update.<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-09-05 04:08
from __future__ import unicode_literals
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
dependencies = [
('contentcuration', '0102_auto_20190904_1627'),
]
operations = [
migrations.AlterField(
model_name='fileformat',
name='extension',
field=models.CharField(choices=[(b'mp4', b'MP4 Video'), (b'vtt', b'VTT Subtitle'), (b'mp3', b'MP3 Audio'), (b'pdf', b'PDF Document'), (b'jpg', b'JPG Image'), (b'jpeg', b'JPEG Image'), (b'png', b'PNG Image'), (b'gif', b'GIF Image'), (b'json', b'JSON'), (b'svg', b'SVG Image'), (b'perseus', b'Perseus Exercise'), (b'graphie', b'Graphie Exercise'), (b'zip', b'HTML5 Zip'), (b'h5p', b'H5P'), (b'epub', b'ePub Document')], max_length=40, primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='formatpreset',
name='id',
field=models.CharField(choices=[(b'high_res_video', b'High Resolution'), (b'low_res_video', b'Low Resolution'), (b'video_thumbnail', b'Thumbnail'), (b'video_subtitle', b'Subtitle'), (b'video_dependency', b'Video (dependency)'), (b'audio', b'Audio'), (b'audio_thumbnail', b'Thumbnail'), (b'document', b'Document'), (b'epub', b'ePub Document'), (b'document_thumbnail', b'Thumbnail'), (b'exercise', b'Exercise'), (b'exercise_thumbnail', b'Thumbnail'), (b'exercise_image', b'Exercise Image'), (b'exercise_graphie', b'Exercise Graphie'), (b'channel_thumbnail', b'Channel Thumbnail'), (b'topic_thumbnail', b'Thumbnail'), (b'html5_zip', b'HTML5 Zip'), (b'html5_dependency', b'HTML5 Dependency (Zip format)'), (b'html5_thumbnail', b'HTML5 Thumbnail'), (b'h5p', b'H5P Zip'), (b'h5p_thumbnail', b'H5P Thumbnail'), (b'slideshow_image', b'Slideshow Image'), (b'slideshow_thumbnail', b'Slideshow Thumbnail'), (b'slideshow_manifest', b'Slideshow Manifest')], max_length=150, primary_key=True, serialize=False),
),
]
|
Add migration for le-utils update.
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-09-05 04:08
from __future__ import unicode_literals
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
dependencies = [
('contentcuration', '0102_auto_20190904_1627'),
]
operations = [
migrations.AlterField(
model_name='fileformat',
name='extension',
field=models.CharField(choices=[(b'mp4', b'MP4 Video'), (b'vtt', b'VTT Subtitle'), (b'mp3', b'MP3 Audio'), (b'pdf', b'PDF Document'), (b'jpg', b'JPG Image'), (b'jpeg', b'JPEG Image'), (b'png', b'PNG Image'), (b'gif', b'GIF Image'), (b'json', b'JSON'), (b'svg', b'SVG Image'), (b'perseus', b'Perseus Exercise'), (b'graphie', b'Graphie Exercise'), (b'zip', b'HTML5 Zip'), (b'h5p', b'H5P'), (b'epub', b'ePub Document')], max_length=40, primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='formatpreset',
name='id',
field=models.CharField(choices=[(b'high_res_video', b'High Resolution'), (b'low_res_video', b'Low Resolution'), (b'video_thumbnail', b'Thumbnail'), (b'video_subtitle', b'Subtitle'), (b'video_dependency', b'Video (dependency)'), (b'audio', b'Audio'), (b'audio_thumbnail', b'Thumbnail'), (b'document', b'Document'), (b'epub', b'ePub Document'), (b'document_thumbnail', b'Thumbnail'), (b'exercise', b'Exercise'), (b'exercise_thumbnail', b'Thumbnail'), (b'exercise_image', b'Exercise Image'), (b'exercise_graphie', b'Exercise Graphie'), (b'channel_thumbnail', b'Channel Thumbnail'), (b'topic_thumbnail', b'Thumbnail'), (b'html5_zip', b'HTML5 Zip'), (b'html5_dependency', b'HTML5 Dependency (Zip format)'), (b'html5_thumbnail', b'HTML5 Thumbnail'), (b'h5p', b'H5P Zip'), (b'h5p_thumbnail', b'H5P Thumbnail'), (b'slideshow_image', b'Slideshow Image'), (b'slideshow_thumbnail', b'Slideshow Thumbnail'), (b'slideshow_manifest', b'Slideshow Manifest')], max_length=150, primary_key=True, serialize=False),
),
]
|
<commit_before><commit_msg>Add migration for le-utils update.<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-09-05 04:08
from __future__ import unicode_literals
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
dependencies = [
('contentcuration', '0102_auto_20190904_1627'),
]
operations = [
migrations.AlterField(
model_name='fileformat',
name='extension',
field=models.CharField(choices=[(b'mp4', b'MP4 Video'), (b'vtt', b'VTT Subtitle'), (b'mp3', b'MP3 Audio'), (b'pdf', b'PDF Document'), (b'jpg', b'JPG Image'), (b'jpeg', b'JPEG Image'), (b'png', b'PNG Image'), (b'gif', b'GIF Image'), (b'json', b'JSON'), (b'svg', b'SVG Image'), (b'perseus', b'Perseus Exercise'), (b'graphie', b'Graphie Exercise'), (b'zip', b'HTML5 Zip'), (b'h5p', b'H5P'), (b'epub', b'ePub Document')], max_length=40, primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='formatpreset',
name='id',
field=models.CharField(choices=[(b'high_res_video', b'High Resolution'), (b'low_res_video', b'Low Resolution'), (b'video_thumbnail', b'Thumbnail'), (b'video_subtitle', b'Subtitle'), (b'video_dependency', b'Video (dependency)'), (b'audio', b'Audio'), (b'audio_thumbnail', b'Thumbnail'), (b'document', b'Document'), (b'epub', b'ePub Document'), (b'document_thumbnail', b'Thumbnail'), (b'exercise', b'Exercise'), (b'exercise_thumbnail', b'Thumbnail'), (b'exercise_image', b'Exercise Image'), (b'exercise_graphie', b'Exercise Graphie'), (b'channel_thumbnail', b'Channel Thumbnail'), (b'topic_thumbnail', b'Thumbnail'), (b'html5_zip', b'HTML5 Zip'), (b'html5_dependency', b'HTML5 Dependency (Zip format)'), (b'html5_thumbnail', b'HTML5 Thumbnail'), (b'h5p', b'H5P Zip'), (b'h5p_thumbnail', b'H5P Thumbnail'), (b'slideshow_image', b'Slideshow Image'), (b'slideshow_thumbnail', b'Slideshow Thumbnail'), (b'slideshow_manifest', b'Slideshow Manifest')], max_length=150, primary_key=True, serialize=False),
),
]
|
|
0bd2d9c3bcbb1471208d2d05a6fb50137d6ad89b
|
sli/config/scripts/index-misses.py
|
sli/config/scripts/index-misses.py
|
# use sli
# db.system.profile.drop()
# db.createCollection("system.profile", {capped:true, size:1024000000});
# db.setProfilingLevel(2)
#
# mongodump --db sli --collection system.profile
# bsondump dump/sli/system.profile.bson > system.profile.json
# python index-misses.py system.profile.json
import sys
import json
import re
queries = []
missed = 0
if len(sys.argv) > 1:
jsonfile = open(sys.argv[1])
queries = jsonfile.read().splitlines()
else:
print "python index-misses.py system.profile.json"
exit(0)
def missed_index(json_object):
return 'nscanned' in json_object and 'nreturned' in json_object and json_object['nscanned'] > json_object['nreturned']
# iterate through lines of json, looking for query operations, ignoring
# sli.system and sli.custom
for query in queries:
# reformat the date, don't care about the actual value
query = re.sub(r'Date\(\s\d*\s\)', '"date"', query)
if 'sli.system' in query or 'sli.custom' in query:
continue
json_object = json.loads(query)
if missed_index(json_object):
missed += 1
print query
print "\n----------------------------------------"
print "Missed index on %d of %d commands." % (missed, len(queries))
|
Add script to analyze mongo profile and determine index misses.
|
Add script to analyze mongo profile and determine index misses.
|
Python
|
apache-2.0
|
inbloom/secure-data-service,inbloom/secure-data-service,inbloom/secure-data-service,inbloom/secure-data-service,inbloom/secure-data-service
|
Add script to analyze mongo profile and determine index misses.
|
# use sli
# db.system.profile.drop()
# db.createCollection("system.profile", {capped:true, size:1024000000});
# db.setProfilingLevel(2)
#
# mongodump --db sli --collection system.profile
# bsondump dump/sli/system.profile.bson > system.profile.json
# python index-misses.py system.profile.json
import sys
import json
import re
queries = []
missed = 0
if len(sys.argv) > 1:
jsonfile = open(sys.argv[1])
queries = jsonfile.read().splitlines()
else:
print "python index-misses.py system.profile.json"
exit(0)
def missed_index(json_object):
return 'nscanned' in json_object and 'nreturned' in json_object and json_object['nscanned'] > json_object['nreturned']
# iterate through lines of json, looking for query operations, ignoring
# sli.system and sli.custom
for query in queries:
# reformat the date, don't care about the actual value
query = re.sub(r'Date\(\s\d*\s\)', '"date"', query)
if 'sli.system' in query or 'sli.custom' in query:
continue
json_object = json.loads(query)
if missed_index(json_object):
missed += 1
print query
print "\n----------------------------------------"
print "Missed index on %d of %d commands." % (missed, len(queries))
|
<commit_before><commit_msg>Add script to analyze mongo profile and determine index misses.<commit_after>
|
# use sli
# db.system.profile.drop()
# db.createCollection("system.profile", {capped:true, size:1024000000});
# db.setProfilingLevel(2)
#
# mongodump --db sli --collection system.profile
# bsondump dump/sli/system.profile.bson > system.profile.json
# python index-misses.py system.profile.json
import sys
import json
import re
queries = []
missed = 0
if len(sys.argv) > 1:
jsonfile = open(sys.argv[1])
queries = jsonfile.read().splitlines()
else:
print "python index-misses.py system.profile.json"
exit(0)
def missed_index(json_object):
return 'nscanned' in json_object and 'nreturned' in json_object and json_object['nscanned'] > json_object['nreturned']
# iterate through lines of json, looking for query operations, ignoring
# sli.system and sli.custom
for query in queries:
# reformat the date, don't care about the actual value
query = re.sub(r'Date\(\s\d*\s\)', '"date"', query)
if 'sli.system' in query or 'sli.custom' in query:
continue
json_object = json.loads(query)
if missed_index(json_object):
missed += 1
print query
print "\n----------------------------------------"
print "Missed index on %d of %d commands." % (missed, len(queries))
|
Add script to analyze mongo profile and determine index misses.
# use sli
# db.system.profile.drop()
# db.createCollection("system.profile", {capped:true, size:1024000000});
# db.setProfilingLevel(2)
#
# mongodump --db sli --collection system.profile
# bsondump dump/sli/system.profile.bson > system.profile.json
# python index-misses.py system.profile.json
import sys
import json
import re
queries = []
missed = 0
if len(sys.argv) > 1:
jsonfile = open(sys.argv[1])
queries = jsonfile.read().splitlines()
else:
print "python index-misses.py system.profile.json"
exit(0)
def missed_index(json_object):
return 'nscanned' in json_object and 'nreturned' in json_object and json_object['nscanned'] > json_object['nreturned']
# iterate through lines of json, looking for query operations, ignoring
# sli.system and sli.custom
for query in queries:
# reformat the date, don't care about the actual value
query = re.sub(r'Date\(\s\d*\s\)', '"date"', query)
if 'sli.system' in query or 'sli.custom' in query:
continue
json_object = json.loads(query)
if missed_index(json_object):
missed += 1
print query
print "\n----------------------------------------"
print "Missed index on %d of %d commands." % (missed, len(queries))
|
<commit_before><commit_msg>Add script to analyze mongo profile and determine index misses.<commit_after># use sli
# db.system.profile.drop()
# db.createCollection("system.profile", {capped:true, size:1024000000});
# db.setProfilingLevel(2)
#
# mongodump --db sli --collection system.profile
# bsondump dump/sli/system.profile.bson > system.profile.json
# python index-misses.py system.profile.json
import sys
import json
import re
queries = []
missed = 0
if len(sys.argv) > 1:
jsonfile = open(sys.argv[1])
queries = jsonfile.read().splitlines()
else:
print "python index-misses.py system.profile.json"
exit(0)
def missed_index(json_object):
return 'nscanned' in json_object and 'nreturned' in json_object and json_object['nscanned'] > json_object['nreturned']
# iterate through lines of json, looking for query operations, ignoring
# sli.system and sli.custom
for query in queries:
# reformat the date, don't care about the actual value
query = re.sub(r'Date\(\s\d*\s\)', '"date"', query)
if 'sli.system' in query or 'sli.custom' in query:
continue
json_object = json.loads(query)
if missed_index(json_object):
missed += 1
print query
print "\n----------------------------------------"
print "Missed index on %d of %d commands." % (missed, len(queries))
|
|
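For anyone reproducing the profiling recipe above under Python 3, the heuristic boils down to nscanned > nreturned per profile entry. A minimal sketch of the same check, assuming one JSON document per line as bsondump emits (this is an adaptation, not the script from the commit):

# Rough Python 3 equivalent of the index-miss check above (a sketch, not from the repo).
import json
import re
import sys

missed = total = 0
with open(sys.argv[1]) as fh:  # e.g. system.profile.json produced by bsondump
    for line in fh:
        # same date scrubbing as the original script
        line = re.sub(r'Date\(\s\d*\s\)', '"date"', line)
        if 'sli.system' in line or 'sli.custom' in line:
            continue
        doc = json.loads(line)
        total += 1
        if doc.get('nscanned', 0) > doc.get('nreturned', 0):
            missed += 1
print(f"Missed index on {missed} of {total} commands.")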
2c83148a2de978382875e6b474c0b3d899a7c5dc
|
swift/__init__.py
|
swift/__init__.py
|
import gettext
class Version(object):
def __init__(self, canonical_version, final):
self.canonical_version = canonical_version
self.final = final
@property
def pretty_version(self):
if self.final:
return self.canonical_version
else:
return '%s-dev' % (self.canonical_version,)
_version = Version('1.4.2', False)
__version__ = _version.pretty_version
__canonical_version__ = _version.canonical_version
gettext.install('swift')
|
import gettext
class Version(object):
def __init__(self, canonical_version, final):
self.canonical_version = canonical_version
self.final = final
@property
def pretty_version(self):
if self.final:
return self.canonical_version
else:
return '%s-dev' % (self.canonical_version,)
_version = Version('1.4.3', False)
__version__ = _version.pretty_version
__canonical_version__ = _version.canonical_version
gettext.install('swift')
|
Switch trunk to 1.4.3 development
|
Switch trunk to 1.4.3 development
|
Python
|
apache-2.0
|
mja054/swift_plugin,clayg/swift,notmyname/swift,clayg/swift,thiagodasilva/swift,Triv90/SwiftUml,aerwin3/swift,nadeemsyed/swift,Khushbu27/Tutorial,nadeemsyed/swift,tsli/test,ceph/swift,notmyname/swift,dpgoetz/swift,revoer/keystone-8.0.0,revoer/keystone-8.0.0,eatbyte/Swift,aerwin3/swift,SUSE/swift,tsli/test,thiagodasilva/swift,JioCloud/swift,mjwtom/swift,tipabu/swift,NeCTAR-RC/swift,psachin/swift,Akanoa/swift,shibaniahegde/OpenStak_swift,hbhdytf/mac2,daasbank/swift,hbhdytf/mac2,citrix-openstack-build/swift,houseurmusic/my-swift,VictorLowther/swift,notmyname/swift,bouncestorage/swift,gold3bear/swift,ceph/swift,openstack/swift,daasbank/swift,nadeemsyed/swift,openstack/swift,eatbyte/Swift,tipabu/swift,bkolli/swift,iostackproject/IO-Bandwidth-Differentiation,openstack/swift,bkolli/swift,anishnarang/gswift,psachin/swift,NeCTAR-RC/swift,smerritt/swift,clayg/swift,Khushbu27/Tutorial,Intel-bigdata/swift,wenhuizhang/swift,orion/swift-config,levythu/swift,AfonsoFGarcia/swift,mjwtom/swift,sarvesh-ranjan/swift,hurricanerix/swift,redbo/swift,dencaval/swift,openstack/swift,citrix-openstack/build-swift,matthewoliver/swift,zaitcev/swift-lfs,prashanthpai/swift,xiaoguoai/ec-dev-swift,wenhuizhang/swift,larsbutler/swift,dencaval/swift,Akanoa/swift,tipabu/swift,psachin/swift,hbhdytf/mac,zackmdavis/swift,Mirantis/swift-encrypt,psachin/swift,scality/ScalitySproxydSwift,williamthegrey/swift,rackerlabs/swift,smerritt/swift,iostackproject/IO-Bandwidth-Differentiation,orion/swift-config,zaitcev/swift-lfs,hbhdytf/mac2,JioCloud/swift,hurricanerix/swift,NewpTone/StackLab-swift,IPVL/swift-kilo,Em-Pan/swift,levythu/swift,mjzmjz/swift,hbhdytf/mac2,swiftstack/swift,takeshineshiro/swift,Mirantis/swift-encrypt,redbo/swift,nadeemsyed/swift,mjzmjz/swift,dpgoetz/swift,IPVL/swift-kilo,smerritt/swift,anishnarang/gswift,takeshineshiro/swift,tipabu/swift,AfonsoFGarcia/swift,Em-Pan/swift,hurricanerix/swift,scality/ScalitySproxydSwift,swiftstack/swift,mja054/swift_plugin,Seagate/swift,larsbutler/swift,SUSE/swift,shibaniahegde/OpenStak_swift,notmyname/swift,swiftstack/swift,xiaoguoai/ec-dev-swift,gold3bear/swift,NewpTone/StackLab-swift,sarvesh-ranjan/swift,citrix-openstack-build/swift,mja054/swift_plugin,redhat-openstack/swift,Triv90/SwiftUml,Intel-bigdata/swift,prashanthpai/swift,bradleypj823/swift,maginatics/swift,houseurmusic/my-swift,clayg/swift,hurricanerix/swift,maginatics/swift,VictorLowther/swift,hbhdytf/mac,bradleypj823/swift,williamthegrey/swift,zackmdavis/swift,citrix-openstack/build-swift,redhat-openstack/swift,Seagate/swift,rackerlabs/swift,matthewoliver/swift,bouncestorage/swift,smerritt/swift,matthewoliver/swift,matthewoliver/swift
|
import gettext
class Version(object):
def __init__(self, canonical_version, final):
self.canonical_version = canonical_version
self.final = final
@property
def pretty_version(self):
if self.final:
return self.canonical_version
else:
return '%s-dev' % (self.canonical_version,)
_version = Version('1.4.2', False)
__version__ = _version.pretty_version
__canonical_version__ = _version.canonical_version
gettext.install('swift')
Switch trunk to 1.4.3 development
|
import gettext
class Version(object):
def __init__(self, canonical_version, final):
self.canonical_version = canonical_version
self.final = final
@property
def pretty_version(self):
if self.final:
return self.canonical_version
else:
return '%s-dev' % (self.canonical_version,)
_version = Version('1.4.3', False)
__version__ = _version.pretty_version
__canonical_version__ = _version.canonical_version
gettext.install('swift')
|
<commit_before>import gettext
class Version(object):
def __init__(self, canonical_version, final):
self.canonical_version = canonical_version
self.final = final
@property
def pretty_version(self):
if self.final:
return self.canonical_version
else:
return '%s-dev' % (self.canonical_version,)
_version = Version('1.4.2', False)
__version__ = _version.pretty_version
__canonical_version__ = _version.canonical_version
gettext.install('swift')
<commit_msg>Switch trunk to 1.4.3 development<commit_after>
|
import gettext
class Version(object):
def __init__(self, canonical_version, final):
self.canonical_version = canonical_version
self.final = final
@property
def pretty_version(self):
if self.final:
return self.canonical_version
else:
return '%s-dev' % (self.canonical_version,)
_version = Version('1.4.3', False)
__version__ = _version.pretty_version
__canonical_version__ = _version.canonical_version
gettext.install('swift')
|
import gettext
class Version(object):
def __init__(self, canonical_version, final):
self.canonical_version = canonical_version
self.final = final
@property
def pretty_version(self):
if self.final:
return self.canonical_version
else:
return '%s-dev' % (self.canonical_version,)
_version = Version('1.4.2', False)
__version__ = _version.pretty_version
__canonical_version__ = _version.canonical_version
gettext.install('swift')
Switch trunk to 1.4.3 development
import gettext
class Version(object):
def __init__(self, canonical_version, final):
self.canonical_version = canonical_version
self.final = final
@property
def pretty_version(self):
if self.final:
return self.canonical_version
else:
return '%s-dev' % (self.canonical_version,)
_version = Version('1.4.3', False)
__version__ = _version.pretty_version
__canonical_version__ = _version.canonical_version
gettext.install('swift')
|
<commit_before>import gettext
class Version(object):
def __init__(self, canonical_version, final):
self.canonical_version = canonical_version
self.final = final
@property
def pretty_version(self):
if self.final:
return self.canonical_version
else:
return '%s-dev' % (self.canonical_version,)
_version = Version('1.4.2', False)
__version__ = _version.pretty_version
__canonical_version__ = _version.canonical_version
gettext.install('swift')
<commit_msg>Switch trunk to 1.4.3 development<commit_after>import gettext
class Version(object):
def __init__(self, canonical_version, final):
self.canonical_version = canonical_version
self.final = final
@property
def pretty_version(self):
if self.final:
return self.canonical_version
else:
return '%s-dev' % (self.canonical_version,)
_version = Version('1.4.3', False)
__version__ = _version.pretty_version
__canonical_version__ = _version.canonical_version
gettext.install('swift')
|
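A small illustration of the version-bump mechanics above: pretty_version only appends a -dev suffix until final is flipped to True. A standalone sketch of that behavior (same logic, trimmed of the gettext setup):

# Sketch of the Version helper's behavior; not the packaged module itself.
class Version:
    def __init__(self, canonical_version, final):
        self.canonical_version = canonical_version
        self.final = final

    @property
    def pretty_version(self):
        return self.canonical_version if self.final else f"{self.canonical_version}-dev"

assert Version("1.4.3", False).pretty_version == "1.4.3-dev"
assert Version("1.4.3", True).pretty_version == "1.4.3"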
7930fd15a3077764be4b7f1ab7953dba1a97905e
|
event/example/linkevent.py
|
event/example/linkevent.py
|
#! /usr/local/bin/python
import sys
import time
sys.path.append("/usr/testbed/lib")
from tbevent import *
server = "event-server"
port = None
keyfile = "/proj/emulab-ops/exp/one-node/tbdata/eventkey"
# Construct a regular client. Do this only once.
ec = EventClient(server=server, port=port, url=None, keyfile=keyfile)
#
# Allocate and initialize an address tuple like any other python object.
# You can reuse this tuple.
#
at = address_tuple()
at.objname = "link0"
at.eventtype = "modify"
at.expt = "emulab-ops/one-node"
# ... create our notification from the tuple.
note = ec.create_notification(at)
# Add extra arguments to the notification.
note.setArguments("bandwith=1000 delay=13");
# Schedule the notification for right now.
tval = timeval();
tval.tv_sec = long(time.time())
tval.tv_usec = 0;
# And Fire it.
ec.schedule(note, tval)
# Delete the notification.
del note
|
Add an example of sending a link event written in python.
|
Add an example of sending a link event written in python.
|
Python
|
agpl-3.0
|
nmc-probe/emulab-nome,nmc-probe/emulab-nome,nmc-probe/emulab-nome,nmc-probe/emulab-nome,nmc-probe/emulab-nome,nmc-probe/emulab-nome,nmc-probe/emulab-nome,nmc-probe/emulab-nome,nmc-probe/emulab-nome,nmc-probe/emulab-nome,nmc-probe/emulab-nome
|
Add an example of sending a link event written in python.
|
#! /usr/local/bin/python
import sys
import time
sys.path.append("/usr/testbed/lib")
from tbevent import *
server = "event-server"
port = None
keyfile = "/proj/emulab-ops/exp/one-node/tbdata/eventkey"
# Construct a regular client. Do this only once.
ec = EventClient(server=server, port=port, url=None, keyfile=keyfile)
#
# Allocate and initialize an address tuple like any other python object.
# You can reuse this tuple.
#
at = address_tuple()
at.objname = "link0"
at.eventtype = "modify"
at.expt = "emulab-ops/one-node"
# ... create our notification from the tuple.
note = ec.create_notification(at)
# Add extra arguments to the notification.
note.setArguments("bandwith=1000 delay=13");
# Schedule the notification for right now.
tval = timeval();
tval.tv_sec = long(time.time())
tval.tv_usec = 0;
# And Fire it.
ec.schedule(note, tval)
# Delete the notification.
del note
|
<commit_before><commit_msg>Add an example of sending a link event written in python.<commit_after>
|
#! /usr/local/bin/python
import sys
import time
sys.path.append("/usr/testbed/lib")
from tbevent import *
server = "event-server"
port = None
keyfile = "/proj/emulab-ops/exp/one-node/tbdata/eventkey"
# Construct a regular client. Do this only once.
ec = EventClient(server=server, port=port, url=None, keyfile=keyfile)
#
# Allocate and initialize an address tuple like any other python object.
# You can reuse this tuple.
#
at = address_tuple()
at.objname = "link0"
at.eventtype = "modify"
at.expt = "emulab-ops/one-node"
# ... create our notification from the tuple.
note = ec.create_notification(at)
# Add extra arguments to the notification.
note.setArguments("bandwith=1000 delay=13");
# Schedule the notification for right now.
tval = timeval();
tval.tv_sec = long(time.time())
tval.tv_usec = 0;
# And Fire it.
ec.schedule(note, tval)
# Delete the notification.
del note
|
Add an example of sending a link event written in python.
#! /usr/local/bin/python
import sys
import time
sys.path.append("/usr/testbed/lib")
from tbevent import *
server = "event-server"
port = None
keyfile = "/proj/emulab-ops/exp/one-node/tbdata/eventkey"
# Construct a regular client. Do this only once.
ec = EventClient(server=server, port=port, url=None, keyfile=keyfile)
#
# Allocate and initialize an address tuple like any other python object.
# You can reuse this tuple.
#
at = address_tuple()
at.objname = "link0"
at.eventtype = "modify"
at.expt = "emulab-ops/one-node"
# ... create our notification from the tuple.
note = ec.create_notification(at)
# Add extra arguments to the notification.
note.setArguments("bandwith=1000 delay=13");
# Schedule the notification for right now.
tval = timeval();
tval.tv_sec = long(time.time())
tval.tv_usec = 0;
# And Fire it.
ec.schedule(note, tval)
# Delete the notification.
del note
|
<commit_before><commit_msg>Add an example of sending a link event written in python.<commit_after>#! /usr/local/bin/python
import sys
import time
sys.path.append("/usr/testbed/lib")
from tbevent import *
server = "event-server"
port = None
keyfile = "/proj/emulab-ops/exp/one-node/tbdata/eventkey"
# Construct a regular client. Do this only once.
ec = EventClient(server=server, port=port, url=None, keyfile=keyfile)
#
# Allocate and initialize an address tuple like any other python object.
# You can reuse this tuple.
#
at = address_tuple()
at.objname = "link0"
at.eventtype = "modify"
at.expt = "emulab-ops/one-node"
# ... create our notification from the tuple.
note = ec.create_notification(at)
# Add extra arguments to the notification.
note.setArguments("bandwith=1000 delay=13");
# Schedule the notification for right now.
tval = timeval();
tval.tv_sec = long(time.time())
tval.tv_usec = 0;
# And Fire it.
ec.schedule(note, tval)
# Delete the notification.
del note
|
|
90d4128a13029ca56901e901d381d6fadfd376f5
|
tools/json2csv.py
|
tools/json2csv.py
|
#!/usr/bin/env python
from pathlib import Path
from collections import defaultdict
import json
import numpy as np
import pandas as pd
def get_parser():
"""Build parser object"""
from argparse import ArgumentParser
from argparse import RawTextHelpFormatter
parser = ArgumentParser(description='MRIQC-WebAPI: massaging bson dumps',
formatter_class=RawTextHelpFormatter)
parser.add_argument('input_file', action='store', type=Path, help='input')
parser.add_argument('output_file', action='store', type=Path, help='output')
return parser
def main():
args = get_parser().parse_args()
dictlist = []
with args.input_file.open() as f:
for entry in f.readlines():
entry = entry.strip().replace('+Infinity', '"+Infinity"')
entry = entry.replace('-Infinity', '"-Infinity"')
try:
dictlist += [json.loads(entry.strip())]
except:
print("Error reading: ", entry.strip())
def emptylist():
"""Create an array of NaNs of the same size as currently
stored entries"""
return [np.nan] * max(0, len(data.get('md5sum', [])) - 1)
data = defaultdict(emptylist, {})
for entry in dictlist:
md5sum = entry['provenance']['md5sum']
if not md5sum:
continue
data['md5sum'] += [md5sum]
keys = []
for key, val in entry.items():
if isinstance(val, dict):
for subkey, subval in val.items():
if subkey == 'md5sum':
continue
keys.append('_'.join((key, subkey)))
data['_'.join((key, subkey))] += [subval]
else:
data[key] += [val]
keys.append(key)
# Fill with nans existing keys without value in the current record
missing = list(set(list(data.keys())) - set(keys + ['md5sum']))
for k in missing:
data[k] += [np.nan]
df = pd.DataFrame(data)
origcols = df.columns.ravel().tolist()
iqms = list(set(origcols) - set(['md5sum'] + [k for k in origcols if k.startswith('bids') or k.startswith('provenance') or k.startswith('_')]))
cols = ['md5sum', '_created_$date'] + iqms + list(sorted(
[k for k in origcols if k.startswith('provenance') or k.startswith('bids')]))
df = df[cols]
df.columns = ['created' if k == '_created_$date' else k for k in cols]
df = df.drop_duplicates(subset=['md5sum'])
df.to_csv(str(args.output_file), index=None)
return 0
if __name__ == '__main__':
main()
|
Add a script to generate csv files
|
[ENH] Add a script to generate csv files
First, convert bson to json using mongodb's tool. Second, use this
script to generate csv tables.
|
Python
|
apache-2.0
|
poldracklab/mriqcwebapi
|
[ENH] Add a script to generate csv files
First, convert bson to json using mongodb's tool. Second, use this
script to generate csv tables.
|
#!/usr/bin/env python
from pathlib import Path
from collections import defaultdict
import json
import numpy as np
import pandas as pd
def get_parser():
"""Build parser object"""
from argparse import ArgumentParser
from argparse import RawTextHelpFormatter
parser = ArgumentParser(description='MRIQC-WebAPI: massaging bson dumps',
formatter_class=RawTextHelpFormatter)
parser.add_argument('input_file', action='store', type=Path, help='input')
parser.add_argument('output_file', action='store', type=Path, help='output')
return parser
def main():
args = get_parser().parse_args()
dictlist = []
with args.input_file.open() as f:
for entry in f.readlines():
entry = entry.strip().replace('+Infinity', '"+Infinity"')
entry = entry.replace('-Infinity', '"-Infinity"')
try:
dictlist += [json.loads(entry.strip())]
except:
print("Error reading: ", entry.strip())
def emptylist():
"""Create an array of NaNs of the same size as currently
stored entries"""
return [np.nan] * max(0, len(data.get('md5sum', [])) - 1)
data = defaultdict(emptylist, {})
for entry in dictlist:
md5sum = entry['provenance']['md5sum']
if not md5sum:
continue
data['md5sum'] += [md5sum]
keys = []
for key, val in entry.items():
if isinstance(val, dict):
for subkey, subval in val.items():
if subkey == 'md5sum':
continue
keys.append('_'.join((key, subkey)))
data['_'.join((key, subkey))] += [subval]
else:
data[key] += [val]
keys.append(key)
# Fill with nans existing keys without value in the current record
missing = list(set(list(data.keys())) - set(keys + ['md5sum']))
for k in missing:
data[k] += [np.nan]
df = pd.DataFrame(data)
origcols = df.columns.ravel().tolist()
iqms = list(set(origcols) - set(['md5sum'] + [k for k in origcols if k.startswith('bids') or k.startswith('provenance') or k.startswith('_')]))
cols = ['md5sum', '_created_$date'] + iqms + list(sorted(
[k for k in origcols if k.startswith('provenance') or k.startswith('bids')]))
df = df[cols]
df.columns = ['created' if k == '_created_$date' else k for k in cols]
df = df.drop_duplicates(subset=['md5sum'])
df.to_csv(str(args.output_file), index=None)
return 0
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>[ENH] Add a script to generate csv files
First, convert bson to json using mongodb's tool. Second, use this
script to generate csv tables.<commit_after>
|
#!/usr/bin/env python
from pathlib import Path
from collections import defaultdict
import json
import numpy as np
import pandas as pd
def get_parser():
"""Build parser object"""
from argparse import ArgumentParser
from argparse import RawTextHelpFormatter
parser = ArgumentParser(description='MRIQC-WebAPI: massaging bson dumps',
formatter_class=RawTextHelpFormatter)
parser.add_argument('input_file', action='store', type=Path, help='input')
parser.add_argument('output_file', action='store', type=Path, help='output')
return parser
def main():
args = get_parser().parse_args()
dictlist = []
with args.input_file.open() as f:
for entry in f.readlines():
entry = entry.strip().replace('+Infinity', '"+Infinity"')
entry = entry.replace('-Infinity', '"-Infinity"')
try:
dictlist += [json.loads(entry.strip())]
except:
print("Error reading: ", entry.strip())
def emptylist():
"""Create an array of NaNs of the same size as currently
stored entries"""
return [np.nan] * max(0, len(data.get('md5sum', [])) - 1)
data = defaultdict(emptylist, {})
for entry in dictlist:
md5sum = entry['provenance']['md5sum']
if not md5sum:
continue
data['md5sum'] += [md5sum]
keys = []
for key, val in entry.items():
if isinstance(val, dict):
for subkey, subval in val.items():
if subkey == 'md5sum':
continue
keys.append('_'.join((key, subkey)))
data['_'.join((key, subkey))] += [subval]
else:
data[key] += [val]
keys.append(key)
# Fill with nans existing keys without value in the current record
missing = list(set(list(data.keys())) - set(keys + ['md5sum']))
for k in missing:
data[k] += [np.nan]
df = pd.DataFrame(data)
origcols = df.columns.ravel().tolist()
iqms = list(set(origcols) - set(['md5sum'] + [k for k in origcols if k.startswith('bids') or k.startswith('provenance') or k.startswith('_')]))
cols = ['md5sum', '_created_$date'] + iqms + list(sorted(
[k for k in origcols if k.startswith('provenance') or k.startswith('bids')]))
df = df[cols]
df.columns = ['created' if k == '_created_$date' else k for k in cols]
df = df.drop_duplicates(subset=['md5sum'])
df.to_csv(str(args.output_file), index=None)
return 0
if __name__ == '__main__':
main()
|
[ENH] Add a script to generate csv files
First, convert bson to json using mongodb's tool. Second, use this
script to generate csv tables.
#!/usr/bin/env python
from pathlib import Path
from collections import defaultdict
import json
import numpy as np
import pandas as pd
def get_parser():
"""Build parser object"""
from argparse import ArgumentParser
from argparse import RawTextHelpFormatter
parser = ArgumentParser(description='MRIQC-WebAPI: massaging bson dumps',
formatter_class=RawTextHelpFormatter)
parser.add_argument('input_file', action='store', type=Path, help='input')
parser.add_argument('output_file', action='store', type=Path, help='output')
return parser
def main():
args = get_parser().parse_args()
dictlist = []
with args.input_file.open() as f:
for entry in f.readlines():
entry = entry.strip().replace('+Infinity', '"+Infinity"')
entry = entry.replace('-Infinity', '"-Infinity"')
try:
dictlist += [json.loads(entry.strip())]
except:
print("Error reading: ", entry.strip())
def emptylist():
"""Create an array of NaNs of the same size as currently
stored entries"""
return [np.nan] * max(0, len(data.get('md5sum', [])) - 1)
data = defaultdict(emptylist, {})
for entry in dictlist:
md5sum = entry['provenance']['md5sum']
if not md5sum:
continue
data['md5sum'] += [md5sum]
keys = []
for key, val in entry.items():
if isinstance(val, dict):
for subkey, subval in val.items():
if subkey == 'md5sum':
continue
keys.append('_'.join((key, subkey)))
data['_'.join((key, subkey))] += [subval]
else:
data[key] += [val]
keys.append(key)
# Fill with nans existing keys without value in the current record
missing = list(set(list(data.keys())) - set(keys + ['md5sum']))
for k in missing:
data[k] += [np.nan]
df = pd.DataFrame(data)
origcols = df.columns.ravel().tolist()
iqms = list(set(origcols) - set(['md5sum'] + [k for k in origcols if k.startswith('bids') or k.startswith('provenance') or k.startswith('_')]))
cols = ['md5sum', '_created_$date'] + iqms + list(sorted(
[k for k in origcols if k.startswith('provenance') or k.startswith('bids')]))
df = df[cols]
df.columns = ['created' if k == '_created_$date' else k for k in cols]
df = df.drop_duplicates(subset=['md5sum'])
df.to_csv(str(args.output_file), index=None)
return 0
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>[ENH] Add a script to generate csv files
First, convert bson to json using mongodb's tool. Second, use this
script to generate csv tables.<commit_after>#!/usr/bin/env python
from pathlib import Path
from collections import defaultdict
import json
import numpy as np
import pandas as pd
def get_parser():
"""Build parser object"""
from argparse import ArgumentParser
from argparse import RawTextHelpFormatter
parser = ArgumentParser(description='MRIQC-WebAPI: massaging bson dumps',
formatter_class=RawTextHelpFormatter)
parser.add_argument('input_file', action='store', type=Path, help='input')
parser.add_argument('output_file', action='store', type=Path, help='output')
return parser
def main():
args = get_parser().parse_args()
dictlist = []
with args.input_file.open() as f:
for entry in f.readlines():
entry = entry.strip().replace('+Infinity', '"+Infinity"')
entry = entry.replace('-Infinity', '"-Infinity"')
try:
dictlist += [json.loads(entry.strip())]
except:
print("Error reading: ", entry.strip())
def emptylist():
"""Create an array of NaNs of the same size as currently
stored entries"""
return [np.nan] * max(0, len(data.get('md5sum', [])) - 1)
data = defaultdict(emptylist, {})
for entry in dictlist:
md5sum = entry['provenance']['md5sum']
if not md5sum:
continue
data['md5sum'] += [md5sum]
keys = []
for key, val in entry.items():
if isinstance(val, dict):
for subkey, subval in val.items():
if subkey == 'md5sum':
continue
keys.append('_'.join((key, subkey)))
data['_'.join((key, subkey))] += [subval]
else:
data[key] += [val]
keys.append(key)
# Fill with nans existing keys without value in the current record
missing = list(set(list(data.keys())) - set(keys + ['md5sum']))
for k in missing:
data[k] += [np.nan]
df = pd.DataFrame(data)
origcols = df.columns.ravel().tolist()
iqms = list(set(origcols) - set(['md5sum'] + [k for k in origcols if k.startswith('bids') or k.startswith('provenance') or k.startswith('_')]))
cols = ['md5sum', '_created_$date'] + iqms + list(sorted(
[k for k in origcols if k.startswith('provenance') or k.startswith('bids')]))
df = df[cols]
df.columns = ['created' if k == '_created_$date' else k for k in cols]
df = df.drop_duplicates(subset=['md5sum'])
df.to_csv(str(args.output_file), index=None)
return 0
if __name__ == '__main__':
main()
|
|
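The fiddliest part of the converter above is keeping ragged records rectangular: keys that appear late get back-filled with NaNs so pandas can assemble one frame. A self-contained sketch of that padding pattern, stripped of the MRIQC-specific keys (the sample records are made up):

# Padding pattern only; not the MRIQC tool itself.
from collections import defaultdict

import numpy as np
import pandas as pd

records = [{"id": "a", "snr": 12.3}, {"id": "b", "snr": 11.8, "efc": 0.4}]

data = defaultdict(list)
for i, rec in enumerate(records):
    for key, val in rec.items():
        # back-fill columns first seen now so they align with earlier rows
        data[key].extend([np.nan] * (i - len(data[key])))
        data[key].append(val)
# pad columns missing from the last records as well
length = max(len(col) for col in data.values())
for key in data:
    data[key].extend([np.nan] * (length - len(data[key])))

print(pd.DataFrame(data))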
54551601b9b89e41760110dbf6351f94db633f95
|
startup/GafferOSL/oslHideShaders.py
|
startup/GafferOSL/oslHideShaders.py
|
##########################################################################
#
# Copyright (c) 2019, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import GafferSceneUI
import IECore
toHide = IECore.PathMatcher()
toHide.addPath( "ObjectProcessing/Out*.oso" )
GafferSceneUI.ShaderUI.hideShaders( toHide )
|
Hide deprecated shaders used by old OSLObject
|
Hide deprecated shaders used by old OSLObject
|
Python
|
bsd-3-clause
|
hradec/gaffer,hradec/gaffer,GafferHQ/gaffer,appleseedhq/gaffer,johnhaddon/gaffer,boberfly/gaffer,johnhaddon/gaffer,andrewkaufman/gaffer,hradec/gaffer,GafferHQ/gaffer,andrewkaufman/gaffer,lucienfostier/gaffer,ImageEngine/gaffer,appleseedhq/gaffer,lucienfostier/gaffer,hradec/gaffer,lucienfostier/gaffer,andrewkaufman/gaffer,ImageEngine/gaffer,andrewkaufman/gaffer,boberfly/gaffer,johnhaddon/gaffer,GafferHQ/gaffer,andrewkaufman/gaffer,johnhaddon/gaffer,lucienfostier/gaffer,johnhaddon/gaffer,GafferHQ/gaffer,GafferHQ/gaffer,appleseedhq/gaffer,boberfly/gaffer,hradec/gaffer,appleseedhq/gaffer,ImageEngine/gaffer,boberfly/gaffer,ImageEngine/gaffer
|
Hide deprecated shaders used by old OSLObject
|
##########################################################################
#
# Copyright (c) 2019, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import GafferSceneUI
import IECore
toHide = IECore.PathMatcher()
toHide.addPath( "ObjectProcessing/Out*.oso" )
GafferSceneUI.ShaderUI.hideShaders( toHide )
|
<commit_before><commit_msg>Hide deprecated shaders used by old OSLObject<commit_after>
|
##########################################################################
#
# Copyright (c) 2019, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import GafferSceneUI
import IECore
toHide = IECore.PathMatcher()
toHide.addPath( "ObjectProcessing/Out*.oso" )
GafferSceneUI.ShaderUI.hideShaders( toHide )
|
Hide deprecated shaders used by old OSLObject##########################################################################
#
# Copyright (c) 2019, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import GafferSceneUI
import IECore
toHide = IECore.PathMatcher()
toHide.addPath( "ObjectProcessing/Out*.oso" )
GafferSceneUI.ShaderUI.hideShaders( toHide )
|
<commit_before><commit_msg>Hide deprecated shaders used by old OSLObject<commit_after>##########################################################################
#
# Copyright (c) 2019, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import GafferSceneUI
import IECore
toHide = IECore.PathMatcher()
toHide.addPath( "ObjectProcessing/Out*.oso" )
GafferSceneUI.ShaderUI.hideShaders( toHide )
|
|
ca08020793926adad524ddab6ccb5dee8d3e2cc5
|
tw-cover-comp.py
|
tw-cover-comp.py
|
#!/usr/bin/env python3
import sys
from PIL import Image
COVER_HEIGHT = 480
BORDER_WIDTH = 10
BACKGROUND_COLOR = (39, 46, 111, 0)
def gather_covers(input_files):
'''Given a list of files, return a list of resized RGB images'''
result = []
for input_file in sys.argv[1:]:
img = Image.open(input_file)
orig_width, orig_height = img.size
new_height = COVER_HEIGHT
new_width = COVER_HEIGHT * (orig_width / orig_height)
img = img.resize((int(new_width), int(new_height)), Image.ANTIALIAS)
if img.mode == 'P':
img = img.convert(mode='RGB', dither=None)
result.append(img)
return result
def get_composite_width(covers):
'''Given a list of images, return composite width including borders'''
width = BORDER_WIDTH
for cover in covers:
width += cover.size[0] + BORDER_WIDTH
return width
def set_up_cover_paste(cover, x_offset):
'''Given cover image and offset, define inputs for next cover placement'''
crop_frame = (0, 0, cover.size[0], cover.size[1])
cover_region = cover.crop(crop_frame)
paste_frame = \
(x_offset, BORDER_WIDTH,
x_offset + cover.size[0], COVER_HEIGHT + BORDER_WIDTH)
return (cover_region, paste_frame)
if __name__ == '__main__':
covers = gather_covers(sys.argv[1:])
composite_width = get_composite_width(covers)
composite = Image.new(
mode='RGB',
size=(composite_width, COVER_HEIGHT + (2 * BORDER_WIDTH)),
color=BACKGROUND_COLOR)
x_offset = BORDER_WIDTH
for idx, cover in enumerate(covers):
cover_region, paste_frame = set_up_cover_paste(cover, x_offset)
composite.paste(cover_region, paste_frame)
# Move next x draw position over one cover and one border
x_offset += cover.size[0] + BORDER_WIDTH
composite.save('twitter-cover-image.jpg', 'JPEG', quality=70)
|
Add script to create cover composite images for Twitter posts
|
Add script to create cover composite images for Twitter posts
|
Python
|
mit
|
Backlist/backlist-workflows
|
Add script to create cover composite images for Twitter posts
|
#!/usr/bin/env python3
import sys
from PIL import Image
COVER_HEIGHT = 480
BORDER_WIDTH = 10
BACKGROUND_COLOR = (39, 46, 111, 0)
def gather_covers(input_files):
'''Given a list of files, return a list of resized RGB images'''
result = []
for input_file in sys.argv[1:]:
img = Image.open(input_file)
orig_width, orig_height = img.size
new_height = COVER_HEIGHT
new_width = COVER_HEIGHT * (orig_width / orig_height)
img = img.resize((int(new_width), int(new_height)), Image.ANTIALIAS)
if img.mode == 'P':
img = img.convert(mode='RGB', dither=None)
result.append(img)
return result
def get_composite_width(covers):
'''Given a list of images, return composite width including borders'''
width = BORDER_WIDTH
for cover in covers:
width += cover.size[0] + BORDER_WIDTH
return width
def set_up_cover_paste(cover, x_offset):
'''Given cover image and offset, define inputs for next cover placement'''
crop_frame = (0, 0, cover.size[0], cover.size[1])
cover_region = cover.crop(crop_frame)
paste_frame = \
(x_offset, BORDER_WIDTH,
x_offset + cover.size[0], COVER_HEIGHT + BORDER_WIDTH)
return (cover_region, paste_frame)
if __name__ == '__main__':
covers = gather_covers(sys.argv[1:])
composite_width = get_composite_width(covers)
composite = Image.new(
mode='RGB',
size=(composite_width, COVER_HEIGHT + (2 * BORDER_WIDTH)),
color=BACKGROUND_COLOR)
x_offset = BORDER_WIDTH
for idx, cover in enumerate(covers):
cover_region, paste_frame = set_up_cover_paste(cover, x_offset)
composite.paste(cover_region, paste_frame)
# Move next x draw position over one cover and one border
x_offset += cover.size[0] + BORDER_WIDTH
composite.save('twitter-cover-image.jpg', 'JPEG', quality=70)
|
<commit_before><commit_msg>Add script to create cover composite images for Twitter posts<commit_after>
|
#!/usr/bin/env python3
import sys
from PIL import Image
COVER_HEIGHT = 480
BORDER_WIDTH = 10
BACKGROUND_COLOR = (39, 46, 111, 0)
def gather_covers(input_files):
'''Given a list of files, return a list of resized RGB images'''
result = []
for input_file in sys.argv[1:]:
img = Image.open(input_file)
orig_width, orig_height = img.size
new_height = COVER_HEIGHT
new_width = COVER_HEIGHT * (orig_width / orig_height)
img = img.resize((int(new_width), int(new_height)), Image.ANTIALIAS)
if img.mode == 'P':
img = img.convert(mode='RGB', dither=None)
result.append(img)
return result
def get_composite_width(covers):
'''Given a list of images, return composite width including borders'''
width = BORDER_WIDTH
for cover in covers:
width += cover.size[0] + BORDER_WIDTH
return width
def set_up_cover_paste(cover, x_offset):
'''Given cover image and offset, define inputs for next cover placement'''
crop_frame = (0, 0, cover.size[0], cover.size[1])
cover_region = cover.crop(crop_frame)
paste_frame = \
(x_offset, BORDER_WIDTH,
x_offset + cover.size[0], COVER_HEIGHT + BORDER_WIDTH)
return (cover_region, paste_frame)
if __name__ == '__main__':
covers = gather_covers(sys.argv[1:])
composite_width = get_composite_width(covers)
composite = Image.new(
mode='RGB',
size=(composite_width, COVER_HEIGHT + (2 * BORDER_WIDTH)),
color=BACKGROUND_COLOR)
x_offset = BORDER_WIDTH
for idx, cover in enumerate(covers):
cover_region, paste_frame = set_up_cover_paste(cover, x_offset)
composite.paste(cover_region, paste_frame)
# Move next x draw position over one cover and one border
x_offset += cover.size[0] + BORDER_WIDTH
composite.save('twitter-cover-image.jpg', 'JPEG', quality=70)
|
Add script to create cover composite images for Twitter posts#!/usr/bin/env python3
import sys
from PIL import Image
COVER_HEIGHT = 480
BORDER_WIDTH = 10
BACKGROUND_COLOR = (39, 46, 111, 0)
def gather_covers(input_files):
'''Given a list of files, return a list of resized RGB images'''
result = []
for input_file in sys.argv[1:]:
img = Image.open(input_file)
orig_width, orig_height = img.size
new_height = COVER_HEIGHT
new_width = COVER_HEIGHT * (orig_width / orig_height)
img = img.resize((int(new_width), int(new_height)), Image.ANTIALIAS)
if img.mode == 'P':
img = img.convert(mode='RGB', dither=None)
result.append(img)
return result
def get_composite_width(covers):
'''Given a list of images, return composite width including borders'''
width = BORDER_WIDTH
for cover in covers:
width += cover.size[0] + BORDER_WIDTH
return width
def set_up_cover_paste(cover, x_offset):
'''Given cover image and offset, define inputs for next cover placement'''
crop_frame = (0, 0, cover.size[0], cover.size[1])
cover_region = cover.crop(crop_frame)
paste_frame = \
(x_offset, BORDER_WIDTH,
x_offset + cover.size[0], COVER_HEIGHT + BORDER_WIDTH)
return (cover_region, paste_frame)
if __name__ == '__main__':
covers = gather_covers(sys.argv[1:])
composite_width = get_composite_width(covers)
composite = Image.new(
mode='RGB',
size=(composite_width, COVER_HEIGHT + (2 * BORDER_WIDTH)),
color=BACKGROUND_COLOR)
x_offset = BORDER_WIDTH
for idx, cover in enumerate(covers):
cover_region, paste_frame = set_up_cover_paste(cover, x_offset)
composite.paste(cover_region, paste_frame)
# Move next x draw position over one cover and one border
x_offset += cover.size[0] + BORDER_WIDTH
composite.save('twitter-cover-image.jpg', 'JPEG', quality=70)
|
<commit_before><commit_msg>Add script to create cover composite images for Twitter posts<commit_after>#!/usr/bin/env python3
import sys
from PIL import Image
COVER_HEIGHT = 480
BORDER_WIDTH = 10
BACKGROUND_COLOR = (39, 46, 111, 0)
def gather_covers(input_files):
'''Given a list of files, return a list of resized RGB images'''
result = []
for input_file in sys.argv[1:]:
img = Image.open(input_file)
orig_width, orig_height = img.size
new_height = COVER_HEIGHT
new_width = COVER_HEIGHT * (orig_width / orig_height)
img = img.resize((int(new_width), int(new_height)), Image.ANTIALIAS)
if img.mode == 'P':
img = img.convert(mode='RGB', dither=None)
result.append(img)
return result
def get_composite_width(covers):
'''Given a list of images, return composite width including borders'''
width = BORDER_WIDTH
for cover in covers:
width += cover.size[0] + BORDER_WIDTH
return width
def set_up_cover_paste(cover, x_offset):
'''Given cover image and offset, define inputs for next cover placement'''
crop_frame = (0, 0, cover.size[0], cover.size[1])
cover_region = cover.crop(crop_frame)
paste_frame = \
(x_offset, BORDER_WIDTH,
x_offset + cover.size[0], COVER_HEIGHT + BORDER_WIDTH)
return (cover_region, paste_frame)
if __name__ == '__main__':
covers = gather_covers(sys.argv[1:])
composite_width = get_composite_width(covers)
composite = Image.new(
mode='RGB',
size=(composite_width, COVER_HEIGHT + (2 * BORDER_WIDTH)),
color=BACKGROUND_COLOR)
x_offset = BORDER_WIDTH
for idx, cover in enumerate(covers):
cover_region, paste_frame = set_up_cover_paste(cover, x_offset)
composite.paste(cover_region, paste_frame)
# Move next x draw position over one cover and one border
x_offset += cover.size[0] + BORDER_WIDTH
composite.save('twitter-cover-image.jpg', 'JPEG', quality=70)
|
|
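A note on the cover-composite script in the record above: gather_covers accepts an input_files parameter but loops over sys.argv[1:] directly, so the argument is ignored. A minimal corrected sketch of that helper is shown below; it assumes Python 3 with Pillow installed and uses Image.LANCZOS, since Image.ANTIALIAS has been removed from recent Pillow releases.

from PIL import Image

COVER_HEIGHT = 480  # same target height as the script above

def gather_covers(input_files):
    """Return resized RGB cover images, preserving each aspect ratio."""
    covers = []
    for path in input_files:  # iterate the parameter instead of sys.argv[1:]
        img = Image.open(path)
        width, height = img.size
        new_width = int(COVER_HEIGHT * (width / height))
        img = img.resize((new_width, COVER_HEIGHT), Image.LANCZOS)
        if img.mode != "RGB":  # palette or RGBA covers need converting for JPEG output
            img = img.convert("RGB")
        covers.append(img)
    return covers
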
082f515391f116728c892389d594521520fe2f1a
|
firecares/firestation/migrations/0014_auto_20150901_1052.py
|
firecares/firestation/migrations/0014_auto_20150901_1052.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('firestation', '0013_populationclass0quartile_populationclass1quartile_populationclass2quartile_populationclass3quartile_'),
]
operations = [
migrations.AlterField(
model_name='staffing',
name='chief_officer',
field=models.PositiveIntegerField(default=0, null=True, verbose_name=b'Chief Officer', blank=True, validators=[django.core.validators.MaxValueValidator(99)]),
),
]
|
Add migration to fix misspelling in verbose name.
|
Add migration to fix misspelling in verbose name.
|
Python
|
mit
|
HunterConnelly/firecares,garnertb/firecares,garnertb/firecares,ROGUE-JCTD/vida,meilinger/firecares,ROGUE-JCTD/vida,ROGUE-JCTD/vida,meilinger/firecares,FireCARES/firecares,ROGUE-JCTD/vida,garnertb/firecares,ROGUE-JCTD/vida,HunterConnelly/firecares,HunterConnelly/firecares,FireCARES/firecares,FireCARES/firecares,meilinger/firecares,FireCARES/firecares,garnertb/firecares,HunterConnelly/firecares,FireCARES/firecares,meilinger/firecares
|
Add migration to fix misspelling in verbose name.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('firestation', '0013_populationclass0quartile_populationclass1quartile_populationclass2quartile_populationclass3quartile_'),
]
operations = [
migrations.AlterField(
model_name='staffing',
name='chief_officer',
field=models.PositiveIntegerField(default=0, null=True, verbose_name=b'Chief Officer', blank=True, validators=[django.core.validators.MaxValueValidator(99)]),
),
]
|
<commit_before><commit_msg>Add migration to fix misspelling in verbose name.<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('firestation', '0013_populationclass0quartile_populationclass1quartile_populationclass2quartile_populationclass3quartile_'),
]
operations = [
migrations.AlterField(
model_name='staffing',
name='chief_officer',
field=models.PositiveIntegerField(default=0, null=True, verbose_name=b'Chief Officer', blank=True, validators=[django.core.validators.MaxValueValidator(99)]),
),
]
|
Add migration to fix misspelling in verbose name.# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('firestation', '0013_populationclass0quartile_populationclass1quartile_populationclass2quartile_populationclass3quartile_'),
]
operations = [
migrations.AlterField(
model_name='staffing',
name='chief_officer',
field=models.PositiveIntegerField(default=0, null=True, verbose_name=b'Chief Officer', blank=True, validators=[django.core.validators.MaxValueValidator(99)]),
),
]
|
<commit_before><commit_msg>Add migration to fix misspelling in verbose name.<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('firestation', '0013_populationclass0quartile_populationclass1quartile_populationclass2quartile_populationclass3quartile_'),
]
operations = [
migrations.AlterField(
model_name='staffing',
name='chief_officer',
field=models.PositiveIntegerField(default=0, null=True, verbose_name=b'Chief Officer', blank=True, validators=[django.core.validators.MaxValueValidator(99)]),
),
]
|
|
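The AlterField migration above only records the corrected verbose name. For context, the matching field on the Staffing model would look roughly like the sketch below; the app, model, field name, and validators come from the migration itself, while the surrounding class body is an assumption.

# firestation/models.py (app name taken from the migration dependency)
from django.core.validators import MaxValueValidator
from django.db import models

class Staffing(models.Model):
    # "Chief Officer" is the verbose name whose spelling the migration fixes
    chief_officer = models.PositiveIntegerField(
        "Chief Officer",
        default=0,
        null=True,
        blank=True,
        validators=[MaxValueValidator(99)],
    )
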
c35bee6ee578dd2bdd2673887eaac81d4c5c87c0
|
jsonmat2h5.py
|
jsonmat2h5.py
|
import json
import h5py
import numpy as np
import scipy.io
jdata = json.load(open("flickr8k/dataset.json"))
features_struct = scipy.io.loadmat('flickr8k/vgg_feats.mat')['feats']
h5output = h5py.File("flickr8k.h5", "w")
# The HDF5 file will contain a top-level group for each split
train = h5output.create_group("train")
val = h5output.create_group("val")
test = h5output.create_group("test")
# We need these counters to enable easy indexing in downstream applications
val_counter = 0
test_counter = 0
for idx, image in enumerate(jdata['images']):
split = image['split']
image_filename = image['filename']
image_id = image['imgid']
# Each image has its own H5 Group, which will contain two "Dataset" objects.
if split == "train":
container = train.create_group("%04d" % idx)
if split == "val":
container = val.create_group("%04d" % val_counter)
val_counter += 1
if split == "test":
container = test.create_group("%04d" % test_counter)
test_counter += 1
# The descriptions "Dataset" contains one row per description in unicode
text_data = container.create_dataset("descriptions", (5,),
dtype=h5py.special_dtype(vlen=unicode))
# The visual features "Dataset" contains one row per description in float32
image_data = container.create_dataset("feats", (5,4096), dtype='float32')
for idx2, text in enumerate(image['sentences']):
text_data[idx2] = text['raw']
image_data[idx2] = features_struct[:,idx]
'''
Here is an example of how to access the descriptions and the visual features at
the same time. This shows the descriptions and visual features for the image
with ID=7 in the original flickr8k/dataset.json.
for text, vis in zip(train['0007']['descriptions'], train['0007']['feats']):
print("%s %s" % (text, vis))
'''
h5output.close()
|
Convert a JSON dataset file and a MATLAB visual features file into a single H5 file.
|
Convert a JSON dataset file and a MATLAB visual features file into a single H5 file.
|
Python
|
bsd-3-clause
|
elliottd/GroundedTranslation,elliottd/GroundedTranslation,elliottd/GroundedTranslation
|
Convert a JSON dataset file and a MATLAB visual features file into a single H5 file.
|
import json
import h5py
import numpy as np
import scipy.io
jdata = json.load(open("flickr8k/dataset.json"))
features_struct = scipy.io.loadmat('flickr8k/vgg_feats.mat')['feats']
h5output = h5py.File("flickr8k.h5", "w")
# The HDF5 file will contain a top-level group for each split
train = h5output.create_group("train")
val = h5output.create_group("val")
test = h5output.create_group("test")
# We need these counters to enable easy indexing in downstream applications
val_counter = 0
test_counter = 0
for idx, image in enumerate(jdata['images']):
split = image['split']
image_filename = image['filename']
image_id = image['imgid']
# Each image has its own H5 Group, which will contain two "Dataset" objects.
if split == "train":
container = train.create_group("%04d" % idx)
if split == "val":
container = val.create_group("%04d" % val_counter)
val_counter += 1
if split == "test":
container = test.create_group("%04d" % test_counter)
test_counter += 1
# The descriptions "Dataset" contains one row per description in unicode
text_data = container.create_dataset("descriptions", (5,),
dtype=h5py.special_dtype(vlen=unicode))
# The visual features "Dataset" contains one row per description in float32
image_data = container.create_dataset("feats", (5,4096), dtype='float32')
for idx2, text in enumerate(image['sentences']):
text_data[idx2] = text['raw']
image_data[idx2] = features_struct[:,idx]
'''
Here is an example of how to access the descriptions and the visual features at
the same time. This shows the descriptions and visual features for the image
with ID=7 in the original flickr8k/dataset.json.
for text, vis in zip(train['0007']['descriptions'], train['0007']['feats']):
print("%s %s" % (text, vis))
'''
h5output.close()
|
<commit_before><commit_msg>Convert a JSON dataset file and a MATLAB visual features file into a single H5 file.<commit_after>
|
import json
import h5py
import numpy as np
import scipy.io
jdata = json.load(open("flickr8k/dataset.json"))
features_struct = scipy.io.loadmat('flickr8k/vgg_feats.mat')['feats']
h5output = h5py.File("flickr8k.h5", "w")
# The HDF5 file will contain a top-level group for each split
train = h5output.create_group("train")
val = h5output.create_group("val")
test = h5output.create_group("test")
# We need these counters to enable easy indexing in downstream applications
val_counter = 0
test_counter = 0
for idx, image in enumerate(jdata['images']):
split = image['split']
image_filename = image['filename']
image_id = image['imgid']
# Each image has its own H5 Group, which will contain two "Dataset" objects.
if split == "train":
container = train.create_group("%04d" % idx)
if split == "val":
container = val.create_group("%04d" % val_counter)
val_counter += 1
if split == "test":
container = test.create_group("%04d" % test_counter)
test_counter += 1
# The descriptions "Dataset" contains one row per description in unicode
text_data = container.create_dataset("descriptions", (5,),
dtype=h5py.special_dtype(vlen=unicode))
# The visual features "Dataset" contains one row per description in float32
image_data = container.create_dataset("feats", (5,4096), dtype='float32')
for idx2, text in enumerate(image['sentences']):
text_data[idx2] = text['raw']
image_data[idx2] = features_struct[:,idx]
'''
Here is an example of how to access the descriptions and the visual features at
the same time. This shows the descriptions and visual features for the image
with ID=7 in the original flickr8k/dataset.json.
for text, vis in zip(train['0007']['descriptions'], train['0007']['feats']):
print("%s %s" % (text, vis))
'''
h5output.close()
|
Convert a JSON dataset file and a MATLAB visual features file into a single H5 file.import json
import h5py
import numpy as np
import scipy.io
jdata = json.load(open("flickr8k/dataset.json"))
features_struct = scipy.io.loadmat('flickr8k/vgg_feats.mat')['feats']
h5output = h5py.File("flickr8k.h5", "w")
# The HDF5 file will contain a top-level group for each split
train = h5output.create_group("train")
val = h5output.create_group("val")
test = h5output.create_group("test")
# We need these counters to enable easy indexing in downstream applications
val_counter = 0
test_counter = 0
for idx, image in enumerate(jdata['images']):
split = image['split']
image_filename = image['filename']
image_id = image['imgid']
# Each image has its own H5 Group, which will contain two "Dataset" objects.
if split == "train":
container = train.create_group("%04d" % idx)
if split == "val":
container = val.create_group("%04d" % val_counter)
val_counter += 1
if split == "test":
container = test.create_group("%04d" % test_counter)
test_counter += 1
# The descriptions "Dataset" contains one row per description in unicode
text_data = container.create_dataset("descriptions", (5,),
dtype=h5py.special_dtype(vlen=unicode))
# The visual features "Dataset" contains one row per description in float32
image_data = container.create_dataset("feats", (5,4096), dtype='float32')
for idx2, text in enumerate(image['sentences']):
text_data[idx2] = text['raw']
image_data[idx2] = features_struct[:,idx]
'''
Here is an example of how to access the descriptions and the visual features at
the same time. This shows the descriptions and visual features for the image
with ID=7 in the original flickr8k/dataset.json.
for text, vis in zip(train['0007']['descriptions'], train['0007']['feats']):
print("%s %s" % (text, vis))
'''
h5output.close()
|
<commit_before><commit_msg>Convert a JSON dataset file and a MATLAB visual features file into a single H5 file.<commit_after>import json
import h5py
import numpy as np
import scipy.io
jdata = json.load(open("flickr8k/dataset.json"))
features_struct = scipy.io.loadmat('flickr8k/vgg_feats.mat')['feats']
h5output = h5py.File("flickr8k.h5", "w")
# The HDF5 file will contain a top-level group for each split
train = h5output.create_group("train")
val = h5output.create_group("val")
test = h5output.create_group("test")
# We need these counters to enable easy indexing in downstream applications
val_counter = 0
test_counter = 0
for idx, image in enumerate(jdata['images']):
split = image['split']
image_filename = image['filename']
image_id = image['imgid']
# Each image has its own H5 Group, which will contain two "Dataset" objects.
if split == "train":
container = train.create_group("%04d" % idx)
if split == "val":
container = val.create_group("%04d" % val_counter)
val_counter += 1
if split == "test":
container = test.create_group("%04d" % test_counter)
test_counter += 1
# The descriptions "Dataset" contains one row per description in unicode
text_data = container.create_dataset("descriptions", (5,),
dtype=h5py.special_dtype(vlen=unicode))
# The visual features "Dataset" contains one row per description in float32
image_data = container.create_dataset("feats", (5,4096), dtype='float32')
for idx2, text in enumerate(image['sentences']):
text_data[idx2] = text['raw']
image_data[idx2] = features_struct[:,idx]
'''
Here is an example of how to access the descriptions and the visual features at
the same time. This shows the descriptions and visual features for the image
with ID=7 in the original flickr8k/dataset.json.
for text, vis in zip(train['0007']['descriptions'], train['0007']['feats']):
print("%s %s" % (text, vis))
'''
h5output.close()
|
|
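The conversion script above is Python 2 code (it relies on the unicode builtin). As an illustration of how the resulting flickr8k.h5 can be read back under Python 3, the sketch below walks the train split and pairs each description with its feature row; the group and dataset names come from the script, while the byte-decoding detail is an assumption about how newer h5py returns variable-length strings.

import h5py

with h5py.File("flickr8k.h5", "r") as f:
    train = f["train"]
    for name in sorted(train):                 # groups are named "0000", "0001", ...
        group = train[name]
        descriptions = group["descriptions"]   # 5 variable-length strings
        feats = group["feats"]                 # 5 x 4096 float32 rows
        for text, vec in zip(descriptions, feats):
            if isinstance(text, bytes):        # h5py 3.x returns bytes by default
                text = text.decode("utf-8")
            print(name, text[:40], vec.shape)
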
0f174aac0b8fc99e7c7666f4a2958ae048ef62cb
|
py/maximum-length-of-pair-chain.py
|
py/maximum-length-of-pair-chain.py
|
class Solution(object):
def findLongestChain(self, pairs):
"""
:type pairs: List[List[int]]
:rtype: int
"""
pairs.sort()
LIS = []
for p in pairs:
L, U = -1, len(LIS)
while L + 1 < U:
mid = (L + U) / 2
if LIS[mid] < p[0]:
L = mid
else:
U = mid
if len(LIS) == U:
LIS.append(p[1])
else:
LIS[L + 1] = min(LIS[L + 1], p[1])
return len(LIS)
|
Add py solution for 646. Maximum Length of Pair Chain
|
Add py solution for 646. Maximum Length of Pair Chain
646. Maximum Length of Pair Chain: https://leetcode.com/problems/maximum-length-of-pair-chain/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 646. Maximum Length of Pair Chain
646. Maximum Length of Pair Chain: https://leetcode.com/problems/maximum-length-of-pair-chain/
|
class Solution(object):
def findLongestChain(self, pairs):
"""
:type pairs: List[List[int]]
:rtype: int
"""
pairs.sort()
LIS = []
for p in pairs:
L, U = -1, len(LIS)
while L + 1 < U:
mid = (L + U) / 2
if LIS[mid] < p[0]:
L = mid
else:
U = mid
if len(LIS) == U:
LIS.append(p[1])
else:
LIS[L + 1] = min(LIS[L + 1], p[1])
return len(LIS)
|
<commit_before><commit_msg>Add py solution for 646. Maximum Length of Pair Chain
646. Maximum Length of Pair Chain: https://leetcode.com/problems/maximum-length-of-pair-chain/<commit_after>
|
class Solution(object):
def findLongestChain(self, pairs):
"""
:type pairs: List[List[int]]
:rtype: int
"""
pairs.sort()
LIS = []
for p in pairs:
L, U = -1, len(LIS)
while L + 1 < U:
mid = (L + U) / 2
if LIS[mid] < p[0]:
L = mid
else:
U = mid
if len(LIS) == U:
LIS.append(p[1])
else:
LIS[L + 1] = min(LIS[L + 1], p[1])
return len(LIS)
|
Add py solution for 646. Maximum Length of Pair Chain
646. Maximum Length of Pair Chain: https://leetcode.com/problems/maximum-length-of-pair-chain/class Solution(object):
def findLongestChain(self, pairs):
"""
:type pairs: List[List[int]]
:rtype: int
"""
pairs.sort()
LIS = []
for p in pairs:
L, U = -1, len(LIS)
while L + 1 < U:
mid = (L + U) / 2
if LIS[mid] < p[0]:
L = mid
else:
U = mid
if len(LIS) == U:
LIS.append(p[1])
else:
LIS[L + 1] = min(LIS[L + 1], p[1])
return len(LIS)
|
<commit_before><commit_msg>Add py solution for 646. Maximum Length of Pair Chain
646. Maximum Length of Pair Chain: https://leetcode.com/problems/maximum-length-of-pair-chain/<commit_after>class Solution(object):
def findLongestChain(self, pairs):
"""
:type pairs: List[List[int]]
:rtype: int
"""
pairs.sort()
LIS = []
for p in pairs:
L, U = -1, len(LIS)
while L + 1 < U:
mid = (L + U) / 2
if LIS[mid] < p[0]:
L = mid
else:
U = mid
if len(LIS) == U:
LIS.append(p[1])
else:
LIS[L + 1] = min(LIS[L + 1], p[1])
return len(LIS)
|
|
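The accepted solution above reduces the problem to longest increasing subsequence with a hand-rolled binary search over chain tails. For comparison, and not part of the record, the same answer also falls out of the classic interval-scheduling greedy: sort the pairs by right endpoint and keep every pair whose left endpoint is strictly greater than the current chain tail.

def find_longest_chain(pairs):
    """Greedy maximum chain length for pairs [a, b] chained when b < c."""
    pairs.sort(key=lambda p: p[1])   # sort by right endpoint
    count = 0
    tail = float("-inf")
    for left, right in pairs:
        if left > tail:              # this pair can extend the chain
            count += 1
            tail = right
    return count

print(find_longest_chain([[1, 2], [2, 3], [3, 4]]))  # -> 2, e.g. [1,2] then [3,4]
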
6dd32078aa461488872745bcbc7c43cd3988ed53
|
tests/api/benchmark/test_order.py
|
tests/api/benchmark/test_order.py
|
import pytest
from tests.api.utils import get_graphql_content
@pytest.mark.django_db
@pytest.mark.count_queries(autouse=False)
def test_user_order_details(user_api_client, order_with_lines, count_queries):
query = """
fragment OrderPrice on TaxedMoney {
gross {
amount
currency
}
net {
amount
currency
}
}
fragment Address on Address {
id
firstName
lastName
companyName
streetAddress1
streetAddress2
city
postalCode
country {
code
country
}
countryArea
phone
isDefaultBillingAddress
isDefaultShippingAddress
}
fragment Price on TaxedMoney {
gross {
amount
currency
}
net {
amount
currency
}
}
fragment ProductVariant on ProductVariant {
id
name
pricing {
onSale
priceUndiscounted {
...Price
}
price {
...Price
}
}
product {
id
name
thumbnail {
url
alt
}
thumbnail2x: thumbnail(size: 510) {
url
}
}
}
fragment OrderDetail on Order {
userEmail
paymentStatus
paymentStatusDisplay
status
statusDisplay
id
number
shippingAddress {
...Address
}
lines {
productName
quantity
variant {
...ProductVariant
}
unitPrice {
currency
...OrderPrice
}
}
subtotal {
...OrderPrice
}
total {
...OrderPrice
}
shippingPrice {
...OrderPrice
}
}
query OrderByToken($token: UUID!) {
orderByToken(token: $token) {
...OrderDetail
}
}
"""
variables = {
"token": order_with_lines.token,
}
get_graphql_content(user_api_client.post_graphql(query, variables))
|
Add test for order history view
|
Add test for order history view
|
Python
|
bsd-3-clause
|
mociepka/saleor,mociepka/saleor,mociepka/saleor
|
Add test for order history view
|
import pytest
from tests.api.utils import get_graphql_content
@pytest.mark.django_db
@pytest.mark.count_queries(autouse=False)
def test_user_order_details(user_api_client, order_with_lines, count_queries):
query = """
fragment OrderPrice on TaxedMoney {
gross {
amount
currency
}
net {
amount
currency
}
}
fragment Address on Address {
id
firstName
lastName
companyName
streetAddress1
streetAddress2
city
postalCode
country {
code
country
}
countryArea
phone
isDefaultBillingAddress
isDefaultShippingAddress
}
fragment Price on TaxedMoney {
gross {
amount
currency
}
net {
amount
currency
}
}
fragment ProductVariant on ProductVariant {
id
name
pricing {
onSale
priceUndiscounted {
...Price
}
price {
...Price
}
}
product {
id
name
thumbnail {
url
alt
}
thumbnail2x: thumbnail(size: 510) {
url
}
}
}
fragment OrderDetail on Order {
userEmail
paymentStatus
paymentStatusDisplay
status
statusDisplay
id
number
shippingAddress {
...Address
}
lines {
productName
quantity
variant {
...ProductVariant
}
unitPrice {
currency
...OrderPrice
}
}
subtotal {
...OrderPrice
}
total {
...OrderPrice
}
shippingPrice {
...OrderPrice
}
}
query OrderByToken($token: UUID!) {
orderByToken(token: $token) {
...OrderDetail
}
}
"""
variables = {
"token": order_with_lines.token,
}
get_graphql_content(user_api_client.post_graphql(query, variables))
|
<commit_before><commit_msg>Add test for order history view<commit_after>
|
import pytest
from tests.api.utils import get_graphql_content
@pytest.mark.django_db
@pytest.mark.count_queries(autouse=False)
def test_user_order_details(user_api_client, order_with_lines, count_queries):
query = """
fragment OrderPrice on TaxedMoney {
gross {
amount
currency
}
net {
amount
currency
}
}
fragment Address on Address {
id
firstName
lastName
companyName
streetAddress1
streetAddress2
city
postalCode
country {
code
country
}
countryArea
phone
isDefaultBillingAddress
isDefaultShippingAddress
}
fragment Price on TaxedMoney {
gross {
amount
currency
}
net {
amount
currency
}
}
fragment ProductVariant on ProductVariant {
id
name
pricing {
onSale
priceUndiscounted {
...Price
}
price {
...Price
}
}
product {
id
name
thumbnail {
url
alt
}
thumbnail2x: thumbnail(size: 510) {
url
}
}
}
fragment OrderDetail on Order {
userEmail
paymentStatus
paymentStatusDisplay
status
statusDisplay
id
number
shippingAddress {
...Address
}
lines {
productName
quantity
variant {
...ProductVariant
}
unitPrice {
currency
...OrderPrice
}
}
subtotal {
...OrderPrice
}
total {
...OrderPrice
}
shippingPrice {
...OrderPrice
}
}
query OrderByToken($token: UUID!) {
orderByToken(token: $token) {
...OrderDetail
}
}
"""
variables = {
"token": order_with_lines.token,
}
get_graphql_content(user_api_client.post_graphql(query, variables))
|
Add test for order history viewimport pytest
from tests.api.utils import get_graphql_content
@pytest.mark.django_db
@pytest.mark.count_queries(autouse=False)
def test_user_order_details(user_api_client, order_with_lines, count_queries):
query = """
fragment OrderPrice on TaxedMoney {
gross {
amount
currency
}
net {
amount
currency
}
}
fragment Address on Address {
id
firstName
lastName
companyName
streetAddress1
streetAddress2
city
postalCode
country {
code
country
}
countryArea
phone
isDefaultBillingAddress
isDefaultShippingAddress
}
fragment Price on TaxedMoney {
gross {
amount
currency
}
net {
amount
currency
}
}
fragment ProductVariant on ProductVariant {
id
name
pricing {
onSale
priceUndiscounted {
...Price
}
price {
...Price
}
}
product {
id
name
thumbnail {
url
alt
}
thumbnail2x: thumbnail(size: 510) {
url
}
}
}
fragment OrderDetail on Order {
userEmail
paymentStatus
paymentStatusDisplay
status
statusDisplay
id
number
shippingAddress {
...Address
}
lines {
productName
quantity
variant {
...ProductVariant
}
unitPrice {
currency
...OrderPrice
}
}
subtotal {
...OrderPrice
}
total {
...OrderPrice
}
shippingPrice {
...OrderPrice
}
}
query OrderByToken($token: UUID!) {
orderByToken(token: $token) {
...OrderDetail
}
}
"""
variables = {
"token": order_with_lines.token,
}
get_graphql_content(user_api_client.post_graphql(query, variables))
|
<commit_before><commit_msg>Add test for order history view<commit_after>import pytest
from tests.api.utils import get_graphql_content
@pytest.mark.django_db
@pytest.mark.count_queries(autouse=False)
def test_user_order_details(user_api_client, order_with_lines, count_queries):
query = """
fragment OrderPrice on TaxedMoney {
gross {
amount
currency
}
net {
amount
currency
}
}
fragment Address on Address {
id
firstName
lastName
companyName
streetAddress1
streetAddress2
city
postalCode
country {
code
country
}
countryArea
phone
isDefaultBillingAddress
isDefaultShippingAddress
}
fragment Price on TaxedMoney {
gross {
amount
currency
}
net {
amount
currency
}
}
fragment ProductVariant on ProductVariant {
id
name
pricing {
onSale
priceUndiscounted {
...Price
}
price {
...Price
}
}
product {
id
name
thumbnail {
url
alt
}
thumbnail2x: thumbnail(size: 510) {
url
}
}
}
fragment OrderDetail on Order {
userEmail
paymentStatus
paymentStatusDisplay
status
statusDisplay
id
number
shippingAddress {
...Address
}
lines {
productName
quantity
variant {
...ProductVariant
}
unitPrice {
currency
...OrderPrice
}
}
subtotal {
...OrderPrice
}
total {
...OrderPrice
}
shippingPrice {
...OrderPrice
}
}
query OrderByToken($token: UUID!) {
orderByToken(token: $token) {
...OrderDetail
}
}
"""
variables = {
"token": order_with_lines.token,
}
get_graphql_content(user_api_client.post_graphql(query, variables))
|
|
d009c998007fe3bd4a8d3134499e3fde99513381
|
pyimgaug/test2.py
|
pyimgaug/test2.py
|
from __future__ import print_function, division
import augmenters2 as iaa
import parameters as iap
#from skimage import
import numpy as np
import time
def main():
seq = iaa.Sequence([iaa.Fliplr(0.5), iaa.Flipud(0.5)])
imgs = np.zeros((1, 2, 2, 1), dtype=np.uint8)
imgs[0, 0, :, 0] = 255
imgs[0, 1, 1, 0] = 255
print("[Test 1] random hflips/vflips")
print("imgs", imgs[:, :, :, 0])
for i in range(10):
auged = seq.transform(imgs)
print("#%02d" % (i,), auged[:, :, :, 0])
print("[Test 2] deterministic hflips/vflips")
dseqs = seq.to_deterministic(4)
for dseq_i, dseq in enumerate(dseqs):
for i in range(10):
auged = dseq.transform(imgs)
print("#%02d/%02d" % (dseq_i, i,), auged[:, :, :, 0])
print("[Time Measurements]")
times = []
for i in range(1):
start = time.time()
dseq = seq.to_deterministic(1000)
times.append(time.time() - start)
print("[Time 1] avg=%.4f, var=%.4f, range=[%.4f, %.4f]" % (np.average(times), np.var(times), np.min(times), np.max(times)))
param = 1
start = time.time()
for i in range(1000 * 1000):
iaa.Deterministic(1)
req = time.time() - start
print("[Time 2] %.4f per 1M, %.4f per 1k" % (req, req/1000))
param = iap.Binomial(0.5)
start = time.time()
for i in range(1000 * 1000):
param.draw_sample(random_state=np.random)
req = time.time() - start
print("[Time 3] %.4f per 1M, %.4f per 1k" % (req, req/1000))
param = iap.Binomial(0.5)
start = time.time()
for i in range(1000 * 1000):
param.draw_sample()
req = time.time() - start
print("[Time 4] %.4f per 1M, %.4f per 1k" % (req, req/1000))
if __name__ == "__main__":
main()
|
Refactor tests for current version
|
Refactor tests for current version
|
Python
|
mit
|
aleju/ImageAugmenter,nektor211/imgaug,aleju/imgaug,aleju/imgaug
|
Refactor tests for current version
|
from __future__ import print_function, division
import augmenters2 as iaa
import parameters as iap
#from skimage import
import numpy as np
import time
def main():
seq = iaa.Sequence([iaa.Fliplr(0.5), iaa.Flipud(0.5)])
imgs = np.zeros((1, 2, 2, 1), dtype=np.uint8)
imgs[0, 0, :, 0] = 255
imgs[0, 1, 1, 0] = 255
print("[Test 1] random hflips/vflips")
print("imgs", imgs[:, :, :, 0])
for i in range(10):
auged = seq.transform(imgs)
print("#%02d" % (i,), auged[:, :, :, 0])
print("[Test 2] deterministic hflips/vflips")
dseqs = seq.to_deterministic(4)
for dseq_i, dseq in enumerate(dseqs):
for i in range(10):
auged = dseq.transform(imgs)
print("#%02d/%02d" % (dseq_i, i,), auged[:, :, :, 0])
print("[Time Measurements]")
times = []
for i in range(1):
start = time.time()
dseq = seq.to_deterministic(1000)
times.append(time.time() - start)
print("[Time 1] avg=%.4f, var=%.4f, range=[%.4f, %.4f]" % (np.average(times), np.var(times), np.min(times), np.max(times)))
param = 1
start = time.time()
for i in range(1000 * 1000):
iaa.Deterministic(1)
req = time.time() - start
print("[Time 2] %.4f per 1M, %.4f per 1k" % (req, req/1000))
param = iap.Binomial(0.5)
start = time.time()
for i in range(1000 * 1000):
param.draw_sample(random_state=np.random)
req = time.time() - start
print("[Time 3] %.4f per 1M, %.4f per 1k" % (req, req/1000))
param = iap.Binomial(0.5)
start = time.time()
for i in range(1000 * 1000):
param.draw_sample()
req = time.time() - start
print("[Time 4] %.4f per 1M, %.4f per 1k" % (req, req/1000))
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Refactor tests for current version<commit_after>
|
from __future__ import print_function, division
import augmenters2 as iaa
import parameters as iap
#from skimage import
import numpy as np
import time
def main():
seq = iaa.Sequence([iaa.Fliplr(0.5), iaa.Flipud(0.5)])
imgs = np.zeros((1, 2, 2, 1), dtype=np.uint8)
imgs[0, 0, :, 0] = 255
imgs[0, 1, 1, 0] = 255
print("[Test 1] random hflips/vflips")
print("imgs", imgs[:, :, :, 0])
for i in range(10):
auged = seq.transform(imgs)
print("#%02d" % (i,), auged[:, :, :, 0])
print("[Test 2] deterministic hflips/vflips")
dseqs = seq.to_deterministic(4)
for dseq_i, dseq in enumerate(dseqs):
for i in range(10):
auged = dseq.transform(imgs)
print("#%02d/%02d" % (dseq_i, i,), auged[:, :, :, 0])
print("[Time Measurements]")
times = []
for i in range(1):
start = time.time()
dseq = seq.to_deterministic(1000)
times.append(time.time() - start)
print("[Time 1] avg=%.4f, var=%.4f, range=[%.4f, %.4f]" % (np.average(times), np.var(times), np.min(times), np.max(times)))
param = 1
start = time.time()
for i in range(1000 * 1000):
iaa.Deterministic(1)
req = time.time() - start
print("[Time 2] %.4f per 1M, %.4f per 1k" % (req, req/1000))
param = iap.Binomial(0.5)
start = time.time()
for i in range(1000 * 1000):
param.draw_sample(random_state=np.random)
req = time.time() - start
print("[Time 3] %.4f per 1M, %.4f per 1k" % (req, req/1000))
param = iap.Binomial(0.5)
start = time.time()
for i in range(1000 * 1000):
param.draw_sample()
req = time.time() - start
print("[Time 4] %.4f per 1M, %.4f per 1k" % (req, req/1000))
if __name__ == "__main__":
main()
|
Refactor tests for current versionfrom __future__ import print_function, division
import augmenters2 as iaa
import parameters as iap
#from skimage import
import numpy as np
import time
def main():
seq = iaa.Sequence([iaa.Fliplr(0.5), iaa.Flipud(0.5)])
imgs = np.zeros((1, 2, 2, 1), dtype=np.uint8)
imgs[0, 0, :, 0] = 255
imgs[0, 1, 1, 0] = 255
print("[Test 1] random hflips/vflips")
print("imgs", imgs[:, :, :, 0])
for i in range(10):
auged = seq.transform(imgs)
print("#%02d" % (i,), auged[:, :, :, 0])
print("[Test 2] deterministic hflips/vflips")
dseqs = seq.to_deterministic(4)
for dseq_i, dseq in enumerate(dseqs):
for i in range(10):
auged = dseq.transform(imgs)
print("#%02d/%02d" % (dseq_i, i,), auged[:, :, :, 0])
print("[Time Measurements]")
times = []
for i in range(1):
start = time.time()
dseq = seq.to_deterministic(1000)
times.append(time.time() - start)
print("[Time 1] avg=%.4f, var=%.4f, range=[%.4f, %.4f]" % (np.average(times), np.var(times), np.min(times), np.max(times)))
param = 1
start = time.time()
for i in range(1000 * 1000):
iaa.Deterministic(1)
req = time.time() - start
print("[Time 2] %.4f per 1M, %.4f per 1k" % (req, req/1000))
param = iap.Binomial(0.5)
start = time.time()
for i in range(1000 * 1000):
param.draw_sample(random_state=np.random)
req = time.time() - start
print("[Time 3] %.4f per 1M, %.4f per 1k" % (req, req/1000))
param = iap.Binomial(0.5)
start = time.time()
for i in range(1000 * 1000):
param.draw_sample()
req = time.time() - start
print("[Time 4] %.4f per 1M, %.4f per 1k" % (req, req/1000))
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Refactor tests for current version<commit_after>from __future__ import print_function, division
import augmenters2 as iaa
import parameters as iap
#from skimage import
import numpy as np
import time
def main():
seq = iaa.Sequence([iaa.Fliplr(0.5), iaa.Flipud(0.5)])
imgs = np.zeros((1, 2, 2, 1), dtype=np.uint8)
imgs[0, 0, :, 0] = 255
imgs[0, 1, 1, 0] = 255
print("[Test 1] random hflips/vflips")
print("imgs", imgs[:, :, :, 0])
for i in range(10):
auged = seq.transform(imgs)
print("#%02d" % (i,), auged[:, :, :, 0])
print("[Test 2] deterministic hflips/vflips")
dseqs = seq.to_deterministic(4)
for dseq_i, dseq in enumerate(dseqs):
for i in range(10):
auged = dseq.transform(imgs)
print("#%02d/%02d" % (dseq_i, i,), auged[:, :, :, 0])
print("[Time Measurements]")
times = []
for i in range(1):
start = time.time()
dseq = seq.to_deterministic(1000)
times.append(time.time() - start)
print("[Time 1] avg=%.4f, var=%.4f, range=[%.4f, %.4f]" % (np.average(times), np.var(times), np.min(times), np.max(times)))
param = 1
start = time.time()
for i in range(1000 * 1000):
iaa.Deterministic(1)
req = time.time() - start
print("[Time 2] %.4f per 1M, %.4f per 1k" % (req, req/1000))
param = iap.Binomial(0.5)
start = time.time()
for i in range(1000 * 1000):
param.draw_sample(random_state=np.random)
req = time.time() - start
print("[Time 3] %.4f per 1M, %.4f per 1k" % (req, req/1000))
param = iap.Binomial(0.5)
start = time.time()
for i in range(1000 * 1000):
param.draw_sample()
req = time.time() - start
print("[Time 4] %.4f per 1M, %.4f per 1k" % (req, req/1000))
if __name__ == "__main__":
main()
|
|
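The benchmark half of the test above wraps hand-written million-iteration loops in time.time() calls. A small stand-alone sketch of the same measurement with timeit follows; the sampled callable is a placeholder, since the augmenters2 and parameters modules in the record are internal to that repository.

import timeit

def sample():
    # placeholder for param.draw_sample(); swap in the real callable to measure it
    return 0

req = timeit.timeit(sample, number=1_000_000)
print("%.4f per 1M, %.4f per 1k" % (req, req / 1000))
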
fff7b3bd57fe94d7ac450da89d9a241ddc5c2f06
|
ipmi_syncexample.py
|
ipmi_syncexample.py
|
#!/usr/bin/env python
from ipmi_command import ipmi_command
import os
import sys
password=os.environ['IPMIPASSWORD']
os.environ['IPMIPASSWORD']=""
if (len(sys.argv) < 3):
print "Usage:"
print " IPMIPASSWORD=password %s bmc username <cmd> <optarg>"%sys.argv[0]
sys.exit(1)
bmc=sys.argv[1]
userid=sys.argv[2]
command=sys.argv[3]
arg=None
if len(sys.argv)==5:
arg=sys.argv[4]
ipmicmd = ipmi_command(bmc=bmc,userid=userid,password=password)
if command == 'power':
if arg:
print ipmicmd.set_power(arg,wait=True)
else:
print ipmicmd.get_power()
elif command == 'bootdev':
if arg:
print ipmicmd.set_bootdev(arg)
else:
print ipmicmd.get_bootdev()
|
Add an example chunk of code showing how to code in a synchronous way to the python library
|
Add an example chunk of code showing how to code in a synchronous way to the python library
|
Python
|
apache-2.0
|
stackforge/pyghmi,benoit-canet/pyghmi,openstack/pyghmi,openstack/pyghmi
|
Add an example chunk of code showing how to code in a synchronous way to the python library
|
#!/usr/bin/env python
from ipmi_command import ipmi_command
import os
import sys
password=os.environ['IPMIPASSWORD']
os.environ['IPMIPASSWORD']=""
if (len(sys.argv) < 3):
print "Usage:"
print " IPMIPASSWORD=password %s bmc username <cmd> <optarg>"%sys.argv[0]
sys.exit(1)
bmc=sys.argv[1]
userid=sys.argv[2]
command=sys.argv[3]
arg=None
if len(sys.argv)==5:
arg=sys.argv[4]
ipmicmd = ipmi_command(bmc=bmc,userid=userid,password=password)
if command == 'power':
if arg:
print ipmicmd.set_power(arg,wait=True)
else:
print ipmicmd.get_power()
elif command == 'bootdev':
if arg:
print ipmicmd.set_bootdev(arg)
else:
print ipmicmd.get_bootdev()
|
<commit_before><commit_msg>Add an example chunk of code showing how to code in a synchronous way to the python library<commit_after>
|
#!/usr/bin/env python
from ipmi_command import ipmi_command
import os
import sys
password=os.environ['IPMIPASSWORD']
os.environ['IPMIPASSWORD']=""
if (len(sys.argv) < 3):
print "Usage:"
print " IPMIPASSWORD=password %s bmc username <cmd> <optarg>"%sys.argv[0]
sys.exit(1)
bmc=sys.argv[1]
userid=sys.argv[2]
command=sys.argv[3]
arg=None
if len(sys.argv)==5:
arg=sys.argv[4]
ipmicmd = ipmi_command(bmc=bmc,userid=userid,password=password)
if command == 'power':
if arg:
print ipmicmd.set_power(arg,wait=True)
else:
print ipmicmd.get_power()
elif command == 'bootdev':
if arg:
print ipmicmd.set_bootdev(arg)
else:
print ipmicmd.get_bootdev()
|
Add an example chunk of code showing how to code in a synchronous way to the python library#!/usr/bin/env python
from ipmi_command import ipmi_command
import os
import sys
password=os.environ['IPMIPASSWORD']
os.environ['IPMIPASSWORD']=""
if (len(sys.argv) < 3):
print "Usage:"
print " IPMIPASSWORD=password %s bmc username <cmd> <optarg>"%sys.argv[0]
sys.exit(1)
bmc=sys.argv[1]
userid=sys.argv[2]
command=sys.argv[3]
arg=None
if len(sys.argv)==5:
arg=sys.argv[4]
ipmicmd = ipmi_command(bmc=bmc,userid=userid,password=password)
if command == 'power':
if arg:
print ipmicmd.set_power(arg,wait=True)
else:
print ipmicmd.get_power()
elif command == 'bootdev':
if arg:
print ipmicmd.set_bootdev(arg)
else:
print ipmicmd.get_bootdev()
|
<commit_before><commit_msg>Add an example chunk of code showing how to code in a synchronous way to the python library<commit_after>#!/usr/bin/env python
from ipmi_command import ipmi_command
import os
import sys
password=os.environ['IPMIPASSWORD']
os.environ['IPMIPASSWORD']=""
if (len(sys.argv) < 3):
print "Usage:"
print " IPMIPASSWORD=password %s bmc username <cmd> <optarg>"%sys.argv[0]
sys.exit(1)
bmc=sys.argv[1]
userid=sys.argv[2]
command=sys.argv[3]
arg=None
if len(sys.argv)==5:
arg=sys.argv[4]
ipmicmd = ipmi_command(bmc=bmc,userid=userid,password=password)
if command == 'power':
if arg:
print ipmicmd.set_power(arg,wait=True)
else:
print ipmicmd.get_power()
elif command == 'bootdev':
if arg:
print ipmicmd.set_bootdev(arg)
else:
print ipmicmd.get_bootdev()
|
|
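One thing to watch in the synchronous example above: it guards with len(sys.argv) < 3 but later reads sys.argv[3], so invoking it with only a BMC and username still fails with an IndexError. A sketch of tightened argument handling, leaving the rest of the script unchanged, might look like this:

import os
import sys

if len(sys.argv) < 4:  # script name, bmc, username and command are all required
    print("Usage:")
    print(" IPMIPASSWORD=password %s bmc username <cmd> <optarg>" % sys.argv[0])
    sys.exit(1)

bmc, userid, command = sys.argv[1], sys.argv[2], sys.argv[3]
arg = sys.argv[4] if len(sys.argv) == 5 else None
password = os.environ.get("IPMIPASSWORD", "")
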
59da29fac5e232fce0d4bf15704c976175527750
|
src/users/migrations/0010_cocrecord.py
|
src/users/migrations/0010_cocrecord.py
|
# Generated by Django 3.0.2 on 2020-02-23 12:28
import core.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('users', '0009_auto_20160227_1656'),
]
operations = [
migrations.CreateModel(
name='CocRecord',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('coc_version', models.CharField(max_length=15, verbose_name='latest agreed CoC version')),
('user', core.models.BigForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='user')),
],
),
]
|
Add migration file for the model of CoC record
|
Add migration file for the model of CoC record
|
Python
|
mit
|
pycontw/pycontw2016,pycontw/pycontw2016,pycontw/pycontw2016,pycontw/pycontw2016
|
Add migration file for the model of CoC record
|
# Generated by Django 3.0.2 on 2020-02-23 12:28
import core.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('users', '0009_auto_20160227_1656'),
]
operations = [
migrations.CreateModel(
name='CocRecord',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('coc_version', models.CharField(max_length=15, verbose_name='latest agreed CoC version')),
('user', core.models.BigForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='user')),
],
),
]
|
<commit_before><commit_msg>Add migration file for the model of CoC record<commit_after>
|
# Generated by Django 3.0.2 on 2020-02-23 12:28
import core.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('users', '0009_auto_20160227_1656'),
]
operations = [
migrations.CreateModel(
name='CocRecord',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('coc_version', models.CharField(max_length=15, verbose_name='latest agreed CoC version')),
('user', core.models.BigForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='user')),
],
),
]
|
Add migration file for the model of CoC record# Generated by Django 3.0.2 on 2020-02-23 12:28
import core.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('users', '0009_auto_20160227_1656'),
]
operations = [
migrations.CreateModel(
name='CocRecord',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('coc_version', models.CharField(max_length=15, verbose_name='latest agreed CoC version')),
('user', core.models.BigForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='user')),
],
),
]
|
<commit_before><commit_msg>Add migration file for the model of CoC record<commit_after># Generated by Django 3.0.2 on 2020-02-23 12:28
import core.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('users', '0009_auto_20160227_1656'),
]
operations = [
migrations.CreateModel(
name='CocRecord',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('coc_version', models.CharField(max_length=15, verbose_name='latest agreed CoC version')),
('user', core.models.BigForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='user')),
],
),
]
|
|
18327e959a2e6c5cadbe63a7071bba8df478276a
|
tests/startsymbol_tests/SettingTest.py
|
tests/startsymbol_tests/SettingTest.py
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 10.08.2017 23:00
:Licence GNUv3
Part of grammpy
"""
from unittest import TestCase, main
from grammpy import *
class SettingTest(TestCase):
pass
if __name__ == '__main__':
main()
|
Add file for test of start symbol setting
|
Add file for test of start symbol setting
|
Python
|
mit
|
PatrikValkovic/grammpy
|
Add file for test of start symbol setting
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 10.08.2017 23:00
:Licence GNUv3
Part of grammpy
"""
from unittest import TestCase, main
from grammpy import *
class SettingTest(TestCase):
pass
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add file for test of start symbol setting<commit_after>
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 10.08.2017 23:00
:Licence GNUv3
Part of grammpy
"""
from unittest import TestCase, main
from grammpy import *
class SettingTest(TestCase):
pass
if __name__ == '__main__':
main()
|
Add file for test of start symbol setting#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 10.08.2017 23:00
:Licence GNUv3
Part of grammpy
"""
from unittest import TestCase, main
from grammpy import *
class SettingTest(TestCase):
pass
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add file for test of start symbol setting<commit_after>#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 10.08.2017 23:00
:Licence GNUv3
Part of grammpy
"""
from unittest import TestCase, main
from grammpy import *
class SettingTest(TestCase):
pass
if __name__ == '__main__':
main()
|
|
f771a1fa3bc7cdaeb2ef5e09d3a01701b166a009
|
atoman/filtering/filters/tests/test_species.py
|
atoman/filtering/filters/tests/test_species.py
|
"""
Unit tests for the species filter
"""
import unittest
import numpy as np
from ....system import lattice
from .. import speciesFilter
from .. import base
################################################################################
class TestSpeciesFilter(unittest.TestCase):
"""
Test species filter
"""
def setUp(self):
"""
Called before each test
"""
# generate lattice
self.lattice = lattice.Lattice()
self.lattice.addAtom("Au", [0,0,3], 0)
self.lattice.addAtom("He", [1,1,1], 0)
self.lattice.addAtom("He", [2,3,1], 0)
self.lattice.addAtom("Au", [3,0,0], 0)
self.lattice.addAtom("Au", [4,1,4], 0)
self.lattice.addAtom("He", [1,4,0], 0)
self.lattice.addAtom("He", [2,1.8,4], 0)
self.lattice.addAtom("H_", [4,4,4], 0)
self.lattice.PBC[:] = 1
# filter
self.filter = speciesFilter.SpeciesFilter("Species")
def tearDown(self):
"""
Called after each test
"""
# remove refs
self.lattice = None
self.filter = None
def test_speciesFilter(self):
"""
Species filter
"""
# test 1
settings = speciesFilter.SpeciesFilterSettings()
settings.updateSetting("visibleSpeciesList", ["H_", "Au"])
# filter input
filterInput = base.FilterInput()
filterInput.inputState = self.lattice
visibleAtoms = np.arange(self.lattice.NAtoms, dtype=np.int32)
filterInput.visibleAtoms = visibleAtoms
# run filter
result = self.filter.apply(filterInput, settings)
self.assertIsInstance(result, base.FilterResult)
# make sure num visible is correct
self.assertEqual(len(visibleAtoms), 4)
# make sure correct atoms selected
self.assertTrue(0 in visibleAtoms)
self.assertTrue(3 in visibleAtoms)
self.assertTrue(4 in visibleAtoms)
self.assertTrue(7 in visibleAtoms)
# test 2
settings = speciesFilter.SpeciesFilterSettings()
settings.updateSetting("visibleSpeciesList", [])
# filter input
filterInput = base.FilterInput()
filterInput.inputState = self.lattice
visibleAtoms = np.arange(self.lattice.NAtoms, dtype=np.int32)
filterInput.visibleAtoms = visibleAtoms
# run filter
result = self.filter.apply(filterInput, settings)
self.assertIsInstance(result, base.FilterResult)
# make sure num visible is correct
self.assertEqual(len(visibleAtoms), 0)
|
Add test for species filter.
|
Add test for species filter.
|
Python
|
mit
|
chrisdjscott/Atoman,chrisdjscott/Atoman,chrisdjscott/Atoman,chrisdjscott/Atoman,chrisdjscott/Atoman
|
Add test for species filter.
|
"""
Unit tests for the species filter
"""
import unittest
import numpy as np
from ....system import lattice
from .. import speciesFilter
from .. import base
################################################################################
class TestSpeciesFilter(unittest.TestCase):
"""
Test species filter
"""
def setUp(self):
"""
Called before each test
"""
# generate lattice
self.lattice = lattice.Lattice()
self.lattice.addAtom("Au", [0,0,3], 0)
self.lattice.addAtom("He", [1,1,1], 0)
self.lattice.addAtom("He", [2,3,1], 0)
self.lattice.addAtom("Au", [3,0,0], 0)
self.lattice.addAtom("Au", [4,1,4], 0)
self.lattice.addAtom("He", [1,4,0], 0)
self.lattice.addAtom("He", [2,1.8,4], 0)
self.lattice.addAtom("H_", [4,4,4], 0)
self.lattice.PBC[:] = 1
# filter
self.filter = speciesFilter.SpeciesFilter("Species")
def tearDown(self):
"""
Called after each test
"""
# remove refs
self.lattice = None
self.filter = None
def test_speciesFilter(self):
"""
Species filter
"""
# test 1
settings = speciesFilter.SpeciesFilterSettings()
settings.updateSetting("visibleSpeciesList", ["H_", "Au"])
# filter input
filterInput = base.FilterInput()
filterInput.inputState = self.lattice
visibleAtoms = np.arange(self.lattice.NAtoms, dtype=np.int32)
filterInput.visibleAtoms = visibleAtoms
# run filter
result = self.filter.apply(filterInput, settings)
self.assertIsInstance(result, base.FilterResult)
# make sure num visible is correct
self.assertEqual(len(visibleAtoms), 4)
# make sure correct atoms selected
self.assertTrue(0 in visibleAtoms)
self.assertTrue(3 in visibleAtoms)
self.assertTrue(4 in visibleAtoms)
self.assertTrue(7 in visibleAtoms)
# test 2
settings = speciesFilter.SpeciesFilterSettings()
settings.updateSetting("visibleSpeciesList", [])
# filter input
filterInput = base.FilterInput()
filterInput.inputState = self.lattice
visibleAtoms = np.arange(self.lattice.NAtoms, dtype=np.int32)
filterInput.visibleAtoms = visibleAtoms
# run filter
result = self.filter.apply(filterInput, settings)
self.assertIsInstance(result, base.FilterResult)
# make sure num visible is correct
self.assertEqual(len(visibleAtoms), 0)
|
<commit_before><commit_msg>Add test for species filter.<commit_after>
|
"""
Unit tests for the species filter
"""
import unittest
import numpy as np
from ....system import lattice
from .. import speciesFilter
from .. import base
################################################################################
class TestSpeciesFilter(unittest.TestCase):
"""
Test species filter
"""
def setUp(self):
"""
Called before each test
"""
# generate lattice
self.lattice = lattice.Lattice()
self.lattice.addAtom("Au", [0,0,3], 0)
self.lattice.addAtom("He", [1,1,1], 0)
self.lattice.addAtom("He", [2,3,1], 0)
self.lattice.addAtom("Au", [3,0,0], 0)
self.lattice.addAtom("Au", [4,1,4], 0)
self.lattice.addAtom("He", [1,4,0], 0)
self.lattice.addAtom("He", [2,1.8,4], 0)
self.lattice.addAtom("H_", [4,4,4], 0)
self.lattice.PBC[:] = 1
# filter
self.filter = speciesFilter.SpeciesFilter("Species")
def tearDown(self):
"""
Called after each test
"""
# remove refs
self.lattice = None
self.filter = None
def test_speciesFilter(self):
"""
Species filter
"""
# test 1
settings = speciesFilter.SpeciesFilterSettings()
settings.updateSetting("visibleSpeciesList", ["H_", "Au"])
# filter input
filterInput = base.FilterInput()
filterInput.inputState = self.lattice
visibleAtoms = np.arange(self.lattice.NAtoms, dtype=np.int32)
filterInput.visibleAtoms = visibleAtoms
# run filter
result = self.filter.apply(filterInput, settings)
self.assertIsInstance(result, base.FilterResult)
# make sure num visible is correct
self.assertEqual(len(visibleAtoms), 4)
# make sure correct atoms selected
self.assertTrue(0 in visibleAtoms)
self.assertTrue(3 in visibleAtoms)
self.assertTrue(4 in visibleAtoms)
self.assertTrue(7 in visibleAtoms)
# test 2
settings = speciesFilter.SpeciesFilterSettings()
settings.updateSetting("visibleSpeciesList", [])
# filter input
filterInput = base.FilterInput()
filterInput.inputState = self.lattice
visibleAtoms = np.arange(self.lattice.NAtoms, dtype=np.int32)
filterInput.visibleAtoms = visibleAtoms
# run filter
result = self.filter.apply(filterInput, settings)
self.assertIsInstance(result, base.FilterResult)
# make sure num visible is correct
self.assertEqual(len(visibleAtoms), 0)
|
Add test for species filter.
"""
Unit tests for the species filter
"""
import unittest
import numpy as np
from ....system import lattice
from .. import speciesFilter
from .. import base
################################################################################
class TestSpeciesFilter(unittest.TestCase):
"""
Test species filter
"""
def setUp(self):
"""
Called before each test
"""
# generate lattice
self.lattice = lattice.Lattice()
self.lattice.addAtom("Au", [0,0,3], 0)
self.lattice.addAtom("He", [1,1,1], 0)
self.lattice.addAtom("He", [2,3,1], 0)
self.lattice.addAtom("Au", [3,0,0], 0)
self.lattice.addAtom("Au", [4,1,4], 0)
self.lattice.addAtom("He", [1,4,0], 0)
self.lattice.addAtom("He", [2,1.8,4], 0)
self.lattice.addAtom("H_", [4,4,4], 0)
self.lattice.PBC[:] = 1
# filter
self.filter = speciesFilter.SpeciesFilter("Species")
def tearDown(self):
"""
Called after each test
"""
# remove refs
self.lattice = None
self.filter = None
def test_speciesFilter(self):
"""
Species filter
"""
# test 1
settings = speciesFilter.SpeciesFilterSettings()
settings.updateSetting("visibleSpeciesList", ["H_", "Au"])
# filter input
filterInput = base.FilterInput()
filterInput.inputState = self.lattice
visibleAtoms = np.arange(self.lattice.NAtoms, dtype=np.int32)
filterInput.visibleAtoms = visibleAtoms
# run filter
result = self.filter.apply(filterInput, settings)
self.assertIsInstance(result, base.FilterResult)
# make sure num visible is correct
self.assertEqual(len(visibleAtoms), 4)
# make sure correct atoms selected
self.assertTrue(0 in visibleAtoms)
self.assertTrue(3 in visibleAtoms)
self.assertTrue(4 in visibleAtoms)
self.assertTrue(7 in visibleAtoms)
# test 2
settings = speciesFilter.SpeciesFilterSettings()
settings.updateSetting("visibleSpeciesList", [])
# filter input
filterInput = base.FilterInput()
filterInput.inputState = self.lattice
visibleAtoms = np.arange(self.lattice.NAtoms, dtype=np.int32)
filterInput.visibleAtoms = visibleAtoms
# run filter
result = self.filter.apply(filterInput, settings)
self.assertIsInstance(result, base.FilterResult)
# make sure num visible is correct
self.assertEqual(len(visibleAtoms), 0)
|
<commit_before><commit_msg>Add test for species filter.<commit_after>
"""
Unit tests for the species filter
"""
import unittest
import numpy as np
from ....system import lattice
from .. import speciesFilter
from .. import base
################################################################################
class TestSpeciesFilter(unittest.TestCase):
"""
Test species filter
"""
def setUp(self):
"""
Called before each test
"""
# generate lattice
self.lattice = lattice.Lattice()
self.lattice.addAtom("Au", [0,0,3], 0)
self.lattice.addAtom("He", [1,1,1], 0)
self.lattice.addAtom("He", [2,3,1], 0)
self.lattice.addAtom("Au", [3,0,0], 0)
self.lattice.addAtom("Au", [4,1,4], 0)
self.lattice.addAtom("He", [1,4,0], 0)
self.lattice.addAtom("He", [2,1.8,4], 0)
self.lattice.addAtom("H_", [4,4,4], 0)
self.lattice.PBC[:] = 1
# filter
self.filter = speciesFilter.SpeciesFilter("Species")
def tearDown(self):
"""
Called after each test
"""
# remove refs
self.lattice = None
self.filter = None
def test_speciesFilter(self):
"""
Species filter
"""
# test 1
settings = speciesFilter.SpeciesFilterSettings()
settings.updateSetting("visibleSpeciesList", ["H_", "Au"])
# filter input
filterInput = base.FilterInput()
filterInput.inputState = self.lattice
visibleAtoms = np.arange(self.lattice.NAtoms, dtype=np.int32)
filterInput.visibleAtoms = visibleAtoms
# run filter
result = self.filter.apply(filterInput, settings)
self.assertIsInstance(result, base.FilterResult)
# make sure num visible is correct
self.assertEqual(len(visibleAtoms), 4)
# make sure correct atoms selected
self.assertTrue(0 in visibleAtoms)
self.assertTrue(3 in visibleAtoms)
self.assertTrue(4 in visibleAtoms)
self.assertTrue(7 in visibleAtoms)
# test 2
settings = speciesFilter.SpeciesFilterSettings()
settings.updateSetting("visibleSpeciesList", [])
# filter input
filterInput = base.FilterInput()
filterInput.inputState = self.lattice
visibleAtoms = np.arange(self.lattice.NAtoms, dtype=np.int32)
filterInput.visibleAtoms = visibleAtoms
# run filter
result = self.filter.apply(filterInput, settings)
self.assertIsInstance(result, base.FilterResult)
# make sure num visible is correct
self.assertEqual(len(visibleAtoms), 0)
|
|
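The test case above has no __main__ guard; a minimal sketch of running it through unittest follows. The dotted module path is an assumption based on the file location atoman/filtering/filters/tests/test_species.py, and the relative imports mean the package has to be importable when this is executed.

import unittest

# Load and run the species filter tests by module name.
suite = unittest.defaultTestLoader.loadTestsFromName(
    'atoman.filtering.filters.tests.test_species')
unittest.TextTestRunner(verbosity=2).run(suite)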
875ef1e4b07944b9f18d65253d1a7380bb8023e1
|
jsonify.py
|
jsonify.py
|
"""Convert files to json."""
def to_json(file):
"""Convert ini to JSON like thing."""
ini = open(file, 'r')
ini = ini.readlines()
settings_file = open('settings.json', 'w')
settings_file.write("{")
# import pdb; pdb.set_trace()
for line in ini:
if '[' == line[0]:
settings_file.write('"' + line[1:-2] + '"' + ': {')
else:
line = line.split('=')
settings_file.write('"{0}": "{1}", '.format(line[0], line[1][0]))
settings_file.write("}")
to_json('practice.ini')
|
Add jsonify file. Correct format, needs tweaks
|
Add jsonify file. Correct format, needs tweaks
|
Python
|
mpl-2.0
|
flegald/Gameini,flegald/Gameini,flegald/Gameini
|
Add jsonify file. Correct format, needs tweaks
|
"""Convert files to json."""
def to_json(file):
"""Convert ini to JSON like thing."""
ini = open(file, 'r')
ini = ini.readlines()
settings_file = open('settings.json', 'w')
settings_file.write("{")
# import pdb; pdb.set_trace()
for line in ini:
if '[' == line[0]:
settings_file.write('"' + line[1:-2] + '"' + ': {')
else:
line = line.split('=')
settings_file.write('"{0}": "{1}", '.format(line[0], line[1][0]))
settings_file.write("}")
to_json('practice.ini')
|
<commit_before><commit_msg>Add jsonify file. Correct format, needs tweaks<commit_after>
|
"""Convert files to json."""
def to_json(file):
"""Convert ini to JSON like thing."""
ini = open(file, 'r')
ini = ini.readlines()
settings_file = open('settings.json', 'w')
settings_file.write("{")
# import pdb; pdb.set_trace()
for line in ini:
if '[' == line[0]:
settings_file.write('"' + line[1:-2] + '"' + ': {')
else:
line = line.split('=')
settings_file.write('"{0}": "{1}", '.format(line[0], line[1][0]))
settings_file.write("}")
to_json('practice.ini')
|
Add josnify file. Correct format, needs tweaks"""Convert files to json."""
def to_json(file):
"""Convert ini to JSON like thing."""
ini = open(file, 'r')
ini = ini.readlines()
settings_file = open('settings.json', 'w')
settings_file.write("{")
# import pdb; pdb.set_trace()
for line in ini:
if '[' == line[0]:
settings_file.write('"' + line[1:-2] + '"' + ': {')
else:
line = line.split('=')
settings_file.write('"{0}": "{1}", '.format(line[0], line[1][0]))
settings_file.write("}")
to_json('practice.ini')
|
<commit_before><commit_msg>Add jsonify file. Correct format, needs tweaks<commit_after>"""Convert files to json."""
def to_json(file):
"""Convert ini to JSON like thing."""
ini = open(file, 'r')
ini = ini.readlines()
settings_file = open('settings.json', 'w')
settings_file.write("{")
# import pdb; pdb.set_trace()
for line in ini:
if '[' == line[0]:
settings_file.write('"' + line[1:-2] + '"' + ': {')
else:
line = line.split('=')
settings_file.write('"{0}": "{1}", '.format(line[0], line[1][0]))
settings_file.write("}")
to_json('practice.ini')
|
|
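As the commit message itself notes, the converter above still needs tweaks: the nested section objects are never closed, every value keeps a trailing comma and is truncated to its first character, and neither file handle is closed. A minimal sketch of the same conversion done with the standard library, keeping the practice.ini / settings.json names used above:

import configparser
import json

def ini_to_json(ini_path, json_path='settings.json'):
    # Parse the ini file into {section: {option: value}} and serialise it as JSON.
    parser = configparser.ConfigParser()
    parser.read(ini_path)
    data = {section: dict(parser.items(section)) for section in parser.sections()}
    with open(json_path, 'w') as out:
        json.dump(data, out, indent=2)

ini_to_json('practice.ini')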
d0a7b697ab1517e071c70d5904f96f85fe12ac89
|
mainapp/management/commands/repair-file-index.py
|
mainapp/management/commands/repair-file-index.py
|
from typing import Set
from django.core.management.base import BaseCommand
from mainapp.functions.minio import minio_client, minio_file_bucket
from mainapp.models import File
class Command(BaseCommand):
help = "Marks files as missing in the database that are deleted in minio"
def handle(self, *args, **options):
existing_files = set(
int(file.object_name)
for file in minio_client().list_objects(minio_file_bucket)
)
expected_files: Set[int] = set(
File.objects.filter(filesize__gt=0).values_list("id", flat=True)
)
missing_files = expected_files - existing_files
if len(missing_files) > 0:
self.stdout.write(
f"{missing_files} files are marked as imported but aren't available in minio"
)
File.objects.filter(id__in=missing_files).update(filesize=None)
|
Add command to repair the file index
|
Add command to repair the file index
|
Python
|
mit
|
meine-stadt-transparent/meine-stadt-transparent,meine-stadt-transparent/meine-stadt-transparent,meine-stadt-transparent/meine-stadt-transparent,meine-stadt-transparent/meine-stadt-transparent
|
Add command to repair the file index
|
from typing import Set
from django.core.management.base import BaseCommand
from mainapp.functions.minio import minio_client, minio_file_bucket
from mainapp.models import File
class Command(BaseCommand):
help = "Marks files as missing in the database that are deleted in minio"
def handle(self, *args, **options):
existing_files = set(
int(file.object_name)
for file in minio_client().list_objects(minio_file_bucket)
)
expected_files: Set[int] = set(
File.objects.filter(filesize__gt=0).values_list("id", flat=True)
)
missing_files = expected_files - existing_files
if len(missing_files) > 0:
self.stdout.write(
f"{missing_files} files are marked as imported but aren't available in minio"
)
File.objects.filter(id__in=missing_files).update(filesize=None)
|
<commit_before><commit_msg>Add command to repair the file index<commit_after>
|
from typing import Set
from django.core.management.base import BaseCommand
from mainapp.functions.minio import minio_client, minio_file_bucket
from mainapp.models import File
class Command(BaseCommand):
help = "Marks files as missing in the database that are deleted in minio"
def handle(self, *args, **options):
existing_files = set(
int(file.object_name)
for file in minio_client().list_objects(minio_file_bucket)
)
expected_files: Set[int] = set(
File.objects.filter(filesize__gt=0).values_list("id", flat=True)
)
missing_files = expected_files - existing_files
if len(missing_files) > 0:
self.stdout.write(
f"{missing_files} files are marked as imported but aren't available in minio"
)
File.objects.filter(id__in=missing_files).update(filesize=None)
|
Add command to repair the file indexfrom typing import Set
from django.core.management.base import BaseCommand
from mainapp.functions.minio import minio_client, minio_file_bucket
from mainapp.models import File
class Command(BaseCommand):
help = "Marks files as missing in the database that are deleted in minio"
def handle(self, *args, **options):
existing_files = set(
int(file.object_name)
for file in minio_client().list_objects(minio_file_bucket)
)
expected_files: Set[int] = set(
File.objects.filter(filesize__gt=0).values_list("id", flat=True)
)
missing_files = expected_files - existing_files
if len(missing_files) > 0:
self.stdout.write(
f"{missing_files} files are marked as imported but aren't available in minio"
)
File.objects.filter(id__in=missing_files).update(filesize=None)
|
<commit_before><commit_msg>Add command to repair the file index<commit_after>from typing import Set
from django.core.management.base import BaseCommand
from mainapp.functions.minio import minio_client, minio_file_bucket
from mainapp.models import File
class Command(BaseCommand):
help = "Marks files as missing in the database that are deleted in minio"
def handle(self, *args, **options):
existing_files = set(
int(file.object_name)
for file in minio_client().list_objects(minio_file_bucket)
)
expected_files: Set[int] = set(
File.objects.filter(filesize__gt=0).values_list("id", flat=True)
)
missing_files = expected_files - existing_files
if len(missing_files) > 0:
self.stdout.write(
f"{missing_files} files are marked as imported but aren't available in minio"
)
File.objects.filter(id__in=missing_files).update(filesize=None)
|
|
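A minimal sketch of exercising the command above from a Django shell or test, assuming the app is installed so the command resolves by its file name. Note that the status message interpolates missing_files itself rather than len(missing_files), so the output lists the ids instead of a count.

from django.core.management import call_command

# Marks every File row whose object is gone from the minio bucket as not downloaded.
call_command('repair-file-index')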
e561c1354d2f9a550f2b27bb88d8e4d0f3f76203
|
common/djangoapps/student/management/commands/recover_truncated_anonymous_ids.py
|
common/djangoapps/student/management/commands/recover_truncated_anonymous_ids.py
|
"""
Generate sql commands to fix truncated anonymous student ids in the ORA database
"""
import sys
from django.core.management.base import NoArgsCommand
from student.models import AnonymousUserId, anonymous_id_for_user
class Command(NoArgsCommand):
help = __doc__
def handle_noargs(self, **options):
"""
Reads a list of ids (newline separated) from stdin, and
dumps sql queries to run on the ORA database to fix those ids
from their truncated form to the full 32 character change.
The following query will generate the list of ids needed to be fixed
from the ORA database:
SELECT student_id FROM peer_grading_calibrationhistory WHERE LENGTH(student_id) = 16
UNION SELECT student_id FROM controller_submission WHERE LENGTH(student_id) = 16
UNION SELECT student_id FROM metrics_timing WHERE LENGTH(student_id) = 16
UNION SELECT student_id FROM metrics_studentcourseprofile WHERE LENGTH(student_id) = 16
UNION SELECT student_id FROM metrics_studentprofile WHERE LENGTH(student_id) = 16;
"""
ids = [line.strip() for line in sys.stdin]
old_ids = AnonymousUserId.objects.raw(
"""
SELECT *
FROM student_anonymoususerid_temp_archive
WHERE anonymous_user_id IN ({})
""".format(','.join(['%s']*len(ids))),
ids
)
for old_id in old_ids:
new_id = anonymous_id_for_user(old_id.user, old_id.course_id)
for table in ('peer_grading_calibrationhistory', 'controller_submission', 'metrics_timing'):
self.stdout.write(
"UPDATE {} "
"SET student_id = '{}' "
"WHERE student_id = '{}';\n".format(
table,
new_id,
old_id.anonymous_user_id,
)
)
self.stdout.write(
"DELETE FROM metrics_studentcourseprofile "
"WHERE student_id = '{}' "
"AND problems_attempted = 0;\n".format(old_id.anonymous_user_id)
)
self.stdout.write(
"DELETE FROM metrics_studentprofile "
"WHERE student_id = '{}' "
"AND messages_sent = 0 "
"AND messages_received = 0 "
"AND average_message_feedback_length = 0 "
"AND student_is_staff_banned = 0 "
"AND student_cannot_submit_more_for_peer_grading = 0;\n".format(old_id.anonymous_user_id)
)
|
Add management command to generate sql to clean up the truncated student ids in ORA db
|
Add management command to generate sql to clean up the truncated student ids in ORA db
|
Python
|
agpl-3.0
|
openfun/edx-platform,synergeticsedx/deployment-wipro,shashank971/edx-platform,bigdatauniversity/edx-platform,shabab12/edx-platform,philanthropy-u/edx-platform,openfun/edx-platform,motion2015/edx-platform,ubc/edx-platform,jolyonb/edx-platform,cognitiveclass/edx-platform,ferabra/edx-platform,jswope00/griffinx,proversity-org/edx-platform,ampax/edx-platform,proversity-org/edx-platform,jazztpt/edx-platform,yokose-ks/edx-platform,UXE/local-edx,pabloborrego93/edx-platform,ahmadiga/min_edx,eemirtekin/edx-platform,hkawasaki/kawasaki-aio8-1,arifsetiawan/edx-platform,alu042/edx-platform,alexthered/kienhoc-platform,appliedx/edx-platform,IndonesiaX/edx-platform,4eek/edx-platform,ak2703/edx-platform,mahendra-r/edx-platform,LearnEra/LearnEraPlaftform,jswope00/griffinx,zerobatu/edx-platform,doganov/edx-platform,xuxiao19910803/edx-platform,jbassen/edx-platform,nanolearning/edx-platform,mitocw/edx-platform,WatanabeYasumasa/edx-platform,jbassen/edx-platform,abdoosh00/edraak,alexthered/kienhoc-platform,antonve/s4-project-mooc,Edraak/edraak-platform,longmen21/edx-platform,edx/edx-platform,chrisndodge/edx-platform,sameetb-cuelogic/edx-platform-test,cselis86/edx-platform,jamesblunt/edx-platform,hkawasaki/kawasaki-aio8-2,jonathan-beard/edx-platform,hamzehd/edx-platform,jonathan-beard/edx-platform,shubhdev/edx-platform,mushtaqak/edx-platform,romain-li/edx-platform,Stanford-Online/edx-platform,nagyistoce/edx-platform,shubhdev/edx-platform,longmen21/edx-platform,motion2015/edx-platform,AkA84/edx-platform,analyseuc3m/ANALYSE-v1,shurihell/testasia,Stanford-Online/edx-platform,torchingloom/edx-platform,chauhanhardik/populo,bdero/edx-platform,jamesblunt/edx-platform,BehavioralInsightsTeam/edx-platform,cyanna/edx-platform,peterm-itr/edx-platform,Unow/edx-platform,franosincic/edx-platform,Edraak/circleci-edx-platform,rhndg/openedx,ahmadiga/min_edx,proversity-org/edx-platform,beni55/edx-platform,etzhou/edx-platform,jswope00/griffinx,olexiim/edx-platform,kamalx/edx-platform,JioEducation/edx-platform,auferack08/edx-platform,doismellburning/edx-platform,utecuy/edx-platform,kmoocdev2/edx-platform,jruiperezv/ANALYSE,synergeticsedx/deployment-wipro,chudaol/edx-platform,edx-solutions/edx-platform,JioEducation/edx-platform,10clouds/edx-platform,4eek/edx-platform,alu042/edx-platform,jruiperezv/ANALYSE,vikas1885/test1,chauhanhardik/populo_2,nanolearningllc/edx-platform-cypress,eemirtekin/edx-platform,benpatterson/edx-platform,iivic/BoiseStateX,shubhdev/openedx,etzhou/edx-platform,louyihua/edx-platform,dsajkl/123,shubhdev/openedx,itsjeyd/edx-platform,10clouds/edx-platform,chand3040/cloud_that,CourseTalk/edx-platform,jzoldak/edx-platform,romain-li/edx-platform,sameetb-cuelogic/edx-platform-test,unicri/edx-platform,IONISx/edx-platform,motion2015/a3,Semi-global/edx-platform,zadgroup/edx-platform,halvertoluke/edx-platform,motion2015/edx-platform,ovnicraft/edx-platform,J861449197/edx-platform,benpatterson/edx-platform,vasyarv/edx-platform,arifsetiawan/edx-platform,philanthropy-u/edx-platform,romain-li/edx-platform,valtech-mooc/edx-platform,ampax/edx-platform-backup,synergeticsedx/deployment-wipro,zadgroup/edx-platform,ferabra/edx-platform,J861449197/edx-platform,dsajkl/reqiop,mbareta/edx-platform-ft,chrisndodge/edx-platform,morenopc/edx-platform,JioEducation/edx-platform,mjg2203/edx-platform-seas,DNFcode/edx-platform,LICEF/edx-platform,playm2mboy/edx-platform,shurihell/testasia,Shrhawk/edx-platform,unicri/edx-platform,mushtaqak/edx-platform,nikolas/edx-platform,4eek/edx-platform,mushtaqak/edx-platform,Softmotions/edx-platform,don-github/ed
x-platform,DefyVentures/edx-platform,shubhdev/edx-platform,WatanabeYasumasa/edx-platform,pelikanchik/edx-platform,nttks/jenkins-test,defance/edx-platform,IndonesiaX/edx-platform,edry/edx-platform,arifsetiawan/edx-platform,raccoongang/edx-platform,doismellburning/edx-platform,nanolearning/edx-platform,etzhou/edx-platform,olexiim/edx-platform,motion2015/a3,bdero/edx-platform,JCBarahona/edX,xuxiao19910803/edx,procangroup/edx-platform,torchingloom/edx-platform,hkawasaki/kawasaki-aio8-1,dkarakats/edx-platform,eduNEXT/edunext-platform,ampax/edx-platform-backup,Livit/Livit.Learn.EdX,simbs/edx-platform,zerobatu/edx-platform,ak2703/edx-platform,ubc/edx-platform,jonathan-beard/edx-platform,miptliot/edx-platform,a-parhom/edx-platform,dsajkl/123,carsongee/edx-platform,J861449197/edx-platform,openfun/edx-platform,knehez/edx-platform,jbzdak/edx-platform,hkawasaki/kawasaki-aio8-0,beacloudgenius/edx-platform,Softmotions/edx-platform,unicri/edx-platform,ovnicraft/edx-platform,Semi-global/edx-platform,B-MOOC/edx-platform,louyihua/edx-platform,jswope00/GAI,doganov/edx-platform,beni55/edx-platform,IONISx/edx-platform,IndonesiaX/edx-platform,deepsrijit1105/edx-platform,Edraak/edx-platform,shubhdev/edxOnBaadal,chauhanhardik/populo_2,nttks/edx-platform,SravanthiSinha/edx-platform,romain-li/edx-platform,devs1991/test_edx_docmode,chauhanhardik/populo,Kalyzee/edx-platform,hastexo/edx-platform,mjg2203/edx-platform-seas,Stanford-Online/edx-platform,beni55/edx-platform,kmoocdev2/edx-platform,eduNEXT/edx-platform,simbs/edx-platform,playm2mboy/edx-platform,ESOedX/edx-platform,longmen21/edx-platform,hamzehd/edx-platform,kamalx/edx-platform,jazkarta/edx-platform,pomegranited/edx-platform,SravanthiSinha/edx-platform,naresh21/synergetics-edx-platform,procangroup/edx-platform,zofuthan/edx-platform,chand3040/cloud_that,torchingloom/edx-platform,benpatterson/edx-platform,adoosii/edx-platform,lduarte1991/edx-platform,MakeHer/edx-platform,OmarIthawi/edx-platform,jamiefolsom/edx-platform,bigdatauniversity/edx-platform,miptliot/edx-platform,Shrhawk/edx-platform,ferabra/edx-platform,apigee/edx-platform,mjirayu/sit_academy,nanolearningllc/edx-platform-cypress,xinjiguaike/edx-platform,nanolearningllc/edx-platform-cypress-2,olexiim/edx-platform,vikas1885/test1,caesar2164/edx-platform,pku9104038/edx-platform,appsembler/edx-platform,leansoft/edx-platform,ESOedX/edx-platform,longmen21/edx-platform,edx-solutions/edx-platform,eduNEXT/edx-platform,B-MOOC/edx-platform,EDUlib/edx-platform,B-MOOC/edx-platform,don-github/edx-platform,jazkarta/edx-platform-for-isc,eemirtekin/edx-platform,openfun/edx-platform,msegado/edx-platform,motion2015/a3,arbrandes/edx-platform,appliedx/edx-platform,gymnasium/edx-platform,JCBarahona/edX,zhenzhai/edx-platform,shubhdev/edxOnBaadal,JCBarahona/edX,DNFcode/edx-platform,LICEF/edx-platform,xingyepei/edx-platform,polimediaupv/edx-platform,franosincic/edx-platform,pomegranited/edx-platform,EDUlib/edx-platform,eduNEXT/edunext-platform,UOMx/edx-platform,doganov/edx-platform,abdoosh00/edx-rtl-final,polimediaupv/edx-platform,jazkarta/edx-platform,itsjeyd/edx-platform,cselis86/edx-platform,edx/edx-platform,edry/edx-platform,hastexo/edx-platform,sameetb-cuelogic/edx-platform-test,kursitet/edx-platform,jazkarta/edx-platform,morenopc/edx-platform,pku9104038/edx-platform,jjmiranda/edx-platform,doismellburning/edx-platform,LearnEra/LearnEraPlaftform,pomegranited/edx-platform,pepeportela/edx-platform,kamalx/edx-platform,chauhanhardik/populo,xingyepei/edx-platform,nttks/edx-platform,sameetb-cuelogic/edx-platform-test,lduarte1991/
edx-platform,miptliot/edx-platform,chand3040/cloud_that,tanmaykm/edx-platform,teltek/edx-platform,mbareta/edx-platform-ft,ahmadio/edx-platform,iivic/BoiseStateX,cecep-edu/edx-platform,TeachAtTUM/edx-platform,knehez/edx-platform,RPI-OPENEDX/edx-platform,SivilTaram/edx-platform,Endika/edx-platform,xuxiao19910803/edx-platform,gsehub/edx-platform,beni55/edx-platform,MSOpenTech/edx-platform,antoviaque/edx-platform,10clouds/edx-platform,a-parhom/edx-platform,4eek/edx-platform,MSOpenTech/edx-platform,fintech-circle/edx-platform,arbrandes/edx-platform,yokose-ks/edx-platform,jswope00/GAI,angelapper/edx-platform,vismartltd/edx-platform,franosincic/edx-platform,martynovp/edx-platform,jamiefolsom/edx-platform,nikolas/edx-platform,antonve/s4-project-mooc,stvstnfrd/edx-platform,nttks/edx-platform,carsongee/edx-platform,prarthitm/edxplatform,nagyistoce/edx-platform,Edraak/circleci-edx-platform,vasyarv/edx-platform,angelapper/edx-platform,rue89-tech/edx-platform,waheedahmed/edx-platform,alu042/edx-platform,vismartltd/edx-platform,mitocw/edx-platform,Edraak/circleci-edx-platform,jbassen/edx-platform,Edraak/edraak-platform,atsolakid/edx-platform,zhenzhai/edx-platform,sudheerchintala/LearnEraPlatForm,ovnicraft/edx-platform,teltek/edx-platform,torchingloom/edx-platform,kursitet/edx-platform,jbassen/edx-platform,jazkarta/edx-platform-for-isc,dcosentino/edx-platform,valtech-mooc/edx-platform,devs1991/test_edx_docmode,mbareta/edx-platform-ft,LearnEra/LearnEraPlaftform,alexthered/kienhoc-platform,cpennington/edx-platform,fly19890211/edx-platform,doganov/edx-platform,CredoReference/edx-platform,shubhdev/openedx,chudaol/edx-platform,atsolakid/edx-platform,mahendra-r/edx-platform,deepsrijit1105/edx-platform,alu042/edx-platform,simbs/edx-platform,dkarakats/edx-platform,zubair-arbi/edx-platform,DefyVentures/edx-platform,DNFcode/edx-platform,knehez/edx-platform,andyzsf/edx,antonve/s4-project-mooc,J861449197/edx-platform,simbs/edx-platform,Livit/Livit.Learn.EdX,jelugbo/tundex,utecuy/edx-platform,rue89-tech/edx-platform,appliedx/edx-platform,edry/edx-platform,ahmedaljazzar/edx-platform,stvstnfrd/edx-platform,SravanthiSinha/edx-platform,peterm-itr/edx-platform,J861449197/edx-platform,apigee/edx-platform,wwj718/ANALYSE,pomegranited/edx-platform,procangroup/edx-platform,rhndg/openedx,mjg2203/edx-platform-seas,LICEF/edx-platform,beacloudgenius/edx-platform,eduNEXT/edunext-platform,ahmadio/edx-platform,ferabra/edx-platform,dsajkl/reqiop,TeachAtTUM/edx-platform,inares/edx-platform,wwj718/edx-platform,TeachAtTUM/edx-platform,Ayub-Khan/edx-platform,LICEF/edx-platform,antoviaque/edx-platform,longmen21/edx-platform,defance/edx-platform,fintech-circle/edx-platform,SivilTaram/edx-platform,leansoft/edx-platform,martynovp/edx-platform,hamzehd/edx-platform,zerobatu/edx-platform,ahmedaljazzar/edx-platform,franosincic/edx-platform,xingyepei/edx-platform,ampax/edx-platform,kmoocdev2/edx-platform,DefyVentures/edx-platform,prarthitm/edxplatform,cognitiveclass/edx-platform,adoosii/edx-platform,Edraak/edx-platform,carsongee/edx-platform,msegado/edx-platform,jazkarta/edx-platform,jswope00/GAI,inares/edx-platform,motion2015/a3,eestay/edx-platform,Endika/edx-platform,pabloborrego93/edx-platform,kursitet/edx-platform,abdoosh00/edx-rtl-final,peterm-itr/edx-platform,jonathan-beard/edx-platform,itsjeyd/edx-platform,peterm-itr/edx-platform,wwj718/ANALYSE,wwj718/edx-platform,jelugbo/tundex,amir-qayyum-khan/edx-platform,eduNEXT/edunext-platform,jbzdak/edx-platform,prarthitm/edxplatform,y12uc231/edx-platform,xuxiao19910803/edx,appliedx/edx-platform,kamal
x/edx-platform,jamesblunt/edx-platform,bigdatauniversity/edx-platform,MakeHer/edx-platform,polimediaupv/edx-platform,rhndg/openedx,jamiefolsom/edx-platform,MakeHer/edx-platform,AkA84/edx-platform,atsolakid/edx-platform,Kalyzee/edx-platform,Lektorium-LLC/edx-platform,xuxiao19910803/edx-platform,IONISx/edx-platform,mcgachey/edx-platform,mjirayu/sit_academy,jazztpt/edx-platform,wwj718/ANALYSE,ahmadio/edx-platform,jzoldak/edx-platform,lduarte1991/edx-platform,procangroup/edx-platform,shubhdev/openedx,motion2015/edx-platform,cselis86/edx-platform,chand3040/cloud_that,hamzehd/edx-platform,tiagochiavericosta/edx-platform,zubair-arbi/edx-platform,teltek/edx-platform,y12uc231/edx-platform,nttks/edx-platform,ahmadiga/min_edx,cognitiveclass/edx-platform,nikolas/edx-platform,shubhdev/edxOnBaadal,utecuy/edx-platform,jazztpt/edx-platform,zubair-arbi/edx-platform,ahmadio/edx-platform,antonve/s4-project-mooc,shashank971/edx-platform,jelugbo/tundex,lduarte1991/edx-platform,waheedahmed/edx-platform,UOMx/edx-platform,dcosentino/edx-platform,benpatterson/edx-platform,sudheerchintala/LearnEraPlatForm,SivilTaram/edx-platform,inares/edx-platform,wwj718/edx-platform,pelikanchik/edx-platform,mbareta/edx-platform-ft,kmoocdev/edx-platform,kxliugang/edx-platform,benpatterson/edx-platform,eduNEXT/edx-platform,pelikanchik/edx-platform,pku9104038/edx-platform,synergeticsedx/deployment-wipro,IONISx/edx-platform,motion2015/edx-platform,JCBarahona/edX,kmoocdev2/edx-platform,nanolearningllc/edx-platform-cypress-2,jazztpt/edx-platform,UOMx/edx-platform,DNFcode/edx-platform,kxliugang/edx-platform,iivic/BoiseStateX,apigee/edx-platform,Endika/edx-platform,JCBarahona/edX,EDUlib/edx-platform,chudaol/edx-platform,beni55/edx-platform,rismalrv/edx-platform,xinjiguaike/edx-platform,mcgachey/edx-platform,MSOpenTech/edx-platform,dkarakats/edx-platform,nttks/jenkins-test,analyseuc3m/ANALYSE-v1,nikolas/edx-platform,dsajkl/reqiop,jolyonb/edx-platform,solashirai/edx-platform,nikolas/edx-platform,y12uc231/edx-platform,zofuthan/edx-platform,shashank971/edx-platform,edry/edx-platform,zubair-arbi/edx-platform,ESOedX/edx-platform,cpennington/edx-platform,halvertoluke/edx-platform,itsjeyd/edx-platform,Unow/edx-platform,kxliugang/edx-platform,antoviaque/edx-platform,bitifirefly/edx-platform,CredoReference/edx-platform,shurihell/testasia,amir-qayyum-khan/edx-platform,ZLLab-Mooc/edx-platform,SravanthiSinha/edx-platform,olexiim/edx-platform,ovnicraft/edx-platform,jelugbo/tundex,bdero/edx-platform,kamalx/edx-platform,abdoosh00/edx-rtl-final,mjg2203/edx-platform-seas,proversity-org/edx-platform,polimediaupv/edx-platform,morenopc/edx-platform,jbzdak/edx-platform,SravanthiSinha/edx-platform,leansoft/edx-platform,hamzehd/edx-platform,DefyVentures/edx-platform,pku9104038/edx-platform,caesar2164/edx-platform,TeachAtTUM/edx-platform,halvertoluke/edx-platform,edx-solutions/edx-platform,zofuthan/edx-platform,andyzsf/edx,fly19890211/edx-platform,cyanna/edx-platform,jruiperezv/ANALYSE,jbzdak/edx-platform,cognitiveclass/edx-platform,jelugbo/tundex,angelapper/edx-platform,SivilTaram/edx-platform,defance/edx-platform,nanolearningllc/edx-platform-cypress,zofuthan/edx-platform,xuxiao19910803/edx,angelapper/edx-platform,naresh21/synergetics-edx-platform,pepeportela/edx-platform,nanolearningllc/edx-platform-cypress-2,Semi-global/edx-platform,iivic/BoiseStateX,y12uc231/edx-platform,a-parhom/edx-platform,ampax/edx-platform-backup,hkawasaki/kawasaki-aio8-0,Ayub-Khan/edx-platform,msegado/edx-platform,wwj718/edx-platform,jswope00/GAI,shubhdev/edxOnBaadal,CredoReference/edx-
platform,waheedahmed/edx-platform,edx/edx-platform,ampax/edx-platform,rue89-tech/edx-platform,cecep-edu/edx-platform,jbzdak/edx-platform,shabab12/edx-platform,simbs/edx-platform,Shrhawk/edx-platform,andyzsf/edx,playm2mboy/edx-platform,halvertoluke/edx-platform,Unow/edx-platform,leansoft/edx-platform,inares/edx-platform,arifsetiawan/edx-platform,jazkarta/edx-platform-for-isc,jolyonb/edx-platform,pabloborrego93/edx-platform,playm2mboy/edx-platform,edx/edx-platform,abdoosh00/edraak,mushtaqak/edx-platform,chand3040/cloud_that,msegado/edx-platform,hkawasaki/kawasaki-aio8-0,Kalyzee/edx-platform,utecuy/edx-platform,hmcmooc/muddx-platform,chauhanhardik/populo_2,ESOedX/edx-platform,zerobatu/edx-platform,shurihell/testasia,rue89-tech/edx-platform,MSOpenTech/edx-platform,don-github/edx-platform,UOMx/edx-platform,devs1991/test_edx_docmode,vasyarv/edx-platform,jazztpt/edx-platform,tiagochiavericosta/edx-platform,louyihua/edx-platform,dcosentino/edx-platform,hmcmooc/muddx-platform,IONISx/edx-platform,rismalrv/edx-platform,RPI-OPENEDX/edx-platform,fintech-circle/edx-platform,mitocw/edx-platform,raccoongang/edx-platform,shubhdev/edx-platform,vikas1885/test1,RPI-OPENEDX/edx-platform,DefyVentures/edx-platform,shubhdev/edxOnBaadal,etzhou/edx-platform,antonve/s4-project-mooc,nttks/jenkins-test,jruiperezv/ANALYSE,cecep-edu/edx-platform,cecep-edu/edx-platform,amir-qayyum-khan/edx-platform,dkarakats/edx-platform,jswope00/griffinx,UXE/local-edx,CredoReference/edx-platform,cyanna/edx-platform,martynovp/edx-platform,mjirayu/sit_academy,fly19890211/edx-platform,Edraak/edx-platform,Shrhawk/edx-platform,ahmedaljazzar/edx-platform,bdero/edx-platform,zadgroup/edx-platform,dsajkl/123,rismalrv/edx-platform,hkawasaki/kawasaki-aio8-2,gsehub/edx-platform,JioEducation/edx-platform,philanthropy-u/edx-platform,mtlchun/edx,jzoldak/edx-platform,ahmedaljazzar/edx-platform,jbassen/edx-platform,jamesblunt/edx-platform,ahmadiga/min_edx,analyseuc3m/ANALYSE-v1,AkA84/edx-platform,gymnasium/edx-platform,nttks/edx-platform,stvstnfrd/edx-platform,10clouds/edx-platform,dcosentino/edx-platform,dsajkl/123,beacloudgenius/edx-platform,AkA84/edx-platform,openfun/edx-platform,raccoongang/edx-platform,kmoocdev/edx-platform,nanolearning/edx-platform,motion2015/a3,Semi-global/edx-platform,vikas1885/test1,hmcmooc/muddx-platform,nttks/jenkins-test,rue89-tech/edx-platform,ak2703/edx-platform,eestay/edx-platform,xuxiao19910803/edx,nanolearningllc/edx-platform-cypress,tanmaykm/edx-platform,4eek/edx-platform,yokose-ks/edx-platform,romain-li/edx-platform,rhndg/openedx,ampax/edx-platform-backup,ubc/edx-platform,bigdatauniversity/edx-platform,fly19890211/edx-platform,bitifirefly/edx-platform,nanolearning/edx-platform,zerobatu/edx-platform,ubc/edx-platform,a-parhom/edx-platform,alexthered/kienhoc-platform,marcore/edx-platform,RPI-OPENEDX/edx-platform,msegado/edx-platform,edx-solutions/edx-platform,hkawasaki/kawasaki-aio8-1,WatanabeYasumasa/edx-platform,BehavioralInsightsTeam/edx-platform,ahmadio/edx-platform,bigdatauniversity/edx-platform,gymnasium/edx-platform,pepeportela/edx-platform,carsongee/edx-platform,yokose-ks/edx-platform,cpennington/edx-platform,polimediaupv/edx-platform,kxliugang/edx-platform,jazkarta/edx-platform-for-isc,rhndg/openedx,zadgroup/edx-platform,auferack08/edx-platform,vikas1885/test1,defance/edx-platform,edry/edx-platform,appliedx/edx-platform,atsolakid/edx-platform,naresh21/synergetics-edx-platform,Ayub-Khan/edx-platform,alexthered/kienhoc-platform,playm2mboy/edx-platform,etzhou/edx-platform,zhenzhai/edx-platform,nanolearningllc/edx-pl
atform-cypress,marcore/edx-platform,pabloborrego93/edx-platform,wwj718/edx-platform,miptliot/edx-platform,rismalrv/edx-platform,sameetb-cuelogic/edx-platform-test,caesar2164/edx-platform,shubhdev/openedx,halvertoluke/edx-platform,mtlchun/edx,tanmaykm/edx-platform,doismellburning/edx-platform,appsembler/edx-platform,marcore/edx-platform,mcgachey/edx-platform,DNFcode/edx-platform,cecep-edu/edx-platform,bitifirefly/edx-platform,BehavioralInsightsTeam/edx-platform,kmoocdev/edx-platform,vismartltd/edx-platform,kmoocdev2/edx-platform,valtech-mooc/edx-platform,fly19890211/edx-platform,kursitet/edx-platform,fintech-circle/edx-platform,pepeportela/edx-platform,IndonesiaX/edx-platform,atsolakid/edx-platform,UXE/local-edx,Softmotions/edx-platform,cyanna/edx-platform,gymnasium/edx-platform,xuxiao19910803/edx-platform,CourseTalk/edx-platform,caesar2164/edx-platform,Livit/Livit.Learn.EdX,Kalyzee/edx-platform,jonathan-beard/edx-platform,eemirtekin/edx-platform,Lektorium-LLC/edx-platform,xinjiguaike/edx-platform,cselis86/edx-platform,Edraak/edx-platform,Edraak/edraak-platform,ZLLab-Mooc/edx-platform,beacloudgenius/edx-platform,sudheerchintala/LearnEraPlatForm,vasyarv/edx-platform,Edraak/edraak-platform,morenopc/edx-platform,solashirai/edx-platform,antoviaque/edx-platform,cognitiveclass/edx-platform,valtech-mooc/edx-platform,y12uc231/edx-platform,shubhdev/edx-platform,MakeHer/edx-platform,zubair-arbi/edx-platform,hkawasaki/kawasaki-aio8-1,chauhanhardik/populo,mitocw/edx-platform,deepsrijit1105/edx-platform,IndonesiaX/edx-platform,kmoocdev/edx-platform,Unow/edx-platform,jamesblunt/edx-platform,gsehub/edx-platform,zhenzhai/edx-platform,devs1991/test_edx_docmode,tiagochiavericosta/edx-platform,Stanford-Online/edx-platform,eduNEXT/edx-platform,xuxiao19910803/edx,jamiefolsom/edx-platform,devs1991/test_edx_docmode,arbrandes/edx-platform,xingyepei/edx-platform,jzoldak/edx-platform,martynovp/edx-platform,chrisndodge/edx-platform,nanolearning/edx-platform,philanthropy-u/edx-platform,jjmiranda/edx-platform,mtlchun/edx,nanolearningllc/edx-platform-cypress-2,hastexo/edx-platform,olexiim/edx-platform,zofuthan/edx-platform,mtlchun/edx,mcgachey/edx-platform,ak2703/edx-platform,MSOpenTech/edx-platform,Shrhawk/edx-platform,appsembler/edx-platform,ak2703/edx-platform,shashank971/edx-platform,B-MOOC/edx-platform,raccoongang/edx-platform,Lektorium-LLC/edx-platform,Edraak/circleci-edx-platform,adoosii/edx-platform,hkawasaki/kawasaki-aio8-2,nanolearningllc/edx-platform-cypress-2,Ayub-Khan/edx-platform,xinjiguaike/edx-platform,CourseTalk/edx-platform,mahendra-r/edx-platform,nttks/jenkins-test,teltek/edx-platform,ferabra/edx-platform,auferack08/edx-platform,ampax/edx-platform-backup,andyzsf/edx,eemirtekin/edx-platform,Endika/edx-platform,Softmotions/edx-platform,bitifirefly/edx-platform,kursitet/edx-platform,jruiperezv/ANALYSE,marcore/edx-platform,jjmiranda/edx-platform,ubc/edx-platform,abdoosh00/edraak,knehez/edx-platform,vismartltd/edx-platform,sudheerchintala/LearnEraPlatForm,zadgroup/edx-platform,chudaol/edx-platform,Edraak/edx-platform,hastexo/edx-platform,CourseTalk/edx-platform,B-MOOC/edx-platform,shabab12/edx-platform,mjirayu/sit_academy,xinjiguaike/edx-platform,eestay/edx-platform,arifsetiawan/edx-platform,UXE/local-edx,jazkarta/edx-platform,torchingloom/edx-platform,morenopc/edx-platform,jazkarta/edx-platform-for-isc,OmarIthawi/edx-platform,cyanna/edx-platform,iivic/BoiseStateX,jolyonb/edx-platform,solashirai/edx-platform,unicri/edx-platform,waheedahmed/edx-platform,shurihell/testasia,devs1991/test_edx_docmode,louyihua/e
dx-platform,mushtaqak/edx-platform,OmarIthawi/edx-platform,wwj718/ANALYSE,dsajkl/123,yokose-ks/edx-platform,auferack08/edx-platform,gsehub/edx-platform,ovnicraft/edx-platform,analyseuc3m/ANALYSE-v1,solashirai/edx-platform,doismellburning/edx-platform,hmcmooc/muddx-platform,chudaol/edx-platform,adoosii/edx-platform,Edraak/circleci-edx-platform,AkA84/edx-platform,kxliugang/edx-platform,chauhanhardik/populo,nagyistoce/edx-platform,tiagochiavericosta/edx-platform,Semi-global/edx-platform,martynovp/edx-platform,prarthitm/edxplatform,chauhanhardik/populo_2,dkarakats/edx-platform,Kalyzee/edx-platform,jamiefolsom/edx-platform,ZLLab-Mooc/edx-platform,EDUlib/edx-platform,adoosii/edx-platform,mahendra-r/edx-platform,mcgachey/edx-platform,OmarIthawi/edx-platform,unicri/edx-platform,pelikanchik/edx-platform,MakeHer/edx-platform,doganov/edx-platform,leansoft/edx-platform,stvstnfrd/edx-platform,ZLLab-Mooc/edx-platform,eestay/edx-platform,xingyepei/edx-platform,chauhanhardik/populo_2,Lektorium-LLC/edx-platform,deepsrijit1105/edx-platform,ZLLab-Mooc/edx-platform,zhenzhai/edx-platform,inares/edx-platform,jjmiranda/edx-platform,kmoocdev/edx-platform,BehavioralInsightsTeam/edx-platform,cpennington/edx-platform,bitifirefly/edx-platform,vismartltd/edx-platform,pomegranited/edx-platform,utecuy/edx-platform,mahendra-r/edx-platform,abdoosh00/edx-rtl-final,WatanabeYasumasa/edx-platform,nagyistoce/edx-platform,Ayub-Khan/edx-platform,vasyarv/edx-platform,mtlchun/edx,LearnEra/LearnEraPlaftform,hkawasaki/kawasaki-aio8-0,nagyistoce/edx-platform,devs1991/test_edx_docmode,mjirayu/sit_academy,abdoosh00/edraak,Livit/Livit.Learn.EdX,franosincic/edx-platform,don-github/edx-platform,LICEF/edx-platform,waheedahmed/edx-platform,arbrandes/edx-platform,naresh21/synergetics-edx-platform,tiagochiavericosta/edx-platform,ahmadiga/min_edx,solashirai/edx-platform,tanmaykm/edx-platform,valtech-mooc/edx-platform,appsembler/edx-platform,Softmotions/edx-platform,dsajkl/reqiop,chrisndodge/edx-platform,rismalrv/edx-platform,amir-qayyum-khan/edx-platform,ampax/edx-platform,RPI-OPENEDX/edx-platform,jswope00/griffinx,cselis86/edx-platform,wwj718/ANALYSE,shabab12/edx-platform,dcosentino/edx-platform,SivilTaram/edx-platform,xuxiao19910803/edx-platform,hkawasaki/kawasaki-aio8-2,shashank971/edx-platform,apigee/edx-platform,devs1991/test_edx_docmode,don-github/edx-platform,knehez/edx-platform,beacloudgenius/edx-platform,eestay/edx-platform
|
Add management command to generate sql to clean up the truncated student ids in ORA db
|
"""
Generate sql commands to fix truncated anonymous student ids in the ORA database
"""
import sys
from django.core.management.base import NoArgsCommand
from student.models import AnonymousUserId, anonymous_id_for_user
class Command(NoArgsCommand):
help = __doc__
def handle_noargs(self, **options):
"""
Reads a list of ids (newline separated) from stdin, and
dumps sql queries to run on the ORA database to fix those ids
from their truncated form to the full 32 character change.
The following query will generate the list of ids needed to be fixed
from the ORA database:
SELECT student_id FROM peer_grading_calibrationhistory WHERE LENGTH(student_id) = 16
UNION SELECT student_id FROM controller_submission WHERE LENGTH(student_id) = 16
UNION SELECT student_id FROM metrics_timing WHERE LENGTH(student_id) = 16
UNION SELECT student_id FROM metrics_studentcourseprofile WHERE LENGTH(student_id) = 16
UNION SELECT student_id FROM metrics_studentprofile WHERE LENGTH(student_id) = 16;
"""
ids = [line.strip() for line in sys.stdin]
old_ids = AnonymousUserId.objects.raw(
"""
SELECT *
FROM student_anonymoususerid_temp_archive
WHERE anonymous_user_id IN ({})
""".format(','.join(['%s']*len(ids))),
ids
)
for old_id in old_ids:
new_id = anonymous_id_for_user(old_id.user, old_id.course_id)
for table in ('peer_grading_calibrationhistory', 'controller_submission', 'metrics_timing'):
self.stdout.write(
"UPDATE {} "
"SET student_id = '{}' "
"WHERE student_id = '{}';\n".format(
table,
new_id,
old_id.anonymous_user_id,
)
)
self.stdout.write(
"DELETE FROM metrics_studentcourseprofile "
"WHERE student_id = '{}' "
"AND problems_attempted = 0;\n".format(old_id.anonymous_user_id)
)
self.stdout.write(
"DELETE FROM metrics_studentprofile "
"WHERE student_id = '{}' "
"AND messages_sent = 0 "
"AND messages_received = 0 "
"AND average_message_feedback_length = 0 "
"AND student_is_staff_banned = 0 "
"AND student_cannot_submit_more_for_peer_grading = 0;\n".format(old_id.anonymous_user_id)
)
|
<commit_before><commit_msg>Add management command to generate sql to clean up the truncated student ids in ORA db<commit_after>
|
"""
Generate sql commands to fix truncated anonymous student ids in the ORA database
"""
import sys
from django.core.management.base import NoArgsCommand
from student.models import AnonymousUserId, anonymous_id_for_user
class Command(NoArgsCommand):
help = __doc__
def handle_noargs(self, **options):
"""
Reads a list of ids (newline separated) from stdin, and
dumps sql queries to run on the ORA database to fix those ids
from their truncated form to the full 32 character change.
The following query will generate the list of ids needed to be fixed
from the ORA database:
SELECT student_id FROM peer_grading_calibrationhistory WHERE LENGTH(student_id) = 16
UNION SELECT student_id FROM controller_submission WHERE LENGTH(student_id) = 16
UNION SELECT student_id FROM metrics_timing WHERE LENGTH(student_id) = 16
UNION SELECT student_id FROM metrics_studentcourseprofile WHERE LENGTH(student_id) = 16
UNION SELECT student_id FROM metrics_studentprofile WHERE LENGTH(student_id) = 16;
"""
ids = [line.strip() for line in sys.stdin]
old_ids = AnonymousUserId.objects.raw(
"""
SELECT *
FROM student_anonymoususerid_temp_archive
WHERE anonymous_user_id IN ({})
""".format(','.join(['%s']*len(ids))),
ids
)
for old_id in old_ids:
new_id = anonymous_id_for_user(old_id.user, old_id.course_id)
for table in ('peer_grading_calibrationhistory', 'controller_submission', 'metrics_timing'):
self.stdout.write(
"UPDATE {} "
"SET student_id = '{}' "
"WHERE student_id = '{}';\n".format(
table,
new_id,
old_id.anonymous_user_id,
)
)
self.stdout.write(
"DELETE FROM metrics_studentcourseprofile "
"WHERE student_id = '{}' "
"AND problems_attempted = 0;\n".format(old_id.anonymous_user_id)
)
self.stdout.write(
"DELETE FROM metrics_studentprofile "
"WHERE student_id = '{}' "
"AND messages_sent = 0 "
"AND messages_received = 0 "
"AND average_message_feedback_length = 0 "
"AND student_is_staff_banned = 0 "
"AND student_cannot_submit_more_for_peer_grading = 0;\n".format(old_id.anonymous_user_id)
)
|
Add management command to generate sql to clean up the truncated student ids in ORA db"""
Generate sql commands to fix truncated anonymous student ids in the ORA database
"""
import sys
from django.core.management.base import NoArgsCommand
from student.models import AnonymousUserId, anonymous_id_for_user
class Command(NoArgsCommand):
help = __doc__
def handle_noargs(self, **options):
"""
Reads a list of ids (newline separated) from stdin, and
dumps sql queries to run on the ORA database to fix those ids
from their truncated form to the full 32 character change.
The following query will generate the list of ids needed to be fixed
from the ORA database:
SELECT student_id FROM peer_grading_calibrationhistory WHERE LENGTH(student_id) = 16
UNION SELECT student_id FROM controller_submission WHERE LENGTH(student_id) = 16
UNION SELECT student_id FROM metrics_timing WHERE LENGTH(student_id) = 16
UNION SELECT student_id FROM metrics_studentcourseprofile WHERE LENGTH(student_id) = 16
UNION SELECT student_id FROM metrics_studentprofile WHERE LENGTH(student_id) = 16;
"""
ids = [line.strip() for line in sys.stdin]
old_ids = AnonymousUserId.objects.raw(
"""
SELECT *
FROM student_anonymoususerid_temp_archive
WHERE anonymous_user_id IN ({})
""".format(','.join(['%s']*len(ids))),
ids
)
for old_id in old_ids:
new_id = anonymous_id_for_user(old_id.user, old_id.course_id)
for table in ('peer_grading_calibrationhistory', 'controller_submission', 'metrics_timing'):
self.stdout.write(
"UPDATE {} "
"SET student_id = '{}' "
"WHERE student_id = '{}';\n".format(
table,
new_id,
old_id.anonymous_user_id,
)
)
self.stdout.write(
"DELETE FROM metrics_studentcourseprofile "
"WHERE student_id = '{}' "
"AND problems_attempted = 0;\n".format(old_id.anonymous_user_id)
)
self.stdout.write(
"DELETE FROM metrics_studentprofile "
"WHERE student_id = '{}' "
"AND messages_sent = 0 "
"AND messages_received = 0 "
"AND average_message_feedback_length = 0 "
"AND student_is_staff_banned = 0 "
"AND student_cannot_submit_more_for_peer_grading = 0;\n".format(old_id.anonymous_user_id)
)
|
<commit_before><commit_msg>Add management command to generate sql to clean up the truncated student ids in ORA db<commit_after>"""
Generate sql commands to fix truncated anonymous student ids in the ORA database
"""
import sys
from django.core.management.base import NoArgsCommand
from student.models import AnonymousUserId, anonymous_id_for_user
class Command(NoArgsCommand):
help = __doc__
def handle_noargs(self, **options):
"""
Reads a list of ids (newline separated) from stdin, and
dumps sql queries to run on the ORA database to fix those ids
from their truncated form to the full 32 character change.
The following query will generate the list of ids needed to be fixed
from the ORA database:
SELECT student_id FROM peer_grading_calibrationhistory WHERE LENGTH(student_id) = 16
UNION SELECT student_id FROM controller_submission WHERE LENGTH(student_id) = 16
UNION SELECT student_id FROM metrics_timing WHERE LENGTH(student_id) = 16
UNION SELECT student_id FROM metrics_studentcourseprofile WHERE LENGTH(student_id) = 16
UNION SELECT student_id FROM metrics_studentprofile WHERE LENGTH(student_id) = 16;
"""
ids = [line.strip() for line in sys.stdin]
old_ids = AnonymousUserId.objects.raw(
"""
SELECT *
FROM student_anonymoususerid_temp_archive
WHERE anonymous_user_id IN ({})
""".format(','.join(['%s']*len(ids))),
ids
)
for old_id in old_ids:
new_id = anonymous_id_for_user(old_id.user, old_id.course_id)
for table in ('peer_grading_calibrationhistory', 'controller_submission', 'metrics_timing'):
self.stdout.write(
"UPDATE {} "
"SET student_id = '{}' "
"WHERE student_id = '{}';\n".format(
table,
new_id,
old_id.anonymous_user_id,
)
)
self.stdout.write(
"DELETE FROM metrics_studentcourseprofile "
"WHERE student_id = '{}' "
"AND problems_attempted = 0;\n".format(old_id.anonymous_user_id)
)
self.stdout.write(
"DELETE FROM metrics_studentprofile "
"WHERE student_id = '{}' "
"AND messages_sent = 0 "
"AND messages_received = 0 "
"AND average_message_feedback_length = 0 "
"AND student_is_staff_banned = 0 "
"AND student_cannot_submit_more_for_peer_grading = 0;\n".format(old_id.anonymous_user_id)
)
|
|
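For one recovered id, the command above emits three UPDATE statements plus the two conditional DELETEs; a minimal sketch of the UPDATE part, with both id values below being invented placeholders rather than real anonymous ids:

old_id = '0123456789abcdef'                  # 16-character truncated form
new_id = '0123456789abcdef0123456789abcdef'  # full 32-character form
for table in ('peer_grading_calibrationhistory', 'controller_submission', 'metrics_timing'):
    print("UPDATE {} SET student_id = '{}' WHERE student_id = '{}';".format(table, new_id, old_id))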
be8b7c89160f4aa8b0415f673629cd80d97a13f0
|
robCRSdkt.py
|
robCRSdkt.py
|
import numpy as np
def robCRSdkt(robot, pos):
T = np.eye(4)
pos = np.array(pos) / 180.0 * np.pi
for i in range(6):
tz = np.eye(4)
tz[2,3] = robot.d[i]
rz = np.eye(4)
o = -robot.offset[i]
rz[:2, :2] = np.array([[np.cos(o+pos[i]), -np.sin(o+pos[i])],
[np.sin(o+pos[i]), np.cos(o+pos[i])]])
tx = np.eye(4)
tx[0, 3] = robot.a[i]
rx = np.eye(4)
a = robot.alpha[i]
rx[1:3, 1:3] = np.array([[np.cos(a), -np.sin(a)],
[np.sin(a), np.cos(a)]])
T = T.dot(tz).dot(rz).dot(tx).dot(rx)
T = T.dot(robot.tool)
coord = T[:3,3]
a3 = np.arctan2(T[1, 0], T[0, 0])/np.pi * 180
a4 = np.arcsin(-T[2,0])/np.pi * 180
a5 = np.arctan2(T[2,1], T[2,2])/np.pi * 180
coord = np.hstack((coord, a3,a4, a5))
return coord
|
Create direct kinematic function for CRS robot.
|
Create direct kinematic function for CRS robot.
|
Python
|
mit
|
petroolg/robo-spline
|
Create direct kinematic function for CRS robot.
|
import numpy as np
def robCRSdkt(robot, pos):
T = np.eye(4)
pos = np.array(pos) / 180.0 * np.pi
for i in range(6):
tz = np.eye(4)
tz[2,3] = robot.d[i]
rz = np.eye(4)
o = -robot.offset[i]
rz[:2, :2] = np.array([[np.cos(o+pos[i]), -np.sin(o+pos[i])],
[np.sin(o+pos[i]), np.cos(o+pos[i])]])
tx = np.eye(4)
tx[0, 3] = robot.a[i]
rx = np.eye(4)
a = robot.alpha[i]
rx[1:3, 1:3] = np.array([[np.cos(a), -np.sin(a)],
[np.sin(a), np.cos(a)]])
T = T.dot(tz).dot(rz).dot(tx).dot(rx)
T = T.dot(robot.tool)
coord = T[:3,3]
a3 = np.arctan2(T[1, 0], T[0, 0])/np.pi * 180
a4 = np.arcsin(-T[2,0])/np.pi * 180
a5 = np.arctan2(T[2,1], T[2,2])/np.pi * 180
coord = np.hstack((coord, a3,a4, a5))
return coord
|
<commit_before><commit_msg>Create direct kinematic function for CRS robot.<commit_after>
|
import numpy as np
def robCRSdkt(robot, pos):
T = np.eye(4)
pos = np.array(pos) / 180.0 * np.pi
for i in range(6):
tz = np.eye(4)
tz[2,3] = robot.d[i]
rz = np.eye(4)
o = -robot.offset[i]
rz[:2, :2] = np.array([[np.cos(o+pos[i]), -np.sin(o+pos[i])],
[np.sin(o+pos[i]), np.cos(o+pos[i])]])
tx = np.eye(4)
tx[0, 3] = robot.a[i]
rx = np.eye(4)
a = robot.alpha[i]
rx[1:3, 1:3] = np.array([[np.cos(a), -np.sin(a)],
[np.sin(a), np.cos(a)]])
T = T.dot(tz).dot(rz).dot(tx).dot(rx)
T = T.dot(robot.tool)
coord = T[:3,3]
a3 = np.arctan2(T[1, 0], T[0, 0])/np.pi * 180
a4 = np.arcsin(-T[2,0])/np.pi * 180
a5 = np.arctan2(T[2,1], T[2,2])/np.pi * 180
coord = np.hstack((coord, a3,a4, a5))
return coord
|
Create direct kinematic function for CRS robot.import numpy as np
def robCRSdkt(robot, pos):
T = np.eye(4)
pos = np.array(pos) / 180.0 * np.pi
for i in range(6):
tz = np.eye(4)
tz[2,3] = robot.d[i]
rz = np.eye(4)
o = -robot.offset[i]
rz[:2, :2] = np.array([[np.cos(o+pos[i]), -np.sin(o+pos[i])],
[np.sin(o+pos[i]), np.cos(o+pos[i])]])
tx = np.eye(4)
tx[0, 3] = robot.a[i]
rx = np.eye(4)
a = robot.alpha[i]
rx[1:3, 1:3] = np.array([[np.cos(a), -np.sin(a)],
[np.sin(a), np.cos(a)]])
T = T.dot(tz).dot(rz).dot(tx).dot(rx)
T = T.dot(robot.tool)
coord = T[:3,3]
a3 = np.arctan2(T[1, 0], T[0, 0])/np.pi * 180
a4 = np.arcsin(-T[2,0])/np.pi * 180
a5 = np.arctan2(T[2,1], T[2,2])/np.pi * 180
coord = np.hstack((coord, a3,a4, a5))
return coord
|
<commit_before><commit_msg>Create direct kinematic function for CRS robot.<commit_after>import numpy as np
def robCRSdkt(robot, pos):
T = np.eye(4)
pos = np.array(pos) / 180.0 * np.pi
for i in range(6):
tz = np.eye(4)
tz[2,3] = robot.d[i]
rz = np.eye(4)
o = -robot.offset[i]
rz[:2, :2] = np.array([[np.cos(o+pos[i]), -np.sin(o+pos[i])],
[np.sin(o+pos[i]), np.cos(o+pos[i])]])
tx = np.eye(4)
tx[0, 3] = robot.a[i]
rx = np.eye(4)
a = robot.alpha[i]
rx[1:3, 1:3] = np.array([[np.cos(a), -np.sin(a)],
[np.sin(a), np.cos(a)]])
T = T.dot(tz).dot(rz).dot(tx).dot(rx)
T = T.dot(robot.tool)
coord = T[:3,3]
a3 = np.arctan2(T[1, 0], T[0, 0])/np.pi * 180
a4 = np.arcsin(-T[2,0])/np.pi * 180
a5 = np.arctan2(T[2,1], T[2,2])/np.pi * 180
coord = np.hstack((coord, a3,a4, a5))
return coord
|
|
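A minimal usage sketch for the forward-kinematics routine above, assuming robCRSdkt as defined in this record is in scope; the DH parameters below are placeholders rather than the real CRS values, and SimpleNamespace merely stands in for whatever robot object the surrounding package provides.

import numpy as np
from types import SimpleNamespace

robot = SimpleNamespace(
    d=[0.33, 0.0, 0.0, 0.33, 0.0, 0.076],  # link offsets (placeholder values)
    a=[0.0, 0.305, 0.0, 0.0, 0.0, 0.0],    # link lengths (placeholder values)
    alpha=[-np.pi / 2, 0.0, np.pi / 2, -np.pi / 2, np.pi / 2, 0.0],  # link twists
    offset=[0.0] * 6,                      # joint offsets
    tool=np.eye(4),                        # no tool transform
)
# pos is given in degrees; the result is x, y, z plus three Euler-like angles.
print(robCRSdkt(robot, [0, 0, 0, 0, 0, 0]))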
3443496bac4b313b1d4603086eb434459a092238
|
tools/convertPunchCardToToml.py
|
tools/convertPunchCardToToml.py
|
import argparse
def parseDay(weekDay, sunday):
daysOfTheWeek = {'S': 'saturday', 'S2': 'sunday', 'M': 'monday', 'T': 'tuesday',
'W': 'wednesday', 'R': 'thursday', 'F': 'friday'}
splitDay = weekDay.split(',')
if sunday:
day = 'S2'
splitDay.pop(0)
else:
day = splitDay.pop(0)[0]
dayOfTheWeek = daysOfTheWeek[day]
if len(splitDay) == 0:
return(dayOfTheWeek, None)
else:
dayArray = []
for time in splitDay:
dayArray.append(time)
return(dayOfTheWeek, "'000' = {}\n".format(dayArray))
def main(punchCard):
tab = ' '
title = punchCard.pop(0).rstrip('\n')
day = tab+"projects = ['000']\n\n"
for line in punchCard:
line = line.rstrip('\n')
if 'saturday' in day and line[0] == 'S':
weekday = parseDay(line, True)
else:
weekday = parseDay(line, False)
if weekday[1]:
day = '{}{}[day.{}]\n{}{}{}\n'.format(day, tab, weekday[0], tab, tab, weekday[1])
else:
day = '{}{}[day.{}]\n{}{}{}\n'.format(day, tab, weekday[0], tab, tab, '')
return "title = '{}'\n\n[day]\n\n{}".format(title, day)
if __name__ == '__main__': # pragma: no cover
description = 'This is a script to convert PunchCards from the old format to the new TOML format.'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('file', nargs='?', type=argparse.FileType('r'), default='-',
help='The old format PunchCard to be converted or if no file given read stdin')
args = parser.parse_args()
oldPunchCard = []
for line in args.file:
oldPunchCard.append(line)
newPunchCard = main(oldPunchCard).rstrip('\n')
if args.file.name == '<stdin>':
print(newPunchCard)
else:
with open(args.file.name+'.toml', 'w') as newFile:
newFile.write(newPunchCard)
|
Create tools directory and add convert tool
|
Create tools directory and add convert tool
|
Python
|
mit
|
NLSteveO/PunchCard,NLSteveO/PunchCard
|
Create tools directory and add convert tool
|
import argparse
def parseDay(weekDay, sunday):
daysOfTheWeek = {'S': 'saturday', 'S2': 'sunday', 'M': 'monday', 'T': 'tuesday',
'W': 'wednesday', 'R': 'thursday', 'F': 'friday'}
splitDay = weekDay.split(',')
if sunday:
day = 'S2'
splitDay.pop(0)
else:
day = splitDay.pop(0)[0]
dayOfTheWeek = daysOfTheWeek[day]
if len(splitDay) == 0:
return(dayOfTheWeek, None)
else:
dayArray = []
for time in splitDay:
dayArray.append(time)
return(dayOfTheWeek, "'000' = {}\n".format(dayArray))
def main(punchCard):
tab = ' '
title = punchCard.pop(0).rstrip('\n')
day = tab+"projects = ['000']\n\n"
for line in punchCard:
line = line.rstrip('\n')
if 'saturday' in day and line[0] == 'S':
weekday = parseDay(line, True)
else:
weekday = parseDay(line, False)
if weekday[1]:
day = '{}{}[day.{}]\n{}{}{}\n'.format(day, tab, weekday[0], tab, tab, weekday[1])
else:
day = '{}{}[day.{}]\n{}{}{}\n'.format(day, tab, weekday[0], tab, tab, '')
return "title = '{}'\n\n[day]\n\n{}".format(title, day)
if __name__ == '__main__': # pragma: no cover
description = 'This is a script to convert PunchCards from the old format to the new TOML format.'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('file', nargs='?', type=argparse.FileType('r'), default='-',
help='The old format PunchCard to be converted or if no file given read stdin')
args = parser.parse_args()
oldPunchCard = []
for line in args.file:
oldPunchCard.append(line)
newPunchCard = main(oldPunchCard).rstrip('\n')
if args.file.name == '<stdin>':
print(newPunchCard)
else:
with open(args.file.name+'.toml', 'w') as newFile:
newFile.write(newPunchCard)
|
<commit_before><commit_msg>Create tools directory and add convert tool<commit_after>
|
import argparse
def parseDay(weekDay, sunday):
daysOfTheWeek = {'S': 'saturday', 'S2': 'sunday', 'M': 'monday', 'T': 'tuesday',
'W': 'wednesday', 'R': 'thursday', 'F': 'friday'}
splitDay = weekDay.split(',')
if sunday:
day = 'S2'
splitDay.pop(0)
else:
day = splitDay.pop(0)[0]
dayOfTheWeek = daysOfTheWeek[day]
if len(splitDay) == 0:
return(dayOfTheWeek, None)
else:
dayArray = []
for time in splitDay:
dayArray.append(time)
return(dayOfTheWeek, "'000' = {}\n".format(dayArray))
def main(punchCard):
tab = ' '
title = punchCard.pop(0).rstrip('\n')
day = tab+"projects = ['000']\n\n"
for line in punchCard:
line = line.rstrip('\n')
if 'saturday' in day and line[0] == 'S':
weekday = parseDay(line, True)
else:
weekday = parseDay(line, False)
if weekday[1]:
day = '{}{}[day.{}]\n{}{}{}\n'.format(day, tab, weekday[0], tab, tab, weekday[1])
else:
day = '{}{}[day.{}]\n{}{}{}\n'.format(day, tab, weekday[0], tab, tab, '')
return "title = '{}'\n\n[day]\n\n{}".format(title, day)
if __name__ == '__main__': # pragma: no cover
description = 'This is a script to convert PunchCards from the old format to the new TOML format.'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('file', nargs='?', type=argparse.FileType('r'), default='-',
help='The old format PunchCard to be converted or if no file given read stdin')
args = parser.parse_args()
oldPunchCard = []
for line in args.file:
oldPunchCard.append(line)
newPunchCard = main(oldPunchCard).rstrip('\n')
if args.file.name == '<stdin>':
print(newPunchCard)
else:
with open(args.file.name+'.toml', 'w') as newFile:
newFile.write(newPunchCard)
|
Create tools directory and add convert toolimport argparse
def parseDay(weekDay, sunday):
daysOfTheWeek = {'S': 'saturday', 'S2': 'sunday', 'M': 'monday', 'T': 'tuesday',
'W': 'wednesday', 'R': 'thursday', 'F': 'friday'}
splitDay = weekDay.split(',')
if sunday:
day = 'S2'
splitDay.pop(0)
else:
day = splitDay.pop(0)[0]
dayOfTheWeek = daysOfTheWeek[day]
if len(splitDay) == 0:
return(dayOfTheWeek, None)
else:
dayArray = []
for time in splitDay:
dayArray.append(time)
return(dayOfTheWeek, "'000' = {}\n".format(dayArray))
def main(punchCard):
tab = ' '
title = punchCard.pop(0).rstrip('\n')
day = tab+"projects = ['000']\n\n"
for line in punchCard:
line = line.rstrip('\n')
if 'saturday' in day and line[0] == 'S':
weekday = parseDay(line, True)
else:
weekday = parseDay(line, False)
if weekday[1]:
day = '{}{}[day.{}]\n{}{}{}\n'.format(day, tab, weekday[0], tab, tab, weekday[1])
else:
day = '{}{}[day.{}]\n{}{}{}\n'.format(day, tab, weekday[0], tab, tab, '')
return "title = '{}'\n\n[day]\n\n{}".format(title, day)
if __name__ == '__main__': # pragma: no cover
description = 'This is a script to convert PunchCards from the old format to the new TOML format.'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('file', nargs='?', type=argparse.FileType('r'), default='-',
help='The old format PunchCard to be converted or if no file given read stdin')
args = parser.parse_args()
oldPunchCard = []
for line in args.file:
oldPunchCard.append(line)
newPunchCard = main(oldPunchCard).rstrip('\n')
if args.file.name == '<stdin>':
print(newPunchCard)
else:
with open(args.file.name+'.toml', 'w') as newFile:
newFile.write(newPunchCard)
|
<commit_before><commit_msg>Create tools directory and add convert tool<commit_after>import argparse
def parseDay(weekDay, sunday):
daysOfTheWeek = {'S': 'saturday', 'S2': 'sunday', 'M': 'monday', 'T': 'tuesday',
'W': 'wednesday', 'R': 'thursday', 'F': 'friday'}
splitDay = weekDay.split(',')
if sunday:
day = 'S2'
splitDay.pop(0)
else:
day = splitDay.pop(0)[0]
dayOfTheWeek = daysOfTheWeek[day]
if len(splitDay) == 0:
return(dayOfTheWeek, None)
else:
dayArray = []
for time in splitDay:
dayArray.append(time)
return(dayOfTheWeek, "'000' = {}\n".format(dayArray))
def main(punchCard):
tab = ' '
title = punchCard.pop(0).rstrip('\n')
day = tab+"projects = ['000']\n\n"
for line in punchCard:
line = line.rstrip('\n')
if 'saturday' in day and line[0] == 'S':
weekday = parseDay(line, True)
else:
weekday = parseDay(line, False)
if weekday[1]:
day = '{}{}[day.{}]\n{}{}{}\n'.format(day, tab, weekday[0], tab, tab, weekday[1])
else:
day = '{}{}[day.{}]\n{}{}{}\n'.format(day, tab, weekday[0], tab, tab, '')
return "title = '{}'\n\n[day]\n\n{}".format(title, day)
if __name__ == '__main__': # pragma: no cover
description = 'This is a script to convert PunchCards from the old format to the new TOML format.'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('file', nargs='?', type=argparse.FileType('r'), default='-',
help='The old format PunchCard to be converted or if no file given read stdin')
args = parser.parse_args()
oldPunchCard = []
for line in args.file:
oldPunchCard.append(line)
newPunchCard = main(oldPunchCard).rstrip('\n')
if args.file.name == '<stdin>':
print(newPunchCard)
else:
with open(args.file.name+'.toml', 'w') as newFile:
newFile.write(newPunchCard)
|
|
5d0738f256339844acfb633e3c4277f6ea17ac30
|
cleaning_scripts/remove_leading_slashes_212.py
|
cleaning_scripts/remove_leading_slashes_212.py
|
# This software and any associated files are copyright 2010 Brian Carver and
# Michael Lissner.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Under Sections 7(a) and 7(b) of version 3 of the GNU Affero General Public
# License, that license is supplemented by the following terms:
#
# a) You are required to preserve this legal notice and all author
# attributions in this program and its accompanying documentation.
#
# b) You are prohibited from misrepresenting the origin of any material
# within this covered work and you are required to mark in reasonable
# ways how any modified versions differ from the original version.
import sys
sys.path.append('/var/www/court-listener/alert')
import settings
from django.core.management import setup_environ
setup_environ(settings)
from search.models import Document
from juriscraper.lib.string_utils import harmonize, clean_string
from optparse import OptionParser
def fixer(simulate=False, verbose=False):
    '''Remove leading slashes by running the new and improved harmonize/clean_string scripts'''
docs = Document.objects.raw(r'''select Document.documentUUID
from Document, Citation
where Document.citation_id = Citation.citationUUID and
Citation.case_name like '/%%';''')
for doc in docs:
if verbose:
print "Fixing document %s: %s" % (doc.pk, doc)
if not simulate:
doc.citation.case_name = harmonize(clean_string(doc.citation.case_name))
doc.citation.save()
def main():
    usage = "usage: %prog [--verbose] [--simulate]"
parser = OptionParser(usage)
parser.add_option('-v', '--verbose', action="store_true", dest='verbose',
default=False, help="Display log during execution")
parser.add_option('-s', '--simulate', action="store_true",
dest='simulate', default=False, help=("Simulate the corrections without "
"actually making them."))
(options, args) = parser.parse_args()
verbose = options.verbose
simulate = options.simulate
if simulate:
print "*******************************************"
print "* SIMULATE MODE - NO CHANGES WILL BE MADE *"
print "*******************************************"
return fixer(simulate, verbose)
exit(0)
if __name__ == '__main__':
main()
|
Fix script for cases with leading slashes.
|
Fix script for cases with leading slashes.
|
Python
|
agpl-3.0
|
brianwc/courtlistener,wmbutler/courtlistener,Andr3iC/courtlistener,voutilad/courtlistener,brianwc/courtlistener,wmbutler/courtlistener,brianwc/courtlistener,Andr3iC/courtlistener,shashi792/courtlistener,wmbutler/courtlistener,wmbutler/courtlistener,shashi792/courtlistener,wmbutler/courtlistener,voutilad/courtlistener,Andr3iC/courtlistener,voutilad/courtlistener,brianwc/courtlistener,voutilad/courtlistener,Andr3iC/courtlistener,brianwc/courtlistener,shashi792/courtlistener,Andr3iC/courtlistener,voutilad/courtlistener,shashi792/courtlistener,shashi792/courtlistener
|
Fix script for cases with leading slashes.
|
# This software and any associated files are copyright 2010 Brian Carver and
# Michael Lissner.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Under Sections 7(a) and 7(b) of version 3 of the GNU Affero General Public
# License, that license is supplemented by the following terms:
#
# a) You are required to preserve this legal notice and all author
# attributions in this program and its accompanying documentation.
#
# b) You are prohibited from misrepresenting the origin of any material
# within this covered work and you are required to mark in reasonable
# ways how any modified versions differ from the original version.
import sys
sys.path.append('/var/www/court-listener/alert')
import settings
from django.core.management import setup_environ
setup_environ(settings)
from search.models import Document
from juriscraper.lib.string_utils import harmonize, clean_string
from optparse import OptionParser
def fixer(simulate=False, verbose=False):
    '''Remove leading slashes by running the new and improved harmonize/clean_string scripts'''
docs = Document.objects.raw(r'''select Document.documentUUID
from Document, Citation
where Document.citation_id = Citation.citationUUID and
Citation.case_name like '/%%';''')
for doc in docs:
if verbose:
print "Fixing document %s: %s" % (doc.pk, doc)
if not simulate:
doc.citation.case_name = harmonize(clean_string(doc.citation.case_name))
doc.citation.save()
def main():
    usage = "usage: %prog [--verbose] [--simulate]"
parser = OptionParser(usage)
parser.add_option('-v', '--verbose', action="store_true", dest='verbose',
default=False, help="Display log during execution")
parser.add_option('-s', '--simulate', action="store_true",
dest='simulate', default=False, help=("Simulate the corrections without "
"actually making them."))
(options, args) = parser.parse_args()
verbose = options.verbose
simulate = options.simulate
if simulate:
print "*******************************************"
print "* SIMULATE MODE - NO CHANGES WILL BE MADE *"
print "*******************************************"
return fixer(simulate, verbose)
exit(0)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Fix script for cases with leading slashes.<commit_after>
|
# This software and any associated files are copyright 2010 Brian Carver and
# Michael Lissner.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Under Sections 7(a) and 7(b) of version 3 of the GNU Affero General Public
# License, that license is supplemented by the following terms:
#
# a) You are required to preserve this legal notice and all author
# attributions in this program and its accompanying documentation.
#
# b) You are prohibited from misrepresenting the origin of any material
# within this covered work and you are required to mark in reasonable
# ways how any modified versions differ from the original version.
import sys
sys.path.append('/var/www/court-listener/alert')
import settings
from django.core.management import setup_environ
setup_environ(settings)
from search.models import Document
from juriscraper.lib.string_utils import harmonize, clean_string
from optparse import OptionParser
def fixer(simulate=False, verbose=False):
    '''Remove leading slashes by running the new and improved harmonize/clean_string scripts'''
docs = Document.objects.raw(r'''select Document.documentUUID
from Document, Citation
where Document.citation_id = Citation.citationUUID and
Citation.case_name like '/%%';''')
for doc in docs:
if verbose:
print "Fixing document %s: %s" % (doc.pk, doc)
if not simulate:
doc.citation.case_name = harmonize(clean_string(doc.citation.case_name))
doc.citation.save()
def main():
    usage = "usage: %prog [--verbose] [--simulate]"
parser = OptionParser(usage)
parser.add_option('-v', '--verbose', action="store_true", dest='verbose',
default=False, help="Display log during execution")
parser.add_option('-s', '--simulate', action="store_true",
dest='simulate', default=False, help=("Simulate the corrections without "
"actually making them."))
(options, args) = parser.parse_args()
verbose = options.verbose
simulate = options.simulate
if simulate:
print "*******************************************"
print "* SIMULATE MODE - NO CHANGES WILL BE MADE *"
print "*******************************************"
return fixer(simulate, verbose)
exit(0)
if __name__ == '__main__':
main()
|
Fix script for cases with leading slashes.# This software and any associated files are copyright 2010 Brian Carver and
# Michael Lissner.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Under Sections 7(a) and 7(b) of version 3 of the GNU Affero General Public
# License, that license is supplemented by the following terms:
#
# a) You are required to preserve this legal notice and all author
# attributions in this program and its accompanying documentation.
#
# b) You are prohibited from misrepresenting the origin of any material
# within this covered work and you are required to mark in reasonable
# ways how any modified versions differ from the original version.
import sys
sys.path.append('/var/www/court-listener/alert')
import settings
from django.core.management import setup_environ
setup_environ(settings)
from search.models import Document
from juriscraper.lib.string_utils import harmonize, clean_string
from optparse import OptionParser
def fixer(simulate=False, verbose=False):
    '''Remove leading slashes by running the new and improved harmonize/clean_string scripts'''
docs = Document.objects.raw(r'''select Document.documentUUID
from Document, Citation
where Document.citation_id = Citation.citationUUID and
Citation.case_name like '/%%';''')
for doc in docs:
if verbose:
print "Fixing document %s: %s" % (doc.pk, doc)
if not simulate:
doc.citation.case_name = harmonize(clean_string(doc.citation.case_name))
doc.citation.save()
def main():
    usage = "usage: %prog [--verbose] [--simulate]"
parser = OptionParser(usage)
parser.add_option('-v', '--verbose', action="store_true", dest='verbose',
default=False, help="Display log during execution")
parser.add_option('-s', '--simulate', action="store_true",
dest='simulate', default=False, help=("Simulate the corrections without "
"actually making them."))
(options, args) = parser.parse_args()
verbose = options.verbose
simulate = options.simulate
if simulate:
print "*******************************************"
print "* SIMULATE MODE - NO CHANGES WILL BE MADE *"
print "*******************************************"
return fixer(simulate, verbose)
exit(0)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Fix script for cases with leading slashes.<commit_after># This software and any associated files are copyright 2010 Brian Carver and
# Michael Lissner.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Under Sections 7(a) and 7(b) of version 3 of the GNU Affero General Public
# License, that license is supplemented by the following terms:
#
# a) You are required to preserve this legal notice and all author
# attributions in this program and its accompanying documentation.
#
# b) You are prohibited from misrepresenting the origin of any material
# within this covered work and you are required to mark in reasonable
# ways how any modified versions differ from the original version.
import sys
sys.path.append('/var/www/court-listener/alert')
import settings
from django.core.management import setup_environ
setup_environ(settings)
from search.models import Document
from juriscraper.lib.string_utils import harmonize, clean_string
from optparse import OptionParser
def fixer(simulate=False, verbose=False):
    '''Remove leading slashes by running the new and improved harmonize/clean_string scripts'''
docs = Document.objects.raw(r'''select Document.documentUUID
from Document, Citation
where Document.citation_id = Citation.citationUUID and
Citation.case_name like '/%%';''')
for doc in docs:
if verbose:
print "Fixing document %s: %s" % (doc.pk, doc)
if not simulate:
doc.citation.case_name = harmonize(clean_string(doc.citation.case_name))
doc.citation.save()
def main():
    usage = "usage: %prog [--verbose] [--simulate]"
parser = OptionParser(usage)
parser.add_option('-v', '--verbose', action="store_true", dest='verbose',
default=False, help="Display log during execution")
parser.add_option('-s', '--simulate', action="store_true",
dest='simulate', default=False, help=("Simulate the corrections without "
"actually making them."))
(options, args) = parser.parse_args()
verbose = options.verbose
simulate = options.simulate
if simulate:
print "*******************************************"
print "* SIMULATE MODE - NO CHANGES WILL BE MADE *"
print "*******************************************"
return fixer(simulate, verbose)
exit(0)
if __name__ == '__main__':
main()
|
|
4eeab8ad7c1d4cdbda68222f94b23a62337570c6
|
pywikibot/families/commons_family.py
|
pywikibot/families/commons_family.py
|
# -*- coding: utf-8 -*-
__version__ = '$Id$'
from pywikibot import family
# The Wikimedia Commons family
class Family(family.WikimediaFamily):
def __init__(self):
super(Family, self).__init__()
self.name = 'commons'
self.langs = {
'commons': 'commons.wikimedia.org',
}
self.interwiki_forward = 'wikipedia'
self.category_redirect_templates = {
'commons': (u'Category redirect',
u'Categoryredirect',
u'Synonym taxon category redirect',
u'Invalid taxon category redirect',
u'Monotypic taxon category redirect',
u'See cat',
u'Seecat',
u'See category',
u'Catredirect',
u'Cat redirect',
u'Cat-red',
u'Catredir',
u'Redirect category'),
}
self.disambcatname = {
'commons': u'Disambiguation'
}
def ssl_pathprefix(self, code):
return "/wikipedia/commons"
|
# -*- coding: utf-8 -*-
__version__ = '$Id$'
from pywikibot import family
# The Wikimedia Commons family
class Family(family.WikimediaFamily):
def __init__(self):
super(Family, self).__init__()
self.name = 'commons'
self.langs = {
'commons': 'commons.wikimedia.org',
}
self.interwiki_forward = 'wikipedia'
self.category_redirect_templates = {
'commons': (u'Category redirect',
u'Categoryredirect',
u'Synonym taxon category redirect',
u'Invalid taxon category redirect',
u'Monotypic taxon category redirect',
u'See cat',
u'Seecat',
u'See category',
u'Catredirect',
u'Cat redirect',
u'Cat-red',
u'Catredir',
u'Redirect category'),
}
self.disambcatname = {
'commons': u'Disambiguation'
}
def ssl_pathprefix(self, code):
return "/wikipedia/commons"
def shared_data_repository(self, code, transcluded=False):
return ('wikidata', 'wikidata')
|
Add Wikidata as shared data repository for Commons.
|
Add Wikidata as shared data repository for Commons.
Change-Id: Ie79e3157d016fc74e400ddc618c04f2d1d39f17d
|
Python
|
mit
|
TridevGuha/pywikibot-core,darthbhyrava/pywikibot-local,xZise/pywikibot-core,wikimedia/pywikibot-core,hasteur/g13bot_tools_new,Darkdadaah/pywikibot-core,hasteur/g13bot_tools_new,happy5214/pywikibot-core,magul/pywikibot-core,wikimedia/pywikibot-core,PersianWikipedia/pywikibot-core,npdoty/pywikibot,trishnaguha/pywikibot-core,VcamX/pywikibot-core,happy5214/pywikibot-core,jayvdb/pywikibot-core,h4ck3rm1k3/pywikibot-core,jayvdb/pywikibot-core,h4ck3rm1k3/pywikibot-core,icyflame/batman,npdoty/pywikibot,magul/pywikibot-core,Darkdadaah/pywikibot-core,valhallasw/pywikibot-core,hasteur/g13bot_tools_new,smalyshev/pywikibot-core,emijrp/pywikibot-core
|
# -*- coding: utf-8 -*-
__version__ = '$Id$'
from pywikibot import family
# The Wikimedia Commons family
class Family(family.WikimediaFamily):
def __init__(self):
super(Family, self).__init__()
self.name = 'commons'
self.langs = {
'commons': 'commons.wikimedia.org',
}
self.interwiki_forward = 'wikipedia'
self.category_redirect_templates = {
'commons': (u'Category redirect',
u'Categoryredirect',
u'Synonym taxon category redirect',
u'Invalid taxon category redirect',
u'Monotypic taxon category redirect',
u'See cat',
u'Seecat',
u'See category',
u'Catredirect',
u'Cat redirect',
u'Cat-red',
u'Catredir',
u'Redirect category'),
}
self.disambcatname = {
'commons': u'Disambiguation'
}
def ssl_pathprefix(self, code):
return "/wikipedia/commons"
Add Wikidata as shared data repository for Commons.
Change-Id: Ie79e3157d016fc74e400ddc618c04f2d1d39f17d
|
# -*- coding: utf-8 -*-
__version__ = '$Id$'
from pywikibot import family
# The Wikimedia Commons family
class Family(family.WikimediaFamily):
def __init__(self):
super(Family, self).__init__()
self.name = 'commons'
self.langs = {
'commons': 'commons.wikimedia.org',
}
self.interwiki_forward = 'wikipedia'
self.category_redirect_templates = {
'commons': (u'Category redirect',
u'Categoryredirect',
u'Synonym taxon category redirect',
u'Invalid taxon category redirect',
u'Monotypic taxon category redirect',
u'See cat',
u'Seecat',
u'See category',
u'Catredirect',
u'Cat redirect',
u'Cat-red',
u'Catredir',
u'Redirect category'),
}
self.disambcatname = {
'commons': u'Disambiguation'
}
def ssl_pathprefix(self, code):
return "/wikipedia/commons"
def shared_data_repository(self, code, transcluded=False):
return ('wikidata', 'wikidata')
|
<commit_before># -*- coding: utf-8 -*-
__version__ = '$Id$'
from pywikibot import family
# The Wikimedia Commons family
class Family(family.WikimediaFamily):
def __init__(self):
super(Family, self).__init__()
self.name = 'commons'
self.langs = {
'commons': 'commons.wikimedia.org',
}
self.interwiki_forward = 'wikipedia'
self.category_redirect_templates = {
'commons': (u'Category redirect',
u'Categoryredirect',
u'Synonym taxon category redirect',
u'Invalid taxon category redirect',
u'Monotypic taxon category redirect',
u'See cat',
u'Seecat',
u'See category',
u'Catredirect',
u'Cat redirect',
u'Cat-red',
u'Catredir',
u'Redirect category'),
}
self.disambcatname = {
'commons': u'Disambiguation'
}
def ssl_pathprefix(self, code):
return "/wikipedia/commons"
<commit_msg>Add Wikidata as shared data repository for Commons.
Change-Id: Ie79e3157d016fc74e400ddc618c04f2d1d39f17d<commit_after>
|
# -*- coding: utf-8 -*-
__version__ = '$Id$'
from pywikibot import family
# The Wikimedia Commons family
class Family(family.WikimediaFamily):
def __init__(self):
super(Family, self).__init__()
self.name = 'commons'
self.langs = {
'commons': 'commons.wikimedia.org',
}
self.interwiki_forward = 'wikipedia'
self.category_redirect_templates = {
'commons': (u'Category redirect',
u'Categoryredirect',
u'Synonym taxon category redirect',
u'Invalid taxon category redirect',
u'Monotypic taxon category redirect',
u'See cat',
u'Seecat',
u'See category',
u'Catredirect',
u'Cat redirect',
u'Cat-red',
u'Catredir',
u'Redirect category'),
}
self.disambcatname = {
'commons': u'Disambiguation'
}
def ssl_pathprefix(self, code):
return "/wikipedia/commons"
def shared_data_repository(self, code, transcluded=False):
return ('wikidata', 'wikidata')
|
# -*- coding: utf-8 -*-
__version__ = '$Id$'
from pywikibot import family
# The Wikimedia Commons family
class Family(family.WikimediaFamily):
def __init__(self):
super(Family, self).__init__()
self.name = 'commons'
self.langs = {
'commons': 'commons.wikimedia.org',
}
self.interwiki_forward = 'wikipedia'
self.category_redirect_templates = {
'commons': (u'Category redirect',
u'Categoryredirect',
u'Synonym taxon category redirect',
u'Invalid taxon category redirect',
u'Monotypic taxon category redirect',
u'See cat',
u'Seecat',
u'See category',
u'Catredirect',
u'Cat redirect',
u'Cat-red',
u'Catredir',
u'Redirect category'),
}
self.disambcatname = {
'commons': u'Disambiguation'
}
def ssl_pathprefix(self, code):
return "/wikipedia/commons"
Add Wikidata as shared data repository for Commons.
Change-Id: Ie79e3157d016fc74e400ddc618c04f2d1d39f17d# -*- coding: utf-8 -*-
__version__ = '$Id$'
from pywikibot import family
# The Wikimedia Commons family
class Family(family.WikimediaFamily):
def __init__(self):
super(Family, self).__init__()
self.name = 'commons'
self.langs = {
'commons': 'commons.wikimedia.org',
}
self.interwiki_forward = 'wikipedia'
self.category_redirect_templates = {
'commons': (u'Category redirect',
u'Categoryredirect',
u'Synonym taxon category redirect',
u'Invalid taxon category redirect',
u'Monotypic taxon category redirect',
u'See cat',
u'Seecat',
u'See category',
u'Catredirect',
u'Cat redirect',
u'Cat-red',
u'Catredir',
u'Redirect category'),
}
self.disambcatname = {
'commons': u'Disambiguation'
}
def ssl_pathprefix(self, code):
return "/wikipedia/commons"
def shared_data_repository(self, code, transcluded=False):
return ('wikidata', 'wikidata')
|
<commit_before># -*- coding: utf-8 -*-
__version__ = '$Id$'
from pywikibot import family
# The Wikimedia Commons family
class Family(family.WikimediaFamily):
def __init__(self):
super(Family, self).__init__()
self.name = 'commons'
self.langs = {
'commons': 'commons.wikimedia.org',
}
self.interwiki_forward = 'wikipedia'
self.category_redirect_templates = {
'commons': (u'Category redirect',
u'Categoryredirect',
u'Synonym taxon category redirect',
u'Invalid taxon category redirect',
u'Monotypic taxon category redirect',
u'See cat',
u'Seecat',
u'See category',
u'Catredirect',
u'Cat redirect',
u'Cat-red',
u'Catredir',
u'Redirect category'),
}
self.disambcatname = {
'commons': u'Disambiguation'
}
def ssl_pathprefix(self, code):
return "/wikipedia/commons"
<commit_msg>Add Wikidata as shared data repository for Commons.
Change-Id: Ie79e3157d016fc74e400ddc618c04f2d1d39f17d<commit_after># -*- coding: utf-8 -*-
__version__ = '$Id$'
from pywikibot import family
# The Wikimedia Commons family
class Family(family.WikimediaFamily):
def __init__(self):
super(Family, self).__init__()
self.name = 'commons'
self.langs = {
'commons': 'commons.wikimedia.org',
}
self.interwiki_forward = 'wikipedia'
self.category_redirect_templates = {
'commons': (u'Category redirect',
u'Categoryredirect',
u'Synonym taxon category redirect',
u'Invalid taxon category redirect',
u'Monotypic taxon category redirect',
u'See cat',
u'Seecat',
u'See category',
u'Catredirect',
u'Cat redirect',
u'Cat-red',
u'Catredir',
u'Redirect category'),
}
self.disambcatname = {
'commons': u'Disambiguation'
}
def ssl_pathprefix(self, code):
return "/wikipedia/commons"
def shared_data_repository(self, code, transcluded=False):
return ('wikidata', 'wikidata')
|
47699ec789c4a8cf47d07a9ea2955bf220c14e69
|
tests/test_vector2_substraction.py
|
tests/test_vector2_substraction.py
|
import pytest
from ppb_vector import Vector2
def test_substraction_vectors():
test_vector1 = Vector2(0, 1)
test_vector2 = Vector2(0, 1)
result = test_vector1 - test_vector2
assert result == Vector2(0, 0)
def test_substraction_vector_tuple():
test_vector = Vector2(4, 6)
test_tuple = (1, 1)
result = test_vector - test_tuple
assert result == Vector2(3, 5)
def test_substraction_vector_list():
test_vector = Vector2(3, 7)
test_list = [1, 3]
result = test_vector - test_list
assert result == Vector2(2, 4)
def test_substraction_vector_dict():
test_vector = Vector2(7, 11)
test_dict = {'x': 3, 'y': 5}
result = test_vector - test_dict
assert result == Vector2(4, 6)
data = [
([Vector2(10, 16), Vector2(2, 2)], Vector2(8, 14)),
([Vector2(25, 22), Vector2(22, 61)], Vector2(3, -39)),
([Vector2(39, 43), Vector2(92, -12)], Vector2(-53, 55)),
([Vector2(1, 1), (2, 2)], Vector2(-1, -1)),
([Vector2(25, 22), (12, 92)], Vector2(13, -70)),
([Vector2(42, 12), (-5, 23)], Vector2(47, -11)),
([Vector2(51, 28), [72, 31]], Vector2(-21, -3)),
([Vector2(1, 2), [2, 2]], Vector2(-1, 0)),
([Vector2(1, 2), {'x': 2, 'y': 2}], Vector2(-1, 0)),
]
@pytest.mark.parametrize('test_input, expected', data)
def test_multiples_values(test_input, expected):
assert (test_input[0] - test_input[1]) == expected
|
Add vector2 substraction unit tests
|
Add vector2 substraction unit tests
|
Python
|
artistic-2.0
|
ppb/ppb-vector,ppb/ppb-vector
|
Add vector2 substraction unit tests
|
import pytest
from ppb_vector import Vector2
def test_substraction_vectors():
test_vector1 = Vector2(0, 1)
test_vector2 = Vector2(0, 1)
result = test_vector1 - test_vector2
assert result == Vector2(0, 0)
def test_substraction_vector_tuple():
test_vector = Vector2(4, 6)
test_tuple = (1, 1)
result = test_vector - test_tuple
assert result == Vector2(3, 5)
def test_substraction_vector_list():
test_vector = Vector2(3, 7)
test_list = [1, 3]
result = test_vector - test_list
assert result == Vector2(2, 4)
def test_substraction_vector_dict():
test_vector = Vector2(7, 11)
test_dict = {'x': 3, 'y': 5}
result = test_vector - test_dict
assert result == Vector2(4, 6)
data = [
([Vector2(10, 16), Vector2(2, 2)], Vector2(8, 14)),
([Vector2(25, 22), Vector2(22, 61)], Vector2(3, -39)),
([Vector2(39, 43), Vector2(92, -12)], Vector2(-53, 55)),
([Vector2(1, 1), (2, 2)], Vector2(-1, -1)),
([Vector2(25, 22), (12, 92)], Vector2(13, -70)),
([Vector2(42, 12), (-5, 23)], Vector2(47, -11)),
([Vector2(51, 28), [72, 31]], Vector2(-21, -3)),
([Vector2(1, 2), [2, 2]], Vector2(-1, 0)),
([Vector2(1, 2), {'x': 2, 'y': 2}], Vector2(-1, 0)),
]
@pytest.mark.parametrize('test_input, expected', data)
def test_multiples_values(test_input, expected):
assert (test_input[0] - test_input[1]) == expected
|
<commit_before><commit_msg>Add vector2 substraction unit tests<commit_after>
|
import pytest
from ppb_vector import Vector2
def test_substraction_vectors():
test_vector1 = Vector2(0, 1)
test_vector2 = Vector2(0, 1)
result = test_vector1 - test_vector2
assert result == Vector2(0, 0)
def test_substraction_vector_tuple():
test_vector = Vector2(4, 6)
test_tuple = (1, 1)
result = test_vector - test_tuple
assert result == Vector2(3, 5)
def test_substraction_vector_list():
test_vector = Vector2(3, 7)
test_list = [1, 3]
result = test_vector - test_list
assert result == Vector2(2, 4)
def test_substraction_vector_dict():
test_vector = Vector2(7, 11)
test_dict = {'x': 3, 'y': 5}
result = test_vector - test_dict
assert result == Vector2(4, 6)
data = [
([Vector2(10, 16), Vector2(2, 2)], Vector2(8, 14)),
([Vector2(25, 22), Vector2(22, 61)], Vector2(3, -39)),
([Vector2(39, 43), Vector2(92, -12)], Vector2(-53, 55)),
([Vector2(1, 1), (2, 2)], Vector2(-1, -1)),
([Vector2(25, 22), (12, 92)], Vector2(13, -70)),
([Vector2(42, 12), (-5, 23)], Vector2(47, -11)),
([Vector2(51, 28), [72, 31]], Vector2(-21, -3)),
([Vector2(1, 2), [2, 2]], Vector2(-1, 0)),
([Vector2(1, 2), {'x': 2, 'y': 2}], Vector2(-1, 0)),
]
@pytest.mark.parametrize('test_input, expected', data)
def test_multiples_values(test_input, expected):
assert (test_input[0] - test_input[1]) == expected
|
Add vector2 substraction unit testsimport pytest
from ppb_vector import Vector2
def test_substraction_vectors():
test_vector1 = Vector2(0, 1)
test_vector2 = Vector2(0, 1)
result = test_vector1 - test_vector2
assert result == Vector2(0, 0)
def test_substraction_vector_tuple():
test_vector = Vector2(4, 6)
test_tuple = (1, 1)
result = test_vector - test_tuple
assert result == Vector2(3, 5)
def test_substraction_vector_list():
test_vector = Vector2(3, 7)
test_list = [1, 3]
result = test_vector - test_list
assert result == Vector2(2, 4)
def test_substraction_vector_dict():
test_vector = Vector2(7, 11)
test_dict = {'x': 3, 'y': 5}
result = test_vector - test_dict
assert result == Vector2(4, 6)
data = [
([Vector2(10, 16), Vector2(2, 2)], Vector2(8, 14)),
([Vector2(25, 22), Vector2(22, 61)], Vector2(3, -39)),
([Vector2(39, 43), Vector2(92, -12)], Vector2(-53, 55)),
([Vector2(1, 1), (2, 2)], Vector2(-1, -1)),
([Vector2(25, 22), (12, 92)], Vector2(13, -70)),
([Vector2(42, 12), (-5, 23)], Vector2(47, -11)),
([Vector2(51, 28), [72, 31]], Vector2(-21, -3)),
([Vector2(1, 2), [2, 2]], Vector2(-1, 0)),
([Vector2(1, 2), {'x': 2, 'y': 2}], Vector2(-1, 0)),
]
@pytest.mark.parametrize('test_input, expected', data)
def test_multiples_values(test_input, expected):
assert (test_input[0] - test_input[1]) == expected
|
<commit_before><commit_msg>Add vector2 substraction unit tests<commit_after>import pytest
from ppb_vector import Vector2
def test_substraction_vectors():
test_vector1 = Vector2(0, 1)
test_vector2 = Vector2(0, 1)
result = test_vector1 - test_vector2
assert result == Vector2(0, 0)
def test_substraction_vector_tuple():
test_vector = Vector2(4, 6)
test_tuple = (1, 1)
result = test_vector - test_tuple
assert result == Vector2(3, 5)
def test_substraction_vector_list():
test_vector = Vector2(3, 7)
test_list = [1, 3]
result = test_vector - test_list
assert result == Vector2(2, 4)
def test_substraction_vector_dict():
test_vector = Vector2(7, 11)
test_dict = {'x': 3, 'y': 5}
result = test_vector - test_dict
assert result == Vector2(4, 6)
data = [
([Vector2(10, 16), Vector2(2, 2)], Vector2(8, 14)),
([Vector2(25, 22), Vector2(22, 61)], Vector2(3, -39)),
([Vector2(39, 43), Vector2(92, -12)], Vector2(-53, 55)),
([Vector2(1, 1), (2, 2)], Vector2(-1, -1)),
([Vector2(25, 22), (12, 92)], Vector2(13, -70)),
([Vector2(42, 12), (-5, 23)], Vector2(47, -11)),
([Vector2(51, 28), [72, 31]], Vector2(-21, -3)),
([Vector2(1, 2), [2, 2]], Vector2(-1, 0)),
([Vector2(1, 2), {'x': 2, 'y': 2}], Vector2(-1, 0)),
]
@pytest.mark.parametrize('test_input, expected', data)
def test_multiples_values(test_input, expected):
assert (test_input[0] - test_input[1]) == expected
|
|
3ec5c0a742054177be525182f42b69d48f837aff
|
rache/utils.py
|
rache/utils.py
|
import os
try:
from urllib import parse
except ImportError:
import urlparse as parse
def parse_redis_url():
config = {
'host': 'localhost',
'port': 6379,
'password': None,
'db': 0,
}
parsed_redis = parse.urlparse(
os.environ.get('REDIS_URL', 'redis://localhost:6379/0'))
if '?' in parsed_redis.path and not parsed_redis.query:
# Bug in python 2.7.3, fixed in 2.7.4
path, q, querystring = parsed_redis.path.partition('?')
else:
path, q, querystring = parsed_redis.path, None, parsed_redis.query
if path[1:]:
config['db'] = int(path[1:])
querystring = parse.parse_qs(querystring)
for key in querystring.keys():
querystring[key] = querystring[key][0]
for key in config.keys():
querystring.pop(key, None)
host, colon, port = parsed_redis.netloc.partition(':')
if '@' in host:
password, at, host = host.partition('@')
config['password'] = password
config['host'] = host
config['port'] = int(port)
return config
|
import os
try:
from urllib import parse
except ImportError:
import urlparse as parse
def parse_redis_url():
config = {
'host': 'localhost',
'port': 6379,
'password': None,
'db': 0,
}
parsed_redis = parse.urlparse(
os.environ.get('REDIS_URL', 'redis://localhost:6379/0'))
if '?' in parsed_redis.path and not parsed_redis.query:
# Bug in python 2.7.3, fixed in 2.7.4
path, q, querystring = parsed_redis.path.partition('?')
else:
path, q, querystring = parsed_redis.path, None, parsed_redis.query
if parsed_redis.netloc.endswith('unix'):
del config['port']
del config['host']
# the last item of the path could also be just part of the socket path
try:
config['db'] = int(os.path.split(path)[-1])
except ValueError:
pass
else:
path = os.path.join(*os.path.split(path)[:-1])
config['unix_socket_path'] = path
if parsed_redis.password:
config['password'] = parsed_redis.password
else:
if path[1:]:
config['db'] = int(path[1:])
if parsed_redis.password:
config['password'] = parsed_redis.password
if parsed_redis.port:
config['port'] = int(parsed_redis.port)
if parsed_redis.hostname:
config['host'] = parsed_redis.hostname
return config
|
Support unix sockets in the Redis URL parser.
|
Support unix sockets in the Redis URL parser.
|
Python
|
bsd-3-clause
|
brutasse/rache
|
import os
try:
from urllib import parse
except ImportError:
import urlparse as parse
def parse_redis_url():
config = {
'host': 'localhost',
'port': 6379,
'password': None,
'db': 0,
}
parsed_redis = parse.urlparse(
os.environ.get('REDIS_URL', 'redis://localhost:6379/0'))
if '?' in parsed_redis.path and not parsed_redis.query:
# Bug in python 2.7.3, fixed in 2.7.4
path, q, querystring = parsed_redis.path.partition('?')
else:
path, q, querystring = parsed_redis.path, None, parsed_redis.query
if path[1:]:
config['db'] = int(path[1:])
querystring = parse.parse_qs(querystring)
for key in querystring.keys():
querystring[key] = querystring[key][0]
for key in config.keys():
querystring.pop(key, None)
host, colon, port = parsed_redis.netloc.partition(':')
if '@' in host:
password, at, host = host.partition('@')
config['password'] = password
config['host'] = host
config['port'] = int(port)
return config
Support unix sockets in the Redis URL parser.
|
import os
try:
from urllib import parse
except ImportError:
import urlparse as parse
def parse_redis_url():
config = {
'host': 'localhost',
'port': 6379,
'password': None,
'db': 0,
}
parsed_redis = parse.urlparse(
os.environ.get('REDIS_URL', 'redis://localhost:6379/0'))
if '?' in parsed_redis.path and not parsed_redis.query:
# Bug in python 2.7.3, fixed in 2.7.4
path, q, querystring = parsed_redis.path.partition('?')
else:
path, q, querystring = parsed_redis.path, None, parsed_redis.query
if parsed_redis.netloc.endswith('unix'):
del config['port']
del config['host']
# the last item of the path could also be just part of the socket path
try:
config['db'] = int(os.path.split(path)[-1])
except ValueError:
pass
else:
path = os.path.join(*os.path.split(path)[:-1])
config['unix_socket_path'] = path
if parsed_redis.password:
config['password'] = parsed_redis.password
else:
if path[1:]:
config['db'] = int(path[1:])
if parsed_redis.password:
config['password'] = parsed_redis.password
if parsed_redis.port:
config['port'] = int(parsed_redis.port)
if parsed_redis.hostname:
config['host'] = parsed_redis.hostname
return config
|
<commit_before>import os
try:
from urllib import parse
except ImportError:
import urlparse as parse
def parse_redis_url():
config = {
'host': 'localhost',
'port': 6379,
'password': None,
'db': 0,
}
parsed_redis = parse.urlparse(
os.environ.get('REDIS_URL', 'redis://localhost:6379/0'))
if '?' in parsed_redis.path and not parsed_redis.query:
# Bug in python 2.7.3, fixed in 2.7.4
path, q, querystring = parsed_redis.path.partition('?')
else:
path, q, querystring = parsed_redis.path, None, parsed_redis.query
if path[1:]:
config['db'] = int(path[1:])
querystring = parse.parse_qs(querystring)
for key in querystring.keys():
querystring[key] = querystring[key][0]
for key in config.keys():
querystring.pop(key, None)
host, colon, port = parsed_redis.netloc.partition(':')
if '@' in host:
password, at, host = host.partition('@')
config['password'] = password
config['host'] = host
config['port'] = int(port)
return config
<commit_msg>Support unix sockets in the Redis URL parser.<commit_after>
|
import os
try:
from urllib import parse
except ImportError:
import urlparse as parse
def parse_redis_url():
config = {
'host': 'localhost',
'port': 6379,
'password': None,
'db': 0,
}
parsed_redis = parse.urlparse(
os.environ.get('REDIS_URL', 'redis://localhost:6379/0'))
if '?' in parsed_redis.path and not parsed_redis.query:
# Bug in python 2.7.3, fixed in 2.7.4
path, q, querystring = parsed_redis.path.partition('?')
else:
path, q, querystring = parsed_redis.path, None, parsed_redis.query
if parsed_redis.netloc.endswith('unix'):
del config['port']
del config['host']
# the last item of the path could also be just part of the socket path
try:
config['db'] = int(os.path.split(path)[-1])
except ValueError:
pass
else:
path = os.path.join(*os.path.split(path)[:-1])
config['unix_socket_path'] = path
if parsed_redis.password:
config['password'] = parsed_redis.password
else:
if path[1:]:
config['db'] = int(path[1:])
if parsed_redis.password:
config['password'] = parsed_redis.password
if parsed_redis.port:
config['port'] = int(parsed_redis.port)
if parsed_redis.hostname:
config['host'] = parsed_redis.hostname
return config
|
import os
try:
from urllib import parse
except ImportError:
import urlparse as parse
def parse_redis_url():
config = {
'host': 'localhost',
'port': 6379,
'password': None,
'db': 0,
}
parsed_redis = parse.urlparse(
os.environ.get('REDIS_URL', 'redis://localhost:6379/0'))
if '?' in parsed_redis.path and not parsed_redis.query:
# Bug in python 2.7.3, fixed in 2.7.4
path, q, querystring = parsed_redis.path.partition('?')
else:
path, q, querystring = parsed_redis.path, None, parsed_redis.query
if path[1:]:
config['db'] = int(path[1:])
querystring = parse.parse_qs(querystring)
for key in querystring.keys():
querystring[key] = querystring[key][0]
for key in config.keys():
querystring.pop(key, None)
host, colon, port = parsed_redis.netloc.partition(':')
if '@' in host:
password, at, host = host.partition('@')
config['password'] = password
config['host'] = host
config['port'] = int(port)
return config
Support unix sockets in the Redis URL parser.import os
try:
from urllib import parse
except ImportError:
import urlparse as parse
def parse_redis_url():
config = {
'host': 'localhost',
'port': 6379,
'password': None,
'db': 0,
}
parsed_redis = parse.urlparse(
os.environ.get('REDIS_URL', 'redis://localhost:6379/0'))
if '?' in parsed_redis.path and not parsed_redis.query:
# Bug in python 2.7.3, fixed in 2.7.4
path, q, querystring = parsed_redis.path.partition('?')
else:
path, q, querystring = parsed_redis.path, None, parsed_redis.query
if parsed_redis.netloc.endswith('unix'):
del config['port']
del config['host']
# the last item of the path could also be just part of the socket path
try:
config['db'] = int(os.path.split(path)[-1])
except ValueError:
pass
else:
path = os.path.join(*os.path.split(path)[:-1])
config['unix_socket_path'] = path
if parsed_redis.password:
config['password'] = parsed_redis.password
else:
if path[1:]:
config['db'] = int(path[1:])
if parsed_redis.password:
config['password'] = parsed_redis.password
if parsed_redis.port:
config['port'] = int(parsed_redis.port)
if parsed_redis.hostname:
config['host'] = parsed_redis.hostname
return config
|
<commit_before>import os
try:
from urllib import parse
except ImportError:
import urlparse as parse
def parse_redis_url():
config = {
'host': 'localhost',
'port': 6379,
'password': None,
'db': 0,
}
parsed_redis = parse.urlparse(
os.environ.get('REDIS_URL', 'redis://localhost:6379/0'))
if '?' in parsed_redis.path and not parsed_redis.query:
# Bug in python 2.7.3, fixed in 2.7.4
path, q, querystring = parsed_redis.path.partition('?')
else:
path, q, querystring = parsed_redis.path, None, parsed_redis.query
if path[1:]:
config['db'] = int(path[1:])
querystring = parse.parse_qs(querystring)
for key in querystring.keys():
querystring[key] = querystring[key][0]
for key in config.keys():
querystring.pop(key, None)
host, colon, port = parsed_redis.netloc.partition(':')
if '@' in host:
password, at, host = host.partition('@')
config['password'] = password
config['host'] = host
config['port'] = int(port)
return config
<commit_msg>Support unix sockets in the Redis URL parser.<commit_after>import os
try:
from urllib import parse
except ImportError:
import urlparse as parse
def parse_redis_url():
config = {
'host': 'localhost',
'port': 6379,
'password': None,
'db': 0,
}
parsed_redis = parse.urlparse(
os.environ.get('REDIS_URL', 'redis://localhost:6379/0'))
if '?' in parsed_redis.path and not parsed_redis.query:
# Bug in python 2.7.3, fixed in 2.7.4
path, q, querystring = parsed_redis.path.partition('?')
else:
path, q, querystring = parsed_redis.path, None, parsed_redis.query
if parsed_redis.netloc.endswith('unix'):
del config['port']
del config['host']
# the last item of the path could also be just part of the socket path
try:
config['db'] = int(os.path.split(path)[-1])
except ValueError:
pass
else:
path = os.path.join(*os.path.split(path)[:-1])
config['unix_socket_path'] = path
if parsed_redis.password:
config['password'] = parsed_redis.password
else:
if path[1:]:
config['db'] = int(path[1:])
if parsed_redis.password:
config['password'] = parsed_redis.password
if parsed_redis.port:
config['port'] = int(parsed_redis.port)
if parsed_redis.hostname:
config['host'] = parsed_redis.hostname
return config
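
For reference, a minimal usage sketch of the parser above (illustrative only: the URLs and the socket path are made-up examples, and the commented results are simply what the code shown above returns for them):

import os
from rache.utils import parse_redis_url

# Hypothetical TCP-style URL: host, port and db come straight from the URL.
os.environ['REDIS_URL'] = 'redis://localhost:6380/2'
print(parse_redis_url())
# {'host': 'localhost', 'port': 6380, 'password': None, 'db': 2}

# Hypothetical unix-socket URL: the netloc is the literal "unix", the socket
# path follows, and the trailing path component selects the database.
os.environ['REDIS_URL'] = 'redis://unix/var/run/redis.sock/0'
print(parse_redis_url())
# {'password': None, 'db': 0, 'unix_socket_path': '/var/run/redis.sock'}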
|
cf3b675913e584f0a3dd54fceb6d14ff85bbbad4
|
tools/sofa-edr.py
|
tools/sofa-edr.py
|
#!/usr/bin/env python3
import subprocess
import time
import argparse
if __name__ == '__main__':
bwa_is_recorded = False
smb_is_recorded = False
htvc_is_recorded = False
parser = argparse.ArgumentParser(description='A SOFA wrapper which supports event-driven recording.')
    parser.add_argument('--trace-points', default='', metavar='Comma-separated string list for interested keywords, e.g., "keyword1,keyword2"')
args = parser.parse_args()
while True:
time.sleep(3)
print(time.time())
with open('/home/ubuntu/pbrun_error.log') as f:
lines = f.readlines()
lc = 0
for line in lines:
#print('Line%d'%lc, line)
lc = lc + 1
if lc < 6:
continue
if line.find('BWA') != -1 and not smb_is_recorded:
bwa_is_recorded = True
print('BWA begins at ', time.time())
time.sleep(120)
subprocess.call('sofa record "sleep 20" --profile_all_cpus --logdir=sofalog-bwa ', shell=True)
break
if line.find('BQSR') != -1 and not smb_is_recorded:
smb_is_recorded = True
print('SMB begins at ', time.time())
time.sleep(120)
subprocess.call('sofa record "sleep 20" --profile_all_cpus --logdir=sofalog-smb ', shell=True)
break
if line.find('HaplotypeCaller') != -1 and not htvc_is_recorded:
htvc_is_recorded = True
print('HTVC begins at ', time.time())
time.sleep(120)
subprocess.call('sofa record "sleep 20" --profile_all_cpus --logdir=sofalog-htvc ', shell=True)
break
if bwa_is_recorded and smb_is_recorded and htvc_is_recorded:
print("Tracing is done.")
break
|
Add an event-driven recording tool
|
Add an event-driven recording tool
|
Python
|
apache-2.0
|
cyliustack/sofa,cyliustack/sofa,cyliustack/sofa,cyliustack/sofa,cyliustack/sofa
|
Add an event-driven recording tool
|
#!/usr/bin/env python3
import subprocess
import time
import argparse
if __name__ == '__main__':
bwa_is_recorded = False
smb_is_recorded = False
htvc_is_recorded = False
parser = argparse.ArgumentParser(description='A SOFA wrapper which supports event-driven recording.')
    parser.add_argument('--trace-points', default='', metavar='Comma-separated string list for interested keywords, e.g., "keyword1,keyword2"')
args = parser.parse_args()
while True:
time.sleep(3)
print(time.time())
with open('/home/ubuntu/pbrun_error.log') as f:
lines = f.readlines()
lc = 0
for line in lines:
#print('Line%d'%lc, line)
lc = lc + 1
if lc < 6:
continue
if line.find('BWA') != -1 and not smb_is_recorded:
bwa_is_recorded = True
print('BWA begins at ', time.time())
time.sleep(120)
subprocess.call('sofa record "sleep 20" --profile_all_cpus --logdir=sofalog-bwa ', shell=True)
break
if line.find('BQSR') != -1 and not smb_is_recorded:
smb_is_recorded = True
print('SMB begins at ', time.time())
time.sleep(120)
subprocess.call('sofa record "sleep 20" --profile_all_cpus --logdir=sofalog-smb ', shell=True)
break
if line.find('HaplotypeCaller') != -1 and not htvc_is_recorded:
htvc_is_recorded = True
print('HTVC begins at ', time.time())
time.sleep(120)
subprocess.call('sofa record "sleep 20" --profile_all_cpus --logdir=sofalog-htvc ', shell=True)
break
if bwa_is_recorded and smb_is_recorded and htvc_is_recorded:
print("Tracing is done.")
break
|
<commit_before><commit_msg>Add an event-driven recording tool<commit_after>
|
#!/usr/bin/env python3
import subprocess
import time
import argparse
if __name__ == '__main__':
bwa_is_recorded = False
smb_is_recorded = False
htvc_is_recorded = False
parser = argparse.ArgumentParser(description='A SOFA wrapper which supports event-driven recording.')
    parser.add_argument('--trace-points', default='', metavar='Comma-separated string list for interested keywords, e.g., "keyword1,keyword2"')
args = parser.parse_args()
while True:
time.sleep(3)
print(time.time())
with open('/home/ubuntu/pbrun_error.log') as f:
lines = f.readlines()
lc = 0
for line in lines:
#print('Line%d'%lc, line)
lc = lc + 1
if lc < 6:
continue
if line.find('BWA') != -1 and not smb_is_recorded:
bwa_is_recorded = True
print('BWA begins at ', time.time())
time.sleep(120)
subprocess.call('sofa record "sleep 20" --profile_all_cpus --logdir=sofalog-bwa ', shell=True)
break
if line.find('BQSR') != -1 and not smb_is_recorded:
smb_is_recorded = True
print('SMB begins at ', time.time())
time.sleep(120)
subprocess.call('sofa record "sleep 20" --profile_all_cpus --logdir=sofalog-smb ', shell=True)
break
if line.find('HaplotypeCaller') != -1 and not htvc_is_recorded:
htvc_is_recorded = True
print('HTVC begins at ', time.time())
time.sleep(120)
subprocess.call('sofa record "sleep 20" --profile_all_cpus --logdir=sofalog-htvc ', shell=True)
break
if bwa_is_recorded and smb_is_recorded and htvc_is_recorded:
print("Tracing is done.")
break
|
Add an event-driven recording tool#!/usr/bin/env python3
import subprocess
import time
import argparse
if __name__ == '__main__':
bwa_is_recorded = False
smb_is_recorded = False
htvc_is_recorded = False
parser = argparse.ArgumentParser(description='A SOFA wrapper which supports event-driven recording.')
    parser.add_argument('--trace-points', default='', metavar='Comma-separated string list for interested keywords, e.g., "keyword1,keyword2"')
args = parser.parse_args()
while True:
time.sleep(3)
print(time.time())
with open('/home/ubuntu/pbrun_error.log') as f:
lines = f.readlines()
lc = 0
for line in lines:
#print('Line%d'%lc, line)
lc = lc + 1
if lc < 6:
continue
if line.find('BWA') != -1 and not smb_is_recorded:
bwa_is_recorded = True
print('BWA begins at ', time.time())
time.sleep(120)
subprocess.call('sofa record "sleep 20" --profile_all_cpus --logdir=sofalog-bwa ', shell=True)
break
if line.find('BQSR') != -1 and not smb_is_recorded:
smb_is_recorded = True
print('SMB begins at ', time.time())
time.sleep(120)
subprocess.call('sofa record "sleep 20" --profile_all_cpus --logdir=sofalog-smb ', shell=True)
break
if line.find('HaplotypeCaller') != -1 and not htvc_is_recorded:
htvc_is_recorded = True
print('HTVC begins at ', time.time())
time.sleep(120)
subprocess.call('sofa record "sleep 20" --profile_all_cpus --logdir=sofalog-htvc ', shell=True)
break
if bwa_is_recorded and smb_is_recorded and htvc_is_recorded:
print("Tracing is done.")
break
|
<commit_before><commit_msg>Add an event-driven recording tool<commit_after>#!/usr/bin/env python3
import subprocess
import time
import argparse
if __name__ == '__main__':
bwa_is_recorded = False
smb_is_recorded = False
htvc_is_recorded = False
parser = argparse.ArgumentParser(description='A SOFA wrapper which supports event-driven recording.')
    parser.add_argument('--trace-points', default='', metavar='Comma-separated string list for interested keywords, e.g., "keyword1,keyword2"')
args = parser.parse_args()
while True:
time.sleep(3)
print(time.time())
with open('/home/ubuntu/pbrun_error.log') as f:
lines = f.readlines()
lc = 0
for line in lines:
#print('Line%d'%lc, line)
lc = lc + 1
if lc < 6:
continue
if line.find('BWA') != -1 and not smb_is_recorded:
bwa_is_recorded = True
print('BWA begins at ', time.time())
time.sleep(120)
subprocess.call('sofa record "sleep 20" --profile_all_cpus --logdir=sofalog-bwa ', shell=True)
break
if line.find('BQSR') != -1 and not smb_is_recorded:
smb_is_recorded = True
print('SMB begins at ', time.time())
time.sleep(120)
subprocess.call('sofa record "sleep 20" --profile_all_cpus --logdir=sofalog-smb ', shell=True)
break
if line.find('HaplotypeCaller') != -1 and not htvc_is_recorded:
htvc_is_recorded = True
print('HTVC begins at ', time.time())
time.sleep(120)
subprocess.call('sofa record "sleep 20" --profile_all_cpus --logdir=sofalog-htvc ', shell=True)
break
if bwa_is_recorded and smb_is_recorded and htvc_is_recorded:
print("Tracing is done.")
break
|
|
7482de022b8876df8d8c49f2cb4586496ec4549e
|
yatsm/regression/numba_accel.py
|
yatsm/regression/numba_accel.py
|
""" Regression related functions optimized using Numba, if available
If Numba is not available, functions are still usable as not JIT'd functions
"""
# @nb.jit()
def rmse(y, yhat):
""" Calculate and return Root Mean Squared Error (RMSE)
Args:
y (np.ndarray): known values
yhat (np.ndarray): predicted values
Returns:
float: Root Mean Squared Error
"""
return ((y - yhat) ** 2).mean() ** 0.5
try:
import numba as nb
except:
pass
else:
# JIT functions if we can
rmse = nb.jit(rmse)
|
Add faster RMSE via numba
|
Add faster RMSE via numba
|
Python
|
mit
|
ceholden/yatsm,c11/yatsm,valpasq/yatsm,c11/yatsm,ceholden/yatsm,valpasq/yatsm
|
Add faster RMSE via numba
|
""" Regression related functions optimized using Numba, if available
If Numba is not available, functions are still usable as not JIT'd functions
"""
# @nb.jit()
def rmse(y, yhat):
""" Calculate and return Root Mean Squared Error (RMSE)
Args:
y (np.ndarray): known values
yhat (np.ndarray): predicted values
Returns:
float: Root Mean Squared Error
"""
return ((y - yhat) ** 2).mean() ** 0.5
try:
import numba as nb
except:
pass
else:
# JIT functions if we can
rmse = nb.jit(rmse)
|
<commit_before><commit_msg>Add faster RMSE via numba<commit_after>
|
""" Regression related functions optimized using Numba, if available
If Numba is not available, functions are still usable as not JIT'd functions
"""
# @nb.jit()
def rmse(y, yhat):
""" Calculate and return Root Mean Squared Error (RMSE)
Args:
y (np.ndarray): known values
yhat (np.ndarray): predicted values
Returns:
float: Root Mean Squared Error
"""
return ((y - yhat) ** 2).mean() ** 0.5
try:
import numba as nb
except:
pass
else:
# JIT functions if we can
rmse = nb.jit(rmse)
|
Add faster RMSE via numba""" Regression related functions optimized using Numba, if available
If Numba is not available, functions are still usable as not JIT'd functions
"""
# @nb.jit()
def rmse(y, yhat):
""" Calculate and return Root Mean Squared Error (RMSE)
Args:
y (np.ndarray): known values
yhat (np.ndarray): predicted values
Returns:
float: Root Mean Squared Error
"""
return ((y - yhat) ** 2).mean() ** 0.5
try:
import numba as nb
except:
pass
else:
# JIT functions if we can
rmse = nb.jit(rmse)
|
<commit_before><commit_msg>Add faster RMSE via numba<commit_after>""" Regression related functions optimized using Numba, if available
If Numba is not available, functions are still usable as not JIT'd functions
"""
# @nb.jit()
def rmse(y, yhat):
""" Calculate and return Root Mean Squared Error (RMSE)
Args:
y (np.ndarray): known values
yhat (np.ndarray): predicted values
Returns:
float: Root Mean Squared Error
"""
return ((y - yhat) ** 2).mean() ** 0.5
try:
import numba as nb
except:
pass
else:
# JIT functions if we can
rmse = nb.jit(rmse)
|
|
6cf9f9c83861e7e32f9ce90f2c35b5b73aa7fb10
|
manila/db/sqlalchemy/migrate_repo/versions/010_change_project_user_quotas_deleted_column_type.py
|
manila/db/sqlalchemy/migrate_repo/versions/010_change_project_user_quotas_deleted_column_type.py
|
# Copyright 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Boolean, Column, DateTime
from sqlalchemy import MetaData, String, Table
from manila.openstack.common.gettextutils import _
from manila.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
if migrate_engine.name != "sqlite":
project_user_quotas = Table('project_user_quotas', meta, autoload=True)
new_deleted = Column('new_deleted', Boolean, default=False)
new_deleted.create(project_user_quotas, populate_default=True)
project_user_quotas.update().\
where(project_user_quotas.c.deleted == 1).\
values(new_deleted=True).\
execute()
project_user_quotas.c.deleted.drop()
project_user_quotas.c.new_deleted.alter(name="deleted")
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
|
Change 'deleted' to Boolean in project_user_quotas
|
Change 'deleted' to Boolean in project_user_quotas
Change-Id: I7604ba4b938d5ff73bcf3b1f9152cc6aa4f1c0c8
Closes-Bug: #1274165
|
Python
|
apache-2.0
|
sajuptpm/manila,NetApp/manila,jcsp/manila,scality/manila,jcsp/manila,openstack/manila,NetApp/manila,weiting-chen/manila,weiting-chen/manila,vponomaryov/manila,vponomaryov/manila,bswartz/manila,bswartz/manila,redhat-openstack/manila,openstack/manila,sajuptpm/manila,redhat-openstack/manila,scality/manila
|
Change 'deleted' to Boolean in project_user_quotas
Change-Id: I7604ba4b938d5ff73bcf3b1f9152cc6aa4f1c0c8
Closes-Bug: #1274165
|
# Copyright 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Boolean, Column, DateTime
from sqlalchemy import MetaData, String, Table
from manila.openstack.common.gettextutils import _
from manila.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
if migrate_engine.name != "sqlite":
project_user_quotas = Table('project_user_quotas', meta, autoload=True)
new_deleted = Column('new_deleted', Boolean, default=False)
new_deleted.create(project_user_quotas, populate_default=True)
project_user_quotas.update().\
where(project_user_quotas.c.deleted == 1).\
values(new_deleted=True).\
execute()
project_user_quotas.c.deleted.drop()
project_user_quotas.c.new_deleted.alter(name="deleted")
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
|
<commit_before><commit_msg>Change 'deleted' to Boolean in project_user_quotas
Change-Id: I7604ba4b938d5ff73bcf3b1f9152cc6aa4f1c0c8
Closes-Bug: #1274165<commit_after>
|
# Copyright 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Boolean, Column, DateTime
from sqlalchemy import MetaData, String, Table
from manila.openstack.common.gettextutils import _
from manila.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
if migrate_engine.name != "sqlite":
project_user_quotas = Table('project_user_quotas', meta, autoload=True)
new_deleted = Column('new_deleted', Boolean, default=False)
new_deleted.create(project_user_quotas, populate_default=True)
project_user_quotas.update().\
where(project_user_quotas.c.deleted == 1).\
values(new_deleted=True).\
execute()
project_user_quotas.c.deleted.drop()
project_user_quotas.c.new_deleted.alter(name="deleted")
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
|
Change 'deleted' to Boolean in project_user_quotas
Change-Id: I7604ba4b938d5ff73bcf3b1f9152cc6aa4f1c0c8
Closes-Bug: #1274165# Copyright 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Boolean, Column, DateTime
from sqlalchemy import MetaData, String, Table
from manila.openstack.common.gettextutils import _
from manila.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
if migrate_engine.name != "sqlite":
project_user_quotas = Table('project_user_quotas', meta, autoload=True)
new_deleted = Column('new_deleted', Boolean, default=False)
new_deleted.create(project_user_quotas, populate_default=True)
project_user_quotas.update().\
where(project_user_quotas.c.deleted == 1).\
values(new_deleted=True).\
execute()
project_user_quotas.c.deleted.drop()
project_user_quotas.c.new_deleted.alter(name="deleted")
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
|
<commit_before><commit_msg>Change 'deleted' to Boolean in project_user_quotas
Change-Id: I7604ba4b938d5ff73bcf3b1f9152cc6aa4f1c0c8
Closes-Bug: #1274165<commit_after># Copyright 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Boolean, Column, DateTime
from sqlalchemy import MetaData, String, Table
from manila.openstack.common.gettextutils import _
from manila.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
if migrate_engine.name != "sqlite":
project_user_quotas = Table('project_user_quotas', meta, autoload=True)
new_deleted = Column('new_deleted', Boolean, default=False)
new_deleted.create(project_user_quotas, populate_default=True)
project_user_quotas.update().\
where(project_user_quotas.c.deleted == 1).\
values(new_deleted=True).\
execute()
project_user_quotas.c.deleted.drop()
project_user_quotas.c.new_deleted.alter(name="deleted")
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
|
|
99dd76aadea730c2b33fcaa435a0e3b6a0f67567
|
IntegrationTestPreprocessingdMRI.py
|
IntegrationTestPreprocessingdMRI.py
|
# -*- coding: utf-8 -*-
import unittest
from os.path import expanduser, join, isdir
import numpy as np
from dipy.data.fetcher import fetch_scil_b0, read_siemens_scil_b0
from createBrainMask import createBrainMaskFromb0Data
class IntegrationTestPreprocessingdMRI(unittest.TestCase):
dataDirectory = None
def setUp(self):
homeDirectory = expanduser('~')
self.dataDirectory = join(homeDirectory, '.dipy',
'datasets_multi-site_all_companies')
if not isdir(self.dataDirectory):
fetch_scil_b0()
def tearDown(self):
pass
def test_brainMaskVolume(self):
b0Img = read_siemens_scil_b0()
b0Data = np.squeeze(b0Img.get_data())
voxelSizes = b0Img.header.get_zooms()[:3]
voxelVolumeInCubicCentimeters = 1e-3*np.prod(voxelSizes)
brainMask = createBrainMaskFromb0Data(b0Data)
brainVolume = voxelVolumeInCubicCentimeters*np.sum(brainMask)
self.assertTrue(brainVolume > 500 and brainVolume < 1500)
def main():
unittest.main()
if __name__ == '__main__':
main()
|
Test that brain mask has plausible volume
|
Test that brain mask has plausible volume
|
Python
|
bsd-3-clause
|
jsjol/GaussianProcessRegressionForDiffusionMRI,jsjol/GaussianProcessRegressionForDiffusionMRI
|
Test that brain mask has plausible volume
|
# -*- coding: utf-8 -*-
import unittest
from os.path import expanduser, join, isdir
import numpy as np
from dipy.data.fetcher import fetch_scil_b0, read_siemens_scil_b0
from createBrainMask import createBrainMaskFromb0Data
class IntegrationTestPreprocessingdMRI(unittest.TestCase):
dataDirectory = None
def setUp(self):
homeDirectory = expanduser('~')
self.dataDirectory = join(homeDirectory, '.dipy',
'datasets_multi-site_all_companies')
if not isdir(self.dataDirectory):
fetch_scil_b0()
def tearDown(self):
pass
def test_brainMaskVolume(self):
b0Img = read_siemens_scil_b0()
b0Data = np.squeeze(b0Img.get_data())
voxelSizes = b0Img.header.get_zooms()[:3]
voxelVolumeInCubicCentimeters = 1e-3*np.prod(voxelSizes)
brainMask = createBrainMaskFromb0Data(b0Data)
brainVolume = voxelVolumeInCubicCentimeters*np.sum(brainMask)
self.assertTrue(brainVolume > 500 and brainVolume < 1500)
def main():
unittest.main()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Test that brain mask has plausible volume<commit_after>
|
# -*- coding: utf-8 -*-
import unittest
from os.path import expanduser, join, isdir
import numpy as np
from dipy.data.fetcher import fetch_scil_b0, read_siemens_scil_b0
from createBrainMask import createBrainMaskFromb0Data
class IntegrationTestPreprocessingdMRI(unittest.TestCase):
dataDirectory = None
def setUp(self):
homeDirectory = expanduser('~')
self.dataDirectory = join(homeDirectory, '.dipy',
'datasets_multi-site_all_companies')
if not isdir(self.dataDirectory):
fetch_scil_b0()
def tearDown(self):
pass
def test_brainMaskVolume(self):
b0Img = read_siemens_scil_b0()
b0Data = np.squeeze(b0Img.get_data())
voxelSizes = b0Img.header.get_zooms()[:3]
voxelVolumeInCubicCentimeters = 1e-3*np.prod(voxelSizes)
brainMask = createBrainMaskFromb0Data(b0Data)
brainVolume = voxelVolumeInCubicCentimeters*np.sum(brainMask)
self.assertTrue(brainVolume > 500 and brainVolume < 1500)
def main():
unittest.main()
if __name__ == '__main__':
main()
|
Test that brain mask has plausible volume# -*- coding: utf-8 -*-
import unittest
from os.path import expanduser, join, isdir
import numpy as np
from dipy.data.fetcher import fetch_scil_b0, read_siemens_scil_b0
from createBrainMask import createBrainMaskFromb0Data
class IntegrationTestPreprocessingdMRI(unittest.TestCase):
dataDirectory = None
def setUp(self):
homeDirectory = expanduser('~')
self.dataDirectory = join(homeDirectory, '.dipy',
'datasets_multi-site_all_companies')
if not isdir(self.dataDirectory):
fetch_scil_b0()
def tearDown(self):
pass
def test_brainMaskVolume(self):
b0Img = read_siemens_scil_b0()
b0Data = np.squeeze(b0Img.get_data())
voxelSizes = b0Img.header.get_zooms()[:3]
voxelVolumeInCubicCentimeters = 1e-3*np.prod(voxelSizes)
brainMask = createBrainMaskFromb0Data(b0Data)
brainVolume = voxelVolumeInCubicCentimeters*np.sum(brainMask)
self.assertTrue(brainVolume > 500 and brainVolume < 1500)
def main():
unittest.main()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Test that brain mask has plausible volume<commit_after># -*- coding: utf-8 -*-
import unittest
from os.path import expanduser, join, isdir
import numpy as np
from dipy.data.fetcher import fetch_scil_b0, read_siemens_scil_b0
from createBrainMask import createBrainMaskFromb0Data
class IntegrationTestPreprocessingdMRI(unittest.TestCase):
dataDirectory = None
def setUp(self):
homeDirectory = expanduser('~')
self.dataDirectory = join(homeDirectory, '.dipy',
'datasets_multi-site_all_companies')
if not isdir(self.dataDirectory):
fetch_scil_b0()
def tearDown(self):
pass
def test_brainMaskVolume(self):
b0Img = read_siemens_scil_b0()
b0Data = np.squeeze(b0Img.get_data())
voxelSizes = b0Img.header.get_zooms()[:3]
voxelVolumeInCubicCentimeters = 1e-3*np.prod(voxelSizes)
brainMask = createBrainMaskFromb0Data(b0Data)
brainVolume = voxelVolumeInCubicCentimeters*np.sum(brainMask)
self.assertTrue(brainVolume > 500 and brainVolume < 1500)
def main():
unittest.main()
if __name__ == '__main__':
main()
|
|
e5a269ab0eaea3052c76764c844233d06a10b422
|
IPython/core/tests/test_callbacks.py
|
IPython/core/tests/test_callbacks.py
|
import unittest
try: # Python 3.3 +
from unittest.mock import Mock
except ImportError:
from mock import Mock
from IPython.core import callbacks
import IPython.testing.tools as tt
def ping_received():
pass
class CallbackTests(unittest.TestCase):
def setUp(self):
self.cbm = callbacks.CallbackManager(get_ipython(), {'ping_received': ping_received})
def test_register_unregister(self):
cb = Mock()
self.cbm.register('ping_received', cb)
self.cbm.fire('ping_received')
self.assertEqual(cb.call_count, 1)
self.cbm.unregister('ping_received', cb)
self.cbm.fire('ping_received')
self.assertEqual(cb.call_count, 1)
def test_reset(self):
cb = Mock()
self.cbm.register('ping_received', cb)
self.cbm.reset('ping_received')
self.cbm.fire('ping_received')
assert not cb.called
def test_reset_all(self):
cb = Mock()
self.cbm.register('ping_received', cb)
self.cbm.reset_all()
self.cbm.fire('ping_received')
assert not cb.called
def test_cb_error(self):
cb = Mock(side_effect=ValueError)
self.cbm.register('ping_received', cb)
with tt.AssertPrints("Error in callback"):
self.cbm.fire('ping_received')
|
Add tests for callback infrastructure
|
Add tests for callback infrastructure
|
Python
|
bsd-3-clause
|
ipython/ipython,ipython/ipython
|
Add tests for callback infrastructure
|
import unittest
try: # Python 3.3 +
from unittest.mock import Mock
except ImportError:
from mock import Mock
from IPython.core import callbacks
import IPython.testing.tools as tt
def ping_received():
pass
class CallbackTests(unittest.TestCase):
def setUp(self):
self.cbm = callbacks.CallbackManager(get_ipython(), {'ping_received': ping_received})
def test_register_unregister(self):
cb = Mock()
self.cbm.register('ping_received', cb)
self.cbm.fire('ping_received')
self.assertEqual(cb.call_count, 1)
self.cbm.unregister('ping_received', cb)
self.cbm.fire('ping_received')
self.assertEqual(cb.call_count, 1)
def test_reset(self):
cb = Mock()
self.cbm.register('ping_received', cb)
self.cbm.reset('ping_received')
self.cbm.fire('ping_received')
assert not cb.called
def test_reset_all(self):
cb = Mock()
self.cbm.register('ping_received', cb)
self.cbm.reset_all()
self.cbm.fire('ping_received')
assert not cb.called
def test_cb_error(self):
cb = Mock(side_effect=ValueError)
self.cbm.register('ping_received', cb)
with tt.AssertPrints("Error in callback"):
self.cbm.fire('ping_received')
|
<commit_before><commit_msg>Add tests for callback infrastructure<commit_after>
|
import unittest
try: # Python 3.3 +
from unittest.mock import Mock
except ImportError:
from mock import Mock
from IPython.core import callbacks
import IPython.testing.tools as tt
def ping_received():
pass
class CallbackTests(unittest.TestCase):
def setUp(self):
self.cbm = callbacks.CallbackManager(get_ipython(), {'ping_received': ping_received})
def test_register_unregister(self):
cb = Mock()
self.cbm.register('ping_received', cb)
self.cbm.fire('ping_received')
self.assertEqual(cb.call_count, 1)
self.cbm.unregister('ping_received', cb)
self.cbm.fire('ping_received')
self.assertEqual(cb.call_count, 1)
def test_reset(self):
cb = Mock()
self.cbm.register('ping_received', cb)
self.cbm.reset('ping_received')
self.cbm.fire('ping_received')
assert not cb.called
def test_reset_all(self):
cb = Mock()
self.cbm.register('ping_received', cb)
self.cbm.reset_all()
self.cbm.fire('ping_received')
assert not cb.called
def test_cb_error(self):
cb = Mock(side_effect=ValueError)
self.cbm.register('ping_received', cb)
with tt.AssertPrints("Error in callback"):
self.cbm.fire('ping_received')
|
Add tests for callback infrastructureimport unittest
try: # Python 3.3 +
from unittest.mock import Mock
except ImportError:
from mock import Mock
from IPython.core import callbacks
import IPython.testing.tools as tt
def ping_received():
pass
class CallbackTests(unittest.TestCase):
def setUp(self):
self.cbm = callbacks.CallbackManager(get_ipython(), {'ping_received': ping_received})
def test_register_unregister(self):
cb = Mock()
self.cbm.register('ping_received', cb)
self.cbm.fire('ping_received')
self.assertEqual(cb.call_count, 1)
self.cbm.unregister('ping_received', cb)
self.cbm.fire('ping_received')
self.assertEqual(cb.call_count, 1)
def test_reset(self):
cb = Mock()
self.cbm.register('ping_received', cb)
self.cbm.reset('ping_received')
self.cbm.fire('ping_received')
assert not cb.called
def test_reset_all(self):
cb = Mock()
self.cbm.register('ping_received', cb)
self.cbm.reset_all()
self.cbm.fire('ping_received')
assert not cb.called
def test_cb_error(self):
cb = Mock(side_effect=ValueError)
self.cbm.register('ping_received', cb)
with tt.AssertPrints("Error in callback"):
self.cbm.fire('ping_received')
|
<commit_before><commit_msg>Add tests for callback infrastructure<commit_after>import unittest
try: # Python 3.3 +
from unittest.mock import Mock
except ImportError:
from mock import Mock
from IPython.core import callbacks
import IPython.testing.tools as tt
def ping_received():
pass
class CallbackTests(unittest.TestCase):
def setUp(self):
self.cbm = callbacks.CallbackManager(get_ipython(), {'ping_received': ping_received})
def test_register_unregister(self):
cb = Mock()
self.cbm.register('ping_received', cb)
self.cbm.fire('ping_received')
self.assertEqual(cb.call_count, 1)
self.cbm.unregister('ping_received', cb)
self.cbm.fire('ping_received')
self.assertEqual(cb.call_count, 1)
def test_reset(self):
cb = Mock()
self.cbm.register('ping_received', cb)
self.cbm.reset('ping_received')
self.cbm.fire('ping_received')
assert not cb.called
def test_reset_all(self):
cb = Mock()
self.cbm.register('ping_received', cb)
self.cbm.reset_all()
self.cbm.fire('ping_received')
assert not cb.called
def test_cb_error(self):
cb = Mock(side_effect=ValueError)
self.cbm.register('ping_received', cb)
with tt.AssertPrints("Error in callback"):
self.cbm.fire('ping_received')
|
|
f5849aeb1c811c8f244b2ec1db3750bc8beea441
|
bin/isbn_format.py
|
bin/isbn_format.py
|
#!/usr/bin/env python
import sys
import os
import yaml
import isbnlib
metafile = sys.argv[1]
metadata = open(metafile, 'r').read()
yamldata = yaml.load(metadata)
identifier = {}
for id in yamldata["identifier"]:
if "key" in id:
isbnlike = isbnlib.get_isbnlike(id["text"])[0]
if isbnlib.is_isbn13(isbnlike):
identifier[id["key"]] = isbnlib.EAN13(isbnlike)
isbn = identifier[sys.argv[2]]
if len(sys.argv) >= 4 and sys.argv[3] == "mask":
print(isbnlib.mask(isbn))
else:
print(isbn)
|
Add python script for parsing metadata and extracting validated ISBNs
|
Add python script for parsing metadata and extracting validated ISBNs
|
Python
|
agpl-3.0
|
alerque/casile,alerque/casile,alerque/casile,alerque/casile,alerque/casile
|
Add python script for parsing metadata and extracting validated ISBNs
|
#!/usr/bin/env python
import sys
import os
import yaml
import isbnlib
metafile = sys.argv[1]
metadata = open(metafile, 'r').read()
yamldata = yaml.load(metadata)
identifier = {}
for id in yamldata["identifier"]:
if "key" in id:
isbnlike = isbnlib.get_isbnlike(id["text"])[0]
if isbnlib.is_isbn13(isbnlike):
identifier[id["key"]] = isbnlib.EAN13(isbnlike)
isbn = identifier[sys.argv[2]]
if len(sys.argv) >= 4 and sys.argv[3] == "mask":
print(isbnlib.mask(isbn))
else:
print(isbn)
|
<commit_before><commit_msg>Add python script for parsing metadata and extracting validated ISBNs<commit_after>
|
#!/usr/bin/env python
import sys
import os
import yaml
import isbnlib
metafile = sys.argv[1]
metadata = open(metafile, 'r').read()
yamldata = yaml.load(metadata)
identifier = {}
for id in yamldata["identifier"]:
if "key" in id:
isbnlike = isbnlib.get_isbnlike(id["text"])[0]
if isbnlib.is_isbn13(isbnlike):
identifier[id["key"]] = isbnlib.EAN13(isbnlike)
isbn = identifier[sys.argv[2]]
if len(sys.argv) >= 4 and sys.argv[3] == "mask":
print(isbnlib.mask(isbn))
else:
print(isbn)
|
Add python script for parsing metadata and extracting validated ISBNs#!/usr/bin/env python
import sys
import os
import yaml
import isbnlib
metafile = sys.argv[1]
metadata = open(metafile, 'r').read()
yamldata = yaml.load(metadata)
identifier = {}
for id in yamldata["identifier"]:
if "key" in id:
isbnlike = isbnlib.get_isbnlike(id["text"])[0]
if isbnlib.is_isbn13(isbnlike):
identifier[id["key"]] = isbnlib.EAN13(isbnlike)
isbn = identifier[sys.argv[2]]
if len(sys.argv) >= 4 and sys.argv[3] == "mask":
print(isbnlib.mask(isbn))
else:
print(isbn)
|
<commit_before><commit_msg>Add python script for parsing metadata and extracting validated ISBNs<commit_after>#!/usr/bin/env python
import sys
import os
import yaml
import isbnlib
metafile = sys.argv[1]
metadata = open(metafile, 'r').read()
yamldata = yaml.load(metadata)
identifier = {}
for id in yamldata["identifier"]:
if "key" in id:
isbnlike = isbnlib.get_isbnlike(id["text"])[0]
if isbnlib.is_isbn13(isbnlike):
identifier[id["key"]] = isbnlib.EAN13(isbnlike)
isbn = identifier[sys.argv[2]]
if len(sys.argv) >= 4 and sys.argv[3] == "mask":
print(isbnlib.mask(isbn))
else:
print(isbn)
|
|
6e425814bf24eb2f7e0d2ba6d3cce109426904f8
|
analytics/tests/test_fixtures.py
|
analytics/tests/test_fixtures.py
|
from zerver.lib.test_classes import ZulipTestCase
from analytics.lib.counts import CountStat
from analytics.lib.fixtures import generate_time_series_data
# A very light test suite; the code being tested is not run in production.
class TestFixtures(ZulipTestCase):
def test_deterministic_settings(self):
# type: () -> None
# test basic business_hour / non_business_hour calculation
# test we get an array of the right length with frequency=CountStat.DAY
data = generate_time_series_data(
days=7, business_hours_base=20, non_business_hours_base=15, spikiness=0)
self.assertEqual(data, [400, 400, 400, 400, 400, 360, 360])
data = generate_time_series_data(
days=1, business_hours_base=2000, non_business_hours_base=1500,
growth=2, spikiness=0, frequency=CountStat.HOUR)
# test we get an array of the right length with frequency=CountStat.HOUR
self.assertEqual(len(data), 24)
# test that growth doesn't affect the first data point
self.assertEqual(data[0], 2000)
# test that the last data point is growth times what it otherwise would be
self.assertEqual(data[-1], 1500*2)
# test autocorrelation == 1, since that's the easiest value to test
data = generate_time_series_data(
days=1, business_hours_base=2000, non_business_hours_base=2000,
autocorrelation=1, frequency=CountStat.HOUR)
self.assertEqual(data[0], data[1])
self.assertEqual(data[0], data[-1])
|
Add a few tests for fixtures.py.
|
analytics: Add a few tests for fixtures.py.
The code in fixtures.py is only called from populate_analytics_db, and is
only used for generating pretty fixture data for manual testing. This commit
adds tests for a few things that were easy to add tests for, and provides
some minimal coverage of the file, but is not meant to be comprehensive.
|
Python
|
apache-2.0
|
rishig/zulip,showell/zulip,ryanbackman/zulip,mahim97/zulip,mahim97/zulip,timabbott/zulip,amanharitsh123/zulip,rht/zulip,shubhamdhama/zulip,jackrzhang/zulip,vabs22/zulip,brockwhittaker/zulip,zulip/zulip,punchagan/zulip,tommyip/zulip,vaidap/zulip,brainwane/zulip,timabbott/zulip,punchagan/zulip,andersk/zulip,eeshangarg/zulip,jphilipsen05/zulip,andersk/zulip,shubhamdhama/zulip,timabbott/zulip,brainwane/zulip,mahim97/zulip,SmartPeople/zulip,amanharitsh123/zulip,jphilipsen05/zulip,dhcrzf/zulip,ryanbackman/zulip,christi3k/zulip,kou/zulip,vaidap/zulip,j831/zulip,brockwhittaker/zulip,shubhamdhama/zulip,showell/zulip,synicalsyntax/zulip,brainwane/zulip,tommyip/zulip,mahim97/zulip,mahim97/zulip,punchagan/zulip,rht/zulip,jackrzhang/zulip,tommyip/zulip,tommyip/zulip,timabbott/zulip,eeshangarg/zulip,kou/zulip,synicalsyntax/zulip,kou/zulip,j831/zulip,SmartPeople/zulip,brockwhittaker/zulip,ryanbackman/zulip,rht/zulip,jrowan/zulip,rishig/zulip,Galexrt/zulip,christi3k/zulip,Galexrt/zulip,j831/zulip,SmartPeople/zulip,jackrzhang/zulip,showell/zulip,Galexrt/zulip,vaidap/zulip,brockwhittaker/zulip,dhcrzf/zulip,verma-varsha/zulip,synicalsyntax/zulip,Galexrt/zulip,synicalsyntax/zulip,hackerkid/zulip,amanharitsh123/zulip,andersk/zulip,rishig/zulip,zulip/zulip,christi3k/zulip,shubhamdhama/zulip,ryanbackman/zulip,rishig/zulip,jrowan/zulip,j831/zulip,timabbott/zulip,eeshangarg/zulip,christi3k/zulip,verma-varsha/zulip,kou/zulip,rht/zulip,vaidap/zulip,hackerkid/zulip,christi3k/zulip,punchagan/zulip,Galexrt/zulip,rishig/zulip,eeshangarg/zulip,jrowan/zulip,jphilipsen05/zulip,rishig/zulip,SmartPeople/zulip,zulip/zulip,showell/zulip,vaidap/zulip,showell/zulip,vabs22/zulip,punchagan/zulip,rishig/zulip,verma-varsha/zulip,timabbott/zulip,kou/zulip,vabs22/zulip,verma-varsha/zulip,verma-varsha/zulip,vabs22/zulip,dhcrzf/zulip,eeshangarg/zulip,SmartPeople/zulip,j831/zulip,timabbott/zulip,ryanbackman/zulip,hackerkid/zulip,hackerkid/zulip,verma-varsha/zulip,rht/zulip,brockwhittaker/zulip,tommyip/zulip,jackrzhang/zulip,kou/zulip,SmartPeople/zulip,synicalsyntax/zulip,zulip/zulip,jrowan/zulip,andersk/zulip,hackerkid/zulip,jackrzhang/zulip,rht/zulip,andersk/zulip,dhcrzf/zulip,eeshangarg/zulip,punchagan/zulip,vaidap/zulip,amanharitsh123/zulip,brainwane/zulip,mahim97/zulip,dhcrzf/zulip,amanharitsh123/zulip,synicalsyntax/zulip,amanharitsh123/zulip,jphilipsen05/zulip,j831/zulip,brockwhittaker/zulip,hackerkid/zulip,hackerkid/zulip,brainwane/zulip,kou/zulip,dhcrzf/zulip,christi3k/zulip,zulip/zulip,shubhamdhama/zulip,shubhamdhama/zulip,tommyip/zulip,andersk/zulip,shubhamdhama/zulip,andersk/zulip,Galexrt/zulip,ryanbackman/zulip,brainwane/zulip,punchagan/zulip,vabs22/zulip,zulip/zulip,showell/zulip,synicalsyntax/zulip,dhcrzf/zulip,tommyip/zulip,rht/zulip,zulip/zulip,brainwane/zulip,jackrzhang/zulip,showell/zulip,jrowan/zulip,Galexrt/zulip,jrowan/zulip,jphilipsen05/zulip,jackrzhang/zulip,eeshangarg/zulip,jphilipsen05/zulip,vabs22/zulip
|
analytics: Add a few tests for fixtures.py.
The code in fixtures.py is only called from populate_analytics_db, and is
only used for generating pretty fixture data for manual testing. This commit
adds tests for a few things that were easy to add tests for, and provides
some minimal coverage of the file, but is not meant to be comprehensive.
|
from zerver.lib.test_classes import ZulipTestCase
from analytics.lib.counts import CountStat
from analytics.lib.fixtures import generate_time_series_data
# A very light test suite; the code being tested is not run in production.
class TestFixtures(ZulipTestCase):
def test_deterministic_settings(self):
# type: () -> None
# test basic business_hour / non_business_hour calculation
# test we get an array of the right length with frequency=CountStat.DAY
data = generate_time_series_data(
days=7, business_hours_base=20, non_business_hours_base=15, spikiness=0)
self.assertEqual(data, [400, 400, 400, 400, 400, 360, 360])
data = generate_time_series_data(
days=1, business_hours_base=2000, non_business_hours_base=1500,
growth=2, spikiness=0, frequency=CountStat.HOUR)
# test we get an array of the right length with frequency=CountStat.HOUR
self.assertEqual(len(data), 24)
# test that growth doesn't affect the first data point
self.assertEqual(data[0], 2000)
# test that the last data point is growth times what it otherwise would be
self.assertEqual(data[-1], 1500*2)
# test autocorrelation == 1, since that's the easiest value to test
data = generate_time_series_data(
days=1, business_hours_base=2000, non_business_hours_base=2000,
autocorrelation=1, frequency=CountStat.HOUR)
self.assertEqual(data[0], data[1])
self.assertEqual(data[0], data[-1])
|
<commit_before><commit_msg>analytics: Add a few tests for fixtures.py.
The code in fixtures.py is only called from populate_analytics_db, and is
only used for generating pretty fixture data for manual testing. This commit
adds tests for a few things that were easy to add tests for, and provides
some minimal coverage of the file, but is not meant to be comprehensive.<commit_after>
|
from zerver.lib.test_classes import ZulipTestCase
from analytics.lib.counts import CountStat
from analytics.lib.fixtures import generate_time_series_data
# A very light test suite; the code being tested is not run in production.
class TestFixtures(ZulipTestCase):
def test_deterministic_settings(self):
# type: () -> None
# test basic business_hour / non_business_hour calculation
# test we get an array of the right length with frequency=CountStat.DAY
data = generate_time_series_data(
days=7, business_hours_base=20, non_business_hours_base=15, spikiness=0)
self.assertEqual(data, [400, 400, 400, 400, 400, 360, 360])
data = generate_time_series_data(
days=1, business_hours_base=2000, non_business_hours_base=1500,
growth=2, spikiness=0, frequency=CountStat.HOUR)
# test we get an array of the right length with frequency=CountStat.HOUR
self.assertEqual(len(data), 24)
# test that growth doesn't affect the first data point
self.assertEqual(data[0], 2000)
# test that the last data point is growth times what it otherwise would be
self.assertEqual(data[-1], 1500*2)
# test autocorrelation == 1, since that's the easiest value to test
data = generate_time_series_data(
days=1, business_hours_base=2000, non_business_hours_base=2000,
autocorrelation=1, frequency=CountStat.HOUR)
self.assertEqual(data[0], data[1])
self.assertEqual(data[0], data[-1])
|
analytics: Add a few tests for fixtures.py.
The code in fixtures.py is only called from populate_analytics_db, and is
only used for generating pretty fixture data for manual testing. This commit
adds tests for a few things that were easy to add tests for, and provides
some minimal coverage of the file, but is not meant to be comprehensive.from zerver.lib.test_classes import ZulipTestCase
from analytics.lib.counts import CountStat
from analytics.lib.fixtures import generate_time_series_data
# A very light test suite; the code being tested is not run in production.
class TestFixtures(ZulipTestCase):
def test_deterministic_settings(self):
# type: () -> None
# test basic business_hour / non_business_hour calculation
# test we get an array of the right length with frequency=CountStat.DAY
data = generate_time_series_data(
days=7, business_hours_base=20, non_business_hours_base=15, spikiness=0)
self.assertEqual(data, [400, 400, 400, 400, 400, 360, 360])
data = generate_time_series_data(
days=1, business_hours_base=2000, non_business_hours_base=1500,
growth=2, spikiness=0, frequency=CountStat.HOUR)
# test we get an array of the right length with frequency=CountStat.HOUR
self.assertEqual(len(data), 24)
# test that growth doesn't affect the first data point
self.assertEqual(data[0], 2000)
# test that the last data point is growth times what it otherwise would be
self.assertEqual(data[-1], 1500*2)
# test autocorrelation == 1, since that's the easiest value to test
data = generate_time_series_data(
days=1, business_hours_base=2000, non_business_hours_base=2000,
autocorrelation=1, frequency=CountStat.HOUR)
self.assertEqual(data[0], data[1])
self.assertEqual(data[0], data[-1])
|
<commit_before><commit_msg>analytics: Add a few tests for fixtures.py.
The code in fixtures.py is only called from populate_analytics_db, and is
only used for generating pretty fixture data for manual testing. This commit
adds tests for a few things that were easy to add tests for, and provides
some minimal coverage of the file, but is not meant to be comprehensive.<commit_after>from zerver.lib.test_classes import ZulipTestCase
from analytics.lib.counts import CountStat
from analytics.lib.fixtures import generate_time_series_data
# A very light test suite; the code being tested is not run in production.
class TestFixtures(ZulipTestCase):
def test_deterministic_settings(self):
# type: () -> None
# test basic business_hour / non_business_hour calculation
# test we get an array of the right length with frequency=CountStat.DAY
data = generate_time_series_data(
days=7, business_hours_base=20, non_business_hours_base=15, spikiness=0)
self.assertEqual(data, [400, 400, 400, 400, 400, 360, 360])
data = generate_time_series_data(
days=1, business_hours_base=2000, non_business_hours_base=1500,
growth=2, spikiness=0, frequency=CountStat.HOUR)
# test we get an array of the right length with frequency=CountStat.HOUR
self.assertEqual(len(data), 24)
# test that growth doesn't affect the first data point
self.assertEqual(data[0], 2000)
# test that the last data point is growth times what it otherwise would be
self.assertEqual(data[-1], 1500*2)
# test autocorrelation == 1, since that's the easiest value to test
data = generate_time_series_data(
days=1, business_hours_base=2000, non_business_hours_base=2000,
autocorrelation=1, frequency=CountStat.HOUR)
self.assertEqual(data[0], data[1])
self.assertEqual(data[0], data[-1])
|
|
7a97694d14e9ed02ae38ab713267d4c722079c9c
|
kolibri/core/webpack/test/test_webpack_tags.py
|
kolibri/core/webpack/test/test_webpack_tags.py
|
from __future__ import absolute_import, print_function, unicode_literals
from django.test import TestCase
from ..hooks import WebpackBundleHook
class TestHook(WebpackBundleHook):
unique_slug = "non_default_frontend"
entry_file = "assets/src/kolibri_core_app.js"
class KolibriTagNavigationTestCase(TestCase):
def test_frontend_tag(self):
self.assertIn(
"non_default_frontend",
TestHook().render_to_html()
)
|
from __future__ import absolute_import, print_function, unicode_literals
import json
import tempfile
from django.test import TestCase
from ..hooks import WebpackBundleHook
TEST_STATS_FILE = None
class TestHook(WebpackBundleHook):
unique_slug = "non_default_frontend"
src_file = "assets/src/kolibri_core_app.js"
@property
def stats_file(self):
return TEST_STATS_FILE.name
class KolibriTagNavigationTestCase(TestCase):
def setUp(self):
global TEST_STATS_FILE
TestCase.setUp(self)
TEST_STATS_FILE = tempfile.NamedTemporaryFile(mode='w+', delete=False)
self.test_hook = TestHook()
json.dump(
{
"status": "done",
"chunks": {
"non_default_frontend": [
{
"name": "non_default_frontend-2c4fb3d6a29238b06f84.js",
"publicPath": "non_default_frontend/non_default_frontend-2c4fb3d6a29238b06f84.js",
"path": "kolibri/core/static/non_default_frontend/non_default_frontend-2c4fb3d6a29238b06f84.js"
}
]
},
"publicPath": "default_frontend/"
},
TEST_STATS_FILE
)
TEST_STATS_FILE.close()
def test_frontend_tag(self):
self.assertIn(
"non_default_frontend",
self.test_hook.render_to_html()
)
|
Create a faked hook that does not try to read a live stats file
|
Create a faked hook that does not try to read a live stats file
|
Python
|
mit
|
jonboiser/kolibri,jtamiace/kolibri,66eli77/kolibri,66eli77/kolibri,mrpau/kolibri,rtibbles/kolibri,learningequality/kolibri,lyw07/kolibri,jtamiace/kolibri,jtamiace/kolibri,mrpau/kolibri,ralphiee22/kolibri,jayoshih/kolibri,learningequality/kolibri,DXCanas/kolibri,jonboiser/kolibri,benjaoming/kolibri,christianmemije/kolibri,indirectlylit/kolibri,benjaoming/kolibri,lyw07/kolibri,aronasorman/kolibri,66eli77/kolibri,MCGallaspy/kolibri,jayoshih/kolibri,DXCanas/kolibri,jonboiser/kolibri,MCGallaspy/kolibri,aronasorman/kolibri,whitzhu/kolibri,DXCanas/kolibri,whitzhu/kolibri,benjaoming/kolibri,rtibbles/kolibri,mrpau/kolibri,lyw07/kolibri,learningequality/kolibri,jamalex/kolibri,rtibbles/kolibri,lyw07/kolibri,MingDai/kolibri,rtibbles/kolibri,jonboiser/kolibri,indirectlylit/kolibri,jamalex/kolibri,whitzhu/kolibri,MingDai/kolibri,66eli77/kolibri,MingDai/kolibri,indirectlylit/kolibri,MingDai/kolibri,jamalex/kolibri,DXCanas/kolibri,jtamiace/kolibri,jamalex/kolibri,MCGallaspy/kolibri,christianmemije/kolibri,ralphiee22/kolibri,aronasorman/kolibri,aronasorman/kolibri,whitzhu/kolibri,jayoshih/kolibri,ralphiee22/kolibri,indirectlylit/kolibri,christianmemije/kolibri,learningequality/kolibri,benjaoming/kolibri,mrpau/kolibri,christianmemije/kolibri,ralphiee22/kolibri,jayoshih/kolibri
|
from __future__ import absolute_import, print_function, unicode_literals
from django.test import TestCase
from ..hooks import WebpackBundleHook
class TestHook(WebpackBundleHook):
unique_slug = "non_default_frontend"
entry_file = "assets/src/kolibri_core_app.js"
class KolibriTagNavigationTestCase(TestCase):
def test_frontend_tag(self):
self.assertIn(
"non_default_frontend",
TestHook().render_to_html()
)
Create a faked hook that does not try to read a live stats file
|
from __future__ import absolute_import, print_function, unicode_literals
import json
import tempfile
from django.test import TestCase
from ..hooks import WebpackBundleHook
TEST_STATS_FILE = None
class TestHook(WebpackBundleHook):
unique_slug = "non_default_frontend"
src_file = "assets/src/kolibri_core_app.js"
@property
def stats_file(self):
return TEST_STATS_FILE.name
class KolibriTagNavigationTestCase(TestCase):
def setUp(self):
global TEST_STATS_FILE
TestCase.setUp(self)
TEST_STATS_FILE = tempfile.NamedTemporaryFile(mode='w+', delete=False)
self.test_hook = TestHook()
json.dump(
{
"status": "done",
"chunks": {
"non_default_frontend": [
{
"name": "non_default_frontend-2c4fb3d6a29238b06f84.js",
"publicPath": "non_default_frontend/non_default_frontend-2c4fb3d6a29238b06f84.js",
"path": "kolibri/core/static/non_default_frontend/non_default_frontend-2c4fb3d6a29238b06f84.js"
}
]
},
"publicPath": "default_frontend/"
},
TEST_STATS_FILE
)
TEST_STATS_FILE.close()
def test_frontend_tag(self):
self.assertIn(
"non_default_frontend",
self.test_hook.render_to_html()
)
|
<commit_before>from __future__ import absolute_import, print_function, unicode_literals
from django.test import TestCase
from ..hooks import WebpackBundleHook
class TestHook(WebpackBundleHook):
unique_slug = "non_default_frontend"
entry_file = "assets/src/kolibri_core_app.js"
class KolibriTagNavigationTestCase(TestCase):
def test_frontend_tag(self):
self.assertIn(
"non_default_frontend",
TestHook().render_to_html()
)
<commit_msg>Create a faked hook that does not try to read a live stats file<commit_after>
|
from __future__ import absolute_import, print_function, unicode_literals
import json
import tempfile
from django.test import TestCase
from ..hooks import WebpackBundleHook
TEST_STATS_FILE = None
class TestHook(WebpackBundleHook):
unique_slug = "non_default_frontend"
src_file = "assets/src/kolibri_core_app.js"
@property
def stats_file(self):
return TEST_STATS_FILE.name
class KolibriTagNavigationTestCase(TestCase):
def setUp(self):
global TEST_STATS_FILE
TestCase.setUp(self)
TEST_STATS_FILE = tempfile.NamedTemporaryFile(mode='w+', delete=False)
self.test_hook = TestHook()
json.dump(
{
"status": "done",
"chunks": {
"non_default_frontend": [
{
"name": "non_default_frontend-2c4fb3d6a29238b06f84.js",
"publicPath": "non_default_frontend/non_default_frontend-2c4fb3d6a29238b06f84.js",
"path": "kolibri/core/static/non_default_frontend/non_default_frontend-2c4fb3d6a29238b06f84.js"
}
]
},
"publicPath": "default_frontend/"
},
TEST_STATS_FILE
)
TEST_STATS_FILE.close()
def test_frontend_tag(self):
self.assertIn(
"non_default_frontend",
self.test_hook.render_to_html()
)
|
from __future__ import absolute_import, print_function, unicode_literals
from django.test import TestCase
from ..hooks import WebpackBundleHook
class TestHook(WebpackBundleHook):
unique_slug = "non_default_frontend"
entry_file = "assets/src/kolibri_core_app.js"
class KolibriTagNavigationTestCase(TestCase):
def test_frontend_tag(self):
self.assertIn(
"non_default_frontend",
TestHook().render_to_html()
)
Create a faked hook that does not try to read a live stats filefrom __future__ import absolute_import, print_function, unicode_literals
import json
import tempfile
from django.test import TestCase
from ..hooks import WebpackBundleHook
TEST_STATS_FILE = None
class TestHook(WebpackBundleHook):
unique_slug = "non_default_frontend"
src_file = "assets/src/kolibri_core_app.js"
@property
def stats_file(self):
return TEST_STATS_FILE.name
class KolibriTagNavigationTestCase(TestCase):
def setUp(self):
global TEST_STATS_FILE
TestCase.setUp(self)
TEST_STATS_FILE = tempfile.NamedTemporaryFile(mode='w+', delete=False)
self.test_hook = TestHook()
json.dump(
{
"status": "done",
"chunks": {
"non_default_frontend": [
{
"name": "non_default_frontend-2c4fb3d6a29238b06f84.js",
"publicPath": "non_default_frontend/non_default_frontend-2c4fb3d6a29238b06f84.js",
"path": "kolibri/core/static/non_default_frontend/non_default_frontend-2c4fb3d6a29238b06f84.js"
}
]
},
"publicPath": "default_frontend/"
},
TEST_STATS_FILE
)
TEST_STATS_FILE.close()
def test_frontend_tag(self):
self.assertIn(
"non_default_frontend",
self.test_hook.render_to_html()
)
|
<commit_before>from __future__ import absolute_import, print_function, unicode_literals
from django.test import TestCase
from ..hooks import WebpackBundleHook
class TestHook(WebpackBundleHook):
unique_slug = "non_default_frontend"
entry_file = "assets/src/kolibri_core_app.js"
class KolibriTagNavigationTestCase(TestCase):
def test_frontend_tag(self):
self.assertIn(
"non_default_frontend",
TestHook().render_to_html()
)
<commit_msg>Create a faked hook that does not try to read a live stats file<commit_after>from __future__ import absolute_import, print_function, unicode_literals
import json
import tempfile
from django.test import TestCase
from ..hooks import WebpackBundleHook
TEST_STATS_FILE = None
class TestHook(WebpackBundleHook):
unique_slug = "non_default_frontend"
src_file = "assets/src/kolibri_core_app.js"
@property
def stats_file(self):
return TEST_STATS_FILE.name
class KolibriTagNavigationTestCase(TestCase):
def setUp(self):
global TEST_STATS_FILE
TestCase.setUp(self)
TEST_STATS_FILE = tempfile.NamedTemporaryFile(mode='w+', delete=False)
self.test_hook = TestHook()
json.dump(
{
"status": "done",
"chunks": {
"non_default_frontend": [
{
"name": "non_default_frontend-2c4fb3d6a29238b06f84.js",
"publicPath": "non_default_frontend/non_default_frontend-2c4fb3d6a29238b06f84.js",
"path": "kolibri/core/static/non_default_frontend/non_default_frontend-2c4fb3d6a29238b06f84.js"
}
]
},
"publicPath": "default_frontend/"
},
TEST_STATS_FILE
)
TEST_STATS_FILE.close()
def test_frontend_tag(self):
self.assertIn(
"non_default_frontend",
self.test_hook.render_to_html()
)
|
a9595cff570efe78228d02c9dd05f7b10ccd30f7
|
Python/demos/d11_PostProcessing.py
|
Python/demos/d11_PostProcessing.py
|
#%% Demo 11: Postprocessing
#
# This demo demonstrates the available postprocessing tools in TIGRE by calling the "Measure_Quality.m" function with a detailed description.
#
#
#--------------------------------------------------------------------------
#--------------------------------------------------------------------------
# This file is part of the TIGRE Toolbox
#
# Copyright (c) 2015, University of Bath and
# CERN-European Organization for Nuclear Research
# All rights reserved.
#
# License: Open Source under BSD.
# See the full license at
# https://github.com/CERN/TIGRE/blob/master/LICENSE
#
# Contact: tigre.toolbox@gmail.com
# Codes: https://github.com/CERN/TIGRE/
# Coded by: Manasavee Lohvithee
#--------------------------------------------------------------------------
#%%Initialize
import tigre
import numpy as np
from tigre.utilities import sample_loader
from tigre.utilities import CTnoise
import tigre.algorithms as algs
from matplotlib import pyplot as plt
#%% Geometry
geo=tigre.geometry_default(high_resolution=False)
#%% Load data and generate projections
# define angles
angles=np.linspace(0,2*np.pi,100)
# Load thorax phantom data
head = sample_loader.load_head_phantom(geo.nVoxel)
# generate projections
projections=tigre.Ax(head,geo,angles)
# add noise
noise_projections=CTnoise.add(projections,Poisson=1e5,Gaussian=np.array([0, 10]))
#%% Some recon, FDK for example
imgFDK=algs.fdk(projections,geo,angles)
# TODO, these are not implemented/accessible in python TIGRE
# Issues #270 #271
|
Add structure of demo 11
|
Add structure of demo 11
|
Python
|
bsd-3-clause
|
CERN/TIGRE,CERN/TIGRE,CERN/TIGRE,CERN/TIGRE
|
Add structure of demo 11
|
#%% Demo 11: Postprocessing
#
# This demo demonstrates the available postprocessing tools in TIGRE by calling the "Measure_Quality.m" function with a detailed description.
#
#
#--------------------------------------------------------------------------
#--------------------------------------------------------------------------
# This file is part of the TIGRE Toolbox
#
# Copyright (c) 2015, University of Bath and
# CERN-European Organization for Nuclear Research
# All rights reserved.
#
# License: Open Source under BSD.
# See the full license at
# https://github.com/CERN/TIGRE/blob/master/LICENSE
#
# Contact: tigre.toolbox@gmail.com
# Codes: https://github.com/CERN/TIGRE/
# Coded by: Manasavee Lohvithee
#--------------------------------------------------------------------------
#%%Initialize
import tigre
import numpy as np
from tigre.utilities import sample_loader
from tigre.utilities import CTnoise
import tigre.algorithms as algs
from matplotlib import pyplot as plt
#%% Geometry
geo=tigre.geometry_default(high_resolution=False)
#%% Load data and generate projections
# define angles
angles=np.linspace(0,2*np.pi,100)
# Load thorax phantom data
head = sample_loader.load_head_phantom(geo.nVoxel)
# generate projections
projections=tigre.Ax(head,geo,angles)
# add noise
noise_projections=CTnoise.add(projections,Poisson=1e5,Gaussian=np.array([0, 10]))
#%% Some recon, FDK for example
imgFDK=algs.fdk(projections,geo,angles)
# TODO, these are not implemented/accessible in python TIGRE
# Issues #270 #271
|
<commit_before><commit_msg>Add structure of demo 11<commit_after>
|
#%% Demo 11: Postprocessing
#
# This demo demonstrates the available postprocessing tools in TIGRE by calling the "Measure_Quality.m" function with a detailed description.
#
#
#--------------------------------------------------------------------------
#--------------------------------------------------------------------------
# This file is part of the TIGRE Toolbox
#
# Copyright (c) 2015, University of Bath and
# CERN-European Organization for Nuclear Research
# All rights reserved.
#
# License: Open Source under BSD.
# See the full license at
# https://github.com/CERN/TIGRE/blob/master/LICENSE
#
# Contact: tigre.toolbox@gmail.com
# Codes: https://github.com/CERN/TIGRE/
# Coded by: Manasavee Lohvithee
#--------------------------------------------------------------------------
#%%Initialize
import tigre
import numpy as np
from tigre.utilities import sample_loader
from tigre.utilities import CTnoise
import tigre.algorithms as algs
from matplotlib import pyplot as plt
#%% Geometry
geo=tigre.geometry_default(high_resolution=False)
#%% Load data and generate projections
# define angles
angles=np.linspace(0,2*np.pi,100)
# Load thorax phantom data
head = sample_loader.load_head_phantom(geo.nVoxel)
# generate projections
projections=tigre.Ax(head,geo,angles)
# add noise
noise_projections=CTnoise.add(projections,Poisson=1e5,Gaussian=np.array([0, 10]))
#%% Some recon, FDK for example
imgFDK=algs.fdk(projections,geo,angles)
# TODO, these are not implemented/accessible in python TIGRE
# Issues #270 #271
|
Add structure of demo 11#%% Demo 11: Postprocessing
#
# This demo demonstrates the available postprocessing tools in TIGRE by calling the "Measure_Quality.m" function with a detailed description.
#
#
#--------------------------------------------------------------------------
#--------------------------------------------------------------------------
# This file is part of the TIGRE Toolbox
#
# Copyright (c) 2015, University of Bath and
# CERN-European Organization for Nuclear Research
# All rights reserved.
#
# License: Open Source under BSD.
# See the full license at
# https://github.com/CERN/TIGRE/blob/master/LICENSE
#
# Contact: tigre.toolbox@gmail.com
# Codes: https://github.com/CERN/TIGRE/
# Coded by: Manasavee Lohvithee
#--------------------------------------------------------------------------
#%%Initialize
import tigre
import numpy as np
from tigre.utilities import sample_loader
from tigre.utilities import CTnoise
import tigre.algorithms as algs
from matplotlib import pyplot as plt
#%% Geometry
geo=tigre.geometry_default(high_resolution=False)
#%% Load data and generate projections
# define angles
angles=np.linspace(0,2*np.pi,100)
# Load thorax phantom data
head = sample_loader.load_head_phantom(geo.nVoxel)
# generate projections
projections=tigre.Ax(head,geo,angles)
# add noise
noise_projections=CTnoise.add(projections,Poisson=1e5,Gaussian=np.array([0, 10]))
#%% Some recon, FDK for example
imgFDK=algs.fdk(projections,geo,angles)
# TODO, these are not implemented/accessible in python TIGRE
# Issues #270 #271
|
<commit_before><commit_msg>Add structure of demo 11<commit_after>#%% Demo 11: Postprocessing
#
# This demo demonstrates the available postprocessing tools in TIGRE by calling the "Measure_Quality.m" function with a detailed description.
#
#
#--------------------------------------------------------------------------
#--------------------------------------------------------------------------
# This file is part of the TIGRE Toolbox
#
# Copyright (c) 2015, University of Bath and
# CERN-European Organization for Nuclear Research
# All rights reserved.
#
# License: Open Source under BSD.
# See the full license at
# https://github.com/CERN/TIGRE/blob/master/LICENSE
#
# Contact: tigre.toolbox@gmail.com
# Codes: https://github.com/CERN/TIGRE/
# Coded by: Manasavee Lohvithee
#--------------------------------------------------------------------------
#%%Initialize
import tigre
import numpy as np
from tigre.utilities import sample_loader
from tigre.utilities import CTnoise
import tigre.algorithms as algs
from matplotlib import pyplot as plt
#%% Geometry
geo=tigre.geometry_default(high_resolution=False)
#%% Load data and generate projections
# define angles
angles=np.linspace(0,2*np.pi,100)
# Load thorax phantom data
head = sample_loader.load_head_phantom(geo.nVoxel)
# generate projections
projections=tigre.Ax(head,geo,angles)
# add noise
noise_projections=CTnoise.add(projections,Poisson=1e5,Gaussian=np.array([0, 10]))
#%% Some recon, FDK for example
imgFDK=algs.fdk(projections,geo,angles)
# TODO, these are not implemented/accessible in python TIGRE
# Issues #270 #271
|
|
8d4e705cec2592f9e5f3fa14b8a7ca9610a1f5cc
|
services/netflix.py
|
services/netflix.py
|
import foauth.providers
class Netflix(foauth.providers.OAuth1):
# General info about the provider
provider_url = 'https://www.netflix.com/'
docs_url = 'http://developer.netflix.com/docs'
# URLs to interact with the API
request_token_url = 'http://api.netflix.com/oauth/request_token'
authorize_url = 'https://api-user.netflix.com/oauth/login'
access_token_url = 'http://api.netflix.com/oauth/access_token'
api_domains = ['api-public.netflix.com', 'api.netflix.com']
available_permissions = [
(None, 'read and manage your queue'),
]
|
Add Netflix back in ... maybe
|
Add Netflix back in ... maybe
|
Python
|
bsd-3-clause
|
foauth/foauth.org,foauth/foauth.org,foauth/oauth-proxy,foauth/foauth.org
|
Add Netflix back in ... maybe
|
import foauth.providers
class Netflix(foauth.providers.OAuth1):
# General info about the provider
provider_url = 'https://www.netflix.com/'
docs_url = 'http://developer.netflix.com/docs'
# URLs to interact with the API
request_token_url = 'http://api.netflix.com/oauth/request_token'
authorize_url = 'https://api-user.netflix.com/oauth/login'
access_token_url = 'http://api.netflix.com/oauth/access_token'
api_domains = ['api-public.netflix.com', 'api.netflix.com']
available_permissions = [
(None, 'read and manage your queue'),
]
|
<commit_before><commit_msg>Add Netflix back in ... maybe<commit_after>
|
import foauth.providers
class Netflix(foauth.providers.OAuth1):
# General info about the provider
provider_url = 'https://www.netflix.com/'
docs_url = 'http://developer.netflix.com/docs'
# URLs to interact with the API
request_token_url = 'http://api.netflix.com/oauth/request_token'
authorize_url = 'https://api-user.netflix.com/oauth/login'
access_token_url = 'http://api.netflix.com/oauth/access_token'
api_domains = ['api-public.netflix.com', 'api.netflix.com']
available_permissions = [
(None, 'read and manage your queue'),
]
|
Add Netflix back in ... maybeimport foauth.providers
class Netflix(foauth.providers.OAuth1):
# General info about the provider
provider_url = 'https://www.netflix.com/'
docs_url = 'http://developer.netflix.com/docs'
# URLs to interact with the API
request_token_url = 'http://api.netflix.com/oauth/request_token'
authorize_url = 'https://api-user.netflix.com/oauth/login'
access_token_url = 'http://api.netflix.com/oauth/access_token'
api_domains = ['api-public.netflix.com', 'api.netflix.com']
available_permissions = [
(None, 'read and manage your queue'),
]
|
<commit_before><commit_msg>Add Netflix back in ... maybe<commit_after>import foauth.providers
class Netflix(foauth.providers.OAuth1):
# General info about the provider
provider_url = 'https://www.netflix.com/'
docs_url = 'http://developer.netflix.com/docs'
# URLs to interact with the API
request_token_url = 'http://api.netflix.com/oauth/request_token'
authorize_url = 'https://api-user.netflix.com/oauth/login'
access_token_url = 'http://api.netflix.com/oauth/access_token'
api_domains = ['api-public.netflix.com', 'api.netflix.com']
available_permissions = [
(None, 'read and manage your queue'),
]
|
|
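The Netflix provider above only declares the OAuth 1.0a endpoints; the handshake itself is driven elsewhere in foauth. For reference, a stand-alone sketch of the same three-legged flow using requests-oauthlib is shown below. The consumer key and secret are placeholders, and Netflix's public API has since been retired, so this is illustrative only.

from requests_oauthlib import OAuth1Session

CONSUMER_KEY = "your-consumer-key"        # placeholder credentials
CONSUMER_SECRET = "your-consumer-secret"  # placeholder credentials

session = OAuth1Session(CONSUMER_KEY, client_secret=CONSUMER_SECRET, callback_uri="oob")

# Step 1: obtain a temporary request token.
session.fetch_request_token("http://api.netflix.com/oauth/request_token")

# Step 2: send the user to the login/authorization page and collect the verifier code.
print("Authorize at:", session.authorization_url("https://api-user.netflix.com/oauth/login"))
verifier = input("Verifier: ")

# Step 3: trade the authorized request token for an access token.
tokens = session.fetch_access_token("http://api.netflix.com/oauth/access_token", verifier=verifier)
print(tokens)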
d289f8ef30bf1657c8570252ac2361fade5c43ac
|
package/dataset.py
|
package/dataset.py
|
__author__ = 'luigolas'
from package.image_set import ImageSet
import re
# import numpy as np
# import package.image as image
# import package.evaluator as evaluator
# import itertools
# import cv2
class Dataset():
def __init__(self):
self.id_regex = "P[1-6]_[0-9]{3}"
self.probe = None
self.gallery = None
self._preprocessing = None
self._preprocessing_w_masks = True
def set_probe(self, folder):
self.probe = ImageSet(folder)
def set_gallery(self, folder):
self.gallery = ImageSet(folder)
def set_id_regex(self, regex):
self.id_regex = regex
def name(self):
"""
example:
P2_cam1_P2_cam2_Grabcut2OptimalMask_Histogram_IIP_[5, 5, 5]_6R_3D_BHATT
:return:
"""
name = "%s_%s" % (self.probe.name, self.gallery.name)
if self._preprocessing:
if self._preprocessing_w_masks:
preproc_name = "_" + self._preprocessing + "M"
else:
preproc_name = "_" + self._preprocessing + "U"
name += preproc_name
return name
def dict_name(self):
"""
example:
name = {"Probe": "P2_cam1", "Gallery": "P2_cam2", "Segment": "Grabcut", "SegIter": "2", "Mask": "OptimalMask",
"Evaluator": "Histogram", "EvColorSpace": "IIP", "EvBins": "[5, 5, 5]", "EvDims": "3D", "Regions": "6R",
"Comparator": "BHATT"}
:return:
"""
name = {"Probe": self.probe.name, "Gallery": self.gallery.name}
if self._preprocessing:
name["Preproc"] = self._preprocessing
name["Preproc_Mask"] = self._preprocessing_w_masks
return name
def same_individual(self, id1, id2):
elem_id1 = re.search(self.id_regex, id1).group(0)
elem_id2 = re.search(self.id_regex, id2).group(0)
return elem_id1 == elem_id2
def calc_masks(self, segmenter):
self.probe.calc_masks(segmenter)
self.gallery.calc_masks(segmenter)
def unload(self):
self.probe.images = None
self.probe.masks = None
self.probe.files = None
self.gallery.images = None
self.gallery.masks = None
self.gallery.files = None
del self.gallery
del self.probe
|
Add basic Dataset. No ImgSet nor Preposcessing
|
Add basic Dataset. No ImgSet nor Preposcessing
|
Python
|
mit
|
Luigolas/PyReID
|
Add basic Dataset. No ImgSet nor Preposcessing
|
__author__ = 'luigolas'
from package.image_set import ImageSet
import re
# import numpy as np
# import package.image as image
# import package.evaluator as evaluator
# import itertools
# import cv2
class Dataset():
def __init__(self):
self.id_regex = "P[1-6]_[0-9]{3}"
self.probe = None
self.gallery = None
self._preprocessing = None
self._preprocessing_w_masks = True
def set_probe(self, folder):
self.probe = ImageSet(folder)
def set_gallery(self, folder):
self.gallery = ImageSet(folder)
def set_id_regex(self, regex):
self.id_regex = regex
def name(self):
"""
example:
P2_cam1_P2_cam2_Grabcut2OptimalMask_Histogram_IIP_[5, 5, 5]_6R_3D_BHATT
:return:
"""
name = "%s_%s" % (self.probe.name, self.gallery.name)
if self._preprocessing:
if self._preprocessing_w_masks:
preproc_name = "_" + self._preprocessing + "M"
else:
preproc_name = "_" + self._preprocessing + "U"
name += preproc_name
return name
def dict_name(self):
"""
example:
name = {"Probe": "P2_cam1", "Gallery": "P2_cam2", "Segment": "Grabcut", "SegIter": "2", "Mask": "OptimalMask",
"Evaluator": "Histogram", "EvColorSpace": "IIP", "EvBins": "[5, 5, 5]", "EvDims": "3D", "Regions": "6R",
"Comparator": "BHATT"}
:return:
"""
name = {"Probe": self.probe.name, "Gallery": self.gallery.name}
if self._preprocessing:
name["Preproc"] = self._preprocessing
name["Preproc_Mask"] = self._preprocessing_w_masks
return name
def same_individual(self, id1, id2):
elem_id1 = re.search(self.id_regex, id1).group(0)
elem_id2 = re.search(self.id_regex, id2).group(0)
return elem_id1 == elem_id2
def calc_masks(self, segmenter):
self.probe.calc_masks(segmenter)
self.gallery.calc_masks(segmenter)
def unload(self):
self.probe.images = None
self.probe.masks = None
self.probe.files = None
self.gallery.images = None
self.gallery.masks = None
self.gallery.files = None
del self.gallery
del self.probe
|
<commit_before><commit_msg>Add basic Dataset. No ImgSet nor Preposcessing<commit_after>
|
__author__ = 'luigolas'
from package.image_set import ImageSet
import re
# import numpy as np
# import package.image as image
# import package.evaluator as evaluator
# import itertools
# import cv2
class Dataset():
def __init__(self):
self.id_regex = "P[1-6]_[0-9]{3}"
self.probe = None
self.gallery = None
self._preprocessing = None
self._preprocessing_w_masks = True
def set_probe(self, folder):
self.probe = ImageSet(folder)
def set_gallery(self, folder):
self.gallery = ImageSet(folder)
def set_id_regex(self, regex):
self.id_regex = regex
def name(self):
"""
example:
P2_cam1_P2_cam2_Grabcut2OptimalMask_Histogram_IIP_[5, 5, 5]_6R_3D_BHATT
:return:
"""
name = "%s_%s" % (self.probe.name, self.gallery.name)
if self._preprocessing:
if self._preprocessing_w_masks:
preproc_name = "_" + self._preprocessing + "M"
else:
preproc_name = "_" + self._preprocessing + "U"
name += preproc_name
return name
def dict_name(self):
"""
example:
name = {"Probe": "P2_cam1", "Gallery": "P2_cam2", "Segment": "Grabcut", "SegIter": "2", "Mask": "OptimalMask",
"Evaluator": "Histogram", "EvColorSpace": "IIP", "EvBins": "[5, 5, 5]", "EvDims": "3D", "Regions": "6R",
"Comparator": "BHATT"}
:return:
"""
name = {"Probe": self.probe.name, "Gallery": self.gallery.name}
if self._preprocessing:
name["Preproc"] = self._preprocessing
name["Preproc_Mask"] = self._preprocessing_w_masks
return name
def same_individual(self, id1, id2):
elem_id1 = re.search(self.id_regex, id1).group(0)
elem_id2 = re.search(self.id_regex, id2).group(0)
return elem_id1 == elem_id2
def calc_masks(self, segmenter):
self.probe.calc_masks(segmenter)
self.gallery.calc_masks(segmenter)
def unload(self):
self.probe.images = None
self.probe.masks = None
self.probe.files = None
self.gallery.images = None
self.gallery.masks = None
self.gallery.files = None
del self.gallery
del self.probe
|
Add basic Dataset. No ImgSet nor Preposcessing__author__ = 'luigolas'
from package.image_set import ImageSet
import re
# import numpy as np
# import package.image as image
# import package.evaluator as evaluator
# import itertools
# import cv2
class Dataset():
def __init__(self):
self.id_regex = "P[1-6]_[0-9]{3}"
self.probe = None
self.gallery = None
self._preprocessing = None
self._preprocessing_w_masks = True
def set_probe(self, folder):
self.probe = ImageSet(folder)
def set_gallery(self, folder):
self.gallery = ImageSet(folder)
def set_id_regex(self, regex):
self.id_regex = regex
def name(self):
"""
example:
P2_cam1_P2_cam2_Grabcut2OptimalMask_Histogram_IIP_[5, 5, 5]_6R_3D_BHATT
:return:
"""
name = "%s_%s" % (self.probe.name, self.gallery.name)
if self._preprocessing:
if self._preprocessing_w_masks:
preproc_name = "_" + self._preprocessing + "M"
else:
preproc_name = "_" + self._preprocessing + "U"
name += preproc_name
return name
def dict_name(self):
"""
example:
name = {"Probe": "P2_cam1", "Gallery": "P2_cam2", "Segment": "Grabcut", "SegIter": "2", "Mask": "OptimalMask",
"Evaluator": "Histogram", "EvColorSpace": "IIP", "EvBins": "[5, 5, 5]", "EvDims": "3D", "Regions": "6R",
"Comparator": "BHATT"}
:return:
"""
name = {"Probe": self.probe.name, "Gallery": self.gallery.name}
if self._preprocessing:
name["Preproc"] = self._preprocessing
name["Preproc_Mask"] = self._preprocessing_w_masks
return name
def same_individual(self, id1, id2):
elem_id1 = re.search(self.id_regex, id1).group(0)
elem_id2 = re.search(self.id_regex, id2).group(0)
return elem_id1 == elem_id2
def calc_masks(self, segmenter):
self.probe.calc_masks(segmenter)
self.gallery.calc_masks(segmenter)
def unload(self):
self.probe.images = None
self.probe.masks = None
self.probe.files = None
self.gallery.images = None
self.gallery.masks = None
self.gallery.files = None
del self.gallery
del self.probe
|
<commit_before><commit_msg>Add basic Dataset. No ImgSet nor Preposcessing<commit_after>__author__ = 'luigolas'
from package.image_set import ImageSet
import re
# import numpy as np
# import package.image as image
# import package.evaluator as evaluator
# import itertools
# import cv2
class Dataset():
def __init__(self):
self.id_regex = "P[1-6]_[0-9]{3}"
self.probe = None
self.gallery = None
self._preprocessing = None
self._preprocessing_w_masks = True
def set_probe(self, folder):
self.probe = ImageSet(folder)
def set_gallery(self, folder):
self.gallery = ImageSet(folder)
def set_id_regex(self, regex):
self.id_regex = regex
def name(self):
"""
example:
P2_cam1_P2_cam2_Grabcut2OptimalMask_Histogram_IIP_[5, 5, 5]_6R_3D_BHATT
:return:
"""
name = "%s_%s" % (self.probe.name, self.gallery.name)
if self._preprocessing:
if self._preprocessing_w_masks:
preproc_name = "_" + self._preprocessing + "M"
else:
preproc_name = "_" + self._preprocessing + "U"
name += preproc_name
return name
def dict_name(self):
"""
example:
name = {"Probe": "P2_cam1", "Gallery": "P2_cam2", "Segment": "Grabcut", "SegIter": "2", "Mask": "OptimalMask",
"Evaluator": "Histogram", "EvColorSpace": "IIP", "EvBins": "[5, 5, 5]", "EvDims": "3D", "Regions": "6R",
"Comparator": "BHATT"}
:return:
"""
name = {"Probe": self.probe.name, "Gallery": self.gallery.name}
if self._preprocessing:
name["Preproc"] = self._preprocessing
name["Preproc_Mask"] = self._preprocessing_w_masks
return name
def same_individual(self, id1, id2):
elem_id1 = re.search(self.id_regex, id1).group(0)
elem_id2 = re.search(self.id_regex, id2).group(0)
return elem_id1 == elem_id2
def calc_masks(self, segmenter):
self.probe.calc_masks(segmenter)
self.gallery.calc_masks(segmenter)
def unload(self):
self.probe.images = None
self.probe.masks = None
self.probe.files = None
self.gallery.images = None
self.gallery.masks = None
self.gallery.files = None
del self.gallery
del self.probe
|
|
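The heart of the Dataset class above is `same_individual`, which decides whether two image identifiers belong to the same person by extracting an ID token with `id_regex`. A minimal, stand-alone sketch of that logic is shown below; the file names are made up for illustration.

import re

ID_REGEX = "P[1-6]_[0-9]{3}"  # default pattern used by Dataset

def same_individual(id1, id2, id_regex=ID_REGEX):
    # Two identifiers refer to the same person when the regex extracts the same token from both.
    return re.search(id_regex, id1).group(0) == re.search(id_regex, id2).group(0)

print(same_individual("cam1/P2_003_f01.png", "cam2/P2_003_f07.png"))  # True
print(same_individual("cam1/P2_003_f01.png", "cam2/P2_004_f01.png"))  # False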
f3c30f2ef57411c466dfed0718873939e6bd3174
|
ceph_deploy/tests/parser/test_mon.py
|
ceph_deploy/tests/parser/test_mon.py
|
import pytest
from ceph_deploy.cli import get_parser
SUBCMDS_WITH_ARGS = ['add', 'destroy']
SUBCMDS_WITHOUT_ARGS = ['create', 'create-initial']
class TestParserMON(object):
def setup(self):
self.parser = get_parser()
def test_mon_help(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('mon --help'.split())
out, err = capsys.readouterr()
assert 'usage: ceph-deploy mon' in out
assert 'positional arguments:' in out
assert 'optional arguments:' in out
@pytest.mark.skipif(reason="http://tracker.ceph.com/issues/12150")
@pytest.mark.parametrize('cmd', SUBCMDS_WITH_ARGS)
def test_mon_valid_subcommands_with_args(self, cmd, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args(['mon'] + ['%s' % cmd] + ['host1'])
out, err = capsys.readouterr()
assert 'too few arguments' in err
assert 'invalid choice' not in err
@pytest.mark.parametrize('cmd', SUBCMDS_WITHOUT_ARGS)
def test_mon_valid_subcommands_without_args(self, cmd, capsys):
self.parser.parse_args(['mon'] + ['%s' % cmd])
def test_mon_invalid_subcommand(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('mon bork'.split())
out, err = capsys.readouterr()
assert 'invalid choice' in err
@pytest.mark.skipif(reason="http://tracker.ceph.com/issues/12151")
def test_mon_create_initial_help(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('mon create-initial --help'.split())
out, err = capsys.readouterr()
assert 'usage: ceph-deploy mon create-initial' in out
def test_mon_create_initial_keyrings_default_none(self):
args = self.parser.parse_args('mon create-initial'.split())
assert args.keyrings is None
def test_mon_create_initial_keyrings_custom_dir(self):
args = self.parser.parse_args('mon create-initial --keyrings /tmp/keys'.split())
assert args.keyrings == "/tmp/keys"
@pytest.mark.skipif(reason="http://tracker.ceph.com/issues/12151")
def test_mon_create_initial_keyrings_host_raises_err(self):
with pytest.raises(SystemExit):
self.parser.parse_args('mon create-initial test1'.split())
|
Add tests to for mon, mon create-initial
|
[RM-11742] Add tests to for mon, mon create-initial
Signed-off-by: Travis Rhoden <e5e44d6dbac12e32e01c3bb8b67940d8b42e225b@redhat.com>
|
Python
|
mit
|
codenrhoden/ceph-deploy,ceph/ceph-deploy,codenrhoden/ceph-deploy,imzhulei/ceph-deploy,branto1/ceph-deploy,SUSE/ceph-deploy,SUSE/ceph-deploy,osynge/ceph-deploy,osynge/ceph-deploy,trhoden/ceph-deploy,zhouyuan/ceph-deploy,SUSE/ceph-deploy-to-be-deleted,branto1/ceph-deploy,ghxandsky/ceph-deploy,ceph/ceph-deploy,isyippee/ceph-deploy,imzhulei/ceph-deploy,shenhequnying/ceph-deploy,shenhequnying/ceph-deploy,SUSE/ceph-deploy-to-be-deleted,ghxandsky/ceph-deploy,zhouyuan/ceph-deploy,isyippee/ceph-deploy,Vicente-Cheng/ceph-deploy,trhoden/ceph-deploy,Vicente-Cheng/ceph-deploy
|
[RM-11742] Add tests to for mon, mon create-initial
Signed-off-by: Travis Rhoden <e5e44d6dbac12e32e01c3bb8b67940d8b42e225b@redhat.com>
|
import pytest
from ceph_deploy.cli import get_parser
SUBCMDS_WITH_ARGS = ['add', 'destroy']
SUBCMDS_WITHOUT_ARGS = ['create', 'create-initial']
class TestParserMON(object):
def setup(self):
self.parser = get_parser()
def test_mon_help(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('mon --help'.split())
out, err = capsys.readouterr()
assert 'usage: ceph-deploy mon' in out
assert 'positional arguments:' in out
assert 'optional arguments:' in out
@pytest.mark.skipif(reason="http://tracker.ceph.com/issues/12150")
@pytest.mark.parametrize('cmd', SUBCMDS_WITH_ARGS)
def test_mon_valid_subcommands_with_args(self, cmd, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args(['mon'] + ['%s' % cmd] + ['host1'])
out, err = capsys.readouterr()
assert 'too few arguments' in err
assert 'invalid choice' not in err
@pytest.mark.parametrize('cmd', SUBCMDS_WITHOUT_ARGS)
def test_mon_valid_subcommands_without_args(self, cmd, capsys):
self.parser.parse_args(['mon'] + ['%s' % cmd])
def test_mon_invalid_subcommand(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('mon bork'.split())
out, err = capsys.readouterr()
assert 'invalid choice' in err
@pytest.mark.skipif(reason="http://tracker.ceph.com/issues/12151")
def test_mon_create_initial_help(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('mon create-initial --help'.split())
out, err = capsys.readouterr()
assert 'usage: ceph-deploy mon create-initial' in out
def test_mon_create_initial_keyrings_default_none(self):
args = self.parser.parse_args('mon create-initial'.split())
assert args.keyrings is None
def test_mon_create_initial_keyrings_custom_dir(self):
args = self.parser.parse_args('mon create-initial --keyrings /tmp/keys'.split())
assert args.keyrings == "/tmp/keys"
@pytest.mark.skipif(reason="http://tracker.ceph.com/issues/12151")
def test_mon_create_initial_keyrings_host_raises_err(self):
with pytest.raises(SystemExit):
self.parser.parse_args('mon create-initial test1'.split())
|
<commit_before><commit_msg>[RM-11742] Add tests to for mon, mon create-initial
Signed-off-by: Travis Rhoden <e5e44d6dbac12e32e01c3bb8b67940d8b42e225b@redhat.com><commit_after>
|
import pytest
from ceph_deploy.cli import get_parser
SUBCMDS_WITH_ARGS = ['add', 'destroy']
SUBCMDS_WITHOUT_ARGS = ['create', 'create-initial']
class TestParserMON(object):
def setup(self):
self.parser = get_parser()
def test_mon_help(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('mon --help'.split())
out, err = capsys.readouterr()
assert 'usage: ceph-deploy mon' in out
assert 'positional arguments:' in out
assert 'optional arguments:' in out
@pytest.mark.skipif(reason="http://tracker.ceph.com/issues/12150")
@pytest.mark.parametrize('cmd', SUBCMDS_WITH_ARGS)
def test_mon_valid_subcommands_with_args(self, cmd, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args(['mon'] + ['%s' % cmd] + ['host1'])
out, err = capsys.readouterr()
assert 'too few arguments' in err
assert 'invalid choice' not in err
@pytest.mark.parametrize('cmd', SUBCMDS_WITHOUT_ARGS)
def test_mon_valid_subcommands_without_args(self, cmd, capsys):
self.parser.parse_args(['mon'] + ['%s' % cmd])
def test_mon_invalid_subcommand(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('mon bork'.split())
out, err = capsys.readouterr()
assert 'invalid choice' in err
@pytest.mark.skipif(reason="http://tracker.ceph.com/issues/12151")
def test_mon_create_initial_help(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('mon create-initial --help'.split())
out, err = capsys.readouterr()
assert 'usage: ceph-deploy mon create-initial' in out
def test_mon_create_initial_keyrings_default_none(self):
args = self.parser.parse_args('mon create-initial'.split())
assert args.keyrings is None
def test_mon_create_initial_keyrings_custom_dir(self):
args = self.parser.parse_args('mon create-initial --keyrings /tmp/keys'.split())
assert args.keyrings == "/tmp/keys"
@pytest.mark.skipif(reason="http://tracker.ceph.com/issues/12151")
def test_mon_create_initial_keyrings_host_raises_err(self):
with pytest.raises(SystemExit):
self.parser.parse_args('mon create-initial test1'.split())
|
[RM-11742] Add tests to for mon, mon create-initial
Signed-off-by: Travis Rhoden <e5e44d6dbac12e32e01c3bb8b67940d8b42e225b@redhat.com>import pytest
from ceph_deploy.cli import get_parser
SUBCMDS_WITH_ARGS = ['add', 'destroy']
SUBCMDS_WITHOUT_ARGS = ['create', 'create-initial']
class TestParserMON(object):
def setup(self):
self.parser = get_parser()
def test_mon_help(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('mon --help'.split())
out, err = capsys.readouterr()
assert 'usage: ceph-deploy mon' in out
assert 'positional arguments:' in out
assert 'optional arguments:' in out
@pytest.mark.skipif(reason="http://tracker.ceph.com/issues/12150")
@pytest.mark.parametrize('cmd', SUBCMDS_WITH_ARGS)
def test_mon_valid_subcommands_with_args(self, cmd, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args(['mon'] + ['%s' % cmd] + ['host1'])
out, err = capsys.readouterr()
assert 'too few arguments' in err
assert 'invalid choice' not in err
@pytest.mark.parametrize('cmd', SUBCMDS_WITHOUT_ARGS)
def test_mon_valid_subcommands_without_args(self, cmd, capsys):
self.parser.parse_args(['mon'] + ['%s' % cmd])
def test_mon_invalid_subcommand(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('mon bork'.split())
out, err = capsys.readouterr()
assert 'invalid choice' in err
@pytest.mark.skipif(reason="http://tracker.ceph.com/issues/12151")
def test_mon_create_initial_help(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('mon create-initial --help'.split())
out, err = capsys.readouterr()
assert 'usage: ceph-deploy mon create-initial' in out
def test_mon_create_initial_keyrings_default_none(self):
args = self.parser.parse_args('mon create-initial'.split())
assert args.keyrings is None
def test_mon_create_initial_keyrings_custom_dir(self):
args = self.parser.parse_args('mon create-initial --keyrings /tmp/keys'.split())
assert args.keyrings == "/tmp/keys"
@pytest.mark.skipif(reason="http://tracker.ceph.com/issues/12151")
def test_mon_create_initial_keyrings_host_raises_err(self):
with pytest.raises(SystemExit):
self.parser.parse_args('mon create-initial test1'.split())
|
<commit_before><commit_msg>[RM-11742] Add tests to for mon, mon create-initial
Signed-off-by: Travis Rhoden <e5e44d6dbac12e32e01c3bb8b67940d8b42e225b@redhat.com><commit_after>import pytest
from ceph_deploy.cli import get_parser
SUBCMDS_WITH_ARGS = ['add', 'destroy']
SUBCMDS_WITHOUT_ARGS = ['create', 'create-initial']
class TestParserMON(object):
def setup(self):
self.parser = get_parser()
def test_mon_help(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('mon --help'.split())
out, err = capsys.readouterr()
assert 'usage: ceph-deploy mon' in out
assert 'positional arguments:' in out
assert 'optional arguments:' in out
@pytest.mark.skipif(reason="http://tracker.ceph.com/issues/12150")
@pytest.mark.parametrize('cmd', SUBCMDS_WITH_ARGS)
def test_mon_valid_subcommands_with_args(self, cmd, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args(['mon'] + ['%s' % cmd] + ['host1'])
out, err = capsys.readouterr()
assert 'too few arguments' in err
assert 'invalid choice' not in err
@pytest.mark.parametrize('cmd', SUBCMDS_WITHOUT_ARGS)
def test_mon_valid_subcommands_without_args(self, cmd, capsys):
self.parser.parse_args(['mon'] + ['%s' % cmd])
def test_mon_invalid_subcommand(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('mon bork'.split())
out, err = capsys.readouterr()
assert 'invalid choice' in err
@pytest.mark.skipif(reason="http://tracker.ceph.com/issues/12151")
def test_mon_create_initial_help(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('mon create-initial --help'.split())
out, err = capsys.readouterr()
assert 'usage: ceph-deploy mon create-initial' in out
def test_mon_create_initial_keyrings_default_none(self):
args = self.parser.parse_args('mon create-initial'.split())
assert args.keyrings is None
def test_mon_create_initial_keyrings_custom_dir(self):
args = self.parser.parse_args('mon create-initial --keyrings /tmp/keys'.split())
assert args.keyrings == "/tmp/keys"
@pytest.mark.skipif(reason="http://tracker.ceph.com/issues/12151")
def test_mon_create_initial_keyrings_host_raises_err(self):
with pytest.raises(SystemExit):
self.parser.parse_args('mon create-initial test1'.split())
|
|
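The parser tests above lean on a standard argparse behaviour: an invalid sub-command (or a missing required argument) makes `parse_args` call `sys.exit`, which surfaces as `SystemExit` under `pytest.raises`. The toy parser below, which is not ceph-deploy's real CLI, shows the pattern in isolation.

import argparse
import pytest

def get_toy_parser():
    parser = argparse.ArgumentParser(prog="toy-deploy")
    sub = parser.add_subparsers(dest="subcommand")
    mon = sub.add_parser("mon")
    mon_sub = mon.add_subparsers(dest="mon_command")
    create_initial = mon_sub.add_parser("create-initial")
    create_initial.add_argument("--keyrings", default=None)
    return parser

def test_invalid_subcommand_exits():
    with pytest.raises(SystemExit):
        get_toy_parser().parse_args("mon bork".split())

def test_keyrings_defaults_to_none():
    args = get_toy_parser().parse_args("mon create-initial".split())
    assert args.keyrings is None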
95bfc900e0ed45cb8cab6649def194dee2d694e3
|
solarnmf_main_ts.py
|
solarnmf_main_ts.py
|
#solarnmf_main_ts.py
#Will Barnes
#31 March 2015
#Import needed modules
import solarnmf_functs as snf
import solarnmf_plot_routines as spr
#Read in and format the time series
results = snf.make_t_matrix("data",filename='/home/wtb2/Desktop/gaussian_test.dat')
#Get the dimensions of the T matrix
ny,nx = results['T'].shape
#Set the number of guessed sources
Q = 10
#Initialize the U, V, and A matrices
uva_initial = snf.initialize_uva(nx,ny,Q,5,5,results['T'])
#Start the minimizer
min_results = snf.minimize_div(uva_initial['u'],uva_initial['v'],results['T'],uva_initial['A'],200,1.0e-5)
#Show the initial and final matrices side-by-side
spr.plot_mat_obsVpred(results['T'],min_results['A'])
#Show the initial and final 1d time series curves
spr.plot_ts_obsVpred(results['x'],min_results['A'])
#Show the constituents of the time series on top of the original vector
spr.plot_ts_reconstruction(results['x'],min_results['u'],min_results['v'])
|
Add main function to start needed functions for a time series run
|
Add main function to start needed functions for a time series run
|
Python
|
mit
|
wtbarnes/solarnmf
|
Add main function to start needed functions for a time series run
|
#solarnmf_main_ts.py
#Will Barnes
#31 March 2015
#Import needed modules
import solarnmf_functs as snf
import solarnmf_plot_routines as spr
#Read in and format the time series
results = snf.make_t_matrix("data",filename='/home/wtb2/Desktop/gaussian_test.dat')
#Get the dimensions of the T matrix
ny,nx = results['T'].shape
#Set the number of guessed sources
Q = 10
#Initialize the U, V, and A matrices
uva_initial = snf.initialize_uva(nx,ny,Q,5,5,results['T'])
#Start the minimizer
min_results = snf.minimize_div(uva_initial['u'],uva_initial['v'],results['T'],uva_initial['A'],200,1.0e-5)
#Show the initial and final matrices side-by-side
spr.plot_mat_obsVpred(results['T'],min_results['A'])
#Show the initial and final 1d time series curves
spr.plot_ts_obsVpred(results['x'],min_results['A'])
#Show the constituents of the time series on top of the original vector
spr.plot_ts_reconstruction(results['x'],min_results['u'],min_results['v'])
|
<commit_before><commit_msg>Add main function to start needed functions for a time series run<commit_after>
|
#solarnmf_main_ts.py
#Will Barnes
#31 March 2015
#Import needed modules
import solarnmf_functs as snf
import solarnmf_plot_routines as spr
#Read in and format the time series
results = snf.make_t_matrix("data",filename='/home/wtb2/Desktop/gaussian_test.dat')
#Get the dimensions of the T matrix
ny,nx = results['T'].shape
#Set the number of guessed sources
Q = 10
#Initialize the U, V, and A matrices
uva_initial = snf.initialize_uva(nx,ny,Q,5,5,results['T'])
#Start the minimizer
min_results = snf.minimize_div(uva_initial['u'],uva_initial['v'],results['T'],uva_initial['A'],200,1.0e-5)
#Show the initial and final matrices side-by-side
spr.plot_mat_obsVpred(results['T'],min_results['A'])
#Show the initial and final 1d time series curves
spr.plot_ts_obsVpred(results['x'],min_results['A'])
#Show the constituents of the time series on top of the original vector
spr.plot_ts_reconstruction(results['x'],min_results['u'],min_results['v'])
|
Add main function to start needed functions for a time series run#solarnmf_main_ts.py
#Will Barnes
#31 March 2015
#Import needed modules
import solarnmf_functs as snf
import solarnmf_plot_routines as spr
#Read in and format the time series
results = snf.make_t_matrix("data",filename='/home/wtb2/Desktop/gaussian_test.dat')
#Get the dimensions of the T matrix
ny,nx = results['T'].shape
#Set the number of guessed sources
Q = 10
#Initialize the U, V, and A matrices
uva_initial = snf.initialize_uva(nx,ny,Q,5,5,results['T'])
#Start the minimizer
min_results = snf.minimize_div(uva_initial['u'],uva_initial['v'],results['T'],uva_initial['A'],200,1.0e-5)
#Show the initial and final matrices side-by-side
spr.plot_mat_obsVpred(results['T'],min_results['A'])
#Show the initial and final 1d time series curves
spr.plot_ts_obsVpred(results['x'],min_results['A'])
#Show the constituents of the time series on top of the original vector
spr.plot_ts_reconstruction(results['x'],min_results['u'],min_results['v'])
|
<commit_before><commit_msg>Add main function to start needed functions for a time series run<commit_after>#solarnmf_main_ts.py
#Will Barnes
#31 March 2015
#Import needed modules
import solarnmf_functs as snf
import solarnmf_plot_routines as spr
#Read in and format the time series
results = snf.make_t_matrix("data",filename='/home/wtb2/Desktop/gaussian_test.dat')
#Get the dimensions of the T matrix
ny,nx = results['T'].shape
#Set the number of guessed sources
Q = 10
#Initialize the U, V, and A matrices
uva_initial = snf.initialize_uva(nx,ny,Q,5,5,results['T'])
#Start the minimizer
min_results = snf.minimize_div(uva_initial['u'],uva_initial['v'],results['T'],uva_initial['A'],200,1.0e-5)
#Show the initial and final matrices side-by-side
spr.plot_mat_obsVpred(results['T'],min_results['A'])
#Show the initial and final 1d time series curves
spr.plot_ts_obsVpred(results['x'],min_results['A'])
#Show the constituents of the time series on top of the original vector
spr.plot_ts_reconstruction(results['x'],min_results['u'],min_results['v'])
|
|
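The solarnmf script above factorises the observation matrix T into Q non-negative sources by minimising a divergence, but the `solarnmf_functs` helpers themselves are not shown. The sketch below reproduces the same idea on synthetic data with scikit-learn's NMF (multiplicative updates, Kullback-Leibler loss); it is a stand-in, not the project's implementation.

import numpy as np
from sklearn.decomposition import NMF

rng = np.random.default_rng(0)
T = np.abs(rng.normal(size=(50, 200)))   # synthetic stand-in for the observation matrix

model = NMF(n_components=10, solver="mu", beta_loss="kullback-leibler",
            init="random", random_state=0, max_iter=200)
U = model.fit_transform(T)               # (50, 10)  source maps, analogous to 'u'
V = model.components_                    # (10, 200) source profiles, analogous to 'v'
A = U @ V                                # reconstruction, analogous to min_results['A']
print("reconstruction error:", model.reconstruction_err_)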
ce53f3f7132c5b066d36f5e90d0b6c23c692fe9a
|
chapter03/maxProductOfTwoNumbers.py
|
chapter03/maxProductOfTwoNumbers.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import math
fourNumbers = input('Write four numbers separated by commas')
target = []
def multiplyToOthers(index):
results = [4]
for x in range(0,4):
if (index != x):
result = fourNumbers[index] * fourNumbers[x]
results.insert(x, result)
else:
results.insert(x, -1)
return results
i = 0
currentMax = 0
while (i < 4):
ml = multiplyToOthers(i)
ml.append(currentMax)
currentMax = max(ml)
i = i + 1
print 'Maximum multiplication number for two numbers in {0} tuple is {1}'.format(fourNumbers, currentMax)
|
Add multiplication between two numbers algorythm. Not optimized.
|
Add multiplication between two numbers algorythm. Not optimized.
|
Python
|
apache-2.0
|
MindCookin/python-exercises
|
Add multiplication between two numbers algorythm. Not optimized.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import math
fourNumbers = input('Write four numbers separated by commas')
target = []
def multiplyToOthers(index):
results = [4]
for x in range(0,4):
if (index != x):
result = fourNumbers[index] * fourNumbers[x]
results.insert(x, result)
else:
results.insert(x, -1)
return results
i = 0
currentMax = 0
while (i < 4):
ml = multiplyToOthers(i)
ml.append(currentMax)
currentMax = max(ml)
i = i + 1
print 'Maximum multiplication number for two numbers in {0} tuple is {1}'.format(fourNumbers, currentMax)
|
<commit_before><commit_msg>Add multiplication between two numbers algorythm. Not optimized.<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import math
fourNumbers = input('Write four numbers separated by commas')
target = []
def multiplyToOthers(index):
results = [4]
for x in range(0,4):
if (index != x):
result = fourNumbers[index] * fourNumbers[x]
results.insert(x, result)
else:
results.insert(x, -1)
return results
i = 0
currentMax = 0
while (i < 4):
ml = multiplyToOthers(i)
ml.append(currentMax)
currentMax = max(ml)
i = i + 1
print 'Maximum multiplication number for two numbers in {0} tuple is {1}'.format(fourNumbers, currentMax)
|
Add multiplication between two numbers algorythm. Not optimized.#!/usr/bin/env python
# -*- coding: utf-8 -*-
import math
fourNumbers = input('Write four numbers separated by commas')
target = []
def multiplyToOthers(index):
results = [4]
for x in range(0,4):
if (index != x):
result = fourNumbers[index] * fourNumbers[x]
results.insert(x, result)
else:
results.insert(x, -1)
return results
i = 0
currentMax = 0
while (i < 4):
ml = multiplyToOthers(i)
ml.append(currentMax)
currentMax = max(ml)
i = i + 1
print 'Maximum multiplication number for two numbers in {0} tuple is {1}'.format(fourNumbers, currentMax)
|
<commit_before><commit_msg>Add multiplication between two numbers algorythm. Not optimized.<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import math
fourNumbers = input('Write four numbers separated by commas')
target = []
def multiplyToOthers(index):
results = [4]
for x in range(0,4):
if (index != x):
result = fourNumbers[index] * fourNumbers[x]
results.insert(x, result)
else:
results.insert(x, -1)
return results
i = 0
currentMax = 0
while (i < 4):
ml = multiplyToOthers(i)
ml.append(currentMax)
currentMax = max(ml)
i = i + 1
print 'Maximum multiplication number for two numbers in {0} tuple is {1}'.format(fourNumbers, currentMax)
|
|
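The brute-force script above checks every ordered pairing and keeps a running maximum. For comparison, an idiomatic Python 3 version of the same computation over unordered pairs is sketched below; the sample numbers are arbitrary.

from itertools import combinations

def max_pair_product(numbers):
    # Largest product over every unordered pair of the input numbers.
    return max(a * b for a, b in combinations(numbers, 2))

print(max_pair_product((3, -7, 2, -5)))  # 35, from -7 * -5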
d6ce90994b8ec0f8db6802adfdcafeea63c30419
|
doc/examples/transform/plot_fundamental_matrix.py
|
doc/examples/transform/plot_fundamental_matrix.py
|
"""
=============================
Fundamental matrix estimation
=============================
This example demonstrates how to robustly estimate epipolar geometry between two
views using sparse ORB feature correspondences.
The fundamental matrix relates corresponding points between a pair of
uncalibrated images. The matrix transforms homogeneous image points in one image
to epipolar lines in the other image.
Uncalibrated means that the intrinsic calibration (focal lengths, pixel skew,
principal point) of the two cameras is not known. The fundamental matrix thus
enables projective 3D reconstruction of the captured scene. If the calibration
is known, estimating the essential matrix enables metric 3D reconstruction of
the captured scene.
"""
from skimage import data
from skimage.color import rgb2gray
from skimage.feature import match_descriptors, ORB, plot_matches
from skimage.measure import ransac
from skimage.transform import FundamentalMatrixTransform
import matplotlib.pyplot as plt
img1, img2 = map(rgb2gray, data.middlebury2014_motorcycle())
descriptor_extractor = ORB()
descriptor_extractor.detect_and_extract(img1)
keypoints1 = descriptor_extractor.keypoints
descriptors1 = descriptor_extractor.descriptors
descriptor_extractor.detect_and_extract(img2)
keypoints2 = descriptor_extractor.keypoints
descriptors2 = descriptor_extractor.descriptors
matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)
model, inliers = ransac((keypoints1[matches12[:, 0]],
keypoints2[matches12[:, 1]]),
FundamentalMatrixTransform, min_samples=8,
residual_threshold=4, max_trials=5000)
print("Number of matches:", matches12.shape[0])
print("Number of inliers:", inliers.sum())
fig, ax = plt.subplots(nrows=1, ncols=1)
plt.gray()
plot_matches(ax, img1, img2, keypoints1, keypoints2, matches12[inliers],
only_matches=True)
ax.axis('off')
plt.show()
|
Add simple fundamental matrix example
|
Add simple fundamental matrix example
|
Python
|
bsd-3-clause
|
paalge/scikit-image,paalge/scikit-image,paalge/scikit-image
|
Add simple fundamental matrix example
|
"""
=============================
Fundamental matrix estimation
=============================
This example demonstrates how to robustly estimate epipolar geometry between two
views using sparse ORB feature correspondences.
The fundamental matrix relates corresponding points between a pair of
uncalibrated images. The matrix transforms homogeneous image points in one image
to epipolar lines in the other image.
Uncalibrated means that the intrinsic calibration (focal lengths, pixel skew,
principal point) of the two cameras is not known. The fundamental matrix thus
enables projective 3D reconstruction of the captured scene. If the calibration
is known, estimating the essential matrix enables metric 3D reconstruction of
the captured scene.
"""
from skimage import data
from skimage.color import rgb2gray
from skimage.feature import match_descriptors, ORB, plot_matches
from skimage.measure import ransac
from skimage.transform import FundamentalMatrixTransform
import matplotlib.pyplot as plt
img1, img2 = map(rgb2gray, data.middlebury2014_motorcycle())
descriptor_extractor = ORB()
descriptor_extractor.detect_and_extract(img1)
keypoints1 = descriptor_extractor.keypoints
descriptors1 = descriptor_extractor.descriptors
descriptor_extractor.detect_and_extract(img2)
keypoints2 = descriptor_extractor.keypoints
descriptors2 = descriptor_extractor.descriptors
matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)
model, inliers = ransac((keypoints1[matches12[:, 0]],
keypoints2[matches12[:, 1]]),
FundamentalMatrixTransform, min_samples=8,
residual_threshold=4, max_trials=5000)
print("Number of matches:", matches12.shape[0])
print("Number of inliers:", inliers.sum())
fig, ax = plt.subplots(nrows=1, ncols=1)
plt.gray()
plot_matches(ax, img1, img2, keypoints1, keypoints2, matches12[inliers],
only_matches=True)
ax.axis('off')
plt.show()
|
<commit_before><commit_msg>Add simple fundamental matrix example<commit_after>
|
"""
=============================
Fundamental matrix estimation
=============================
This example demonstrates how to robustly estimate epipolar geometry between two
views using sparse ORB feature correspondences.
The fundamental matrix relates corresponding points between a pair of
uncalibrated images. The matrix transforms homogeneous image points in one image
to epipolar lines in the other image.
Uncalibrated means that the intrinsic calibration (focal lengths, pixel skew,
principal point) of the two cameras is not known. The fundamental matrix thus
enables projective 3D reconstruction of the captured scene. If the calibration
is known, estimating the essential matrix enables metric 3D reconstruction of
the captured scene.
"""
from skimage import data
from skimage.color import rgb2gray
from skimage.feature import match_descriptors, ORB, plot_matches
from skimage.measure import ransac
from skimage.transform import FundamentalMatrixTransform
import matplotlib.pyplot as plt
img1, img2 = map(rgb2gray, data.middlebury2014_motorcycle())
descriptor_extractor = ORB()
descriptor_extractor.detect_and_extract(img1)
keypoints1 = descriptor_extractor.keypoints
descriptors1 = descriptor_extractor.descriptors
descriptor_extractor.detect_and_extract(img2)
keypoints2 = descriptor_extractor.keypoints
descriptors2 = descriptor_extractor.descriptors
matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)
model, inliers = ransac((keypoints1[matches12[:, 0]],
keypoints2[matches12[:, 1]]),
FundamentalMatrixTransform, min_samples=8,
residual_threshold=4, max_trials=5000)
print("Number of matches:", matches12.shape[0])
print("Number of inliers:", inliers.sum())
fig, ax = plt.subplots(nrows=1, ncols=1)
plt.gray()
plot_matches(ax, img1, img2, keypoints1, keypoints2, matches12[inliers],
only_matches=True)
ax.axis('off')
plt.show()
|
Add simple fundamental matrix example"""
=============================
Fundamental matrix estimation
=============================
This example demonstrates how to robustly estimate epipolar geometry between two
views using sparse ORB feature correspondences.
The fundamental matrix relates corresponding points between a pair of
uncalibrated images. The matrix transforms homogeneous image points in one image
to epipolar lines in the other image.
Uncalibrated means that the intrinsic calibration (focal lengths, pixel skew,
principal point) of the two cameras is not known. The fundamental matrix thus
enables projective 3D reconstruction of the captured scene. If the calibration
is known, estimating the essential matrix enables metric 3D reconstruction of
the captured scene.
"""
from skimage import data
from skimage.color import rgb2gray
from skimage.feature import match_descriptors, ORB, plot_matches
from skimage.measure import ransac
from skimage.transform import FundamentalMatrixTransform
import matplotlib.pyplot as plt
img1, img2 = map(rgb2gray, data.middlebury2014_motorcycle())
descriptor_extractor = ORB()
descriptor_extractor.detect_and_extract(img1)
keypoints1 = descriptor_extractor.keypoints
descriptors1 = descriptor_extractor.descriptors
descriptor_extractor.detect_and_extract(img2)
keypoints2 = descriptor_extractor.keypoints
descriptors2 = descriptor_extractor.descriptors
matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)
model, inliers = ransac((keypoints1[matches12[:, 0]],
keypoints2[matches12[:, 1]]),
FundamentalMatrixTransform, min_samples=8,
residual_threshold=4, max_trials=5000)
print("Number of matches:", matches12.shape[0])
print("Number of inliers:", inliers.sum())
fig, ax = plt.subplots(nrows=1, ncols=1)
plt.gray()
plot_matches(ax, img1, img2, keypoints1, keypoints2, matches12[inliers],
only_matches=True)
ax.axis('off')
plt.show()
|
<commit_before><commit_msg>Add simple fundamental matrix example<commit_after>"""
=============================
Fundamental matrix estimation
=============================
This example demonstrates how to robustly estimate epipolar geometry between two
views using sparse ORB feature correspondences.
The fundamental matrix relates corresponding points between a pair of
uncalibrated images. The matrix transforms homogeneous image points in one image
to epipolar lines in the other image.
Uncalibrated means that the intrinsic calibration (focal lengths, pixel skew,
principal point) of the two cameras is not known. The fundamental matrix thus
enables projective 3D reconstruction of the captured scene. If the calibration
is known, estimating the essential matrix enables metric 3D reconstruction of
the captured scene.
"""
from skimage import data
from skimage.color import rgb2gray
from skimage.feature import match_descriptors, ORB, plot_matches
from skimage.measure import ransac
from skimage.transform import FundamentalMatrixTransform
import matplotlib.pyplot as plt
img1, img2 = map(rgb2gray, data.middlebury2014_motorcycle())
descriptor_extractor = ORB()
descriptor_extractor.detect_and_extract(img1)
keypoints1 = descriptor_extractor.keypoints
descriptors1 = descriptor_extractor.descriptors
descriptor_extractor.detect_and_extract(img2)
keypoints2 = descriptor_extractor.keypoints
descriptors2 = descriptor_extractor.descriptors
matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)
model, inliers = ransac((keypoints1[matches12[:, 0]],
keypoints2[matches12[:, 1]]),
FundamentalMatrixTransform, min_samples=8,
residual_threshold=4, max_trials=5000)
print("Number of matches:", matches12.shape[0])
print("Number of inliers:", inliers.sum())
fig, ax = plt.subplots(nrows=1, ncols=1)
plt.gray()
plot_matches(ax, img1, img2, keypoints1, keypoints2, matches12[inliers],
only_matches=True)
ax.axis('off')
plt.show()
|
|
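A quick way to sanity-check the geometry estimated in the example above is to look at the residuals of the inlier correspondences: `FundamentalMatrixTransform.residuals` returns per-match Sampson distances, which should stay small for points that obey the epipolar constraint. The snippet below reuses the `model`, `keypoints1`, `keypoints2`, `matches12` and `inliers` variables from the example.

import numpy as np

src = keypoints1[matches12[inliers, 0]]
dst = keypoints2[matches12[inliers, 1]]

print("estimated fundamental matrix:\n", model.params)
print("median Sampson distance over inliers:", np.median(model.residuals(src, dst)))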
a99cf1ae28cb42f15ed57ed175162fdcfafe0198
|
opps/articles/forms.py
|
opps/articles/forms.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django import forms
from django.conf import settings
from .models import Post, Album, Link
from redactor.widgets import RedactorEditor
class PostAdminForm(forms.ModelForm):
class Meta:
model = Post
widgets = {'content': RedactorEditor()}
class AlbumAdminForm(forms.ModelForm):
class Meta:
model = Album
widgets = {
'headline': RedactorEditor(
redactor_options=settings.REDACTOR_SIMPLE
)
}
class LinkAdminForm(forms.ModelForm):
class Meta:
model = Link
|
Create articles form (to admin)
|
Create articles form (to admin)
|
Python
|
mit
|
jeanmask/opps,YACOWS/opps,jeanmask/opps,opps/opps,williamroot/opps,opps/opps,williamroot/opps,opps/opps,YACOWS/opps,williamroot/opps,williamroot/opps,opps/opps,YACOWS/opps,jeanmask/opps,jeanmask/opps,YACOWS/opps
|
Create articles form (to admin)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django import forms
from django.conf import settings
from .models import Post, Album, Link
from redactor.widgets import RedactorEditor
class PostAdminForm(forms.ModelForm):
class Meta:
model = Post
widgets = {'content': RedactorEditor()}
class AlbumAdminForm(forms.ModelForm):
class Meta:
model = Album
widgets = {
'headline': RedactorEditor(
redactor_options=settings.REDACTOR_SIMPLE
)
}
class LinkAdminForm(forms.ModelForm):
class Meta:
model = Link
|
<commit_before><commit_msg>Create articles form (to admin)<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django import forms
from django.conf import settings
from .models import Post, Album, Link
from redactor.widgets import RedactorEditor
class PostAdminForm(forms.ModelForm):
class Meta:
model = Post
widgets = {'content': RedactorEditor()}
class AlbumAdminForm(forms.ModelForm):
class Meta:
model = Album
widgets = {
'headline': RedactorEditor(
redactor_options=settings.REDACTOR_SIMPLE
)
}
class LinkAdminForm(forms.ModelForm):
class Meta:
model = Link
|
Create articles form (to admin)#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django import forms
from django.conf import settings
from .models import Post, Album, Link
from redactor.widgets import RedactorEditor
class PostAdminForm(forms.ModelForm):
class Meta:
model = Post
widgets = {'content': RedactorEditor()}
class AlbumAdminForm(forms.ModelForm):
class Meta:
model = Album
widgets = {
'headline': RedactorEditor(
redactor_options=settings.REDACTOR_SIMPLE
)
}
class LinkAdminForm(forms.ModelForm):
class Meta:
model = Link
|
<commit_before><commit_msg>Create articles form (to admin)<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django import forms
from django.conf import settings
from .models import Post, Album, Link
from redactor.widgets import RedactorEditor
class PostAdminForm(forms.ModelForm):
class Meta:
model = Post
widgets = {'content': RedactorEditor()}
class AlbumAdminForm(forms.ModelForm):
class Meta:
model = Album
widgets = {
'headline': RedactorEditor(
redactor_options=settings.REDACTOR_SIMPLE
)
}
class LinkAdminForm(forms.ModelForm):
class Meta:
model = Link
|
|
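The admin forms above only swap in Redactor widgets; they still have to be attached to a ModelAdmin to take effect. The opps admin module is not shown here, so the wiring below is a generic Django sketch with illustrative class names.

from django.contrib import admin

from .forms import PostAdminForm
from .models import Post


class PostAdmin(admin.ModelAdmin):
    # The custom form replaces the default textarea for `content` with the Redactor editor.
    form = PostAdminForm


admin.site.register(Post, PostAdmin)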
1bcc8afc27653b4fb712df5d741ec78c11229bb0
|
benchmarks/benchmarks/bench_lib.py
|
benchmarks/benchmarks/bench_lib.py
|
"""Benchmarks for `numpy.lib`."""
from __future__ import absolute_import, division, print_function
from .common import Benchmark
import numpy as np
class Pad(Benchmark):
"""Benchmarks for `numpy.pad`."""
param_names = ["shape", "pad_width", "mode"]
params = [
[(1000,), (10, 100), (10, 10, 10)],
[1, 3, (0, 5)],
["constant", "edge", "linear_ramp", "mean", "reflect", "wrap"],
]
def setup(self, shape, pad_width, mode):
self.array = np.empty(shape)
def time_pad(self, shape, pad_width, mode):
np.pad(self.array, pad_width, mode)
|
Add basic benchmarks for numpy.pad
|
BENCH: Add basic benchmarks for numpy.pad
|
Python
|
bsd-3-clause
|
WarrenWeckesser/numpy,seberg/numpy,jakirkham/numpy,WarrenWeckesser/numpy,gfyoung/numpy,shoyer/numpy,endolith/numpy,pbrod/numpy,simongibbons/numpy,madphysicist/numpy,mhvk/numpy,abalkin/numpy,Eric89GXL/numpy,mhvk/numpy,jorisvandenbossche/numpy,mhvk/numpy,rgommers/numpy,simongibbons/numpy,mattip/numpy,pizzathief/numpy,gfyoung/numpy,abalkin/numpy,Eric89GXL/numpy,charris/numpy,pdebuyl/numpy,charris/numpy,numpy/numpy,MSeifert04/numpy,madphysicist/numpy,jorisvandenbossche/numpy,simongibbons/numpy,pizzathief/numpy,numpy/numpy,madphysicist/numpy,mattip/numpy,gfyoung/numpy,grlee77/numpy,pizzathief/numpy,Eric89GXL/numpy,WarrenWeckesser/numpy,simongibbons/numpy,mattip/numpy,ahaldane/numpy,pizzathief/numpy,madphysicist/numpy,mhvk/numpy,charris/numpy,jakirkham/numpy,madphysicist/numpy,rgommers/numpy,jorisvandenbossche/numpy,WarrenWeckesser/numpy,MSeifert04/numpy,endolith/numpy,pdebuyl/numpy,numpy/numpy,pdebuyl/numpy,rgommers/numpy,seberg/numpy,jorisvandenbossche/numpy,grlee77/numpy,jorisvandenbossche/numpy,MSeifert04/numpy,abalkin/numpy,shoyer/numpy,seberg/numpy,shoyer/numpy,charris/numpy,pbrod/numpy,anntzer/numpy,rgommers/numpy,ahaldane/numpy,mattip/numpy,pbrod/numpy,jakirkham/numpy,endolith/numpy,anntzer/numpy,jakirkham/numpy,mhvk/numpy,shoyer/numpy,numpy/numpy,pdebuyl/numpy,simongibbons/numpy,grlee77/numpy,WarrenWeckesser/numpy,ahaldane/numpy,jakirkham/numpy,ahaldane/numpy,seberg/numpy,pizzathief/numpy,grlee77/numpy,anntzer/numpy,shoyer/numpy,pbrod/numpy,MSeifert04/numpy,anntzer/numpy,MSeifert04/numpy,ahaldane/numpy,grlee77/numpy,Eric89GXL/numpy,pbrod/numpy,endolith/numpy
|
BENCH: Add basic benchmarks for numpy.pad
|
"""Benchmarks for `numpy.lib`."""
from __future__ import absolute_import, division, print_function
from .common import Benchmark
import numpy as np
class Pad(Benchmark):
"""Benchmarks for `numpy.pad`."""
param_names = ["shape", "pad_width", "mode"]
params = [
[(1000,), (10, 100), (10, 10, 10)],
[1, 3, (0, 5)],
["constant", "edge", "linear_ramp", "mean", "reflect", "wrap"],
]
def setup(self, shape, pad_width, mode):
self.array = np.empty(shape)
def time_pad(self, shape, pad_width, mode):
np.pad(self.array, pad_width, mode)
|
<commit_before><commit_msg>BENCH: Add basic benchmarks for numpy.pad<commit_after>
|
"""Benchmarks for `numpy.lib`."""
from __future__ import absolute_import, division, print_function
from .common import Benchmark
import numpy as np
class Pad(Benchmark):
"""Benchmarks for `numpy.pad`."""
param_names = ["shape", "pad_width", "mode"]
params = [
[(1000,), (10, 100), (10, 10, 10)],
[1, 3, (0, 5)],
["constant", "edge", "linear_ramp", "mean", "reflect", "wrap"],
]
def setup(self, shape, pad_width, mode):
self.array = np.empty(shape)
def time_pad(self, shape, pad_width, mode):
np.pad(self.array, pad_width, mode)
|
BENCH: Add basic benchmarks for numpy.pad"""Benchmarks for `numpy.lib`."""
from __future__ import absolute_import, division, print_function
from .common import Benchmark
import numpy as np
class Pad(Benchmark):
"""Benchmarks for `numpy.pad`."""
param_names = ["shape", "pad_width", "mode"]
params = [
[(1000,), (10, 100), (10, 10, 10)],
[1, 3, (0, 5)],
["constant", "edge", "linear_ramp", "mean", "reflect", "wrap"],
]
def setup(self, shape, pad_width, mode):
self.array = np.empty(shape)
def time_pad(self, shape, pad_width, mode):
np.pad(self.array, pad_width, mode)
|
<commit_before><commit_msg>BENCH: Add basic benchmarks for numpy.pad<commit_after>"""Benchmarks for `numpy.lib`."""
from __future__ import absolute_import, division, print_function
from .common import Benchmark
import numpy as np
class Pad(Benchmark):
"""Benchmarks for `numpy.pad`."""
param_names = ["shape", "pad_width", "mode"]
params = [
[(1000,), (10, 100), (10, 10, 10)],
[1, 3, (0, 5)],
["constant", "edge", "linear_ramp", "mean", "reflect", "wrap"],
]
def setup(self, shape, pad_width, mode):
self.array = np.empty(shape)
def time_pad(self, shape, pad_width, mode):
np.pad(self.array, pad_width, mode)
|
|
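Each cell of the benchmark grid above times a single `np.pad` call for one (shape, pad_width, mode) combination. For reference, here is what two of those parameterisations do on a tiny array.

import numpy as np

a = np.arange(4.0)
print(np.pad(a, 3, "constant"))   # -> [0. 0. 0. 0. 1. 2. 3. 0. 0. 0.]
print(np.pad(a, (0, 5), "wrap"))  # -> [0. 1. 2. 3. 0. 1. 2. 3. 0.]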
9151b60566d991e4bf0c9877de608fe88b3763fc
|
go_optouts/tests/test_server.py
|
go_optouts/tests/test_server.py
|
import yaml
from twisted.internet.defer import inlineCallbacks
from twisted.web.server import Site
from vumi.tests.helpers import VumiTestCase
from go_optouts.server import HealthResource, read_yaml_config, ApiSite
from go_optouts.tests.utils import SiteHelper
class TestHealthResource(VumiTestCase):
@inlineCallbacks
def setUp(self):
self.site = Site(HealthResource())
self.site_helper = yield self.add_helper(SiteHelper(self.site))
@inlineCallbacks
def test_health_resource(self):
result = yield self.site_helper.get('/')
self.assertEqual(result.code, 200)
body = yield result.text()
self.assertEqual(body, "OK")
class TestReadYamlConfig(VumiTestCase):
def mk_config(self, data):
path = self.mktemp()
with open(path, "wb") as f:
f.write(yaml.safe_dump(data))
return path
def test_read_config(self):
path = self.mk_config({
"foo": "bar",
})
data = read_yaml_config(path)
self.assertEqual(data, {
"foo": "bar",
})
def test_optional_config(self):
data = read_yaml_config(None)
self.assertEqual(data, {})
class TestApiSite(VumiTestCase):
@inlineCallbacks
def setUp(self):
self.api_site = ApiSite()
self.site_helper = yield self.add_helper(
SiteHelper(self.api_site.site))
@inlineCallbacks
def test_health(self):
result = yield self.site_helper.get('/health')
self.assertEqual(result.code, 200)
body = yield result.text()
self.assertEqual(body, "OK")
@inlineCallbacks
def test_opt_out(self):
result = yield self.site_helper.get('/optouts/count', headers={
"X-Owner-ID": "owner-1",
})
self.assertEqual(result.code, 200)
data = yield result.json()
self.assertEqual(data, {
'opt_out_count': 0,
'status': {
'code': 200,
'reason': 'OK',
},
})
|
Add tests for server utilities.
|
Add tests for server utilities.
|
Python
|
bsd-3-clause
|
praekelt/go-optouts-api,praekelt/go-optouts-api
|
Add tests for server utilities.
|
import yaml
from twisted.internet.defer import inlineCallbacks
from twisted.web.server import Site
from vumi.tests.helpers import VumiTestCase
from go_optouts.server import HealthResource, read_yaml_config, ApiSite
from go_optouts.tests.utils import SiteHelper
class TestHealthResource(VumiTestCase):
@inlineCallbacks
def setUp(self):
self.site = Site(HealthResource())
self.site_helper = yield self.add_helper(SiteHelper(self.site))
@inlineCallbacks
def test_health_resource(self):
result = yield self.site_helper.get('/')
self.assertEqual(result.code, 200)
body = yield result.text()
self.assertEqual(body, "OK")
class TestReadYamlConfig(VumiTestCase):
def mk_config(self, data):
path = self.mktemp()
with open(path, "wb") as f:
f.write(yaml.safe_dump(data))
return path
def test_read_config(self):
path = self.mk_config({
"foo": "bar",
})
data = read_yaml_config(path)
self.assertEqual(data, {
"foo": "bar",
})
def test_optional_config(self):
data = read_yaml_config(None)
self.assertEqual(data, {})
class TestApiSite(VumiTestCase):
@inlineCallbacks
def setUp(self):
self.api_site = ApiSite()
self.site_helper = yield self.add_helper(
SiteHelper(self.api_site.site))
@inlineCallbacks
def test_health(self):
result = yield self.site_helper.get('/health')
self.assertEqual(result.code, 200)
body = yield result.text()
self.assertEqual(body, "OK")
@inlineCallbacks
def test_opt_out(self):
result = yield self.site_helper.get('/optouts/count', headers={
"X-Owner-ID": "owner-1",
})
self.assertEqual(result.code, 200)
data = yield result.json()
self.assertEqual(data, {
'opt_out_count': 0,
'status': {
'code': 200,
'reason': 'OK',
},
})
|
<commit_before><commit_msg>Add tests for server utilities.<commit_after>
|
import yaml
from twisted.internet.defer import inlineCallbacks
from twisted.web.server import Site
from vumi.tests.helpers import VumiTestCase
from go_optouts.server import HealthResource, read_yaml_config, ApiSite
from go_optouts.tests.utils import SiteHelper
class TestHealthResource(VumiTestCase):
@inlineCallbacks
def setUp(self):
self.site = Site(HealthResource())
self.site_helper = yield self.add_helper(SiteHelper(self.site))
@inlineCallbacks
def test_health_resource(self):
result = yield self.site_helper.get('/')
self.assertEqual(result.code, 200)
body = yield result.text()
self.assertEqual(body, "OK")
class TestReadYamlConfig(VumiTestCase):
def mk_config(self, data):
path = self.mktemp()
with open(path, "wb") as f:
f.write(yaml.safe_dump(data))
return path
def test_read_config(self):
path = self.mk_config({
"foo": "bar",
})
data = read_yaml_config(path)
self.assertEqual(data, {
"foo": "bar",
})
def test_optional_config(self):
data = read_yaml_config(None)
self.assertEqual(data, {})
class TestApiSite(VumiTestCase):
@inlineCallbacks
def setUp(self):
self.api_site = ApiSite()
self.site_helper = yield self.add_helper(
SiteHelper(self.api_site.site))
@inlineCallbacks
def test_health(self):
result = yield self.site_helper.get('/health')
self.assertEqual(result.code, 200)
body = yield result.text()
self.assertEqual(body, "OK")
@inlineCallbacks
def test_opt_out(self):
result = yield self.site_helper.get('/optouts/count', headers={
"X-Owner-ID": "owner-1",
})
self.assertEqual(result.code, 200)
data = yield result.json()
self.assertEqual(data, {
'opt_out_count': 0,
'status': {
'code': 200,
'reason': 'OK',
},
})
|
Add tests for server utilities.import yaml
from twisted.internet.defer import inlineCallbacks
from twisted.web.server import Site
from vumi.tests.helpers import VumiTestCase
from go_optouts.server import HealthResource, read_yaml_config, ApiSite
from go_optouts.tests.utils import SiteHelper
class TestHealthResource(VumiTestCase):
@inlineCallbacks
def setUp(self):
self.site = Site(HealthResource())
self.site_helper = yield self.add_helper(SiteHelper(self.site))
@inlineCallbacks
def test_health_resource(self):
result = yield self.site_helper.get('/')
self.assertEqual(result.code, 200)
body = yield result.text()
self.assertEqual(body, "OK")
class TestReadYamlConfig(VumiTestCase):
def mk_config(self, data):
path = self.mktemp()
with open(path, "wb") as f:
f.write(yaml.safe_dump(data))
return path
def test_read_config(self):
path = self.mk_config({
"foo": "bar",
})
data = read_yaml_config(path)
self.assertEqual(data, {
"foo": "bar",
})
def test_optional_config(self):
data = read_yaml_config(None)
self.assertEqual(data, {})
class TestApiSite(VumiTestCase):
@inlineCallbacks
def setUp(self):
self.api_site = ApiSite()
self.site_helper = yield self.add_helper(
SiteHelper(self.api_site.site))
@inlineCallbacks
def test_health(self):
result = yield self.site_helper.get('/health')
self.assertEqual(result.code, 200)
body = yield result.text()
self.assertEqual(body, "OK")
@inlineCallbacks
def test_opt_out(self):
result = yield self.site_helper.get('/optouts/count', headers={
"X-Owner-ID": "owner-1",
})
self.assertEqual(result.code, 200)
data = yield result.json()
self.assertEqual(data, {
'opt_out_count': 0,
'status': {
'code': 200,
'reason': 'OK',
},
})
|
<commit_before><commit_msg>Add tests for server utilities.<commit_after>import yaml
from twisted.internet.defer import inlineCallbacks
from twisted.web.server import Site
from vumi.tests.helpers import VumiTestCase
from go_optouts.server import HealthResource, read_yaml_config, ApiSite
from go_optouts.tests.utils import SiteHelper
class TestHealthResource(VumiTestCase):
@inlineCallbacks
def setUp(self):
self.site = Site(HealthResource())
self.site_helper = yield self.add_helper(SiteHelper(self.site))
@inlineCallbacks
def test_health_resource(self):
result = yield self.site_helper.get('/')
self.assertEqual(result.code, 200)
body = yield result.text()
self.assertEqual(body, "OK")
class TestReadYamlConfig(VumiTestCase):
def mk_config(self, data):
path = self.mktemp()
with open(path, "wb") as f:
f.write(yaml.safe_dump(data))
return path
def test_read_config(self):
path = self.mk_config({
"foo": "bar",
})
data = read_yaml_config(path)
self.assertEqual(data, {
"foo": "bar",
})
def test_optional_config(self):
data = read_yaml_config(None)
self.assertEqual(data, {})
class TestApiSite(VumiTestCase):
@inlineCallbacks
def setUp(self):
self.api_site = ApiSite()
self.site_helper = yield self.add_helper(
SiteHelper(self.api_site.site))
@inlineCallbacks
def test_health(self):
result = yield self.site_helper.get('/health')
self.assertEqual(result.code, 200)
body = yield result.text()
self.assertEqual(body, "OK")
@inlineCallbacks
def test_opt_out(self):
result = yield self.site_helper.get('/optouts/count', headers={
"X-Owner-ID": "owner-1",
})
self.assertEqual(result.code, 200)
data = yield result.json()
self.assertEqual(data, {
'opt_out_count': 0,
'status': {
'code': 200,
'reason': 'OK',
},
})
|
|
f50644484f4b05fbb25adfd6430b6207441d8b2e
|
src/ggrc_basic_permissions/migrations/versions/20131008124800_8f33d9bd2043_fix_system_roles.py
|
src/ggrc_basic_permissions/migrations/versions/20131008124800_8f33d9bd2043_fix_system_roles.py
|
"""
Revision ID: 8f33d9bd2043
Revises: 758b4012b5f
Create Date: 2013-09-20 14:12:32.846302
"""
# revision identifiers, used by Alembic.
revision = '8f33d9bd2043'
down_revision = '758b4012b5f'
import json
import sqlalchemy as sa
from alembic import op
from datetime import datetime
from sqlalchemy.sql import table, column
roles_table = table('roles',
column('id', sa.Integer),
column('name', sa.String),
column('permissions_json', sa.Text),
column('description', sa.Text),
column('modified_by_id', sa.Integer),
column('created_at', sa.DateTime),
column('updated_at', sa.DateTime),
column('context_id', sa.Integer),
)
def upgrade():
basic_objects_editable = [
'Categorization',
'Category',
'Control',
'ControlControl',
'ControlSection',
'Cycle',
'DataAsset',
'Directive',
'Contract',
'Policy',
'Regulation',
'DirectiveControl',
'Document',
'Facility',
'Help',
'Market',
'Objective',
'ObjectiveControl',
'ObjectControl',
'ObjectDocument',
'ObjectObjective',
'ObjectPerson',
'ObjectSection',
'Option',
'OrgGroup',
'PopulationSample',
'Product',
'ProgramControl',
'ProgramDirective',
'Project',
'Relationship',
'RelationshipType',
'Section',
'SectionObjective',
'SystemOrProcess',
'System',
'Process',
'SystemControl',
'SystemSysetm',
]
basic_objects_readable = list(basic_objects_editable)
basic_objects_readable.extend([
'Person',
'Program',
'Role',
#'UserRole', ?? why?
])
basic_objects_creatable = list(basic_objects_editable)
basic_objects_creatable.extend([
'Person',
])
basic_objects_updateable = list(basic_objects_editable)
basic_objects_updateable.extend([
'Person',
])
basic_objects_deletable = list(basic_objects_editable)
op.execute(roles_table.update()\
.where(roles_table.c.name == 'Reader')\
.values(permissions_json=json.dumps({
'read': basic_objects_readable,
})))
op.execute(roles_table.update()\
.where(roles_table.c.name == 'ObjectEditor')\
.values(permissions_json=json.dumps({
'create': basic_objects_creatable,
'read': basic_objects_readable,
'update': basic_objects_updateable,
'delete': basic_objects_deletable,
})))
def downgrade():
# No reason to downgrade this one
pass
|
Add migration to fix system roles
|
Add migration to fix system roles
* `ObjectEditor` and `Reader` were missing `ProgramDirective`,
`ProgramControl`, and `Person` permissions (CRUD, except `Person`,
which is CRU).
* `ObjectControl` and `ObjectDocument` were combined due to a missing
comma in a previous migration.
|
Python
|
apache-2.0
|
hasanalom/ggrc-core,VinnieJohns/ggrc-core,AleksNeStu/ggrc-core,plamut/ggrc-core,hasanalom/ggrc-core,jmakov/ggrc-core,jmakov/ggrc-core,kr41/ggrc-core,AleksNeStu/ggrc-core,prasannav7/ggrc-core,hasanalom/ggrc-core,j0gurt/ggrc-core,uskudnik/ggrc-core,uskudnik/ggrc-core,hyperNURb/ggrc-core,jmakov/ggrc-core,prasannav7/ggrc-core,hyperNURb/ggrc-core,uskudnik/ggrc-core,plamut/ggrc-core,j0gurt/ggrc-core,andrei-karalionak/ggrc-core,plamut/ggrc-core,hyperNURb/ggrc-core,kr41/ggrc-core,jmakov/ggrc-core,VinnieJohns/ggrc-core,josthkko/ggrc-core,hyperNURb/ggrc-core,vladan-m/ggrc-core,kr41/ggrc-core,prasannav7/ggrc-core,hasanalom/ggrc-core,selahssea/ggrc-core,vladan-m/ggrc-core,selahssea/ggrc-core,j0gurt/ggrc-core,AleksNeStu/ggrc-core,NejcZupec/ggrc-core,josthkko/ggrc-core,AleksNeStu/ggrc-core,edofic/ggrc-core,VinnieJohns/ggrc-core,vladan-m/ggrc-core,NejcZupec/ggrc-core,kr41/ggrc-core,edofic/ggrc-core,selahssea/ggrc-core,andrei-karalionak/ggrc-core,edofic/ggrc-core,andrei-karalionak/ggrc-core,josthkko/ggrc-core,NejcZupec/ggrc-core,prasannav7/ggrc-core,uskudnik/ggrc-core,jmakov/ggrc-core,selahssea/ggrc-core,edofic/ggrc-core,j0gurt/ggrc-core,uskudnik/ggrc-core,vladan-m/ggrc-core,VinnieJohns/ggrc-core,vladan-m/ggrc-core,NejcZupec/ggrc-core,hyperNURb/ggrc-core,plamut/ggrc-core,josthkko/ggrc-core,andrei-karalionak/ggrc-core,hasanalom/ggrc-core
|
Add migration to fix system roles
* `ObjectEditor` and `Reader` were missing `ProgramDirective`,
`ProgramControl`, and `Person` permissions (CRUD, except `Person`,
which is CRU).
* `ObjectControl` and `ObjectDocument` were combined due to a missing
comma in a previous migration.
|
"""
Revision ID: 8f33d9bd2043
Revises: 758b4012b5f
Create Date: 2013-09-20 14:12:32.846302
"""
# revision identifiers, used by Alembic.
revision = '8f33d9bd2043'
down_revision = '758b4012b5f'
import json
import sqlalchemy as sa
from alembic import op
from datetime import datetime
from sqlalchemy.sql import table, column
roles_table = table('roles',
column('id', sa.Integer),
column('name', sa.String),
column('permissions_json', sa.Text),
column('description', sa.Text),
column('modified_by_id', sa.Integer),
column('created_at', sa.DateTime),
column('updated_at', sa.DateTime),
column('context_id', sa.Integer),
)
def upgrade():
basic_objects_editable = [
'Categorization',
'Category',
'Control',
'ControlControl',
'ControlSection',
'Cycle',
'DataAsset',
'Directive',
'Contract',
'Policy',
'Regulation',
'DirectiveControl',
'Document',
'Facility',
'Help',
'Market',
'Objective',
'ObjectiveControl',
'ObjectControl',
'ObjectDocument',
'ObjectObjective',
'ObjectPerson',
'ObjectSection',
'Option',
'OrgGroup',
'PopulationSample',
'Product',
'ProgramControl',
'ProgramDirective',
'Project',
'Relationship',
'RelationshipType',
'Section',
'SectionObjective',
'SystemOrProcess',
'System',
'Process',
'SystemControl',
'SystemSysetm',
]
basic_objects_readable = list(basic_objects_editable)
basic_objects_readable.extend([
'Person',
'Program',
'Role',
#'UserRole', ?? why?
])
basic_objects_creatable = list(basic_objects_editable)
basic_objects_creatable.extend([
'Person',
])
basic_objects_updateable = list(basic_objects_editable)
basic_objects_updateable.extend([
'Person',
])
basic_objects_deletable = list(basic_objects_editable)
op.execute(roles_table.update()\
.where(roles_table.c.name == 'Reader')\
.values(permissions_json=json.dumps({
'read': basic_objects_readable,
})))
op.execute(roles_table.update()\
.where(roles_table.c.name == 'ObjectEditor')\
.values(permissions_json=json.dumps({
'create': basic_objects_creatable,
'read': basic_objects_readable,
'update': basic_objects_updateable,
'delete': basic_objects_deletable,
})))
def downgrade():
# No reason to downgrade this one
pass
|
<commit_before><commit_msg>Add migration to fix system roles
* `ObjectEditor` and `Reader` were missing `ProgramDirective`,
`ProgramControl`, and `Person` permissions (CRUD, except `Person`,
which is CRU).
* `ObjectControl` and `ObjectDocument` were combined due to a missing
comma in a previous migration.<commit_after>
|
"""
Revision ID: 8f33d9bd2043
Revises: 758b4012b5f
Create Date: 2013-09-20 14:12:32.846302
"""
# revision identifiers, used by Alembic.
revision = '8f33d9bd2043'
down_revision = '758b4012b5f'
import json
import sqlalchemy as sa
from alembic import op
from datetime import datetime
from sqlalchemy.sql import table, column
roles_table = table('roles',
column('id', sa.Integer),
column('name', sa.String),
column('permissions_json', sa.Text),
column('description', sa.Text),
column('modified_by_id', sa.Integer),
column('created_at', sa.DateTime),
column('updated_at', sa.DateTime),
column('context_id', sa.Integer),
)
def upgrade():
basic_objects_editable = [
'Categorization',
'Category',
'Control',
'ControlControl',
'ControlSection',
'Cycle',
'DataAsset',
'Directive',
'Contract',
'Policy',
'Regulation',
'DirectiveControl',
'Document',
'Facility',
'Help',
'Market',
'Objective',
'ObjectiveControl',
'ObjectControl',
'ObjectDocument',
'ObjectObjective',
'ObjectPerson',
'ObjectSection',
'Option',
'OrgGroup',
'PopulationSample',
'Product',
'ProgramControl',
'ProgramDirective',
'Project',
'Relationship',
'RelationshipType',
'Section',
'SectionObjective',
'SystemOrProcess',
'System',
'Process',
'SystemControl',
'SystemSysetm',
]
basic_objects_readable = list(basic_objects_editable)
basic_objects_readable.extend([
'Person',
'Program',
'Role',
#'UserRole', ?? why?
])
basic_objects_creatable = list(basic_objects_editable)
basic_objects_creatable.extend([
'Person',
])
basic_objects_updateable = list(basic_objects_editable)
basic_objects_updateable.extend([
'Person',
])
basic_objects_deletable = list(basic_objects_editable)
op.execute(roles_table.update()\
.where(roles_table.c.name == 'Reader')\
.values(permissions_json=json.dumps({
'read': basic_objects_readable,
})))
op.execute(roles_table.update()\
.where(roles_table.c.name == 'ObjectEditor')\
.values(permissions_json=json.dumps({
'create': basic_objects_creatable,
'read': basic_objects_readable,
'update': basic_objects_updateable,
'delete': basic_objects_deletable,
})))
def downgrade():
# No reason to downgrade this one
pass
|
Add migration to fix system roles
* `ObjectEditor` and `Reader` were missing `ProgramDirective`,
`ProgramControl`, and `Person` permissions (CRUD, except `Person`,
which is CRU).
* `ObjectControl` and `ObjectDocument` were combined due to a missing
comma in a previous migration."""
Revision ID: 8f33d9bd2043
Revises: 758b4012b5f
Create Date: 2013-09-20 14:12:32.846302
"""
# revision identifiers, used by Alembic.
revision = '8f33d9bd2043'
down_revision = '758b4012b5f'
import json
import sqlalchemy as sa
from alembic import op
from datetime import datetime
from sqlalchemy.sql import table, column
roles_table = table('roles',
column('id', sa.Integer),
column('name', sa.String),
column('permissions_json', sa.Text),
column('description', sa.Text),
column('modified_by_id', sa.Integer),
column('created_at', sa.DateTime),
column('updated_at', sa.DateTime),
column('context_id', sa.Integer),
)
def upgrade():
basic_objects_editable = [
'Categorization',
'Category',
'Control',
'ControlControl',
'ControlSection',
'Cycle',
'DataAsset',
'Directive',
'Contract',
'Policy',
'Regulation',
'DirectiveControl',
'Document',
'Facility',
'Help',
'Market',
'Objective',
'ObjectiveControl',
'ObjectControl',
'ObjectDocument',
'ObjectObjective',
'ObjectPerson',
'ObjectSection',
'Option',
'OrgGroup',
'PopulationSample',
'Product',
'ProgramControl',
'ProgramDirective',
'Project',
'Relationship',
'RelationshipType',
'Section',
'SectionObjective',
'SystemOrProcess',
'System',
'Process',
'SystemControl',
'SystemSysetm',
]
basic_objects_readable = list(basic_objects_editable)
basic_objects_readable.extend([
'Person',
'Program',
'Role',
#'UserRole', ?? why?
])
basic_objects_creatable = list(basic_objects_editable)
basic_objects_creatable.extend([
'Person',
])
basic_objects_updateable = list(basic_objects_editable)
basic_objects_updateable.extend([
'Person',
])
basic_objects_deletable = list(basic_objects_editable)
op.execute(roles_table.update()\
.where(roles_table.c.name == 'Reader')\
.values(permissions_json=json.dumps({
'read': basic_objects_readable,
})))
op.execute(roles_table.update()\
.where(roles_table.c.name == 'ObjectEditor')\
.values(permissions_json=json.dumps({
'create': basic_objects_creatable,
'read': basic_objects_readable,
'update': basic_objects_updateable,
'delete': basic_objects_deletable,
})))
def downgrade():
# No reason to downgrade this one
pass
|
<commit_before><commit_msg>Add migration to fix system roles
* `ObjectEditor` and `Reader` were missing `ProgramDirective`,
`ProgramControl`, and `Person` permissions (CRUD, except `Person`,
which is CRU).
* `ObjectControl` and `ObjectDocument` were combined due to a missing
comma in a previous migration.<commit_after>"""
Revision ID: 8f33d9bd2043
Revises: 758b4012b5f
Create Date: 2013-09-20 14:12:32.846302
"""
# revision identifiers, used by Alembic.
revision = '8f33d9bd2043'
down_revision = '758b4012b5f'
import json
import sqlalchemy as sa
from alembic import op
from datetime import datetime
from sqlalchemy.sql import table, column
roles_table = table('roles',
column('id', sa.Integer),
column('name', sa.String),
column('permissions_json', sa.Text),
column('description', sa.Text),
column('modified_by_id', sa.Integer),
column('created_at', sa.DateTime),
column('updated_at', sa.DateTime),
column('context_id', sa.Integer),
)
def upgrade():
basic_objects_editable = [
'Categorization',
'Category',
'Control',
'ControlControl',
'ControlSection',
'Cycle',
'DataAsset',
'Directive',
'Contract',
'Policy',
'Regulation',
'DirectiveControl',
'Document',
'Facility',
'Help',
'Market',
'Objective',
'ObjectiveControl',
'ObjectControl',
'ObjectDocument',
'ObjectObjective',
'ObjectPerson',
'ObjectSection',
'Option',
'OrgGroup',
'PopulationSample',
'Product',
'ProgramControl',
'ProgramDirective',
'Project',
'Relationship',
'RelationshipType',
'Section',
'SectionObjective',
'SystemOrProcess',
'System',
'Process',
'SystemControl',
'SystemSysetm',
]
basic_objects_readable = list(basic_objects_editable)
basic_objects_readable.extend([
'Person',
'Program',
'Role',
#'UserRole', ?? why?
])
basic_objects_creatable = list(basic_objects_editable)
basic_objects_creatable.extend([
'Person',
])
basic_objects_updateable = list(basic_objects_editable)
basic_objects_updateable.extend([
'Person',
])
basic_objects_deletable = list(basic_objects_editable)
op.execute(roles_table.update()\
.where(roles_table.c.name == 'Reader')\
.values(permissions_json=json.dumps({
'read': basic_objects_readable,
})))
op.execute(roles_table.update()\
.where(roles_table.c.name == 'ObjectEditor')\
.values(permissions_json=json.dumps({
'create': basic_objects_creatable,
'read': basic_objects_readable,
'update': basic_objects_updateable,
'delete': basic_objects_deletable,
})))
def downgrade():
# No reason to downgrade this one
pass
|
|
f6b2e92328d6bc3fc01916ffd6fa872da7afeb4f
|
barbican/model/migration/alembic_migrations/versions/39a96e67e990_add_missing_constraints.py
|
barbican/model/migration/alembic_migrations/versions/39a96e67e990_add_missing_constraints.py
|
"""Add missing constraints
Revision ID: 39a96e67e990
Revises: 4ecde3a3a72a
Create Date: 2016-01-26 13:18:06.113621
"""
# revision identifiers, used by Alembic.
revision = '39a96e67e990'
down_revision = '4ecde3a3a72a'
from alembic import op
import sqlalchemy as sa
def upgrade():
# Add missing projects table keystone_id uniqueness constraint.
op.create_unique_constraint(
'uc_projects_external_ids', 'projects', ['external_id'])
# Add missing default for secret_acls' project_access.
op.alter_column('secret_acls', 'project_access',
server_default=sa.sql.expression.true(),
existing_type=sa.Boolean,
existing_server_default=None,
existing_nullable=False)
# Add missing default for container_acls' project_access.
op.alter_column('container_acls', 'project_access',
server_default=sa.sql.expression.true(),
existing_type=sa.Boolean,
existing_server_default=None,
existing_nullable=False)
|
Add missing database constraints and defaults
|
Add missing database constraints and defaults
The project table was missing a constraint on the external_id column.
The secret and container ACLs tables were missing a default value of
True for the project_access column.
Change-Id: I5bd4bc9bca830a37d841434c07a80ce857b676bb
|
Python
|
apache-2.0
|
openstack/barbican,openstack/barbican
|
Add missing database constraints and defaults
The project table was missing a constraint on the external_id column.
The secret and container ACLs tables were missing a default value of
True for the project_access column.
Change-Id: I5bd4bc9bca830a37d841434c07a80ce857b676bb
|
"""Add missing constraints
Revision ID: 39a96e67e990
Revises: 4ecde3a3a72a
Create Date: 2016-01-26 13:18:06.113621
"""
# revision identifiers, used by Alembic.
revision = '39a96e67e990'
down_revision = '4ecde3a3a72a'
from alembic import op
import sqlalchemy as sa
def upgrade():
# Add missing projects table keystone_id uniqueness constraint.
op.create_unique_constraint(
'uc_projects_external_ids', 'projects', ['external_id'])
# Add missing default for secret_acls' project_access.
op.alter_column('secret_acls', 'project_access',
server_default=sa.sql.expression.true(),
existing_type=sa.Boolean,
existing_server_default=None,
existing_nullable=False)
# Add missing default for container_acls' project_access.
op.alter_column('container_acls', 'project_access',
server_default=sa.sql.expression.true(),
existing_type=sa.Boolean,
existing_server_default=None,
existing_nullable=False)
|
<commit_before><commit_msg>Add missing database constraints and defaults
The project table was missing a constraint on the external_id column.
The secret and container ACLs tables were missing a default value of
True for the project_access column.
Change-Id: I5bd4bc9bca830a37d841434c07a80ce857b676bb<commit_after>
|
"""Add missing constraints
Revision ID: 39a96e67e990
Revises: 4ecde3a3a72a
Create Date: 2016-01-26 13:18:06.113621
"""
# revision identifiers, used by Alembic.
revision = '39a96e67e990'
down_revision = '4ecde3a3a72a'
from alembic import op
import sqlalchemy as sa
def upgrade():
# Add missing projects table keystone_id uniqueness constraint.
op.create_unique_constraint(
'uc_projects_external_ids', 'projects', ['external_id'])
# Add missing default for secret_acls' project_access.
op.alter_column('secret_acls', 'project_access',
server_default=sa.sql.expression.true(),
existing_type=sa.Boolean,
existing_server_default=None,
existing_nullable=False)
# Add missing default for container_acls' project_access.
op.alter_column('container_acls', 'project_access',
server_default=sa.sql.expression.true(),
existing_type=sa.Boolean,
existing_server_default=None,
existing_nullable=False)
|
Add missing database constraints and defaults
The project table was missing a constraint on the external_id column.
The secret and container ACLs tables were missing a default value of
True for the project_access column.
Change-Id: I5bd4bc9bca830a37d841434c07a80ce857b676bb"""Add missing constraints
Revision ID: 39a96e67e990
Revises: 4ecde3a3a72a
Create Date: 2016-01-26 13:18:06.113621
"""
# revision identifiers, used by Alembic.
revision = '39a96e67e990'
down_revision = '4ecde3a3a72a'
from alembic import op
import sqlalchemy as sa
def upgrade():
# Add missing projects table keystone_id uniqueness constraint.
op.create_unique_constraint(
'uc_projects_external_ids', 'projects', ['external_id'])
# Add missing default for secret_acls' project_access.
op.alter_column('secret_acls', 'project_access',
server_default=sa.sql.expression.true(),
existing_type=sa.Boolean,
existing_server_default=None,
existing_nullable=False)
# Add missing default for container_acls' project_access.
op.alter_column('container_acls', 'project_access',
server_default=sa.sql.expression.true(),
existing_type=sa.Boolean,
existing_server_default=None,
existing_nullable=False)
|
<commit_before><commit_msg>Add missing database constraints and defaults
The project table was missing a constraint on the external_id column.
The secret and container ACLs tables were missing a default value of
True for the project_access column.
Change-Id: I5bd4bc9bca830a37d841434c07a80ce857b676bb<commit_after>"""Add missing constraints
Revision ID: 39a96e67e990
Revises: 4ecde3a3a72a
Create Date: 2016-01-26 13:18:06.113621
"""
# revision identifiers, used by Alembic.
revision = '39a96e67e990'
down_revision = '4ecde3a3a72a'
from alembic import op
import sqlalchemy as sa
def upgrade():
# Add missing projects table keystone_id uniqueness constraint.
op.create_unique_constraint(
'uc_projects_external_ids', 'projects', ['external_id'])
# Add missing default for secret_acls' project_access.
op.alter_column('secret_acls', 'project_access',
server_default=sa.sql.expression.true(),
existing_type=sa.Boolean,
existing_server_default=None,
existing_nullable=False)
# Add missing default for container_acls' project_access.
op.alter_column('container_acls', 'project_access',
server_default=sa.sql.expression.true(),
existing_type=sa.Boolean,
existing_server_default=None,
existing_nullable=False)
|
|
35f7331a7086fe1598667ca4907feb7caca975a4
|
py/arranging-coins.py
|
py/arranging-coins.py
|
class Solution(object):
def arrangeCoins(self, n):
"""
:type n: int
:rtype: int
"""
return int((-1 + int((1 + 8 * n + .5) ** .5)) / 2)
|
Add py solution for 441. Arranging Coins
|
Add py solution for 441. Arranging Coins
441. Arranging Coins: https://leetcode.com/problems/arranging-coins/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 441. Arranging Coins
441. Arranging Coins: https://leetcode.com/problems/arranging-coins/
|
class Solution(object):
def arrangeCoins(self, n):
"""
:type n: int
:rtype: int
"""
return int((-1 + int((1 + 8 * n + .5) ** .5)) / 2)
|
<commit_before><commit_msg>Add py solution for 441. Arranging Coins
441. Arranging Coins: https://leetcode.com/problems/arranging-coins/<commit_after>
|
class Solution(object):
def arrangeCoins(self, n):
"""
:type n: int
:rtype: int
"""
return int((-1 + int((1 + 8 * n + .5) ** .5)) / 2)
|
Add py solution for 441. Arranging Coins
441. Arranging Coins: https://leetcode.com/problems/arranging-coins/class Solution(object):
def arrangeCoins(self, n):
"""
:type n: int
:rtype: int
"""
return int((-1 + int((1 + 8 * n + .5) ** .5)) / 2)
|
<commit_before><commit_msg>Add py solution for 441. Arranging Coins
441. Arranging Coins: https://leetcode.com/problems/arranging-coins/<commit_after>class Solution(object):
def arrangeCoins(self, n):
"""
:type n: int
:rtype: int
"""
return int((-1 + int((1 + 8 * n + .5) ** .5)) / 2)
|
|
45e78bd7cbfa93a96255df9b5144ce25a609eb9b
|
py/shopping-offers.py
|
py/shopping-offers.py
|
from collections import Counter
from operator import mul
class Solution(object):
def shoppingOffers(self, price, special, needs):
"""
:type price: List[int]
:type special: List[List[int]]
:type needs: List[int]
:rtype: int
"""
c = Counter()
c[tuple(needs)] = 0
q = [(tuple(needs), 0)]
min_offer = 0
for need, depth in q:
if depth < len(special):
sp, p = special[depth][:-1], special[depth][-1]
m = c[need]
max_group = None
q.append((need, depth + 1))
for nv, sv in zip(need, sp):
if nv < sv:
break
if sv > 0:
if max_group is None or nv / sv < max_group:
max_group = nv / sv
else:
for i in xrange(1, max_group + 1):
s_need = list(need)
orig = 0
for j, s in enumerate(sp):
s_need[j] -= s * i
orig += price[j] * s * i
used = p * i - orig
if c[tuple(s_need)] > m + used:
c[tuple(s_need)] = m + used
q.append((tuple(s_need), 1))
min_offer = min(min_offer, m + used)
return sum(map(mul, price, needs)) + min_offer
print Solution().shoppingOffers([2,3,4], [[1,1,0,4],[2,2,1,9]], [1,2,1])
|
Add py solution for 638. Shopping Offers
|
Add py solution for 638. Shopping Offers
638. Shopping Offers: https://leetcode.com/problems/shopping-offers/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 638. Shopping Offers
638. Shopping Offers: https://leetcode.com/problems/shopping-offers/
|
from collections import Counter
from operator import mul
class Solution(object):
def shoppingOffers(self, price, special, needs):
"""
:type price: List[int]
:type special: List[List[int]]
:type needs: List[int]
:rtype: int
"""
c = Counter()
c[tuple(needs)] = 0
q = [(tuple(needs), 0)]
min_offer = 0
for need, depth in q:
if depth < len(special):
sp, p = special[depth][:-1], special[depth][-1]
m = c[need]
max_group = None
q.append((need, depth + 1))
for nv, sv in zip(need, sp):
if nv < sv:
break
if sv > 0:
if max_group is None or nv / sv < max_group:
max_group = nv / sv
else:
for i in xrange(1, max_group + 1):
s_need = list(need)
orig = 0
for j, s in enumerate(sp):
s_need[j] -= s * i
orig += price[j] * s * i
used = p * i - orig
if c[tuple(s_need)] > m + used:
c[tuple(s_need)] = m + used
q.append((tuple(s_need), 1))
min_offer = min(min_offer, m + used)
return sum(map(mul, price, needs)) + min_offer
print Solution().shoppingOffers([2,3,4], [[1,1,0,4],[2,2,1,9]], [1,2,1])
|
<commit_before><commit_msg>Add py solution for 638. Shopping Offers
638. Shopping Offers: https://leetcode.com/problems/shopping-offers/<commit_after>
|
from collections import Counter
from operator import mul
class Solution(object):
def shoppingOffers(self, price, special, needs):
"""
:type price: List[int]
:type special: List[List[int]]
:type needs: List[int]
:rtype: int
"""
c = Counter()
c[tuple(needs)] = 0
q = [(tuple(needs), 0)]
min_offer = 0
for need, depth in q:
if depth < len(special):
sp, p = special[depth][:-1], special[depth][-1]
m = c[need]
max_group = None
q.append((need, depth + 1))
for nv, sv in zip(need, sp):
if nv < sv:
break
if sv > 0:
if max_group is None or nv / sv < max_group:
max_group = nv / sv
else:
for i in xrange(1, max_group + 1):
s_need = list(need)
orig = 0
for j, s in enumerate(sp):
s_need[j] -= s * i
orig += price[j] * s * i
used = p * i - orig
if c[tuple(s_need)] > m + used:
c[tuple(s_need)] = m + used
q.append((tuple(s_need), 1))
min_offer = min(min_offer, m + used)
return sum(map(mul, price, needs)) + min_offer
print Solution().shoppingOffers([2,3,4], [[1,1,0,4],[2,2,1,9]], [1,2,1])
|
Add py solution for 638. Shopping Offers
638. Shopping Offers: https://leetcode.com/problems/shopping-offers/from collections import Counter
from operator import mul
class Solution(object):
def shoppingOffers(self, price, special, needs):
"""
:type price: List[int]
:type special: List[List[int]]
:type needs: List[int]
:rtype: int
"""
c = Counter()
c[tuple(needs)] = 0
q = [(tuple(needs), 0)]
min_offer = 0
for need, depth in q:
if depth < len(special):
sp, p = special[depth][:-1], special[depth][-1]
m = c[need]
max_group = None
q.append((need, depth + 1))
for nv, sv in zip(need, sp):
if nv < sv:
break
if sv > 0:
if max_group is None or nv / sv < max_group:
max_group = nv / sv
else:
for i in xrange(1, max_group + 1):
s_need = list(need)
orig = 0
for j, s in enumerate(sp):
s_need[j] -= s * i
orig += price[j] * s * i
used = p * i - orig
if c[tuple(s_need)] > m + used:
c[tuple(s_need)] = m + used
q.append((tuple(s_need), 1))
min_offer = min(min_offer, m + used)
return sum(map(mul, price, needs)) + min_offer
print Solution().shoppingOffers([2,3,4], [[1,1,0,4],[2,2,1,9]], [1,2,1])
|
<commit_before><commit_msg>Add py solution for 638. Shopping Offers
638. Shopping Offers: https://leetcode.com/problems/shopping-offers/<commit_after>from collections import Counter
from operator import mul
class Solution(object):
def shoppingOffers(self, price, special, needs):
"""
:type price: List[int]
:type special: List[List[int]]
:type needs: List[int]
:rtype: int
"""
c = Counter()
c[tuple(needs)] = 0
q = [(tuple(needs), 0)]
min_offer = 0
for need, depth in q:
if depth < len(special):
sp, p = special[depth][:-1], special[depth][-1]
m = c[need]
max_group = None
q.append((need, depth + 1))
for nv, sv in zip(need, sp):
if nv < sv:
break
if sv > 0:
if max_group is None or nv / sv < max_group:
max_group = nv / sv
else:
for i in xrange(1, max_group + 1):
s_need = list(need)
orig = 0
for j, s in enumerate(sp):
s_need[j] -= s * i
orig += price[j] * s * i
used = p * i - orig
if c[tuple(s_need)] > m + used:
c[tuple(s_need)] = m + used
q.append((tuple(s_need), 1))
min_offer = min(min_offer, m + used)
return sum(map(mul, price, needs)) + min_offer
print Solution().shoppingOffers([2,3,4], [[1,1,0,4],[2,2,1,9]], [1,2,1])
|
|
a642619fa9b8b0ef7bb7dd66ecacf2d175494acd
|
tests/unit/returners/test_pgjsonb.py
|
tests/unit/returners/test_pgjsonb.py
|
# -*- coding: utf-8 -*-
'''
tests.unit.returners.pgjsonb_test
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Unit tests for the PGJsonb returner (pgjsonb).
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import Salt Testing libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
MagicMock,
NO_MOCK,
NO_MOCK_REASON,
patch
)
# Import Salt libs
import salt.returners.pgjsonb as pgjsonb
log = logging.getLogger(__name__)
@skipIf(NO_MOCK, NO_MOCK_REASON)
class PGJsonbCleanOldJobsTestCase(TestCase, LoaderModuleMockMixin):
'''
Tests for the local_cache.clean_old_jobs function.
'''
def setup_loader_modules(self):
return {pgjsonb: {'__opts__': {'keep_jobs': 1, 'archive_jobs': 0}}}
def test_clean_old_jobs_purge(self):
'''
Tests that the function returns None when no jid_root is found.
'''
connect_mock = MagicMock()
with patch.object(pgjsonb, '_get_serv', connect_mock):
with patch.dict(pgjsonb.__salt__, {'config.option': MagicMock()}):
self.assertEqual(pgjsonb.clean_old_jobs(), None)
def test_clean_old_jobs_archive(self):
'''
Tests that the function returns None when no jid_root is found.
'''
connect_mock = MagicMock()
with patch.object(pgjsonb, '_get_serv', connect_mock):
with patch.dict(pgjsonb.__salt__, {'config.option': MagicMock()}):
with patch.dict(pgjsonb.__opts__, {'archive_jobs': 1}):
self.assertEqual(pgjsonb.clean_old_jobs(), None)
|
Rename test pgjsonb returner test file to match naming convention
|
Rename test pgjsonb returner test file to match naming convention
|
Python
|
apache-2.0
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
Rename test pgjsonb returner test file to match naming convention
|
# -*- coding: utf-8 -*-
'''
tests.unit.returners.pgjsonb_test
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Unit tests for the PGJsonb returner (pgjsonb).
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import Salt Testing libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
MagicMock,
NO_MOCK,
NO_MOCK_REASON,
patch
)
# Import Salt libs
import salt.returners.pgjsonb as pgjsonb
log = logging.getLogger(__name__)
@skipIf(NO_MOCK, NO_MOCK_REASON)
class PGJsonbCleanOldJobsTestCase(TestCase, LoaderModuleMockMixin):
'''
Tests for the local_cache.clean_old_jobs function.
'''
def setup_loader_modules(self):
return {pgjsonb: {'__opts__': {'keep_jobs': 1, 'archive_jobs': 0}}}
def test_clean_old_jobs_purge(self):
'''
Tests that the function returns None when no jid_root is found.
'''
connect_mock = MagicMock()
with patch.object(pgjsonb, '_get_serv', connect_mock):
with patch.dict(pgjsonb.__salt__, {'config.option': MagicMock()}):
self.assertEqual(pgjsonb.clean_old_jobs(), None)
def test_clean_old_jobs_archive(self):
'''
Tests that the function returns None when no jid_root is found.
'''
connect_mock = MagicMock()
with patch.object(pgjsonb, '_get_serv', connect_mock):
with patch.dict(pgjsonb.__salt__, {'config.option': MagicMock()}):
with patch.dict(pgjsonb.__opts__, {'archive_jobs': 1}):
self.assertEqual(pgjsonb.clean_old_jobs(), None)
|
<commit_before><commit_msg>Rename test pgjsonb returner test file to match naming convention<commit_after>
|
# -*- coding: utf-8 -*-
'''
tests.unit.returners.pgjsonb_test
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Unit tests for the PGJsonb returner (pgjsonb).
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import Salt Testing libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
MagicMock,
NO_MOCK,
NO_MOCK_REASON,
patch
)
# Import Salt libs
import salt.returners.pgjsonb as pgjsonb
log = logging.getLogger(__name__)
@skipIf(NO_MOCK, NO_MOCK_REASON)
class PGJsonbCleanOldJobsTestCase(TestCase, LoaderModuleMockMixin):
'''
Tests for the local_cache.clean_old_jobs function.
'''
def setup_loader_modules(self):
return {pgjsonb: {'__opts__': {'keep_jobs': 1, 'archive_jobs': 0}}}
def test_clean_old_jobs_purge(self):
'''
Tests that the function returns None when no jid_root is found.
'''
connect_mock = MagicMock()
with patch.object(pgjsonb, '_get_serv', connect_mock):
with patch.dict(pgjsonb.__salt__, {'config.option': MagicMock()}):
self.assertEqual(pgjsonb.clean_old_jobs(), None)
def test_clean_old_jobs_archive(self):
'''
Tests that the function returns None when no jid_root is found.
'''
connect_mock = MagicMock()
with patch.object(pgjsonb, '_get_serv', connect_mock):
with patch.dict(pgjsonb.__salt__, {'config.option': MagicMock()}):
with patch.dict(pgjsonb.__opts__, {'archive_jobs': 1}):
self.assertEqual(pgjsonb.clean_old_jobs(), None)
|
Rename test pgjsonb returner test file to match naming convention# -*- coding: utf-8 -*-
'''
tests.unit.returners.pgjsonb_test
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Unit tests for the PGJsonb returner (pgjsonb).
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import Salt Testing libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
MagicMock,
NO_MOCK,
NO_MOCK_REASON,
patch
)
# Import Salt libs
import salt.returners.pgjsonb as pgjsonb
log = logging.getLogger(__name__)
@skipIf(NO_MOCK, NO_MOCK_REASON)
class PGJsonbCleanOldJobsTestCase(TestCase, LoaderModuleMockMixin):
'''
Tests for the local_cache.clean_old_jobs function.
'''
def setup_loader_modules(self):
return {pgjsonb: {'__opts__': {'keep_jobs': 1, 'archive_jobs': 0}}}
def test_clean_old_jobs_purge(self):
'''
Tests that the function returns None when no jid_root is found.
'''
connect_mock = MagicMock()
with patch.object(pgjsonb, '_get_serv', connect_mock):
with patch.dict(pgjsonb.__salt__, {'config.option': MagicMock()}):
self.assertEqual(pgjsonb.clean_old_jobs(), None)
def test_clean_old_jobs_archive(self):
'''
Tests that the function returns None when no jid_root is found.
'''
connect_mock = MagicMock()
with patch.object(pgjsonb, '_get_serv', connect_mock):
with patch.dict(pgjsonb.__salt__, {'config.option': MagicMock()}):
with patch.dict(pgjsonb.__opts__, {'archive_jobs': 1}):
self.assertEqual(pgjsonb.clean_old_jobs(), None)
|
<commit_before><commit_msg>Rename test pgjsonb returner test file to match naming convention<commit_after># -*- coding: utf-8 -*-
'''
tests.unit.returners.pgjsonb_test
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Unit tests for the PGJsonb returner (pgjsonb).
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import Salt Testing libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
MagicMock,
NO_MOCK,
NO_MOCK_REASON,
patch
)
# Import Salt libs
import salt.returners.pgjsonb as pgjsonb
log = logging.getLogger(__name__)
@skipIf(NO_MOCK, NO_MOCK_REASON)
class PGJsonbCleanOldJobsTestCase(TestCase, LoaderModuleMockMixin):
'''
Tests for the local_cache.clean_old_jobs function.
'''
def setup_loader_modules(self):
return {pgjsonb: {'__opts__': {'keep_jobs': 1, 'archive_jobs': 0}}}
def test_clean_old_jobs_purge(self):
'''
Tests that the function returns None when no jid_root is found.
'''
connect_mock = MagicMock()
with patch.object(pgjsonb, '_get_serv', connect_mock):
with patch.dict(pgjsonb.__salt__, {'config.option': MagicMock()}):
self.assertEqual(pgjsonb.clean_old_jobs(), None)
def test_clean_old_jobs_archive(self):
'''
Tests that the function returns None when no jid_root is found.
'''
connect_mock = MagicMock()
with patch.object(pgjsonb, '_get_serv', connect_mock):
with patch.dict(pgjsonb.__salt__, {'config.option': MagicMock()}):
with patch.dict(pgjsonb.__opts__, {'archive_jobs': 1}):
self.assertEqual(pgjsonb.clean_old_jobs(), None)
|
|
1c01ff6e4510abcb7a69fe61e1a5b8ef17818529
|
demo/chaco_demo.py
|
demo/chaco_demo.py
|
# Copyright (c) 2015, Warren Weckesser. All rights reserved.
# This software is licensed according to the "BSD 2-clause" license.
#
# Use chaco to display the eye diagram computed by eyediagram.grid_count.
import numpy as np
# ETS imports...
from traits.api import HasTraits, Instance
from traitsui.api import Item, Group, View
from enable.api import Component, ComponentEditor
from chaco.api import ArrayPlotData, cool, Plot, PlotGrid
from chaco.tools.api import PanTool, ZoomTool
from eyediagram.demo_data import demo_data
from eyediagram.core import grid_count
def _create_plot_component():
# Generate some data for the eye diagram.
num_samples = 5000
samples_per_symbol = 24
y = demo_data(num_samples, samples_per_symbol)
# Compute the eye diagram array.
ybounds = (-0.25, 1.25)
grid = grid_count(y, 2*samples_per_symbol, offset=16, size=(480, 480),
bounds=ybounds).T
# Convert the array to floating point, and replace 0 with np.nan.
# These points will be transparent in the image plot.
grid = grid.astype(np.float32)
grid[grid == 0] = np.nan
#---------------------------------------------------------------------
# The rest of the function creates the chaco image plot.
pd = ArrayPlotData()
pd.set_data("eyediagram", grid)
plot = Plot(pd)
img_plot = plot.img_plot("eyediagram",
xbounds=(0, 2),
ybounds=ybounds,
bgcolor=(0, 0, 0),
colormap=cool)[0]
# Tweak some of the plot properties
plot.title = "Eye Diagram"
plot.padding = 50
# Axis grids
vgrid = PlotGrid(component=plot, mapper=plot.index_mapper,
orientation='vertical',
line_color='gray', line_style='dot')
hgrid = PlotGrid(component=plot, mapper=plot.value_mapper,
orientation='horizontal',
line_color='gray', line_style='dot')
plot.underlays.append(vgrid)
plot.underlays.append(hgrid)
# Add pan and zoom tools.
plot.tools.append(PanTool(plot))
zoom = ZoomTool(component=img_plot, tool_mode="box", always_on=False)
img_plot.overlays.append(zoom)
return plot
class EyeDiagramDemo(HasTraits):
plot = Instance(Component)
traits_view = \
View(
Group(
Item('plot', editor=ComponentEditor(size=(700, 500)),
show_label=False),
orientation="vertical",
),
resizable=True, title="Eye Diagram",
)
def _plot_default(self):
return _create_plot_component()
if __name__ == "__main__":
demo = EyeDiagramDemo()
demo.configure_traits()
|
Add a demo that uses chaco to plot the eye diagram.
|
ENH: Add a demo that uses chaco to plot the eye diagram.
|
Python
|
bsd-2-clause
|
WarrenWeckesser/eyediagram
|
ENH: Add a demo that uses chaco to plot the eye diagram.
|
# Copyright (c) 2015, Warren Weckesser. All rights reserved.
# This software is licensed according to the "BSD 2-clause" license.
#
# Use chaco to display the eye diagram computed by eyediagram.grid_count.
import numpy as np
# ETS imports...
from traits.api import HasTraits, Instance
from traitsui.api import Item, Group, View
from enable.api import Component, ComponentEditor
from chaco.api import ArrayPlotData, cool, Plot, PlotGrid
from chaco.tools.api import PanTool, ZoomTool
from eyediagram.demo_data import demo_data
from eyediagram.core import grid_count
def _create_plot_component():
# Generate some data for the eye diagram.
num_samples = 5000
samples_per_symbol = 24
y = demo_data(num_samples, samples_per_symbol)
# Compute the eye diagram array.
ybounds = (-0.25, 1.25)
grid = grid_count(y, 2*samples_per_symbol, offset=16, size=(480, 480),
bounds=ybounds).T
# Convert the array to floating point, and replace 0 with np.nan.
# These points will be transparent in the image plot.
grid = grid.astype(np.float32)
grid[grid == 0] = np.nan
#---------------------------------------------------------------------
# The rest of the function creates the chaco image plot.
pd = ArrayPlotData()
pd.set_data("eyediagram", grid)
plot = Plot(pd)
img_plot = plot.img_plot("eyediagram",
xbounds=(0, 2),
ybounds=ybounds,
bgcolor=(0, 0, 0),
colormap=cool)[0]
# Tweak some of the plot properties
plot.title = "Eye Diagram"
plot.padding = 50
# Axis grids
vgrid = PlotGrid(component=plot, mapper=plot.index_mapper,
orientation='vertical',
line_color='gray', line_style='dot')
hgrid = PlotGrid(component=plot, mapper=plot.value_mapper,
orientation='horizontal',
line_color='gray', line_style='dot')
plot.underlays.append(vgrid)
plot.underlays.append(hgrid)
# Add pan and zoom tools.
plot.tools.append(PanTool(plot))
zoom = ZoomTool(component=img_plot, tool_mode="box", always_on=False)
img_plot.overlays.append(zoom)
return plot
class EyeDiagramDemo(HasTraits):
plot = Instance(Component)
traits_view = \
View(
Group(
Item('plot', editor=ComponentEditor(size=(700, 500)),
show_label=False),
orientation="vertical",
),
resizable=True, title="Eye Diagram",
)
def _plot_default(self):
return _create_plot_component()
if __name__ == "__main__":
demo = EyeDiagramDemo()
demo.configure_traits()
|
<commit_before><commit_msg>ENH: Add a demo that uses chaco to plot the eye diagram.<commit_after>
|
# Copyright (c) 2015, Warren Weckesser. All rights reserved.
# This software is licensed according to the "BSD 2-clause" license.
#
# Use chaco to display the eye diagram computed by eyediagram.grid_count.
import numpy as np
# ETS imports...
from traits.api import HasTraits, Instance
from traitsui.api import Item, Group, View
from enable.api import Component, ComponentEditor
from chaco.api import ArrayPlotData, cool, Plot, PlotGrid
from chaco.tools.api import PanTool, ZoomTool
from eyediagram.demo_data import demo_data
from eyediagram.core import grid_count
def _create_plot_component():
# Generate some data for the eye diagram.
num_samples = 5000
samples_per_symbol = 24
y = demo_data(num_samples, samples_per_symbol)
# Compute the eye diagram array.
ybounds = (-0.25, 1.25)
grid = grid_count(y, 2*samples_per_symbol, offset=16, size=(480, 480),
bounds=ybounds).T
# Convert the array to floating point, and replace 0 with np.nan.
# These points will be transparent in the image plot.
grid = grid.astype(np.float32)
grid[grid == 0] = np.nan
#---------------------------------------------------------------------
# The rest of the function creates the chaco image plot.
pd = ArrayPlotData()
pd.set_data("eyediagram", grid)
plot = Plot(pd)
img_plot = plot.img_plot("eyediagram",
xbounds=(0, 2),
ybounds=ybounds,
bgcolor=(0, 0, 0),
colormap=cool)[0]
# Tweak some of the plot properties
plot.title = "Eye Diagram"
plot.padding = 50
# Axis grids
vgrid = PlotGrid(component=plot, mapper=plot.index_mapper,
orientation='vertical',
line_color='gray', line_style='dot')
hgrid = PlotGrid(component=plot, mapper=plot.value_mapper,
orientation='horizontal',
line_color='gray', line_style='dot')
plot.underlays.append(vgrid)
plot.underlays.append(hgrid)
# Add pan and zoom tools.
plot.tools.append(PanTool(plot))
zoom = ZoomTool(component=img_plot, tool_mode="box", always_on=False)
img_plot.overlays.append(zoom)
return plot
class EyeDiagramDemo(HasTraits):
plot = Instance(Component)
traits_view = \
View(
Group(
Item('plot', editor=ComponentEditor(size=(700, 500)),
show_label=False),
orientation="vertical",
),
resizable=True, title="Eye Diagram",
)
def _plot_default(self):
return _create_plot_component()
if __name__ == "__main__":
demo = EyeDiagramDemo()
demo.configure_traits()
|
ENH: Add a demo that uses chaco to plot the eye diagram.# Copyright (c) 2015, Warren Weckesser. All rights reserved.
# This software is licensed according to the "BSD 2-clause" license.
#
# Use chaco to display the eye diagram computed by eyediagram.grid_count.
import numpy as np
# ETS imports...
from traits.api import HasTraits, Instance
from traitsui.api import Item, Group, View
from enable.api import Component, ComponentEditor
from chaco.api import ArrayPlotData, cool, Plot, PlotGrid
from chaco.tools.api import PanTool, ZoomTool
from eyediagram.demo_data import demo_data
from eyediagram.core import grid_count
def _create_plot_component():
# Generate some data for the eye diagram.
num_samples = 5000
samples_per_symbol = 24
y = demo_data(num_samples, samples_per_symbol)
# Compute the eye diagram array.
ybounds = (-0.25, 1.25)
grid = grid_count(y, 2*samples_per_symbol, offset=16, size=(480, 480),
bounds=ybounds).T
# Convert the array to floating point, and replace 0 with np.nan.
# These points will be transparent in the image plot.
grid = grid.astype(np.float32)
grid[grid == 0] = np.nan
#---------------------------------------------------------------------
# The rest of the function creates the chaco image plot.
pd = ArrayPlotData()
pd.set_data("eyediagram", grid)
plot = Plot(pd)
img_plot = plot.img_plot("eyediagram",
xbounds=(0, 2),
ybounds=ybounds,
bgcolor=(0, 0, 0),
colormap=cool)[0]
# Tweak some of the plot properties
plot.title = "Eye Diagram"
plot.padding = 50
# Axis grids
vgrid = PlotGrid(component=plot, mapper=plot.index_mapper,
orientation='vertical',
line_color='gray', line_style='dot')
hgrid = PlotGrid(component=plot, mapper=plot.value_mapper,
orientation='horizontal',
line_color='gray', line_style='dot')
plot.underlays.append(vgrid)
plot.underlays.append(hgrid)
# Add pan and zoom tools.
plot.tools.append(PanTool(plot))
zoom = ZoomTool(component=img_plot, tool_mode="box", always_on=False)
img_plot.overlays.append(zoom)
return plot
class EyeDiagramDemo(HasTraits):
plot = Instance(Component)
traits_view = \
View(
Group(
Item('plot', editor=ComponentEditor(size=(700, 500)),
show_label=False),
orientation="vertical",
),
resizable=True, title="Eye Diagram",
)
def _plot_default(self):
return _create_plot_component()
if __name__ == "__main__":
demo = EyeDiagramDemo()
demo.configure_traits()
|
<commit_before><commit_msg>ENH: Add a demo that uses chaco to plot the eye diagram.<commit_after># Copyright (c) 2015, Warren Weckesser. All rights reserved.
# This software is licensed according to the "BSD 2-clause" license.
#
# Use chaco to display the eye diagram computed by eyediagram.grid_count.
import numpy as np
# ETS imports...
from traits.api import HasTraits, Instance
from traitsui.api import Item, Group, View
from enable.api import Component, ComponentEditor
from chaco.api import ArrayPlotData, cool, Plot, PlotGrid
from chaco.tools.api import PanTool, ZoomTool
from eyediagram.demo_data import demo_data
from eyediagram.core import grid_count
def _create_plot_component():
# Generate some data for the eye diagram.
num_samples = 5000
samples_per_symbol = 24
y = demo_data(num_samples, samples_per_symbol)
# Compute the eye diagram array.
ybounds = (-0.25, 1.25)
grid = grid_count(y, 2*samples_per_symbol, offset=16, size=(480, 480),
bounds=ybounds).T
# Convert the array to floating point, and replace 0 with np.nan.
# These points will be transparent in the image plot.
grid = grid.astype(np.float32)
grid[grid == 0] = np.nan
#---------------------------------------------------------------------
# The rest of the function creates the chaco image plot.
pd = ArrayPlotData()
pd.set_data("eyediagram", grid)
plot = Plot(pd)
img_plot = plot.img_plot("eyediagram",
xbounds=(0, 2),
ybounds=ybounds,
bgcolor=(0, 0, 0),
colormap=cool)[0]
# Tweak some of the plot properties
plot.title = "Eye Diagram"
plot.padding = 50
# Axis grids
vgrid = PlotGrid(component=plot, mapper=plot.index_mapper,
orientation='vertical',
line_color='gray', line_style='dot')
hgrid = PlotGrid(component=plot, mapper=plot.value_mapper,
orientation='horizontal',
line_color='gray', line_style='dot')
plot.underlays.append(vgrid)
plot.underlays.append(hgrid)
# Add pan and zoom tools.
plot.tools.append(PanTool(plot))
zoom = ZoomTool(component=img_plot, tool_mode="box", always_on=False)
img_plot.overlays.append(zoom)
return plot
class EyeDiagramDemo(HasTraits):
plot = Instance(Component)
traits_view = \
View(
Group(
Item('plot', editor=ComponentEditor(size=(700, 500)),
show_label=False),
orientation="vertical",
),
resizable=True, title="Eye Diagram",
)
def _plot_default(self):
return _create_plot_component()
if __name__ == "__main__":
demo = EyeDiagramDemo()
demo.configure_traits()
|
|
9b836c695ca6ab64031bdd3f55f42ee9fc7bbb20
|
tests/curious_tests/test_api_batch.py
|
tests/curious_tests/test_api_batch.py
|
import json
from django.test import TestCase
from django.db import connection
from curious import model_registry
from curious_tests.models import Blog, Entry
import curious_tests.models
class TestBatchFetch(TestCase):
N = 20
def setUp(self):
blog = Blog(name='Databases')
blog.save()
self.blog = blog
headlines = ['MySQL is a relational DB']*TestBatchFetch.N
self.entries = [Entry(headline=headline, blog=blog) for i, headline in enumerate(headlines)]
for entry in self.entries:
entry.save()
# register model
if len(model_registry.model_names) == 0:
model_registry.register(curious_tests.models)
model_registry.add_custom_rel('Blog', 'authors')
def test_fetch_objects_and_related_objects(self):
data = dict(ids=[e.id for e in self.entries])
r = self.client.post('/curious/models/Entry/', data=json.dumps(data), content_type='application/json')
self.assertEquals(r.status_code, 200)
results = json.loads(r.content)['result']
self.assertEquals(results['fields'], ["id", "blog_id", "headline"])
self.assertItemsEqual(results['objects'],
[[e.id, [self.blog.__class__.__name__, self.blog.pk, self.blog.name, None], e.headline]
for e in self.entries])
|
Add test for batch fetch object API
|
Add test for batch fetch object API
|
Python
|
mit
|
ginkgobioworks/curious,benjiec/curious,ginkgobioworks/curious,benjiec/curious,benjiec/curious,ginkgobioworks/curious
|
Add test for batch fetch object API
|
import json
from django.test import TestCase
from django.db import connection
from curious import model_registry
from curious_tests.models import Blog, Entry
import curious_tests.models
class TestBatchFetch(TestCase):
N = 20
def setUp(self):
blog = Blog(name='Databases')
blog.save()
self.blog = blog
headlines = ['MySQL is a relational DB']*TestBatchFetch.N
self.entries = [Entry(headline=headline, blog=blog) for i, headline in enumerate(headlines)]
for entry in self.entries:
entry.save()
# register model
if len(model_registry.model_names) == 0:
model_registry.register(curious_tests.models)
model_registry.add_custom_rel('Blog', 'authors')
def test_fetch_objects_and_related_objects(self):
data = dict(ids=[e.id for e in self.entries])
r = self.client.post('/curious/models/Entry/', data=json.dumps(data), content_type='application/json')
self.assertEquals(r.status_code, 200)
results = json.loads(r.content)['result']
self.assertEquals(results['fields'], ["id", "blog_id", "headline"])
self.assertItemsEqual(results['objects'],
[[e.id, [self.blog.__class__.__name__, self.blog.pk, self.blog.name, None], e.headline]
for e in self.entries])
|
<commit_before><commit_msg>Add test for batch fetch object API<commit_after>
|
import json
from django.test import TestCase
from django.db import connection
from curious import model_registry
from curious_tests.models import Blog, Entry
import curious_tests.models
class TestBatchFetch(TestCase):
N = 20
def setUp(self):
blog = Blog(name='Databases')
blog.save()
self.blog = blog
headlines = ['MySQL is a relational DB']*TestBatchFetch.N
self.entries = [Entry(headline=headline, blog=blog) for i, headline in enumerate(headlines)]
for entry in self.entries:
entry.save()
# register model
if len(model_registry.model_names) == 0:
model_registry.register(curious_tests.models)
model_registry.add_custom_rel('Blog', 'authors')
def test_fetch_objects_and_related_objects(self):
data = dict(ids=[e.id for e in self.entries])
r = self.client.post('/curious/models/Entry/', data=json.dumps(data), content_type='application/json')
self.assertEquals(r.status_code, 200)
results = json.loads(r.content)['result']
self.assertEquals(results['fields'], ["id", "blog_id", "headline"])
self.assertItemsEqual(results['objects'],
[[e.id, [self.blog.__class__.__name__, self.blog.pk, self.blog.name, None], e.headline]
for e in self.entries])
|
Add test for batch fetch object APIimport json
from django.test import TestCase
from django.db import connection
from curious import model_registry
from curious_tests.models import Blog, Entry
import curious_tests.models
class TestBatchFetch(TestCase):
N = 20
def setUp(self):
blog = Blog(name='Databases')
blog.save()
self.blog = blog
headlines = ['MySQL is a relational DB']*TestBatchFetch.N
self.entries = [Entry(headline=headline, blog=blog) for i, headline in enumerate(headlines)]
for entry in self.entries:
entry.save()
# register model
if len(model_registry.model_names) == 0:
model_registry.register(curious_tests.models)
model_registry.add_custom_rel('Blog', 'authors')
def test_fetch_objects_and_related_objects(self):
data = dict(ids=[e.id for e in self.entries])
r = self.client.post('/curious/models/Entry/', data=json.dumps(data), content_type='application/json')
self.assertEquals(r.status_code, 200)
results = json.loads(r.content)['result']
self.assertEquals(results['fields'], ["id", "blog_id", "headline"])
self.assertItemsEqual(results['objects'],
[[e.id, [self.blog.__class__.__name__, self.blog.pk, self.blog.name, None], e.headline]
for e in self.entries])
|
<commit_before><commit_msg>Add test for batch fetch object API<commit_after>import json
from django.test import TestCase
from django.db import connection
from curious import model_registry
from curious_tests.models import Blog, Entry
import curious_tests.models
class TestBatchFetch(TestCase):
N = 20
def setUp(self):
blog = Blog(name='Databases')
blog.save()
self.blog = blog
headlines = ['MySQL is a relational DB']*TestBatchFetch.N
self.entries = [Entry(headline=headline, blog=blog) for i, headline in enumerate(headlines)]
for entry in self.entries:
entry.save()
# register model
if len(model_registry.model_names) == 0:
model_registry.register(curious_tests.models)
model_registry.add_custom_rel('Blog', 'authors')
def test_fetch_objects_and_related_objects(self):
data = dict(ids=[e.id for e in self.entries])
r = self.client.post('/curious/models/Entry/', data=json.dumps(data), content_type='application/json')
self.assertEquals(r.status_code, 200)
results = json.loads(r.content)['result']
self.assertEquals(results['fields'], ["id", "blog_id", "headline"])
self.assertItemsEqual(results['objects'],
[[e.id, [self.blog.__class__.__name__, self.blog.pk, self.blog.name, None], e.headline]
for e in self.entries])
|
|
480fda37fd552a756a4a0feaf061cc0794e33e65
|
list.py
|
list.py
|
#!/usr/bin/python
import os
import json
import pprint
DATA_DIR = "data"
def load():
contents = {}
for filename in os.listdir(DATA_DIR):
if filename.startswith("."): continue
path = os.path.join(DATA_DIR, filename)
content = {}
for line in file(path).readlines():
tokens = line.replace("\n", "").split("\t")
kind, id, key, value = tokens
# how I miss autovivification..
if not content.has_key(kind):
content[kind] = {}
if not content[kind].has_key(id):
content[kind][id] = {}
content[kind][id][key] = value
contents[filename] = content
return contents
def main():
content = load()
print json.dumps(content, sort_keys=True, indent=1)
if __name__ == "__main__":
main()
|
Load analyzed mod content .csvs as json
|
Load analyzed mod content .csvs as json
|
Python
|
bsd-3-clause
|
agaricusb/ModAnalyzer,agaricusb/ModAnalyzer
|
Load analyzed mod content .csvs as json
|
#!/usr/bin/python
import os
import json
import pprint
DATA_DIR = "data"
def load():
contents = {}
for filename in os.listdir(DATA_DIR):
if filename.startswith("."): continue
path = os.path.join(DATA_DIR, filename)
content = {}
for line in file(path).readlines():
tokens = line.replace("\n", "").split("\t")
kind, id, key, value = tokens
# how I miss autovivification..
if not content.has_key(kind):
content[kind] = {}
if not content[kind].has_key(id):
content[kind][id] = {}
content[kind][id][key] = value
contents[filename] = content
return contents
def main():
content = load()
print json.dumps(content, sort_keys=True, indent=1)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Load analyzed mod content .csvs as json<commit_after>
|
#!/usr/bin/python
import os
import json
import pprint
DATA_DIR = "data"
def load():
contents = {}
for filename in os.listdir(DATA_DIR):
if filename.startswith("."): continue
path = os.path.join(DATA_DIR, filename)
content = {}
for line in file(path).readlines():
tokens = line.replace("\n", "").split("\t")
kind, id, key, value = tokens
# how I miss autovivification..
if not content.has_key(kind):
content[kind] = {}
if not content[kind].has_key(id):
content[kind][id] = {}
content[kind][id][key] = value
contents[filename] = content
return contents
def main():
content = load()
print json.dumps(content, sort_keys=True, indent=1)
if __name__ == "__main__":
main()
|
Load analyzed mod content .csvs as json#!/usr/bin/python
import os
import json
import pprint
DATA_DIR = "data"
def load():
contents = {}
for filename in os.listdir(DATA_DIR):
if filename.startswith("."): continue
path = os.path.join(DATA_DIR, filename)
content = {}
for line in file(path).readlines():
tokens = line.replace("\n", "").split("\t")
kind, id, key, value = tokens
# how I miss autovivification..
if not content.has_key(kind):
content[kind] = {}
if not content[kind].has_key(id):
content[kind][id] = {}
content[kind][id][key] = value
contents[filename] = content
return contents
def main():
content = load()
print json.dumps(content, sort_keys=True, indent=1)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Load analyzed mod content .csvs as json<commit_after>#!/usr/bin/python
import os
import json
import pprint
DATA_DIR = "data"
def load():
contents = {}
for filename in os.listdir(DATA_DIR):
if filename.startswith("."): continue
path = os.path.join(DATA_DIR, filename)
content = {}
for line in file(path).readlines():
tokens = line.replace("\n", "").split("\t")
kind, id, key, value = tokens
# how I miss autovivification..
if not content.has_key(kind):
content[kind] = {}
if not content[kind].has_key(id):
content[kind][id] = {}
content[kind][id][key] = value
contents[filename] = content
return contents
def main():
content = load()
print json.dumps(content, sort_keys=True, indent=1)
if __name__ == "__main__":
main()
|
|
6e9fada3539abca49579340e20b06fc19554e2a6
|
sc2reader/__init__.py
|
sc2reader/__init__.py
|
from __future__ import absolute_import
from sc2reader import factories, log_utils
# setup the library logging
log_utils.setup()
# For backwards compatibility
SC2Reader = factories.SC2Factory
# Expose a nice module level interface
__defaultSC2Reader = factories.SC2Factory()
register_datapack = __defaultSC2Reader.register_datapack
register_listener = __defaultSC2Reader.register_listener
register_reader = __defaultSC2Reader.register_reader
get_listeners = __defaultSC2Reader.get_listeners
get_datapack = __defaultSC2Reader.get_datapack
get_reader = __defaultSC2Reader.get_reader
load_replays = __defaultSC2Reader.load_replays
load_replay = __defaultSC2Reader.load_replay
load_maps = __defaultSC2Reader.load_maps
load_map = __defaultSC2Reader.load_map
configure = __defaultSC2Reader.configure
reset = __defaultSC2Reader.reset
|
from __future__ import absolute_import
# import submodules
from sc2reader import listeners, data, scripts, processors
from sc2reader import factories, log_utils
# setup the library logging
log_utils.setup()
# For backwards compatibility
SC2Reader = factories.SC2Factory
# Expose a nice module level interface
__defaultSC2Reader = factories.SC2Factory()
register_datapack = __defaultSC2Reader.register_datapack
register_listener = __defaultSC2Reader.register_listener
register_reader = __defaultSC2Reader.register_reader
get_listeners = __defaultSC2Reader.get_listeners
get_datapack = __defaultSC2Reader.get_datapack
get_reader = __defaultSC2Reader.get_reader
load_replays = __defaultSC2Reader.load_replays
load_replay = __defaultSC2Reader.load_replay
load_maps = __defaultSC2Reader.load_maps
load_map = __defaultSC2Reader.load_map
configure = __defaultSC2Reader.configure
reset = __defaultSC2Reader.reset
|
Add explicit submodule imports to resolve certain runtime import issues.
|
Add explicit submodule imports to resolve certain runtime import issues.
|
Python
|
mit
|
GraylinKim/sc2reader,vlaufer/sc2reader,ggtracker/sc2reader,GraylinKim/sc2reader,StoicLoofah/sc2reader,ggtracker/sc2reader,StoicLoofah/sc2reader,vlaufer/sc2reader
|
from __future__ import absolute_import
from sc2reader import factories, log_utils
# setup the library logging
log_utils.setup()
# For backwards compatibility
SC2Reader = factories.SC2Factory
# Expose a nice module level interface
__defaultSC2Reader = factories.SC2Factory()
register_datapack = __defaultSC2Reader.register_datapack
register_listener = __defaultSC2Reader.register_listener
register_reader = __defaultSC2Reader.register_reader
get_listeners = __defaultSC2Reader.get_listeners
get_datapack = __defaultSC2Reader.get_datapack
get_reader = __defaultSC2Reader.get_reader
load_replays = __defaultSC2Reader.load_replays
load_replay = __defaultSC2Reader.load_replay
load_maps = __defaultSC2Reader.load_maps
load_map = __defaultSC2Reader.load_map
configure = __defaultSC2Reader.configure
reset = __defaultSC2Reader.resetAdd explicit submodule imports to resolve certain runtime import issues.
|
from __future__ import absolute_import
# import submodules
from sc2reader import listeners, data, scripts, processors
from sc2reader import factories, log_utils
# setup the library logging
log_utils.setup()
# For backwards compatibility
SC2Reader = factories.SC2Factory
# Expose a nice module level interface
__defaultSC2Reader = factories.SC2Factory()
register_datapack = __defaultSC2Reader.register_datapack
register_listener = __defaultSC2Reader.register_listener
register_reader = __defaultSC2Reader.register_reader
get_listeners = __defaultSC2Reader.get_listeners
get_datapack = __defaultSC2Reader.get_datapack
get_reader = __defaultSC2Reader.get_reader
load_replays = __defaultSC2Reader.load_replays
load_replay = __defaultSC2Reader.load_replay
load_maps = __defaultSC2Reader.load_maps
load_map = __defaultSC2Reader.load_map
configure = __defaultSC2Reader.configure
reset = __defaultSC2Reader.reset
|
<commit_before>from __future__ import absolute_import
from sc2reader import factories, log_utils
# setup the library logging
log_utils.setup()
# For backwards compatibility
SC2Reader = factories.SC2Factory
# Expose a nice module level interface
__defaultSC2Reader = factories.SC2Factory()
register_datapack = __defaultSC2Reader.register_datapack
register_listener = __defaultSC2Reader.register_listener
register_reader = __defaultSC2Reader.register_reader
get_listeners = __defaultSC2Reader.get_listeners
get_datapack = __defaultSC2Reader.get_datapack
get_reader = __defaultSC2Reader.get_reader
load_replays = __defaultSC2Reader.load_replays
load_replay = __defaultSC2Reader.load_replay
load_maps = __defaultSC2Reader.load_maps
load_map = __defaultSC2Reader.load_map
configure = __defaultSC2Reader.configure
reset = __defaultSC2Reader.reset<commit_msg>Add explicit submodule imports to resolve certain runtime import issues.<commit_after>
|
from __future__ import absolute_import
# import submodules
from sc2reader import listeners, data, scripts, processors
from sc2reader import factories, log_utils
# setup the library logging
log_utils.setup()
# For backwards compatibility
SC2Reader = factories.SC2Factory
# Expose a nice module level interface
__defaultSC2Reader = factories.SC2Factory()
register_datapack = __defaultSC2Reader.register_datapack
register_listener = __defaultSC2Reader.register_listener
register_reader = __defaultSC2Reader.register_reader
get_listeners = __defaultSC2Reader.get_listeners
get_datapack = __defaultSC2Reader.get_datapack
get_reader = __defaultSC2Reader.get_reader
load_replays = __defaultSC2Reader.load_replays
load_replay = __defaultSC2Reader.load_replay
load_maps = __defaultSC2Reader.load_maps
load_map = __defaultSC2Reader.load_map
configure = __defaultSC2Reader.configure
reset = __defaultSC2Reader.reset
|
from __future__ import absolute_import
from sc2reader import factories, log_utils
# setup the library logging
log_utils.setup()
# For backwards compatibility
SC2Reader = factories.SC2Factory
# Expose a nice module level interface
__defaultSC2Reader = factories.SC2Factory()
register_datapack = __defaultSC2Reader.register_datapack
register_listener = __defaultSC2Reader.register_listener
register_reader = __defaultSC2Reader.register_reader
get_listeners = __defaultSC2Reader.get_listeners
get_datapack = __defaultSC2Reader.get_datapack
get_reader = __defaultSC2Reader.get_reader
load_replays = __defaultSC2Reader.load_replays
load_replay = __defaultSC2Reader.load_replay
load_maps = __defaultSC2Reader.load_maps
load_map = __defaultSC2Reader.load_map
configure = __defaultSC2Reader.configure
reset = __defaultSC2Reader.resetAdd explicit submodule imports to resolve certain runtime import issues.from __future__ import absolute_import
# import submodules
from sc2reader import listeners, data, scripts, processors
from sc2reader import factories, log_utils
# setup the library logging
log_utils.setup()
# For backwards compatibility
SC2Reader = factories.SC2Factory
# Expose a nice module level interface
__defaultSC2Reader = factories.SC2Factory()
register_datapack = __defaultSC2Reader.register_datapack
register_listener = __defaultSC2Reader.register_listener
register_reader = __defaultSC2Reader.register_reader
get_listeners = __defaultSC2Reader.get_listeners
get_datapack = __defaultSC2Reader.get_datapack
get_reader = __defaultSC2Reader.get_reader
load_replays = __defaultSC2Reader.load_replays
load_replay = __defaultSC2Reader.load_replay
load_maps = __defaultSC2Reader.load_maps
load_map = __defaultSC2Reader.load_map
configure = __defaultSC2Reader.configure
reset = __defaultSC2Reader.reset
|
<commit_before>from __future__ import absolute_import
from sc2reader import factories, log_utils
# setup the library logging
log_utils.setup()
# For backwards compatibility
SC2Reader = factories.SC2Factory
# Expose a nice module level interface
__defaultSC2Reader = factories.SC2Factory()
register_datapack = __defaultSC2Reader.register_datapack
register_listener = __defaultSC2Reader.register_listener
register_reader = __defaultSC2Reader.register_reader
get_listeners = __defaultSC2Reader.get_listeners
get_datapack = __defaultSC2Reader.get_datapack
get_reader = __defaultSC2Reader.get_reader
load_replays = __defaultSC2Reader.load_replays
load_replay = __defaultSC2Reader.load_replay
load_maps = __defaultSC2Reader.load_maps
load_map = __defaultSC2Reader.load_map
configure = __defaultSC2Reader.configure
reset = __defaultSC2Reader.reset<commit_msg>Add explicit submodule imports to resolve certain runtime import issues.<commit_after>from __future__ import absolute_import
# import submodules
from sc2reader import listeners, data, scripts, processors
from sc2reader import factories, log_utils
# setup the library logging
log_utils.setup()
# For backwards compatibility
SC2Reader = factories.SC2Factory
# Expose a nice module level interface
__defaultSC2Reader = factories.SC2Factory()
register_datapack = __defaultSC2Reader.register_datapack
register_listener = __defaultSC2Reader.register_listener
register_reader = __defaultSC2Reader.register_reader
get_listeners = __defaultSC2Reader.get_listeners
get_datapack = __defaultSC2Reader.get_datapack
get_reader = __defaultSC2Reader.get_reader
load_replays = __defaultSC2Reader.load_replays
load_replay = __defaultSC2Reader.load_replay
load_maps = __defaultSC2Reader.load_maps
load_map = __defaultSC2Reader.load_map
configure = __defaultSC2Reader.configure
reset = __defaultSC2Reader.reset
|
02197b1fcaafb3bec1437a093b1c4170c82b55de
|
scripts/vac-load.py
|
scripts/vac-load.py
|
from __future__ import print_function
import sys
from pyspark.sql import SparkSession
from pyspark.sql.functions import translate
if __name__ == "__main__":
if len(sys.argv) != 3:
print("Usage: vac-load.py <input> <output>", file=sys.stderr)
exit(-1)
spark = SparkSession.builder.appName('Load VAC JSON').getOrCreate()
spark.sparkContext._jsc.hadoopConfiguration()\
.set('mapreduce.input.fileinputformat.input.dir.recursive', 'true')
spark.read.json(sys.argv[1]) \
.withColumnRenamed('author', 'creator') \
.withColumn('text', translate('text', '\r', '\n')) \
.repartition(50) \
.write.save(sys.argv[2])
spark.stop()
|
Load Wright fiction JSON format.
|
Load Wright fiction JSON format.
|
Python
|
apache-2.0
|
ViralTexts/vt-passim,ViralTexts/vt-passim,ViralTexts/vt-passim
|
Load Wright fiction JSON format.
|
from __future__ import print_function
import sys
from pyspark.sql import SparkSession
from pyspark.sql.functions import translate
if __name__ == "__main__":
if len(sys.argv) != 3:
print("Usage: vac-load.py <input> <output>", file=sys.stderr)
exit(-1)
spark = SparkSession.builder.appName('Load VAC JSON').getOrCreate()
spark.sparkContext._jsc.hadoopConfiguration()\
.set('mapreduce.input.fileinputformat.input.dir.recursive', 'true')
spark.read.json(sys.argv[1]) \
.withColumnRenamed('author', 'creator') \
.withColumn('text', translate('text', '\r', '\n')) \
.repartition(50) \
.write.save(sys.argv[2])
spark.stop()
|
<commit_before><commit_msg>Load Wright fiction JSON format.<commit_after>
|
from __future__ import print_function
import sys
from pyspark.sql import SparkSession
from pyspark.sql.functions import translate
if __name__ == "__main__":
if len(sys.argv) != 3:
print("Usage: vac-load.py <input> <output>", file=sys.stderr)
exit(-1)
spark = SparkSession.builder.appName('Load VAC JSON').getOrCreate()
spark.sparkContext._jsc.hadoopConfiguration()\
.set('mapreduce.input.fileinputformat.input.dir.recursive', 'true')
spark.read.json(sys.argv[1]) \
.withColumnRenamed('author', 'creator') \
.withColumn('text', translate('text', '\r', '\n')) \
.repartition(50) \
.write.save(sys.argv[2])
spark.stop()
|
Load Wright fiction JSON format.from __future__ import print_function
import sys
from pyspark.sql import SparkSession
from pyspark.sql.functions import translate
if __name__ == "__main__":
if len(sys.argv) != 3:
print("Usage: vac-load.py <input> <output>", file=sys.stderr)
exit(-1)
spark = SparkSession.builder.appName('Load VAC JSON').getOrCreate()
spark.sparkContext._jsc.hadoopConfiguration()\
.set('mapreduce.input.fileinputformat.input.dir.recursive', 'true')
spark.read.json(sys.argv[1]) \
.withColumnRenamed('author', 'creator') \
.withColumn('text', translate('text', '\r', '\n')) \
.repartition(50) \
.write.save(sys.argv[2])
spark.stop()
|
<commit_before><commit_msg>Load Wright fiction JSON format.<commit_after>from __future__ import print_function
import sys
from pyspark.sql import SparkSession
from pyspark.sql.functions import translate
if __name__ == "__main__":
if len(sys.argv) != 3:
print("Usage: vac-load.py <input> <output>", file=sys.stderr)
exit(-1)
spark = SparkSession.builder.appName('Load VAC JSON').getOrCreate()
spark.sparkContext._jsc.hadoopConfiguration()\
.set('mapreduce.input.fileinputformat.input.dir.recursive', 'true')
spark.read.json(sys.argv[1]) \
.withColumnRenamed('author', 'creator') \
.withColumn('text', translate('text', '\r', '\n')) \
.repartition(50) \
.write.save(sys.argv[2])
spark.stop()
|
|
2ae585068ea0ba0501e28c27c56bf45359459540
|
py/remove-boxes.py
|
py/remove-boxes.py
|
from collections import defaultdict, Counter
from itertools import groupby
class Solution(object):
def removeBoxes(self, boxes):
"""
:type boxes: List[int]
:rtype: int
"""
table = defaultdict(Counter)
table_max = Counter()
B = []
for k, l in groupby(boxes):
B.append((k, len(list(l))))
lB = len(B)
for i in xrange(lB):
table[i, i + 1][B[i][1]] = 0
table_max[i, i + 1] = B[i][1] ** 2
for l in xrange(2, lB + 1):
for i in xrange(lB - l + 1):
fr, to = i, i + l
table_fr_to = table[fr, to]
size = B[fr][1]
table_fr_to[size] = max(table_fr_to[size], table_max[fr + 1, to])
table_max[fr, to] = max(table_max[fr, to], table_fr_to[size] + size ** 2)
for sp in xrange(fr + 1, to):
if B[fr][0] == B[sp][0]:
size_score_l = table[fr, sp]
size_score_r = table[sp, to]
max_size, max_score = 0, 0
size_scores = []
for size_l, score_l in size_score_l.iteritems():
for size_r, score_r in size_score_r.iteritems():
size_scores.append((size_l + size_r, score_l + score_r))
size_scores.sort(key=lambda (size, score): (-size, -score))
out_size_scores = []
for size, score in size_scores:
if not out_size_scores:
out_size_scores.append((size, score))
elif score > out_size_scores[-1][1]:
out_size_scores.append((size, score))
for size, score in out_size_scores:
table_fr_to[size] = max(table_fr_to[size], score)
table_max[fr, to] = max(table_max[fr, to], table_fr_to[size] + size ** 2)
return table_max[0, lB]
|
Add py solution for 546. Remove Boxes
|
Add py solution for 546. Remove Boxes
546. Remove Boxes: https://leetcode.com/problems/remove-boxes/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 546. Remove Boxes
546. Remove Boxes: https://leetcode.com/problems/remove-boxes/
|
from collections import defaultdict, Counter
from itertools import groupby
class Solution(object):
def removeBoxes(self, boxes):
"""
:type boxes: List[int]
:rtype: int
"""
table = defaultdict(Counter)
table_max = Counter()
B = []
for k, l in groupby(boxes):
B.append((k, len(list(l))))
lB = len(B)
for i in xrange(lB):
table[i, i + 1][B[i][1]] = 0
table_max[i, i + 1] = B[i][1] ** 2
for l in xrange(2, lB + 1):
for i in xrange(lB - l + 1):
fr, to = i, i + l
table_fr_to = table[fr, to]
size = B[fr][1]
table_fr_to[size] = max(table_fr_to[size], table_max[fr + 1, to])
table_max[fr, to] = max(table_max[fr, to], table_fr_to[size] + size ** 2)
for sp in xrange(fr + 1, to):
if B[fr][0] == B[sp][0]:
size_score_l = table[fr, sp]
size_score_r = table[sp, to]
max_size, max_score = 0, 0
size_scores = []
for size_l, score_l in size_score_l.iteritems():
for size_r, score_r in size_score_r.iteritems():
size_scores.append((size_l + size_r, score_l + score_r))
size_scores.sort(key=lambda (size, score): (-size, -score))
out_size_scores = []
for size, score in size_scores:
if not out_size_scores:
out_size_scores.append((size, score))
elif score > out_size_scores[-1][1]:
out_size_scores.append((size, score))
for size, score in out_size_scores:
table_fr_to[size] = max(table_fr_to[size], score)
table_max[fr, to] = max(table_max[fr, to], table_fr_to[size] + size ** 2)
return table_max[0, lB]
|
<commit_before><commit_msg>Add py solution for 546. Remove Boxes
546. Remove Boxes: https://leetcode.com/problems/remove-boxes/<commit_after>
|
from collections import defaultdict, Counter
from itertools import groupby
class Solution(object):
def removeBoxes(self, boxes):
"""
:type boxes: List[int]
:rtype: int
"""
table = defaultdict(Counter)
table_max = Counter()
B = []
for k, l in groupby(boxes):
B.append((k, len(list(l))))
lB = len(B)
for i in xrange(lB):
table[i, i + 1][B[i][1]] = 0
table_max[i, i + 1] = B[i][1] ** 2
for l in xrange(2, lB + 1):
for i in xrange(lB - l + 1):
fr, to = i, i + l
table_fr_to = table[fr, to]
size = B[fr][1]
table_fr_to[size] = max(table_fr_to[size], table_max[fr + 1, to])
table_max[fr, to] = max(table_max[fr, to], table_fr_to[size] + size ** 2)
for sp in xrange(fr + 1, to):
if B[fr][0] == B[sp][0]:
size_score_l = table[fr, sp]
size_score_r = table[sp, to]
max_size, max_score = 0, 0
size_scores = []
for size_l, score_l in size_score_l.iteritems():
for size_r, score_r in size_score_r.iteritems():
size_scores.append((size_l + size_r, score_l + score_r))
size_scores.sort(key=lambda (size, score): (-size, -score))
out_size_scores = []
for size, score in size_scores:
if not out_size_scores:
out_size_scores.append((size, score))
elif score > out_size_scores[-1][1]:
out_size_scores.append((size, score))
for size, score in out_size_scores:
table_fr_to[size] = max(table_fr_to[size], score)
table_max[fr, to] = max(table_max[fr, to], table_fr_to[size] + size ** 2)
return table_max[0, lB]
|
Add py solution for 546. Remove Boxes
546. Remove Boxes: https://leetcode.com/problems/remove-boxes/from collections import defaultdict, Counter
from itertools import groupby
class Solution(object):
def removeBoxes(self, boxes):
"""
:type boxes: List[int]
:rtype: int
"""
table = defaultdict(Counter)
table_max = Counter()
B = []
for k, l in groupby(boxes):
B.append((k, len(list(l))))
lB = len(B)
for i in xrange(lB):
table[i, i + 1][B[i][1]] = 0
table_max[i, i + 1] = B[i][1] ** 2
for l in xrange(2, lB + 1):
for i in xrange(lB - l + 1):
fr, to = i, i + l
table_fr_to = table[fr, to]
size = B[fr][1]
table_fr_to[size] = max(table_fr_to[size], table_max[fr + 1, to])
table_max[fr, to] = max(table_max[fr, to], table_fr_to[size] + size ** 2)
for sp in xrange(fr + 1, to):
if B[fr][0] == B[sp][0]:
size_score_l = table[fr, sp]
size_score_r = table[sp, to]
max_size, max_score = 0, 0
size_scores = []
for size_l, score_l in size_score_l.iteritems():
for size_r, score_r in size_score_r.iteritems():
size_scores.append((size_l + size_r, score_l + score_r))
size_scores.sort(key=lambda (size, score): (-size, -score))
out_size_scores = []
for size, score in size_scores:
if not out_size_scores:
out_size_scores.append((size, score))
elif score > out_size_scores[-1][1]:
out_size_scores.append((size, score))
for size, score in out_size_scores:
table_fr_to[size] = max(table_fr_to[size], score)
table_max[fr, to] = max(table_max[fr, to], table_fr_to[size] + size ** 2)
return table_max[0, lB]
|
<commit_before><commit_msg>Add py solution for 546. Remove Boxes
546. Remove Boxes: https://leetcode.com/problems/remove-boxes/<commit_after>from collections import defaultdict, Counter
from itertools import groupby
class Solution(object):
def removeBoxes(self, boxes):
"""
:type boxes: List[int]
:rtype: int
"""
table = defaultdict(Counter)
table_max = Counter()
B = []
for k, l in groupby(boxes):
B.append((k, len(list(l))))
lB = len(B)
for i in xrange(lB):
table[i, i + 1][B[i][1]] = 0
table_max[i, i + 1] = B[i][1] ** 2
for l in xrange(2, lB + 1):
for i in xrange(lB - l + 1):
fr, to = i, i + l
table_fr_to = table[fr, to]
size = B[fr][1]
table_fr_to[size] = max(table_fr_to[size], table_max[fr + 1, to])
table_max[fr, to] = max(table_max[fr, to], table_fr_to[size] + size ** 2)
for sp in xrange(fr + 1, to):
if B[fr][0] == B[sp][0]:
size_score_l = table[fr, sp]
size_score_r = table[sp, to]
max_size, max_score = 0, 0
size_scores = []
for size_l, score_l in size_score_l.iteritems():
for size_r, score_r in size_score_r.iteritems():
size_scores.append((size_l + size_r, score_l + score_r))
size_scores.sort(key=lambda (size, score): (-size, -score))
out_size_scores = []
for size, score in size_scores:
if not out_size_scores:
out_size_scores.append((size, score))
elif score > out_size_scores[-1][1]:
out_size_scores.append((size, score))
for size, score in out_size_scores:
table_fr_to[size] = max(table_fr_to[size], score)
table_max[fr, to] = max(table_max[fr, to], table_fr_to[size] + size ** 2)
return table_max[0, lB]
|