Column schema (per-column string statistics):

| column | dtype | stats |
|---|---|---|
| commit | string | length 40–40 |
| old_file | string | length 4–118 |
| new_file | string | length 4–118 |
| old_contents | string | length 0–2.94k |
| new_contents | string | length 1–4.43k |
| subject | string | length 15–444 |
| message | string | length 16–3.45k |
| lang | string | 1 class |
| license | string | 13 classes |
| repos | string | length 5–43.2k |
| prompt | string | length 17–4.58k |
| response | string | length 1–4.43k |
| prompt_tagged | string | length 58–4.62k |
| response_tagged | string | length 1–4.43k |
| text | string | length 132–7.29k |
| text_tagged | string | length 173–7.33k |
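Each row pairs a commit hash and file path with the file's contents before and after the change, plus prompt/response and tagged-text views of the same change. A minimal sketch of iterating over such records, assuming the rows are exported as a JSON Lines file named `commits.jsonl` (the file name, the JSONL export format, and the `iter_records` helper are assumptions for illustration, not part of the dataset above):

```python
import json

# Assumed export: one JSON object per line, with the columns listed in the schema table.
FIELDS = [
    "commit", "old_file", "new_file", "old_contents", "new_contents",
    "subject", "message", "lang", "license", "repos",
    "prompt", "response", "prompt_tagged", "response_tagged",
    "text", "text_tagged",
]

def iter_records(path="commits.jsonl"):
    """Yield one dict per row, keeping only the known columns."""
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            row = json.loads(line)
            yield {key: row.get(key, "") for key in FIELDS}

if __name__ == "__main__":
    for rec in iter_records():
        # `repos` is a comma-separated list of repositories containing the file.
        forks = rec["repos"].split(",")
        print(rec["commit"][:8], rec["subject"], f"({len(forks)} repos)")
```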
07f64b443ad10b0bbb3740c1eb3e7a0a7bcfd001
|
skimage/viewer/tests/test_viewer.py
|
skimage/viewer/tests/test_viewer.py
|
import skimage.data as data
from skimage.viewer import ImageViewer
from numpy.testing import assert_equal, assert_allclose
def setup_line_profile(image):
from skimage.viewer.plugins.lineprofile import LineProfile
viewer = ImageViewer(image)
plugin = LineProfile()
viewer += plugin
return plugin
def test_line_profile():
""" Test a line profile using an ndim=2 image"""
plugin = setup_line_profile(data.camera())
line_image, scan_data = plugin.output()
for inp in [line_image.nonzero()[0].size,
line_image.sum() / line_image.max(),
scan_data.size]:
assert_equal(inp, 172)
assert_equal(line_image.shape, (512, 512))
assert_equal(scan_data.max(), 234.0)
assert_allclose(scan_data.mean(), 71.726744186046517)
def test_line_profile_rgb():
""" Test a line profile using an ndim=3 image"""
plugin = setup_line_profile(data.chelsea())
for i in range(6):
plugin.line_tool._thicken_scan_line()
line_image, scan_data = plugin.output()
assert_equal(line_image[line_image == 128].size, 906)
assert_equal(line_image[line_image == 255].size, 151)
assert_equal(line_image.shape, (300, 451))
assert_equal(scan_data.shape, (151, 3))
assert_allclose(scan_data.max(), 196.85714285714286)
assert_allclose(scan_data.mean(), 111.17029328287606)
if __name__ == "__main__":
from numpy.testing import run_module_suite
run_module_suite()
|
Add test for LineProfiler tool
|
Add test for LineProfiler tool
|
Python
|
bsd-3-clause
|
robintw/scikit-image,Britefury/scikit-image,SamHames/scikit-image,michaelpacer/scikit-image,GaZ3ll3/scikit-image,chintak/scikit-image,youprofit/scikit-image,oew1v07/scikit-image,newville/scikit-image,emon10005/scikit-image,bennlich/scikit-image,chriscrosscutler/scikit-image,pratapvardhan/scikit-image,juliusbierk/scikit-image,dpshelio/scikit-image,rjeli/scikit-image,Midafi/scikit-image,michaelaye/scikit-image,youprofit/scikit-image,blink1073/scikit-image,Hiyorimi/scikit-image,robintw/scikit-image,newville/scikit-image,paalge/scikit-image,Midafi/scikit-image,blink1073/scikit-image,rjeli/scikit-image,michaelaye/scikit-image,bennlich/scikit-image,vighneshbirodkar/scikit-image,michaelpacer/scikit-image,ajaybhat/scikit-image,ofgulban/scikit-image,ofgulban/scikit-image,paalge/scikit-image,jwiggins/scikit-image,SamHames/scikit-image,jwiggins/scikit-image,WarrenWeckesser/scikits-image,pratapvardhan/scikit-image,keflavich/scikit-image,GaZ3ll3/scikit-image,chintak/scikit-image,keflavich/scikit-image,chintak/scikit-image,paalge/scikit-image,warmspringwinds/scikit-image,SamHames/scikit-image,ClinicalGraphics/scikit-image,emon10005/scikit-image,rjeli/scikit-image,chriscrosscutler/scikit-image,vighneshbirodkar/scikit-image,bsipocz/scikit-image,juliusbierk/scikit-image,ofgulban/scikit-image,dpshelio/scikit-image,ajaybhat/scikit-image,Britefury/scikit-image,warmspringwinds/scikit-image,oew1v07/scikit-image,bsipocz/scikit-image,vighneshbirodkar/scikit-image,WarrenWeckesser/scikits-image,Hiyorimi/scikit-image,ClinicalGraphics/scikit-image,SamHames/scikit-image,chintak/scikit-image
|
Add test for LineProfiler tool
|
import skimage.data as data
from skimage.viewer import ImageViewer
from numpy.testing import assert_equal, assert_allclose
def setup_line_profile(image):
from skimage.viewer.plugins.lineprofile import LineProfile
viewer = ImageViewer(image)
plugin = LineProfile()
viewer += plugin
return plugin
def test_line_profile():
""" Test a line profile using an ndim=2 image"""
plugin = setup_line_profile(data.camera())
line_image, scan_data = plugin.output()
for inp in [line_image.nonzero()[0].size,
line_image.sum() / line_image.max(),
scan_data.size]:
assert_equal(inp, 172)
assert_equal(line_image.shape, (512, 512))
assert_equal(scan_data.max(), 234.0)
assert_allclose(scan_data.mean(), 71.726744186046517)
def test_line_profile_rgb():
""" Test a line profile using an ndim=3 image"""
plugin = setup_line_profile(data.chelsea())
for i in range(6):
plugin.line_tool._thicken_scan_line()
line_image, scan_data = plugin.output()
assert_equal(line_image[line_image == 128].size, 906)
assert_equal(line_image[line_image == 255].size, 151)
assert_equal(line_image.shape, (300, 451))
assert_equal(scan_data.shape, (151, 3))
assert_allclose(scan_data.max(), 196.85714285714286)
assert_allclose(scan_data.mean(), 111.17029328287606)
if __name__ == "__main__":
from numpy.testing import run_module_suite
run_module_suite()
|
<commit_before><commit_msg>Add test for LineProfiler tool<commit_after>
|
import skimage.data as data
from skimage.viewer import ImageViewer
from numpy.testing import assert_equal, assert_allclose
def setup_line_profile(image):
from skimage.viewer.plugins.lineprofile import LineProfile
viewer = ImageViewer(image)
plugin = LineProfile()
viewer += plugin
return plugin
def test_line_profile():
""" Test a line profile using an ndim=2 image"""
plugin = setup_line_profile(data.camera())
line_image, scan_data = plugin.output()
for inp in [line_image.nonzero()[0].size,
line_image.sum() / line_image.max(),
scan_data.size]:
assert_equal(inp, 172)
assert_equal(line_image.shape, (512, 512))
assert_equal(scan_data.max(), 234.0)
assert_allclose(scan_data.mean(), 71.726744186046517)
def test_line_profile_rgb():
""" Test a line profile using an ndim=3 image"""
plugin = setup_line_profile(data.chelsea())
for i in range(6):
plugin.line_tool._thicken_scan_line()
line_image, scan_data = plugin.output()
assert_equal(line_image[line_image == 128].size, 906)
assert_equal(line_image[line_image == 255].size, 151)
assert_equal(line_image.shape, (300, 451))
assert_equal(scan_data.shape, (151, 3))
assert_allclose(scan_data.max(), 196.85714285714286)
assert_allclose(scan_data.mean(), 111.17029328287606)
if __name__ == "__main__":
from numpy.testing import run_module_suite
run_module_suite()
|
Add test for LineProfiler toolimport skimage.data as data
from skimage.viewer import ImageViewer
from numpy.testing import assert_equal, assert_allclose
def setup_line_profile(image):
from skimage.viewer.plugins.lineprofile import LineProfile
viewer = ImageViewer(image)
plugin = LineProfile()
viewer += plugin
return plugin
def test_line_profile():
""" Test a line profile using an ndim=2 image"""
plugin = setup_line_profile(data.camera())
line_image, scan_data = plugin.output()
for inp in [line_image.nonzero()[0].size,
line_image.sum() / line_image.max(),
scan_data.size]:
assert_equal(inp, 172)
assert_equal(line_image.shape, (512, 512))
assert_equal(scan_data.max(), 234.0)
assert_allclose(scan_data.mean(), 71.726744186046517)
def test_line_profile_rgb():
""" Test a line profile using an ndim=3 image"""
plugin = setup_line_profile(data.chelsea())
for i in range(6):
plugin.line_tool._thicken_scan_line()
line_image, scan_data = plugin.output()
assert_equal(line_image[line_image == 128].size, 906)
assert_equal(line_image[line_image == 255].size, 151)
assert_equal(line_image.shape, (300, 451))
assert_equal(scan_data.shape, (151, 3))
assert_allclose(scan_data.max(), 196.85714285714286)
assert_allclose(scan_data.mean(), 111.17029328287606)
if __name__ == "__main__":
from numpy.testing import run_module_suite
run_module_suite()
|
<commit_before><commit_msg>Add test for LineProfiler tool<commit_after>import skimage.data as data
from skimage.viewer import ImageViewer
from numpy.testing import assert_equal, assert_allclose
def setup_line_profile(image):
from skimage.viewer.plugins.lineprofile import LineProfile
viewer = ImageViewer(image)
plugin = LineProfile()
viewer += plugin
return plugin
def test_line_profile():
""" Test a line profile using an ndim=2 image"""
plugin = setup_line_profile(data.camera())
line_image, scan_data = plugin.output()
for inp in [line_image.nonzero()[0].size,
line_image.sum() / line_image.max(),
scan_data.size]:
assert_equal(inp, 172)
assert_equal(line_image.shape, (512, 512))
assert_equal(scan_data.max(), 234.0)
assert_allclose(scan_data.mean(), 71.726744186046517)
def test_line_profile_rgb():
""" Test a line profile using an ndim=3 image"""
plugin = setup_line_profile(data.chelsea())
for i in range(6):
plugin.line_tool._thicken_scan_line()
line_image, scan_data = plugin.output()
assert_equal(line_image[line_image == 128].size, 906)
assert_equal(line_image[line_image == 255].size, 151)
assert_equal(line_image.shape, (300, 451))
assert_equal(scan_data.shape, (151, 3))
assert_allclose(scan_data.max(), 196.85714285714286)
assert_allclose(scan_data.mean(), 111.17029328287606)
if __name__ == "__main__":
from numpy.testing import run_module_suite
run_module_suite()
|
|
27977e3facfef020d56192beb2cb1b4fc87a57fa
|
src/text-extract/test.py
|
src/text-extract/test.py
|
#!/usr/bin/env python
fh = open('/home/ben/dump.txt', 'w')
import sys
for ln in sys.stdin:
print >> fh, "here"
print >> fh, ln
print >> fh, "input closed"
fh.close()
|
Test for lisp external-program functionality
|
Test for lisp external-program functionality
|
Python
|
apache-2.0
|
BnMcGn/warflagger,BnMcGn/warflagger,BnMcGn/warflagger,BnMcGn/warflagger
|
Test for lisp external-program functionality
|
#!/usr/bin/env python
fh = open('/home/ben/dump.txt', 'w')
import sys
for ln in sys.stdin:
print >> fh, "here"
print >> fh, ln
print >> fh, "input closed"
fh.close()
|
<commit_before><commit_msg>Test for lisp external-program functionality<commit_after>
|
#!/usr/bin/env python
fh = open('/home/ben/dump.txt', 'w')
import sys
for ln in sys.stdin:
print >> fh, "here"
print >> fh, ln
print >> fh, "input closed"
fh.close()
|
Test for lisp external-program functionality#!/usr/bin/env python
fh = open('/home/ben/dump.txt', 'w')
import sys
for ln in sys.stdin:
print >> fh, "here"
print >> fh, ln
print >> fh, "input closed"
fh.close()
|
<commit_before><commit_msg>Test for lisp external-program functionality<commit_after>#!/usr/bin/env python
fh = open('/home/ben/dump.txt', 'w')
import sys
for ln in sys.stdin:
print >> fh, "here"
print >> fh, ln
print >> fh, "input closed"
fh.close()
|
|
c1aecc68a1561506dff3295536425b92e549fc5a
|
appengine_django/db/creation.py
|
appengine_django/db/creation.py
|
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django.conf import settings
from django.db.backends.creation import BaseDatabaseCreation
class DatabaseCreation(BaseDatabaseCreation):
def create_test_db(self, *args, **kw):
"""Destroys the test datastore. A new store will be recreated on demand"""
settings.DATABASE_SUPPORTS_TRANSACTIONS = False
self.destroy_test_db()
self.connection.use_test_datastore = True
self.connection.flush()
def destroy_test_db(self, *args, **kw):
"""Destroys the test datastore files."""
from appengine_django.db.base import destroy_datastore
from appengine_django.db.base import get_test_datastore_paths
destroy_datastore(*get_test_datastore_paths())
logging.debug("Destroyed test datastore")
|
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django.conf import settings
from django.db.backends.creation import BaseDatabaseCreation
class DatabaseCreation(BaseDatabaseCreation):
def create_test_db(self, *args, **kw):
"""Destroys the test datastore. A new store will be recreated on demand"""
# Only needed for Django 1.1, deprecated @ 1.2.
settings.DATABASE_SUPPORTS_TRANSACTIONS = False
self.connection.settings_dict['SUPPORTS_TRANSACTIONS'] = False
self.destroy_test_db()
self.connection.use_test_datastore = True
self.connection.flush()
def destroy_test_db(self, *args, **kw):
"""Destroys the test datastore files."""
from appengine_django.db.base import destroy_datastore
from appengine_django.db.base import get_test_datastore_paths
destroy_datastore(*get_test_datastore_paths())
logging.debug("Destroyed test datastore")
|
Update SUPPORTS_TRANSACTIONS attribute to what is expected by Django 1.2.
|
Update SUPPORTS_TRANSACTIONS attribute to what is expected by Django 1.2.
Patch contributed by Felix Leong. Thanks.
Fixes Issue #162.
|
Python
|
apache-2.0
|
ekkleesia3/google-app-engine-django,hitrust/google-app-engine-django,jackxiang/google-app-engine-django,brian-zhao/google-app-engine-django
|
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django.conf import settings
from django.db.backends.creation import BaseDatabaseCreation
class DatabaseCreation(BaseDatabaseCreation):
def create_test_db(self, *args, **kw):
"""Destroys the test datastore. A new store will be recreated on demand"""
settings.DATABASE_SUPPORTS_TRANSACTIONS = False
self.destroy_test_db()
self.connection.use_test_datastore = True
self.connection.flush()
def destroy_test_db(self, *args, **kw):
"""Destroys the test datastore files."""
from appengine_django.db.base import destroy_datastore
from appengine_django.db.base import get_test_datastore_paths
destroy_datastore(*get_test_datastore_paths())
logging.debug("Destroyed test datastore")
Update SUPPORTS_TRANSACTIONS attribute to what is expected by Django 1.2.
Patch contributed by Felix Leong. Thanks.
Fixes Issue #162.
|
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django.conf import settings
from django.db.backends.creation import BaseDatabaseCreation
class DatabaseCreation(BaseDatabaseCreation):
def create_test_db(self, *args, **kw):
"""Destroys the test datastore. A new store will be recreated on demand"""
# Only needed for Django 1.1, deprecated @ 1.2.
settings.DATABASE_SUPPORTS_TRANSACTIONS = False
self.connection.settings_dict['SUPPORTS_TRANSACTIONS'] = False
self.destroy_test_db()
self.connection.use_test_datastore = True
self.connection.flush()
def destroy_test_db(self, *args, **kw):
"""Destroys the test datastore files."""
from appengine_django.db.base import destroy_datastore
from appengine_django.db.base import get_test_datastore_paths
destroy_datastore(*get_test_datastore_paths())
logging.debug("Destroyed test datastore")
|
<commit_before>#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django.conf import settings
from django.db.backends.creation import BaseDatabaseCreation
class DatabaseCreation(BaseDatabaseCreation):
def create_test_db(self, *args, **kw):
"""Destroys the test datastore. A new store will be recreated on demand"""
settings.DATABASE_SUPPORTS_TRANSACTIONS = False
self.destroy_test_db()
self.connection.use_test_datastore = True
self.connection.flush()
def destroy_test_db(self, *args, **kw):
"""Destroys the test datastore files."""
from appengine_django.db.base import destroy_datastore
from appengine_django.db.base import get_test_datastore_paths
destroy_datastore(*get_test_datastore_paths())
logging.debug("Destroyed test datastore")
<commit_msg>Update SUPPORTS_TRANSACTIONS attribute to what is expected by Django 1.2.
Patch contributed by Felix Leong. Thanks.
Fixes Issue #162.<commit_after>
|
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django.conf import settings
from django.db.backends.creation import BaseDatabaseCreation
class DatabaseCreation(BaseDatabaseCreation):
def create_test_db(self, *args, **kw):
"""Destroys the test datastore. A new store will be recreated on demand"""
# Only needed for Django 1.1, deprecated @ 1.2.
settings.DATABASE_SUPPORTS_TRANSACTIONS = False
self.connection.settings_dict['SUPPORTS_TRANSACTIONS'] = False
self.destroy_test_db()
self.connection.use_test_datastore = True
self.connection.flush()
def destroy_test_db(self, *args, **kw):
"""Destroys the test datastore files."""
from appengine_django.db.base import destroy_datastore
from appengine_django.db.base import get_test_datastore_paths
destroy_datastore(*get_test_datastore_paths())
logging.debug("Destroyed test datastore")
|
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django.conf import settings
from django.db.backends.creation import BaseDatabaseCreation
class DatabaseCreation(BaseDatabaseCreation):
def create_test_db(self, *args, **kw):
"""Destroys the test datastore. A new store will be recreated on demand"""
settings.DATABASE_SUPPORTS_TRANSACTIONS = False
self.destroy_test_db()
self.connection.use_test_datastore = True
self.connection.flush()
def destroy_test_db(self, *args, **kw):
"""Destroys the test datastore files."""
from appengine_django.db.base import destroy_datastore
from appengine_django.db.base import get_test_datastore_paths
destroy_datastore(*get_test_datastore_paths())
logging.debug("Destroyed test datastore")
Update SUPPORTS_TRANSACTIONS attribute to what is expected by Django 1.2.
Patch contributed by Felix Leong. Thanks.
Fixes Issue #162.#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django.conf import settings
from django.db.backends.creation import BaseDatabaseCreation
class DatabaseCreation(BaseDatabaseCreation):
def create_test_db(self, *args, **kw):
"""Destroys the test datastore. A new store will be recreated on demand"""
# Only needed for Django 1.1, deprecated @ 1.2.
settings.DATABASE_SUPPORTS_TRANSACTIONS = False
self.connection.settings_dict['SUPPORTS_TRANSACTIONS'] = False
self.destroy_test_db()
self.connection.use_test_datastore = True
self.connection.flush()
def destroy_test_db(self, *args, **kw):
"""Destroys the test datastore files."""
from appengine_django.db.base import destroy_datastore
from appengine_django.db.base import get_test_datastore_paths
destroy_datastore(*get_test_datastore_paths())
logging.debug("Destroyed test datastore")
|
<commit_before>#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django.conf import settings
from django.db.backends.creation import BaseDatabaseCreation
class DatabaseCreation(BaseDatabaseCreation):
def create_test_db(self, *args, **kw):
"""Destroys the test datastore. A new store will be recreated on demand"""
settings.DATABASE_SUPPORTS_TRANSACTIONS = False
self.destroy_test_db()
self.connection.use_test_datastore = True
self.connection.flush()
def destroy_test_db(self, *args, **kw):
"""Destroys the test datastore files."""
from appengine_django.db.base import destroy_datastore
from appengine_django.db.base import get_test_datastore_paths
destroy_datastore(*get_test_datastore_paths())
logging.debug("Destroyed test datastore")
<commit_msg>Update SUPPORTS_TRANSACTIONS attribute to what is expected by Django 1.2.
Patch contributed by Felix Leong. Thanks.
Fixes Issue #162.<commit_after>#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django.conf import settings
from django.db.backends.creation import BaseDatabaseCreation
class DatabaseCreation(BaseDatabaseCreation):
def create_test_db(self, *args, **kw):
"""Destroys the test datastore. A new store will be recreated on demand"""
# Only needed for Django 1.1, deprecated @ 1.2.
settings.DATABASE_SUPPORTS_TRANSACTIONS = False
self.connection.settings_dict['SUPPORTS_TRANSACTIONS'] = False
self.destroy_test_db()
self.connection.use_test_datastore = True
self.connection.flush()
def destroy_test_db(self, *args, **kw):
"""Destroys the test datastore files."""
from appengine_django.db.base import destroy_datastore
from appengine_django.db.base import get_test_datastore_paths
destroy_datastore(*get_test_datastore_paths())
logging.debug("Destroyed test datastore")
|
452c208a9be68f2fbfe513a15bdc94ed8fbb863f
|
util/hgfilesize.py
|
util/hgfilesize.py
|
from mercurial import context
from mercurial.i18n import _
'''
[extensions]
hgfilesize=~/m5/incoming/util/hgfilesize.py
[hooks]
pretxncommit = python:hgfilesize.limit_file_size
pretxnchangegroup = python:hgfilesize.limit_file_size
[limit_file_size]
maximum_file_size = 200000
'''
def limit_file_size(ui, repo, node=None, **kwargs):
'''forbid files over a given size'''
# default limit is 1 MB
limit = int(ui.config('limit_file_size', 'maximum_file_size', 1024*1024))
existing_tip = context.changectx(repo, node).rev()
new_tip = context.changectx(repo, 'tip').rev()
for rev in xrange(existing_tip, new_tip + 1):
ctx = context.changectx(repo, rev)
for f in ctx.files():
fctx = ctx.filectx(f)
if fctx.size() > limit:
ui.write(_('file %s of %s is too large: %d > %d\n') % \
(f, ctx, fctx.size(), limit))
return True # This is invalid
return False # Things are OK.
|
Add a hook to limit the size of any individual file
|
hooks: Add a hook to limit the size of any individual file
|
Python
|
bsd-3-clause
|
LingxiaoJIA/gem5,LingxiaoJIA/gem5,haowu4682/gem5,haowu4682/gem5,haowu4682/gem5,LingxiaoJIA/gem5,haowu4682/gem5,haowu4682/gem5,haowu4682/gem5,haowu4682/gem5,LingxiaoJIA/gem5,LingxiaoJIA/gem5,LingxiaoJIA/gem5,haowu4682/gem5,LingxiaoJIA/gem5,haowu4682/gem5
|
hooks: Add a hook to limit the size of any individual file
|
from mercurial import context
from mercurial.i18n import _
'''
[extensions]
hgfilesize=~/m5/incoming/util/hgfilesize.py
[hooks]
pretxncommit = python:hgfilesize.limit_file_size
pretxnchangegroup = python:hgfilesize.limit_file_size
[limit_file_size]
maximum_file_size = 200000
'''
def limit_file_size(ui, repo, node=None, **kwargs):
'''forbid files over a given size'''
# default limit is 1 MB
limit = int(ui.config('limit_file_size', 'maximum_file_size', 1024*1024))
existing_tip = context.changectx(repo, node).rev()
new_tip = context.changectx(repo, 'tip').rev()
for rev in xrange(existing_tip, new_tip + 1):
ctx = context.changectx(repo, rev)
for f in ctx.files():
fctx = ctx.filectx(f)
if fctx.size() > limit:
ui.write(_('file %s of %s is too large: %d > %d\n') % \
(f, ctx, fctx.size(), limit))
return True # This is invalid
return False # Things are OK.
|
<commit_before><commit_msg>hooks: Add a hook to limit the size of any individual file<commit_after>
|
from mercurial import context
from mercurial.i18n import _
'''
[extensions]
hgfilesize=~/m5/incoming/util/hgfilesize.py
[hooks]
pretxncommit = python:hgfilesize.limit_file_size
pretxnchangegroup = python:hgfilesize.limit_file_size
[limit_file_size]
maximum_file_size = 200000
'''
def limit_file_size(ui, repo, node=None, **kwargs):
'''forbid files over a given size'''
# default limit is 1 MB
limit = int(ui.config('limit_file_size', 'maximum_file_size', 1024*1024))
existing_tip = context.changectx(repo, node).rev()
new_tip = context.changectx(repo, 'tip').rev()
for rev in xrange(existing_tip, new_tip + 1):
ctx = context.changectx(repo, rev)
for f in ctx.files():
fctx = ctx.filectx(f)
if fctx.size() > limit:
ui.write(_('file %s of %s is too large: %d > %d\n') % \
(f, ctx, fctx.size(), limit))
return True # This is invalid
return False # Things are OK.
|
hooks: Add a hook to limit the size of any individual filefrom mercurial import context
from mercurial.i18n import _
'''
[extensions]
hgfilesize=~/m5/incoming/util/hgfilesize.py
[hooks]
pretxncommit = python:hgfilesize.limit_file_size
pretxnchangegroup = python:hgfilesize.limit_file_size
[limit_file_size]
maximum_file_size = 200000
'''
def limit_file_size(ui, repo, node=None, **kwargs):
'''forbid files over a given size'''
# default limit is 1 MB
limit = int(ui.config('limit_file_size', 'maximum_file_size', 1024*1024))
existing_tip = context.changectx(repo, node).rev()
new_tip = context.changectx(repo, 'tip').rev()
for rev in xrange(existing_tip, new_tip + 1):
ctx = context.changectx(repo, rev)
for f in ctx.files():
fctx = ctx.filectx(f)
if fctx.size() > limit:
ui.write(_('file %s of %s is too large: %d > %d\n') % \
(f, ctx, fctx.size(), limit))
return True # This is invalid
return False # Things are OK.
|
<commit_before><commit_msg>hooks: Add a hook to limit the size of any individual file<commit_after>from mercurial import context
from mercurial.i18n import _
'''
[extensions]
hgfilesize=~/m5/incoming/util/hgfilesize.py
[hooks]
pretxncommit = python:hgfilesize.limit_file_size
pretxnchangegroup = python:hgfilesize.limit_file_size
[limit_file_size]
maximum_file_size = 200000
'''
def limit_file_size(ui, repo, node=None, **kwargs):
'''forbid files over a given size'''
# default limit is 1 MB
limit = int(ui.config('limit_file_size', 'maximum_file_size', 1024*1024))
existing_tip = context.changectx(repo, node).rev()
new_tip = context.changectx(repo, 'tip').rev()
for rev in xrange(existing_tip, new_tip + 1):
ctx = context.changectx(repo, rev)
for f in ctx.files():
fctx = ctx.filectx(f)
if fctx.size() > limit:
ui.write(_('file %s of %s is too large: %d > %d\n') % \
(f, ctx, fctx.size(), limit))
return True # This is invalid
return False # Things are OK.
|
|
efabb671a359a98df2b739288be4fcd21f5d202c
|
package/migrations/0005_auto_20190927_1616.py
|
package/migrations/0005_auto_20190927_1616.py
|
# Generated by Django 2.2.5 on 2019-09-27 16:16
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('package', '0004_auto_20180214_0617'),
]
operations = [
migrations.AlterField(
model_name='package',
name='category',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='package.Category', verbose_name='Installation'),
),
]
|
Add migrations that protects packages on category deletion
|
Add migrations that protects packages on category deletion
|
Python
|
mit
|
pydanny/djangopackages,pydanny/djangopackages,pydanny/djangopackages
|
Add migrations that protects packages on category deletion
|
# Generated by Django 2.2.5 on 2019-09-27 16:16
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('package', '0004_auto_20180214_0617'),
]
operations = [
migrations.AlterField(
model_name='package',
name='category',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='package.Category', verbose_name='Installation'),
),
]
|
<commit_before><commit_msg>Add migrations that protects packages on category deletion<commit_after>
|
# Generated by Django 2.2.5 on 2019-09-27 16:16
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('package', '0004_auto_20180214_0617'),
]
operations = [
migrations.AlterField(
model_name='package',
name='category',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='package.Category', verbose_name='Installation'),
),
]
|
Add migrations that protects packages on category deletion# Generated by Django 2.2.5 on 2019-09-27 16:16
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('package', '0004_auto_20180214_0617'),
]
operations = [
migrations.AlterField(
model_name='package',
name='category',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='package.Category', verbose_name='Installation'),
),
]
|
<commit_before><commit_msg>Add migrations that protects packages on category deletion<commit_after># Generated by Django 2.2.5 on 2019-09-27 16:16
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('package', '0004_auto_20180214_0617'),
]
operations = [
migrations.AlterField(
model_name='package',
name='category',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='package.Category', verbose_name='Installation'),
),
]
|
|
cbf1c788e2ff492c007e17be2b1efeaac42759f1
|
gobble/validation.py
|
gobble/validation.py
|
"""Validate data-packages"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from json import dumps
from datetime import datetime
from future import standard_library
from collections import OrderedDict
from datapackage import DataPackage
from jsonschema.exceptions import ValidationError
from gobble.config import VALIDATION_FEEDBACK
standard_library.install_aliases()
class Validator(object):
"""Validate a data-package
The :class:``Validator`` class is a thin wrapper around the
DataPackage class that produces a tunable, human readable report.
:type package: :class:`DataPackage`
:type feedback: :class:`list`
:param feedback: choose from 'message', 'cause', 'context',
'validator', 'validator_value', 'path',
'schema_path', 'instance', 'schema', 'parent'
"""
valid_feedback = {
'message', 'cause', 'context',
'validator', 'validator_value',
'path', 'schema_path', 'instance',
'schema', 'parent'
}
def __init__(self, package, *feedback):
bad_package = 'Package must be DataPackage class'
bad_feedback = 'Feedback must be %s' % str(self.valid_feedback)
assert isinstance(package, DataPackage), bad_package
assert set(feedback).issubset(self.valid_feedback), bad_feedback
self._feedback = feedback or VALIDATION_FEEDBACK
self._package = package
self._report = OrderedDict()
self.timestamp = str(datetime.now())
self._run()
@property
def report(self):
return self._report
@property
def name(self):
return self._package.metadata['name']
@property
def is_valid(self):
return self._report['is_valid']
@property
def result(self):
return 'success' if self.is_valid else 'fail'
def save(self, filepath):
if not filepath.endswith('.json'):
raise ValueError('Reports are JSON files')
with open(filepath, 'w+') as file:
file.write(dumps(self.report))
@property
def _package_info(self):
for attribute in self._package.required_attributes:
value = getattr(self._package.metadata, attribute, None)
yield attribute, value
@property
def _errors(self):
for error in self._package.iter_errors():
for choice in self._feedback:
yield getattr(error, choice)
def _run(self):
self._report.update(dict(is_valid=False, timestamp=self.timestamp))
self._report.update(dict(package_info=dict(self._package_info)))
try:
self._package.validate()
self._report.update(dict(is_valid=True))
except ValidationError:
self._report.update(dict(errors=list(self._errors)))
def __repr__(self):
return '<Validator %s: %s>' % (self.result.upper(), self.name)
|
Write the Validation class (wrapper around the DataPackage class).
|
Write the Validation class (wrapper around the DataPackage class).
|
Python
|
mit
|
openspending/gobble
|
Write the Validation class (wrapper around the DataPackage class).
|
"""Validate data-packages"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from json import dumps
from datetime import datetime
from future import standard_library
from collections import OrderedDict
from datapackage import DataPackage
from jsonschema.exceptions import ValidationError
from gobble.config import VALIDATION_FEEDBACK
standard_library.install_aliases()
class Validator(object):
"""Validate a data-package
The :class:``Validator`` class is a thin wrapper around the
DataPackage class that produces a tunable, human readable report.
:type package: :class:`DataPackage`
:type feedback: :class:`list`
:param feedback: choose from 'message', 'cause', 'context',
'validator', 'validator_value', 'path',
'schema_path', 'instance', 'schema', 'parent'
"""
valid_feedback = {
'message', 'cause', 'context',
'validator', 'validator_value',
'path', 'schema_path', 'instance',
'schema', 'parent'
}
def __init__(self, package, *feedback):
bad_package = 'Package must be DataPackage class'
bad_feedback = 'Feedback must be %s' % str(self.valid_feedback)
assert isinstance(package, DataPackage), bad_package
assert set(feedback).issubset(self.valid_feedback), bad_feedback
self._feedback = feedback or VALIDATION_FEEDBACK
self._package = package
self._report = OrderedDict()
self.timestamp = str(datetime.now())
self._run()
@property
def report(self):
return self._report
@property
def name(self):
return self._package.metadata['name']
@property
def is_valid(self):
return self._report['is_valid']
@property
def result(self):
return 'success' if self.is_valid else 'fail'
def save(self, filepath):
if not filepath.endswith('.json'):
raise ValueError('Reports are JSON files')
with open(filepath, 'w+') as file:
file.write(dumps(self.report))
@property
def _package_info(self):
for attribute in self._package.required_attributes:
value = getattr(self._package.metadata, attribute, None)
yield attribute, value
@property
def _errors(self):
for error in self._package.iter_errors():
for choice in self._feedback:
yield getattr(error, choice)
def _run(self):
self._report.update(dict(is_valid=False, timestamp=self.timestamp))
self._report.update(dict(package_info=dict(self._package_info)))
try:
self._package.validate()
self._report.update(dict(is_valid=True))
except ValidationError:
self._report.update(dict(errors=list(self._errors)))
def __repr__(self):
return '<Validator %s: %s>' % (self.result.upper(), self.name)
|
<commit_before><commit_msg>Write the Validation class (wrapper around the DataPackage class).<commit_after>
|
"""Validate data-packages"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from json import dumps
from datetime import datetime
from future import standard_library
from collections import OrderedDict
from datapackage import DataPackage
from jsonschema.exceptions import ValidationError
from gobble.config import VALIDATION_FEEDBACK
standard_library.install_aliases()
class Validator(object):
"""Validate a data-package
The :class:``Validator`` class is a thin wrapper around the
DataPackage class that produces a tunable, human readable report.
:type package: :class:`DataPackage`
:type feedback: :class:`list`
:param feedback: choose from 'message', 'cause', 'context',
'validator', 'validator_value', 'path',
'schema_path', 'instance', 'schema', 'parent'
"""
valid_feedback = {
'message', 'cause', 'context',
'validator', 'validator_value',
'path', 'schema_path', 'instance',
'schema', 'parent'
}
def __init__(self, package, *feedback):
bad_package = 'Package must be DataPackage class'
bad_feedback = 'Feedback must be %s' % str(self.valid_feedback)
assert isinstance(package, DataPackage), bad_package
assert set(feedback).issubset(self.valid_feedback), bad_feedback
self._feedback = feedback or VALIDATION_FEEDBACK
self._package = package
self._report = OrderedDict()
self.timestamp = str(datetime.now())
self._run()
@property
def report(self):
return self._report
@property
def name(self):
return self._package.metadata['name']
@property
def is_valid(self):
return self._report['is_valid']
@property
def result(self):
return 'success' if self.is_valid else 'fail'
def save(self, filepath):
if not filepath.endswith('.json'):
raise ValueError('Reports are JSON files')
with open(filepath, 'w+') as file:
file.write(dumps(self.report))
@property
def _package_info(self):
for attribute in self._package.required_attributes:
value = getattr(self._package.metadata, attribute, None)
yield attribute, value
@property
def _errors(self):
for error in self._package.iter_errors():
for choice in self._feedback:
yield getattr(error, choice)
def _run(self):
self._report.update(dict(is_valid=False, timestamp=self.timestamp))
self._report.update(dict(package_info=dict(self._package_info)))
try:
self._package.validate()
self._report.update(dict(is_valid=True))
except ValidationError:
self._report.update(dict(errors=list(self._errors)))
def __repr__(self):
return '<Validator %s: %s>' % (self.result.upper(), self.name)
|
Write the Validation class (wrapper around the DataPackage class)."""Validate data-packages"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from json import dumps
from datetime import datetime
from future import standard_library
from collections import OrderedDict
from datapackage import DataPackage
from jsonschema.exceptions import ValidationError
from gobble.config import VALIDATION_FEEDBACK
standard_library.install_aliases()
class Validator(object):
"""Validate a data-package
The :class:``Validator`` class is a thin wrapper around the
DataPackage class that produces a tunable, human readable report.
:type package: :class:`DataPackage`
:type feedback: :class:`list`
:param feedback: choose from 'message', 'cause', 'context',
'validator', 'validator_value', 'path',
'schema_path', 'instance', 'schema', 'parent'
"""
valid_feedback = {
'message', 'cause', 'context',
'validator', 'validator_value',
'path', 'schema_path', 'instance',
'schema', 'parent'
}
def __init__(self, package, *feedback):
bad_package = 'Package must be DataPackage class'
bad_feedback = 'Feedback must be %s' % str(self.valid_feedback)
assert isinstance(package, DataPackage), bad_package
assert set(feedback).issubset(self.valid_feedback), bad_feedback
self._feedback = feedback or VALIDATION_FEEDBACK
self._package = package
self._report = OrderedDict()
self.timestamp = str(datetime.now())
self._run()
@property
def report(self):
return self._report
@property
def name(self):
return self._package.metadata['name']
@property
def is_valid(self):
return self._report['is_valid']
@property
def result(self):
return 'success' if self.is_valid else 'fail'
def save(self, filepath):
if not filepath.endswith('.json'):
raise ValueError('Reports are JSON files')
with open(filepath, 'w+') as file:
file.write(dumps(self.report))
@property
def _package_info(self):
for attribute in self._package.required_attributes:
value = getattr(self._package.metadata, attribute, None)
yield attribute, value
@property
def _errors(self):
for error in self._package.iter_errors():
for choice in self._feedback:
yield getattr(error, choice)
def _run(self):
self._report.update(dict(is_valid=False, timestamp=self.timestamp))
self._report.update(dict(package_info=dict(self._package_info)))
try:
self._package.validate()
self._report.update(dict(is_valid=True))
except ValidationError:
self._report.update(dict(errors=list(self._errors)))
def __repr__(self):
return '<Validator %s: %s>' % (self.result.upper(), self.name)
|
<commit_before><commit_msg>Write the Validation class (wrapper around the DataPackage class).<commit_after>"""Validate data-packages"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from json import dumps
from datetime import datetime
from future import standard_library
from collections import OrderedDict
from datapackage import DataPackage
from jsonschema.exceptions import ValidationError
from gobble.config import VALIDATION_FEEDBACK
standard_library.install_aliases()
class Validator(object):
"""Validate a data-package
The :class:``Validator`` class is a thin wrapper around the
DataPackage class that produces a tunable, human readable report.
:type package: :class:`DataPackage`
:type feedback: :class:`list`
:param feedback: choose from 'message', 'cause', 'context',
'validator', 'validator_value', 'path',
'schema_path', 'instance', 'schema', 'parent'
"""
valid_feedback = {
'message', 'cause', 'context',
'validator', 'validator_value',
'path', 'schema_path', 'instance',
'schema', 'parent'
}
def __init__(self, package, *feedback):
bad_package = 'Package must be DataPackage class'
bad_feedback = 'Feedback must be %s' % str(self.valid_feedback)
assert isinstance(package, DataPackage), bad_package
assert set(feedback).issubset(self.valid_feedback), bad_feedback
self._feedback = feedback or VALIDATION_FEEDBACK
self._package = package
self._report = OrderedDict()
self.timestamp = str(datetime.now())
self._run()
@property
def report(self):
return self._report
@property
def name(self):
return self._package.metadata['name']
@property
def is_valid(self):
return self._report['is_valid']
@property
def result(self):
return 'success' if self.is_valid else 'fail'
def save(self, filepath):
if not filepath.endswith('.json'):
raise ValueError('Reports are JSON files')
with open(filepath, 'w+') as file:
file.write(dumps(self.report))
@property
def _package_info(self):
for attribute in self._package.required_attributes:
value = getattr(self._package.metadata, attribute, None)
yield attribute, value
@property
def _errors(self):
for error in self._package.iter_errors():
for choice in self._feedback:
yield getattr(error, choice)
def _run(self):
self._report.update(dict(is_valid=False, timestamp=self.timestamp))
self._report.update(dict(package_info=dict(self._package_info)))
try:
self._package.validate()
self._report.update(dict(is_valid=True))
except ValidationError:
self._report.update(dict(errors=list(self._errors)))
def __repr__(self):
return '<Validator %s: %s>' % (self.result.upper(), self.name)
|
|
614cdfc97698204c07e172d09ad99ce45e8c3210
|
genealogio/migrations/0004_auto_20150220_1242.py
|
genealogio/migrations/0004_auto_20150220_1242.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('genealogio', '0003_auto_20150220_0733'),
]
operations = [
migrations.AlterModelOptions(
name='family',
options={'ordering': ('name',), 'verbose_name': 'Familie', 'verbose_name_plural': 'Familien'},
),
]
|
Add ordering by family name to Family model.
|
Add ordering by family name to Family model.
|
Python
|
bsd-3-clause
|
ugoertz/django-familio,ugoertz/django-familio,ugoertz/django-familio,ugoertz/django-familio
|
Add ordering by family name to Family model.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('genealogio', '0003_auto_20150220_0733'),
]
operations = [
migrations.AlterModelOptions(
name='family',
options={'ordering': ('name',), 'verbose_name': 'Familie', 'verbose_name_plural': 'Familien'},
),
]
|
<commit_before><commit_msg>Add ordering by family name to Family model.<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('genealogio', '0003_auto_20150220_0733'),
]
operations = [
migrations.AlterModelOptions(
name='family',
options={'ordering': ('name',), 'verbose_name': 'Familie', 'verbose_name_plural': 'Familien'},
),
]
|
Add ordering by family name to Family model.# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('genealogio', '0003_auto_20150220_0733'),
]
operations = [
migrations.AlterModelOptions(
name='family',
options={'ordering': ('name',), 'verbose_name': 'Familie', 'verbose_name_plural': 'Familien'},
),
]
|
<commit_before><commit_msg>Add ordering by family name to Family model.<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('genealogio', '0003_auto_20150220_0733'),
]
operations = [
migrations.AlterModelOptions(
name='family',
options={'ordering': ('name',), 'verbose_name': 'Familie', 'verbose_name_plural': 'Familien'},
),
]
|
|
6a8bf7122f57fc80290acbd61d7e14d4c1742fe9
|
tests/parser/test_shadow_stdout.py
|
tests/parser/test_shadow_stdout.py
|
import pytest
import parser.log_file.shadow_stdout
good_shadow_text = '''root:!:16179:0:99999:7:::
vagrant:$6$5M8f9rEy$nFWJvEnn2KFQwFsm6oRMyxva3mVixbyxZIE3cYTJ.ARFMt6Nq6gsnqScUkZ/slZ8tQzhZovx1M2CnmSsF71JA1:16179:0:99999:7:::
vboxadd:!:16179::::::
'''
@pytest.fixture(scope='function')
def good_shadow(tmpdir):
p = tmpdir.join('shadow.log')
p.write(good_shadow_text)
o = parser.log_file.shadow_stdout.ShadowStdoutLog(str(p))
o.parse()
return o
def test_length(good_shadow):
assert len(good_shadow.users) is 3
def test_user_name(good_shadow):
assert 'root' in good_shadow.users
@pytest.fixture(scope='function')
def root_user(good_shadow):
return good_shadow.users['root'].shadow
def test_good_user_password(root_user):
assert root_user.password == '!'
def test_good_user_lastchanged(root_user):
assert root_user.lastchanged == '16179'
def test_good_user_minimum(root_user):
assert root_user.minimum == '0'
def test_good_user_maximum(root_user):
assert root_user.maximum == '99999'
def test_good_user_warn(root_user):
assert root_user.warn == '7'
def test_good_user_inactive(root_user):
assert root_user.inactive == ''
def test_good_user_expire(root_user):
assert root_user.expire == ''
def test_good_user_reserved(root_user):
assert root_user.reserved == ''
def test_bad_field_count(tmpdir):
p = tmpdir.join('shadow.log')
p.write('root:!:16179:0')
o = parser.log_file.shadow_stdout.ShadowStdoutLog(str(p))
with pytest.raises(AssertionError):
o.parse()
def test_duplicates(tmpdir):
p = tmpdir.join('shadow.log')
p.write('root:!:16179:0:99999:7:::\nroot:!:16179:0:99999:7:::')
o = parser.log_file.shadow_stdout.ShadowStdoutLog(str(p))
with pytest.raises(AssertionError):
o.parse()
|
Add testing for shadow_stdout parser module
|
Add testing for shadow_stdout parser module
|
Python
|
mit
|
Doveps/mono,Doveps/mono,Doveps/bassist,Doveps/mono,Doveps/mono
|
Add testing for shadow_stdout parser module
|
import pytest
import parser.log_file.shadow_stdout
good_shadow_text = '''root:!:16179:0:99999:7:::
vagrant:$6$5M8f9rEy$nFWJvEnn2KFQwFsm6oRMyxva3mVixbyxZIE3cYTJ.ARFMt6Nq6gsnqScUkZ/slZ8tQzhZovx1M2CnmSsF71JA1:16179:0:99999:7:::
vboxadd:!:16179::::::
'''
@pytest.fixture(scope='function')
def good_shadow(tmpdir):
p = tmpdir.join('shadow.log')
p.write(good_shadow_text)
o = parser.log_file.shadow_stdout.ShadowStdoutLog(str(p))
o.parse()
return o
def test_length(good_shadow):
assert len(good_shadow.users) is 3
def test_user_name(good_shadow):
assert 'root' in good_shadow.users
@pytest.fixture(scope='function')
def root_user(good_shadow):
return good_shadow.users['root'].shadow
def test_good_user_password(root_user):
assert root_user.password == '!'
def test_good_user_lastchanged(root_user):
assert root_user.lastchanged == '16179'
def test_good_user_minimum(root_user):
assert root_user.minimum == '0'
def test_good_user_maximum(root_user):
assert root_user.maximum == '99999'
def test_good_user_warn(root_user):
assert root_user.warn == '7'
def test_good_user_inactive(root_user):
assert root_user.inactive == ''
def test_good_user_expire(root_user):
assert root_user.expire == ''
def test_good_user_reserved(root_user):
assert root_user.reserved == ''
def test_bad_field_count(tmpdir):
p = tmpdir.join('shadow.log')
p.write('root:!:16179:0')
o = parser.log_file.shadow_stdout.ShadowStdoutLog(str(p))
with pytest.raises(AssertionError):
o.parse()
def test_duplicates(tmpdir):
p = tmpdir.join('shadow.log')
p.write('root:!:16179:0:99999:7:::\nroot:!:16179:0:99999:7:::')
o = parser.log_file.shadow_stdout.ShadowStdoutLog(str(p))
with pytest.raises(AssertionError):
o.parse()
|
<commit_before><commit_msg>Add testing for shadow_stdout parser module<commit_after>
|
import pytest
import parser.log_file.shadow_stdout
good_shadow_text = '''root:!:16179:0:99999:7:::
vagrant:$6$5M8f9rEy$nFWJvEnn2KFQwFsm6oRMyxva3mVixbyxZIE3cYTJ.ARFMt6Nq6gsnqScUkZ/slZ8tQzhZovx1M2CnmSsF71JA1:16179:0:99999:7:::
vboxadd:!:16179::::::
'''
@pytest.fixture(scope='function')
def good_shadow(tmpdir):
p = tmpdir.join('shadow.log')
p.write(good_shadow_text)
o = parser.log_file.shadow_stdout.ShadowStdoutLog(str(p))
o.parse()
return o
def test_length(good_shadow):
assert len(good_shadow.users) is 3
def test_user_name(good_shadow):
assert 'root' in good_shadow.users
@pytest.fixture(scope='function')
def root_user(good_shadow):
return good_shadow.users['root'].shadow
def test_good_user_password(root_user):
assert root_user.password == '!'
def test_good_user_lastchanged(root_user):
assert root_user.lastchanged == '16179'
def test_good_user_minimum(root_user):
assert root_user.minimum == '0'
def test_good_user_maximum(root_user):
assert root_user.maximum == '99999'
def test_good_user_warn(root_user):
assert root_user.warn == '7'
def test_good_user_inactive(root_user):
assert root_user.inactive == ''
def test_good_user_expire(root_user):
assert root_user.expire == ''
def test_good_user_reserved(root_user):
assert root_user.reserved == ''
def test_bad_field_count(tmpdir):
p = tmpdir.join('shadow.log')
p.write('root:!:16179:0')
o = parser.log_file.shadow_stdout.ShadowStdoutLog(str(p))
with pytest.raises(AssertionError):
o.parse()
def test_duplicates(tmpdir):
p = tmpdir.join('shadow.log')
p.write('root:!:16179:0:99999:7:::\nroot:!:16179:0:99999:7:::')
o = parser.log_file.shadow_stdout.ShadowStdoutLog(str(p))
with pytest.raises(AssertionError):
o.parse()
|
Add testing for shadow_stdout parser moduleimport pytest
import parser.log_file.shadow_stdout
good_shadow_text = '''root:!:16179:0:99999:7:::
vagrant:$6$5M8f9rEy$nFWJvEnn2KFQwFsm6oRMyxva3mVixbyxZIE3cYTJ.ARFMt6Nq6gsnqScUkZ/slZ8tQzhZovx1M2CnmSsF71JA1:16179:0:99999:7:::
vboxadd:!:16179::::::
'''
@pytest.fixture(scope='function')
def good_shadow(tmpdir):
p = tmpdir.join('shadow.log')
p.write(good_shadow_text)
o = parser.log_file.shadow_stdout.ShadowStdoutLog(str(p))
o.parse()
return o
def test_length(good_shadow):
assert len(good_shadow.users) is 3
def test_user_name(good_shadow):
assert 'root' in good_shadow.users
@pytest.fixture(scope='function')
def root_user(good_shadow):
return good_shadow.users['root'].shadow
def test_good_user_password(root_user):
assert root_user.password == '!'
def test_good_user_lastchanged(root_user):
assert root_user.lastchanged == '16179'
def test_good_user_minimum(root_user):
assert root_user.minimum == '0'
def test_good_user_maximum(root_user):
assert root_user.maximum == '99999'
def test_good_user_warn(root_user):
assert root_user.warn == '7'
def test_good_user_inactive(root_user):
assert root_user.inactive == ''
def test_good_user_expire(root_user):
assert root_user.expire == ''
def test_good_user_reserved(root_user):
assert root_user.reserved == ''
def test_bad_field_count(tmpdir):
p = tmpdir.join('shadow.log')
p.write('root:!:16179:0')
o = parser.log_file.shadow_stdout.ShadowStdoutLog(str(p))
with pytest.raises(AssertionError):
o.parse()
def test_duplicates(tmpdir):
p = tmpdir.join('shadow.log')
p.write('root:!:16179:0:99999:7:::\nroot:!:16179:0:99999:7:::')
o = parser.log_file.shadow_stdout.ShadowStdoutLog(str(p))
with pytest.raises(AssertionError):
o.parse()
|
<commit_before><commit_msg>Add testing for shadow_stdout parser module<commit_after>import pytest
import parser.log_file.shadow_stdout
good_shadow_text = '''root:!:16179:0:99999:7:::
vagrant:$6$5M8f9rEy$nFWJvEnn2KFQwFsm6oRMyxva3mVixbyxZIE3cYTJ.ARFMt6Nq6gsnqScUkZ/slZ8tQzhZovx1M2CnmSsF71JA1:16179:0:99999:7:::
vboxadd:!:16179::::::
'''
@pytest.fixture(scope='function')
def good_shadow(tmpdir):
p = tmpdir.join('shadow.log')
p.write(good_shadow_text)
o = parser.log_file.shadow_stdout.ShadowStdoutLog(str(p))
o.parse()
return o
def test_length(good_shadow):
assert len(good_shadow.users) is 3
def test_user_name(good_shadow):
assert 'root' in good_shadow.users
@pytest.fixture(scope='function')
def root_user(good_shadow):
return good_shadow.users['root'].shadow
def test_good_user_password(root_user):
assert root_user.password == '!'
def test_good_user_lastchanged(root_user):
assert root_user.lastchanged == '16179'
def test_good_user_minimum(root_user):
assert root_user.minimum == '0'
def test_good_user_maximum(root_user):
assert root_user.maximum == '99999'
def test_good_user_warn(root_user):
assert root_user.warn == '7'
def test_good_user_inactive(root_user):
assert root_user.inactive == ''
def test_good_user_expire(root_user):
assert root_user.expire == ''
def test_good_user_reserved(root_user):
assert root_user.reserved == ''
def test_bad_field_count(tmpdir):
p = tmpdir.join('shadow.log')
p.write('root:!:16179:0')
o = parser.log_file.shadow_stdout.ShadowStdoutLog(str(p))
with pytest.raises(AssertionError):
o.parse()
def test_duplicates(tmpdir):
p = tmpdir.join('shadow.log')
p.write('root:!:16179:0:99999:7:::\nroot:!:16179:0:99999:7:::')
o = parser.log_file.shadow_stdout.ShadowStdoutLog(str(p))
with pytest.raises(AssertionError):
o.parse()
|
|
0b76510e58c4eaa71fb37c563b00fa6cc67d49fc
|
tests/print_view_hierarchy_test.py
|
tests/print_view_hierarchy_test.py
|
"""Tests for scripts/print_view_hierarchy.py."""
import re
import unittest
from test_utils import import_utils
import_utils.prepare_lldb_import_or_exit()
import lldb
import_utils.prepare_for_scripts_imports()
from scripts import print_view_hierarchy
class PrintViewHierarchyTest(unittest.TestCase):
def testPrintViewHierarchy(self):
"""Tests the expected output of the |pv| command."""
debugger = lldb.SBDebugger.Create()
debugger.SetAsync(False)
target = debugger.CreateTarget('')
error = lldb.SBError()
process = target.AttachToProcessWithName(debugger.GetListener(), 'TestApp',
False, error)
if not process:
self.assertTrue(False, 'Could not attach to process "TestApp"')
debugger.SetSelectedTarget(target)
result = lldb.SBCommandReturnObject()
print_view_hierarchy.print_view_hierarchy(debugger, None, result, None)
self.assertTrue(result.Succeeded())
expected_output_regex = r'<UIWindow: 0x\w{12}; frame = \(0 0; 414 736\); autoresize = W\+H; gestureRecognizers = <NSArray: 0x\w{12}>; layer = <UIWindowLayer: 0x\w{12}>>\n \|'
self.assertTrue(re.search(expected_output_regex, result.GetOutput(), re.M))
debugger.DeleteTarget(target)
|
Add a test for |pv| command.
|
Add a test for |pv| command.
|
Python
|
mit
|
mrhappyasthma/HappyDebugging,mrhappyasthma/happydebugging
|
Add a test for |pv| command.
|
"""Tests for scripts/print_view_hierarchy.py."""
import re
import unittest
from test_utils import import_utils
import_utils.prepare_lldb_import_or_exit()
import lldb
import_utils.prepare_for_scripts_imports()
from scripts import print_view_hierarchy
class PrintViewHierarchyTest(unittest.TestCase):
def testPrintViewHierarchy(self):
"""Tests the expected output of the |pv| command."""
debugger = lldb.SBDebugger.Create()
debugger.SetAsync(False)
target = debugger.CreateTarget('')
error = lldb.SBError()
process = target.AttachToProcessWithName(debugger.GetListener(), 'TestApp',
False, error)
if not process:
self.assertTrue(False, 'Could not attach to process "TestApp"')
debugger.SetSelectedTarget(target)
result = lldb.SBCommandReturnObject()
print_view_hierarchy.print_view_hierarchy(debugger, None, result, None)
self.assertTrue(result.Succeeded())
expected_output_regex = r'<UIWindow: 0x\w{12}; frame = \(0 0; 414 736\); autoresize = W\+H; gestureRecognizers = <NSArray: 0x\w{12}>; layer = <UIWindowLayer: 0x\w{12}>>\n \|'
self.assertTrue(re.search(expected_output_regex, result.GetOutput(), re.M))
debugger.DeleteTarget(target)
|
<commit_before><commit_msg>Add a test for |pv| command.<commit_after>
|
"""Tests for scripts/print_view_hierarchy.py."""
import re
import unittest
from test_utils import import_utils
import_utils.prepare_lldb_import_or_exit()
import lldb
import_utils.prepare_for_scripts_imports()
from scripts import print_view_hierarchy
class PrintViewHierarchyTest(unittest.TestCase):
def testPrintViewHierarchy(self):
"""Tests the expected output of the |pv| command."""
debugger = lldb.SBDebugger.Create()
debugger.SetAsync(False)
target = debugger.CreateTarget('')
error = lldb.SBError()
process = target.AttachToProcessWithName(debugger.GetListener(), 'TestApp',
False, error)
if not process:
self.assertTrue(False, 'Could not attach to process "TestApp"')
debugger.SetSelectedTarget(target)
result = lldb.SBCommandReturnObject()
print_view_hierarchy.print_view_hierarchy(debugger, None, result, None)
self.assertTrue(result.Succeeded())
expected_output_regex = r'<UIWindow: 0x\w{12}; frame = \(0 0; 414 736\); autoresize = W\+H; gestureRecognizers = <NSArray: 0x\w{12}>; layer = <UIWindowLayer: 0x\w{12}>>\n \|'
self.assertTrue(re.search(expected_output_regex, result.GetOutput(), re.M))
debugger.DeleteTarget(target)
|
Add a test for |pv| command."""Tests for scripts/print_view_hierarchy.py."""
import re
import unittest
from test_utils import import_utils
import_utils.prepare_lldb_import_or_exit()
import lldb
import_utils.prepare_for_scripts_imports()
from scripts import print_view_hierarchy
class PrintViewHierarchyTest(unittest.TestCase):
def testPrintViewHierarchy(self):
"""Tests the expected output of the |pv| command."""
debugger = lldb.SBDebugger.Create()
debugger.SetAsync(False)
target = debugger.CreateTarget('')
error = lldb.SBError()
process = target.AttachToProcessWithName(debugger.GetListener(), 'TestApp',
False, error)
if not process:
self.assertTrue(False, 'Could not attach to process "TestApp"')
debugger.SetSelectedTarget(target)
result = lldb.SBCommandReturnObject()
print_view_hierarchy.print_view_hierarchy(debugger, None, result, None)
self.assertTrue(result.Succeeded())
expected_output_regex = r'<UIWindow: 0x\w{12}; frame = \(0 0; 414 736\); autoresize = W\+H; gestureRecognizers = <NSArray: 0x\w{12}>; layer = <UIWindowLayer: 0x\w{12}>>\n \|'
self.assertTrue(re.search(expected_output_regex, result.GetOutput(), re.M))
debugger.DeleteTarget(target)
|
<commit_before><commit_msg>Add a test for |pv| command.<commit_after>"""Tests for scripts/print_view_hierarchy.py."""
import re
import unittest
from test_utils import import_utils
import_utils.prepare_lldb_import_or_exit()
import lldb
import_utils.prepare_for_scripts_imports()
from scripts import print_view_hierarchy
class PrintViewHierarchyTest(unittest.TestCase):
def testPrintViewHierarchy(self):
"""Tests the expected output of the |pv| command."""
debugger = lldb.SBDebugger.Create()
debugger.SetAsync(False)
target = debugger.CreateTarget('')
error = lldb.SBError()
process = target.AttachToProcessWithName(debugger.GetListener(), 'TestApp',
False, error)
if not process:
self.assertTrue(False, 'Could not attach to process "TestApp"')
debugger.SetSelectedTarget(target)
result = lldb.SBCommandReturnObject()
print_view_hierarchy.print_view_hierarchy(debugger, None, result, None)
self.assertTrue(result.Succeeded())
expected_output_regex = r'<UIWindow: 0x\w{12}; frame = \(0 0; 414 736\); autoresize = W\+H; gestureRecognizers = <NSArray: 0x\w{12}>; layer = <UIWindowLayer: 0x\w{12}>>\n \|'
self.assertTrue(re.search(expected_output_regex, result.GetOutput(), re.M))
debugger.DeleteTarget(target)
|
|
96b9c9f0a3bb3fd96ff7dd2c289dd0e4dee2bbb0
|
14B-088/HI/analysis/rotation_curves/cube_subtract_rotation.py
|
14B-088/HI/analysis/rotation_curves/cube_subtract_rotation.py
|
from astropy.io import fits
from spectral_cube import SpectralCube
import astropy.units as u
import numpy as np
import os
from astropy.utils.console import ProgressBar
'''
Subtract a rotation model from a cube.
'''
# Load in my huge FITS creator
execfile(os.path.expanduser("~/Dropbox/code_development/ewky_scripts/write_huge_fits.py"))
def find_nearest(array, value):
'''
http://stackoverflow.com/questions/2566412/find-nearest-value-in-numpy-array
'''
idx = (np.abs(array - value)).argmin()
return idx
# Set vsys. Using the fit value from DISKFIT
vsys = -180610 * u.m / u.s
data_path = "/media/eric/MyRAID/M33/14B-088/HI/full_imaging/"
cube = SpectralCube.read(os.path.join(data_path,
"M33_14B-088_HI.clean.image.pbcov_gt_0.3_masked.fits"))
# Where's the center?
center_pixel = find_nearest(cube.spectral_axis, vsys)
# In this case, the remaining difference is a minuscule 3 m/s.
model = fits.open(os.path.join(data_path,
"diskfit_noasymm_nowarp_output/rad.mod.fits"))
# Now calculate the spectral shifts needed for each pixel
# Assuming that the array shapes are the same (which they are here)
shifts = np.zeros(model[0].data.shape)
posns = np.where(np.isfinite(model[0].data))
# Adjust the header
new_header = cube.header.copy()
# There's a 1 pixel offset
new_header["CRPIX3"] = center_pixel + 1
new_header["CRVAL3"] = (cube.spectral_axis[center_pixel] - vsys).value
# Create the FITS file so we can write 1 spectrum in at a time
new_fitsname = os.path.join(data_path,
"M33_14B-088_HI.clean.image.pbcov_gt_0.3_masked.rotsub.fits")
create_huge_fits(cube.shape, new_fitsname, header=new_header)
new_fits = fits.open(new_fitsname, mode='update')
write_every = 1000
for num, (i, j) in enumerate(ProgressBar(zip(*posns))):
shift = find_nearest(cube.spectral_axis,
model[0].data[i, j] * u.m / u.s) - center_pixel
new_fits[0].data[:, i, j] = np.roll(cube.filled_data[:, i, j].astype(np.float32), shift)
if num % write_every == 0:
new_fits.flush()
new_fits.flush()
new_fits.close()
|
Subtract a rotation curve from a cube
|
Subtract a rotation curve from a cube
|
Python
|
mit
|
e-koch/VLA_Lband,e-koch/VLA_Lband
|
Subtract a rotation curve from a cube
|
from astropy.io import fits
from spectral_cube import SpectralCube
import astropy.units as u
import numpy as np
import os
from astropy.utils.console import ProgressBar
'''
Subtract a rotation model from a cube.
'''
# Load in my huge FITS creator
execfile(os.path.expanduser("~/Dropbox/code_development/ewky_scripts/write_huge_fits.py"))
def find_nearest(array, value):
'''
http://stackoverflow.com/questions/2566412/find-nearest-value-in-numpy-array
'''
idx = (np.abs(array - value)).argmin()
return idx
# Set vsys. Using the fit value from DISKFIT
vsys = -180610 * u.m / u.s
data_path = "/media/eric/MyRAID/M33/14B-088/HI/full_imaging/"
cube = SpectralCube.read(os.path.join(data_path,
"M33_14B-088_HI.clean.image.pbcov_gt_0.3_masked.fits"))
# Where's the center?
center_pixel = find_nearest(cube.spectral_axis, vsys)
# In this case, the remaining difference is a minuscule 3 m/s.
model = fits.open(os.path.join(data_path,
"diskfit_noasymm_nowarp_output/rad.mod.fits"))
# Now calculate the spectral shifts needed for each pixel
# Assuming that the array shapes are the same (which they are here)
shifts = np.zeros(model[0].data.shape)
posns = np.where(np.isfinite(model[0].data))
# Adjust the header
new_header = cube.header.copy()
# There's a 1 pixel offset
new_header["CRPIX3"] = center_pixel + 1
new_header["CRVAL3"] = (cube.spectral_axis[center_pixel] - vsys).value
# Create the FITS file so we can write 1 spectrum in at a time
new_fitsname = os.path.join(data_path,
"M33_14B-088_HI.clean.image.pbcov_gt_0.3_masked.rotsub.fits")
create_huge_fits(cube.shape, new_fitsname, header=new_header)
new_fits = fits.open(new_fitsname, mode='update')
write_every = 1000
for num, (i, j) in enumerate(ProgressBar(zip(*posns))):
shift = find_nearest(cube.spectral_axis,
model[0].data[i, j] * u.m / u.s) - center_pixel
new_fits[0].data[:, i, j] = np.roll(cube.filled_data[:, i, j].astype(np.float32), shift)
if num % write_every == 0:
new_fits.flush()
new_fits.flush()
new_fits.close()
|
<commit_before><commit_msg>Subtract a rotation curve from a cube<commit_after>
|
from astropy.io import fits
from spectral_cube import SpectralCube
import astropy.units as u
import numpy as np
import os
from astropy.utils.console import ProgressBar
'''
Subtract a rotation model from a cube.
'''
# Load in my huge FITS creator
execfile(os.path.expanduser("~/Dropbox/code_development/ewky_scripts/write_huge_fits.py"))
def find_nearest(array, value):
'''
http://stackoverflow.com/questions/2566412/find-nearest-value-in-numpy-array
'''
idx = (np.abs(array - value)).argmin()
return idx
# Set vsys. Using the fit value from DISKFIT
vsys = -180610 * u.m / u.s
data_path = "/media/eric/MyRAID/M33/14B-088/HI/full_imaging/"
cube = SpectralCube.read(os.path.join(data_path,
"M33_14B-088_HI.clean.image.pbcov_gt_0.3_masked.fits"))
# Where's the center?
center_pixel = find_nearest(cube.spectral_axis, vsys)
# In this case, the remaining difference is a minuscule 3 m/s.
model = fits.open(os.path.join(data_path,
"diskfit_noasymm_nowarp_output/rad.mod.fits"))
# Now calculate the spectral shifts needed for each pixel
# Assuming that the array shapes are the same (which they are here)
shifts = np.zeros(model[0].data.shape)
posns = np.where(np.isfinite(model[0].data))
# Adjust the header
new_header = cube.header.copy()
# There's a 1 pixel offset
new_header["CRPIX3"] = center_pixel + 1
new_header["CRVAL3"] = (cube.spectral_axis[center_pixel] - vsys).value
# Create the FITS file so we can write 1 spectrum in at a time
new_fitsname = os.path.join(data_path,
"M33_14B-088_HI.clean.image.pbcov_gt_0.3_masked.rotsub.fits")
create_huge_fits(cube.shape, new_fitsname, header=new_header)
new_fits = fits.open(new_fitsname, mode='update')
write_every = 1000
for num, (i, j) in enumerate(ProgressBar(zip(*posns))):
shift = find_nearest(cube.spectral_axis,
model[0].data[i, j] * u.m / u.s) - center_pixel
new_fits[0].data[:, i, j] = np.roll(cube.filled_data[:, i, j].astype(np.float32), shift)
if num % write_every == 0:
new_fits.flush()
new_fits.flush()
new_fits.close()
|
Subtract a rotation curve from a cube
from astropy.io import fits
from spectral_cube import SpectralCube
import astropy.units as u
import numpy as np
import os
from astropy.utils.console import ProgressBar
'''
Subtract a rotation model from a cube.
'''
# Load in my huge FITS creator
execfile(os.path.expanduser("~/Dropbox/code_development/ewky_scripts/write_huge_fits.py"))
def find_nearest(array, value):
'''
http://stackoverflow.com/questions/2566412/find-nearest-value-in-numpy-array
'''
idx = (np.abs(array - value)).argmin()
return idx
# Set vsys. Using the fit value from DISKFIT
vsys = -180610 * u.m / u.s
data_path = "/media/eric/MyRAID/M33/14B-088/HI/full_imaging/"
cube = SpectralCube.read(os.path.join(data_path,
"M33_14B-088_HI.clean.image.pbcov_gt_0.3_masked.fits"))
# Where's the center?
center_pixel = find_nearest(cube.spectral_axis, vsys)
# In this case, the remaining difference is a minuscule 3 m/s.
model = fits.open(os.path.join(data_path,
"diskfit_noasymm_nowarp_output/rad.mod.fits"))
# Now calculate the spectral shifts needed for each pixel
# Assuming that the array shapes are the same (which they are here)
shifts = np.zeros(model[0].data.shape)
posns = np.where(np.isfinite(model[0].data))
# Adjust the header
new_header = cube.header.copy()
# There's a 1 pixel offset
new_header["CRPIX3"] = center_pixel + 1
new_header["CRVAL3"] = (cube.spectral_axis[center_pixel] - vsys).value
# Create the FITS file so we can write 1 spectrum in at a time
new_fitsname = os.path.join(data_path,
"M33_14B-088_HI.clean.image.pbcov_gt_0.3_masked.rotsub.fits")
create_huge_fits(cube.shape, new_fitsname, header=new_header)
new_fits = fits.open(new_fitsname, mode='update')
write_every = 1000
for num, (i, j) in enumerate(ProgressBar(zip(*posns))):
shift = find_nearest(cube.spectral_axis,
model[0].data[i, j] * u.m / u.s) - center_pixel
new_fits[0].data[:, i, j] = np.roll(cube.filled_data[:, i, j].astype(np.float32), shift)
if num % write_every == 0:
new_fits.flush()
new_fits.flush()
new_fits.close()
|
<commit_before><commit_msg>Subtract a rotation curve from a cube<commit_after>
from astropy.io import fits
from spectral_cube import SpectralCube
import astropy.units as u
import numpy as np
import os
from astropy.utils.console import ProgressBar
'''
Subtract a rotation model from a cube.
'''
# Load in my huge FITS creator
execfile(os.path.expanduser("~/Dropbox/code_development/ewky_scripts/write_huge_fits.py"))
def find_nearest(array, value):
'''
http://stackoverflow.com/questions/2566412/find-nearest-value-in-numpy-array
'''
idx = (np.abs(array - value)).argmin()
return idx
# Set vsys. Using the fit value from DISKFIT
vsys = -180610 * u.m / u.s
data_path = "/media/eric/MyRAID/M33/14B-088/HI/full_imaging/"
cube = SpectralCube.read(os.path.join(data_path,
"M33_14B-088_HI.clean.image.pbcov_gt_0.3_masked.fits"))
# Where's the center?
center_pixel = find_nearest(cube.spectral_axis, vsys)
# In this case, the remaining difference is a minuscule 3 m/s.
model = fits.open(os.path.join(data_path,
"diskfit_noasymm_nowarp_output/rad.mod.fits"))
# Now calculate the spectral shifts needed for each pixel
# Assuming that the array shapes are the same (which they are here)
shifts = np.zeros(model[0].data.shape)
posns = np.where(np.isfinite(model[0].data))
# Adjust the header
new_header = cube.header.copy()
# There's a 1 pixel offset
new_header["CRPIX3"] = center_pixel + 1
new_header["CRVAL3"] = (cube.spectral_axis[center_pixel] - vsys).value
# Create the FITS file so we can write 1 spectrum in at a time
new_fitsname = os.path.join(data_path,
"M33_14B-088_HI.clean.image.pbcov_gt_0.3_masked.rotsub.fits")
create_huge_fits(cube.shape, new_fitsname, header=new_header)
new_fits = fits.open(new_fitsname, mode='update')
write_every = 1000
for num, (i, j) in enumerate(ProgressBar(zip(*posns))):
shift = find_nearest(cube.spectral_axis,
model[0].data[i, j] * u.m / u.s) - center_pixel
new_fits[0].data[:, i, j] = np.roll(cube.filled_data[:, i, j].astype(np.float32), shift)
if num % write_every == 0:
new_fits.flush()
new_fits.flush()
new_fits.close()
|
|
8d1216cae58f179e903b1f4e7873ba8282ca77bd
|
qtpy/tests/test_qtwebenginewidgets.py
|
qtpy/tests/test_qtwebenginewidgets.py
|
from __future__ import absolute_import
import pytest
def test_qtwebenginewidgets():
    """Test the qtpy.QtWebEngineWidgets namespace"""
from qtpy import QtWebEngineWidgets
assert QtWebEngineWidgets.QWebEnginePage is not None
assert QtWebEngineWidgets.QWebEngineView is not None
assert QtWebEngineWidgets.QWebEngineSettings is not None
|
Add a test for QtWebEngineWidgets
|
Testing: Add a test for QtWebEngineWidgets
|
Python
|
mit
|
davvid/qtpy,spyder-ide/qtpy,davvid/qtpy,goanpeca/qtpy,goanpeca/qtpy
|
Testing: Add a test for QtWebEngineWidgets
|
from __future__ import absolute_import
import pytest
def test_qtwebenginewidgets():
    """Test the qtpy.QtWebEngineWidgets namespace"""
from qtpy import QtWebEngineWidgets
assert QtWebEngineWidgets.QWebEnginePage is not None
assert QtWebEngineWidgets.QWebEngineView is not None
assert QtWebEngineWidgets.QWebEngineSettings is not None
|
<commit_before><commit_msg>Testing: Add a test for QtWebEngineWidgets<commit_after>
|
from __future__ import absolute_import
import pytest
def test_qtwebenginewidgets():
    """Test the qtpy.QtWebEngineWidgets namespace"""
from qtpy import QtWebEngineWidgets
assert QtWebEngineWidgets.QWebEnginePage is not None
assert QtWebEngineWidgets.QWebEngineView is not None
assert QtWebEngineWidgets.QWebEngineSettings is not None
|
Testing: Add a test for QtWebEngineWidgetsfrom __future__ import absolute_import
import pytest
def test_qtwebenginewidgets():
    """Test the qtpy.QtWebEngineWidgets namespace"""
from qtpy import QtWebEngineWidgets
assert QtWebEngineWidgets.QWebEnginePage is not None
assert QtWebEngineWidgets.QWebEngineView is not None
assert QtWebEngineWidgets.QWebEngineSettings is not None
|
<commit_before><commit_msg>Testing: Add a test for QtWebEngineWidgets<commit_after>from __future__ import absolute_import
import pytest
def test_qtwebenginewidgets():
    """Test the qtpy.QtWebEngineWidgets namespace"""
from qtpy import QtWebEngineWidgets
assert QtWebEngineWidgets.QWebEnginePage is not None
assert QtWebEngineWidgets.QWebEngineView is not None
assert QtWebEngineWidgets.QWebEngineSettings is not None
|
|
8c9fa6d2b31cc08212cb42ca4b429ae4a2793b70
|
tools/shared-packs.py
|
tools/shared-packs.py
|
import os
import sys
import yaml
import paste.util.multidict
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir,
'devstack',
'__init__.py')):
sys.path.insert(0, possible_topdir)
from devstack import log
from devstack import utils
class CustomDumper(yaml.SafeDumper):
def ignore_aliases(self, _data):
return True
fn = sys.argv[1]
with open(fn, "r") as fh:
data = fh.read()
b = yaml.load(data)
names = set()
for c in b['components']:
names.add(c)
idf = 'packages'
pkgs = paste.util.multidict.MultiDict()
for name in names:
data = b['components'][name]
#print name
for p in data.get(idf) or []:
pname = p['name']
pkgs.add(pname, p)
common = list()
for pkg in sorted(list(set(pkgs.keys()))):
items = pkgs.getall(pkg)
if len(items) > 1:
print("Package dupe on: %r with %s dups" % (pkg, len(items)))
versions = set()
for v in items:
if v.get('version'):
versions.add(str(v.get('version')))
if len(versions) > 1:
print("\tWith many versions: %s" % (versions))
else:
print("\tAll with the same version %s" % (versions))
common.append(items[0])
idf = 'pips'
pkgs = paste.util.multidict.MultiDict()
for name in names:
data = b['components'][name]
for p in data.get(idf) or []:
pname = p['name']
pkgs.add(pname, p)
print("-" * 20)
common_pips = list()
for pkg in sorted(list(set(pkgs.keys()))):
items = pkgs.getall(pkg)
if len(items) > 1:
print("Pip dupe on: %r with %s dups" % (pkg, len(items)))
versions = set()
for v in items:
if v.get('version'):
versions.add(str(v.get('version')))
if len(versions) > 1:
print("\tWith many versions: %s" % (versions))
else:
print("\tAll with the same version %s" % (versions))
common_pips.append(items[0])
#data = {'common': {'packages': common, 'pips': common_pips}}
#formatted = yaml.dump(data,
# line_break="\n",
# indent=4,
# explicit_start=True,
# explicit_end=True,
# default_flow_style=False,
# Dumper=CustomDumper,
# )
#print formatted
|
Add tool that will show the shared packages
|
Add tool that will show the shared packages
|
Python
|
apache-2.0
|
stackforge/anvil,mc2014/anvil,stackforge/anvil,mc2014/anvil
|
Add tool that will show the shared packages
|
import os
import sys
import yaml
import paste.util.multidict
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir,
'devstack',
'__init__.py')):
sys.path.insert(0, possible_topdir)
from devstack import log
from devstack import utils
class CustomDumper(yaml.SafeDumper):
def ignore_aliases(self, _data):
return True
fn = sys.argv[1]
with open(fn, "r") as fh:
data = fh.read()
b = yaml.load(data)
names = set()
for c in b['components']:
names.add(c)
idf = 'packages'
pkgs = paste.util.multidict.MultiDict()
for name in names:
data = b['components'][name]
#print name
for p in data.get(idf) or []:
pname = p['name']
pkgs.add(pname, p)
common = list()
for pkg in sorted(list(set(pkgs.keys()))):
items = pkgs.getall(pkg)
if len(items) > 1:
print("Package dupe on: %r with %s dups" % (pkg, len(items)))
versions = set()
for v in items:
if v.get('version'):
versions.add(str(v.get('version')))
if len(versions) > 1:
print("\tWith many versions: %s" % (versions))
else:
print("\tAll with the same version %s" % (versions))
common.append(items[0])
idf = 'pips'
pkgs = paste.util.multidict.MultiDict()
for name in names:
data = b['components'][name]
for p in data.get(idf) or []:
pname = p['name']
pkgs.add(pname, p)
print("-" * 20)
common_pips = list()
for pkg in sorted(list(set(pkgs.keys()))):
items = pkgs.getall(pkg)
if len(items) > 1:
print("Pip dupe on: %r with %s dups" % (pkg, len(items)))
versions = set()
for v in items:
if v.get('version'):
versions.add(str(v.get('version')))
if len(versions) > 1:
print("\tWith many versions: %s" % (versions))
else:
print("\tAll with the same version %s" % (versions))
common_pips.append(items[0])
#data = {'common': {'packages': common, 'pips': common_pips}}
#formatted = yaml.dump(data,
# line_break="\n",
# indent=4,
# explicit_start=True,
# explicit_end=True,
# default_flow_style=False,
# Dumper=CustomDumper,
# )
#print formatted
|
<commit_before><commit_msg>Add tool that will show the shared packages<commit_after>
|
import os
import sys
import yaml
import paste.util.multidict
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir,
'devstack',
'__init__.py')):
sys.path.insert(0, possible_topdir)
from devstack import log
from devstack import utils
class CustomDumper(yaml.SafeDumper):
def ignore_aliases(self, _data):
return True
fn = sys.argv[1]
with open(fn, "r") as fh:
data = fh.read()
b = yaml.load(data)
names = set()
for c in b['components']:
names.add(c)
idf = 'packages'
pkgs = paste.util.multidict.MultiDict()
for name in names:
data = b['components'][name]
#print name
for p in data.get(idf) or []:
pname = p['name']
pkgs.add(pname, p)
common = list()
for pkg in sorted(list(set(pkgs.keys()))):
items = pkgs.getall(pkg)
if len(items) > 1:
print("Package dupe on: %r with %s dups" % (pkg, len(items)))
versions = set()
for v in items:
if v.get('version'):
versions.add(str(v.get('version')))
if len(versions) > 1:
print("\tWith many versions: %s" % (versions))
else:
print("\tAll with the same version %s" % (versions))
common.append(items[0])
idf = 'pips'
pkgs = paste.util.multidict.MultiDict()
for name in names:
data = b['components'][name]
for p in data.get(idf) or []:
pname = p['name']
pkgs.add(pname, p)
print("-" * 20)
common_pips = list()
for pkg in sorted(list(set(pkgs.keys()))):
items = pkgs.getall(pkg)
if len(items) > 1:
print("Pip dupe on: %r with %s dups" % (pkg, len(items)))
versions = set()
for v in items:
if v.get('version'):
versions.add(str(v.get('version')))
if len(versions) > 1:
print("\tWith many versions: %s" % (versions))
else:
print("\tAll with the same version %s" % (versions))
common_pips.append(items[0])
#data = {'common': {'packages': common, 'pips': common_pips}}
#formatted = yaml.dump(data,
# line_break="\n",
# indent=4,
# explicit_start=True,
# explicit_end=True,
# default_flow_style=False,
# Dumper=CustomDumper,
# )
#print formatted
|
Add tool that will show the shared packagesimport os
import sys
import yaml
import paste.util.multidict
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir,
'devstack',
'__init__.py')):
sys.path.insert(0, possible_topdir)
from devstack import log
from devstack import utils
class CustomDumper(yaml.SafeDumper):
def ignore_aliases(self, _data):
return True
fn = sys.argv[1]
with open(fn, "r") as fh:
data = fh.read()
b = yaml.load(data)
names = set()
for c in b['components']:
names.add(c)
idf = 'packages'
pkgs = paste.util.multidict.MultiDict()
for name in names:
data = b['components'][name]
#print name
for p in data.get(idf) or []:
pname = p['name']
pkgs.add(pname, p)
common = list()
for pkg in sorted(list(set(pkgs.keys()))):
items = pkgs.getall(pkg)
if len(items) > 1:
print("Package dupe on: %r with %s dups" % (pkg, len(items)))
versions = set()
for v in items:
if v.get('version'):
versions.add(str(v.get('version')))
if len(versions) > 1:
print("\tWith many versions: %s" % (versions))
else:
print("\tAll with the same version %s" % (versions))
common.append(items[0])
idf = 'pips'
pkgs = paste.util.multidict.MultiDict()
for name in names:
data = b['components'][name]
for p in data.get(idf) or []:
pname = p['name']
pkgs.add(pname, p)
print("-" * 20)
common_pips = list()
for pkg in sorted(list(set(pkgs.keys()))):
items = pkgs.getall(pkg)
if len(items) > 1:
print("Pip dupe on: %r with %s dups" % (pkg, len(items)))
versions = set()
for v in items:
if v.get('version'):
versions.add(str(v.get('version')))
if len(versions) > 1:
print("\tWith many versions: %s" % (versions))
else:
print("\tAll with the same version %s" % (versions))
common_pips.append(items[0])
#data = {'common': {'packages': common, 'pips': common_pips}}
#formatted = yaml.dump(data,
# line_break="\n",
# indent=4,
# explicit_start=True,
# explicit_end=True,
# default_flow_style=False,
# Dumper=CustomDumper,
# )
#print formatted
|
<commit_before><commit_msg>Add tool that will show the shared packages<commit_after>import os
import sys
import yaml
import paste.util.multidict
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir,
'devstack',
'__init__.py')):
sys.path.insert(0, possible_topdir)
from devstack import log
from devstack import utils
class CustomDumper(yaml.SafeDumper):
def ignore_aliases(self, _data):
return True
fn = sys.argv[1]
with open(fn, "r") as fh:
data = fh.read()
b = yaml.load(data)
names = set()
for c in b['components']:
names.add(c)
idf = 'packages'
pkgs = paste.util.multidict.MultiDict()
for name in names:
data = b['components'][name]
#print name
for p in data.get(idf) or []:
pname = p['name']
pkgs.add(pname, p)
common = list()
for pkg in sorted(list(set(pkgs.keys()))):
items = pkgs.getall(pkg)
if len(items) > 1:
print("Package dupe on: %r with %s dups" % (pkg, len(items)))
versions = set()
for v in items:
if v.get('version'):
versions.add(str(v.get('version')))
if len(versions) > 1:
print("\tWith many versions: %s" % (versions))
else:
print("\tAll with the same version %s" % (versions))
common.append(items[0])
idf = 'pips'
pkgs = paste.util.multidict.MultiDict()
for name in names:
data = b['components'][name]
for p in data.get(idf) or []:
pname = p['name']
pkgs.add(pname, p)
print("-" * 20)
common_pips = list()
for pkg in sorted(list(set(pkgs.keys()))):
items = pkgs.getall(pkg)
if len(items) > 1:
print("Pip dupe on: %r with %s dups" % (pkg, len(items)))
versions = set()
for v in items:
if v.get('version'):
versions.add(str(v.get('version')))
if len(versions) > 1:
print("\tWith many versions: %s" % (versions))
else:
print("\tAll with the same version %s" % (versions))
common_pips.append(items[0])
#data = {'common': {'packages': common, 'pips': common_pips}}
#formatted = yaml.dump(data,
# line_break="\n",
# indent=4,
# explicit_start=True,
# explicit_end=True,
# default_flow_style=False,
# Dumper=CustomDumper,
# )
#print formatted
|
|
ed9a152caf57db35c5bf7f86dd5148b0e8f582b4
|
examples/widgets/generic_code_edit.py
|
examples/widgets/generic_code_edit.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
A simple example that shows how to setup CodeEdit.
In this example, we install a syntax highlighter mode (based on pygments), a
mode that highlights the current line and a _search and replace_ panel.
There are many other modes and panels, feel free to use this example as a
starting point to experiment.
"""
import logging
logging.basicConfig(level=logging.DEBUG)
import sys
from pyqode.qt import QtWidgets
from pyqode.core.widgets import GenericCodeEdit
def main():
app = QtWidgets.QApplication(sys.argv)
# create editor and window
window = QtWidgets.QMainWindow()
editor = GenericCodeEdit()
# open a file
editor.file.open(__file__)
window.setCentralWidget(editor)
# run
window.show()
app.exec_()
editor.file.close()
if __name__ == "__main__":
main()
|
Add a generic code edit example
|
Add a generic code edit example
|
Python
|
mit
|
pyQode/pyqode.core,pyQode/pyqode.core,zwadar/pyqode.core
|
Add a generic code edit example
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
A simple example that shows how to setup CodeEdit.
In this example, we install a syntax highlighter mode (based on pygments), a
mode that highlights the current line and a _search and replace_ panel.
There are many other modes and panels, feel free to use this example as a
starting point to experiment.
"""
import logging
logging.basicConfig(level=logging.DEBUG)
import sys
from pyqode.qt import QtWidgets
from pyqode.core.widgets import GenericCodeEdit
def main():
app = QtWidgets.QApplication(sys.argv)
# create editor and window
window = QtWidgets.QMainWindow()
editor = GenericCodeEdit()
# open a file
editor.file.open(__file__)
window.setCentralWidget(editor)
# run
window.show()
app.exec_()
editor.file.close()
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add a generic code edit example<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
A simple example that shows how to setup CodeEdit.
In this example, we install a syntax highlighter mode (based on pygments), a
mode that highlights the current line and a _search and replace_ panel.
There are many other modes and panels, feel free to use this example as a
starting point to experiment.
"""
import logging
logging.basicConfig(level=logging.DEBUG)
import sys
from pyqode.qt import QtWidgets
from pyqode.core.widgets import GenericCodeEdit
def main():
app = QtWidgets.QApplication(sys.argv)
# create editor and window
window = QtWidgets.QMainWindow()
editor = GenericCodeEdit()
# open a file
editor.file.open(__file__)
window.setCentralWidget(editor)
# run
window.show()
app.exec_()
editor.file.close()
if __name__ == "__main__":
main()
|
Add a generic code edit example#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
A simple example that shows how to setup CodeEdit.
In this example, we install a syntax highlighter mode (based on pygments), a
mode that highlights the current line and a _search and replace_ panel.
There are many other modes and panels, feel free to use this example as a
starting point to experiment.
"""
import logging
logging.basicConfig(level=logging.DEBUG)
import sys
from pyqode.qt import QtWidgets
from pyqode.core.widgets import GenericCodeEdit
def main():
app = QtWidgets.QApplication(sys.argv)
# create editor and window
window = QtWidgets.QMainWindow()
editor = GenericCodeEdit()
# open a file
editor.file.open(__file__)
window.setCentralWidget(editor)
# run
window.show()
app.exec_()
editor.file.close()
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add a generic code edit example<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
A simple example that shows how to setup CodeEdit.
In this example, we install a syntax highlighter mode (based on pygments), a
mode that highlights the current line and a _search and replace_ panel.
There are many other modes and panels, feel free to use this example as a
starting point to experiment.
"""
import logging
logging.basicConfig(level=logging.DEBUG)
import sys
from pyqode.qt import QtWidgets
from pyqode.core.widgets import GenericCodeEdit
def main():
app = QtWidgets.QApplication(sys.argv)
# create editor and window
window = QtWidgets.QMainWindow()
editor = GenericCodeEdit()
# open a file
editor.file.open(__file__)
window.setCentralWidget(editor)
# run
window.show()
app.exec_()
editor.file.close()
if __name__ == "__main__":
main()
|
|
101f26d8c21bed496aa3d465ccad690b711af612
|
pyface/tests/test_split_application_window.py
|
pyface/tests/test_split_application_window.py
|
from __future__ import absolute_import
from traits.testing.unittest_tools import unittest, UnittestTools
from ..gui import GUI
from ..split_application_window import SplitApplicationWindow
class TestSplitApplicationWindow(unittest.TestCase, UnittestTools):
def setUp(self):
self.gui = GUI()
self.window = SplitApplicationWindow()
def test_destroy(self):
# test that destroy works even when no control
self.window.destroy()
def test_open_close(self):
        # test that opening and closing works as expected
with self.assertTraitChanges(self.window, 'opening', count=1):
with self.assertTraitChanges(self.window, 'opened', count=1):
self.window.open()
self.gui.process_events()
with self.assertTraitChanges(self.window, 'closing', count=1):
with self.assertTraitChanges(self.window, 'closed', count=1):
self.window.close()
self.gui.process_events()
def test_show(self):
        # test that showing and hiding works as expected
self.window._create()
self.window.show(True)
self.gui.process_events()
self.window.show(False)
self.gui.process_events()
self.window.destroy()
|
Add test for split application window.
|
Add test for split application window.
|
Python
|
bsd-3-clause
|
brett-patterson/pyface,geggo/pyface,geggo/pyface
|
Add test for split application window.
|
from __future__ import absolute_import
from traits.testing.unittest_tools import unittest, UnittestTools
from ..gui import GUI
from ..split_application_window import SplitApplicationWindow
class TestSplitApplicationWindow(unittest.TestCase, UnittestTools):
def setUp(self):
self.gui = GUI()
self.window = SplitApplicationWindow()
def test_destroy(self):
# test that destroy works even when no control
self.window.destroy()
def test_open_close(self):
        # test that opening and closing works as expected
with self.assertTraitChanges(self.window, 'opening', count=1):
with self.assertTraitChanges(self.window, 'opened', count=1):
self.window.open()
self.gui.process_events()
with self.assertTraitChanges(self.window, 'closing', count=1):
with self.assertTraitChanges(self.window, 'closed', count=1):
self.window.close()
self.gui.process_events()
def test_show(self):
        # test that showing and hiding works as expected
self.window._create()
self.window.show(True)
self.gui.process_events()
self.window.show(False)
self.gui.process_events()
self.window.destroy()
|
<commit_before><commit_msg>Add test for split application window.<commit_after>
|
from __future__ import absolute_import
from traits.testing.unittest_tools import unittest, UnittestTools
from ..gui import GUI
from ..split_application_window import SplitApplicationWindow
class TestSplitApplicationWindow(unittest.TestCase, UnittestTools):
def setUp(self):
self.gui = GUI()
self.window = SplitApplicationWindow()
def test_destroy(self):
# test that destroy works even when no control
self.window.destroy()
def test_open_close(self):
        # test that opening and closing works as expected
with self.assertTraitChanges(self.window, 'opening', count=1):
with self.assertTraitChanges(self.window, 'opened', count=1):
self.window.open()
self.gui.process_events()
with self.assertTraitChanges(self.window, 'closing', count=1):
with self.assertTraitChanges(self.window, 'closed', count=1):
self.window.close()
self.gui.process_events()
def test_show(self):
        # test that showing and hiding works as expected
self.window._create()
self.window.show(True)
self.gui.process_events()
self.window.show(False)
self.gui.process_events()
self.window.destroy()
|
Add test for split application window.from __future__ import absolute_import
from traits.testing.unittest_tools import unittest, UnittestTools
from ..gui import GUI
from ..split_application_window import SplitApplicationWindow
class TestSplitApplicationWindow(unittest.TestCase, UnittestTools):
def setUp(self):
self.gui = GUI()
self.window = SplitApplicationWindow()
def test_destroy(self):
# test that destroy works even when no control
self.window.destroy()
def test_open_close(self):
        # test that opening and closing works as expected
with self.assertTraitChanges(self.window, 'opening', count=1):
with self.assertTraitChanges(self.window, 'opened', count=1):
self.window.open()
self.gui.process_events()
with self.assertTraitChanges(self.window, 'closing', count=1):
with self.assertTraitChanges(self.window, 'closed', count=1):
self.window.close()
self.gui.process_events()
def test_show(self):
        # test that showing and hiding works as expected
self.window._create()
self.window.show(True)
self.gui.process_events()
self.window.show(False)
self.gui.process_events()
self.window.destroy()
|
<commit_before><commit_msg>Add test for split application window.<commit_after>from __future__ import absolute_import
from traits.testing.unittest_tools import unittest, UnittestTools
from ..gui import GUI
from ..split_application_window import SplitApplicationWindow
class TestSplitApplicationWindow(unittest.TestCase, UnittestTools):
def setUp(self):
self.gui = GUI()
self.window = SplitApplicationWindow()
def test_destroy(self):
# test that destroy works even when no control
self.window.destroy()
def test_open_close(self):
        # test that opening and closing works as expected
with self.assertTraitChanges(self.window, 'opening', count=1):
with self.assertTraitChanges(self.window, 'opened', count=1):
self.window.open()
self.gui.process_events()
with self.assertTraitChanges(self.window, 'closing', count=1):
with self.assertTraitChanges(self.window, 'closed', count=1):
self.window.close()
self.gui.process_events()
def test_show(self):
        # test that showing and hiding works as expected
self.window._create()
self.window.show(True)
self.gui.process_events()
self.window.show(False)
self.gui.process_events()
self.window.destroy()
|
|
2f8da2ff28fa9cc87c8ad810b67caf994a584100
|
examples/lvm_cache.py
|
examples/lvm_cache.py
|
import os
from examples.common import print_devices
import blivet
from blivet.size import Size
from blivet.util import set_up_logging, create_sparse_tempfile
from blivet.devices.lvm import LVMCacheRequest
set_up_logging()
b = blivet.Blivet() # create an instance of Blivet (don't add system devices)
# create a disk image file on which to create new devices
disk1_file = create_sparse_tempfile("disk1", Size("100GiB"))
b.config.disk_images["disk1"] = disk1_file
disk2_file = create_sparse_tempfile("disk2", Size("100GiB"))
b.config.disk_images["disk2"] = disk2_file
b.reset()
try:
disk1 = b.devicetree.get_device_by_name("disk1")
disk2 = b.devicetree.get_device_by_name("disk2")
b.initialize_disk(disk1)
b.initialize_disk(disk2)
pv = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk1])
b.create_device(pv)
pv2 = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk2])
b.create_device(pv2)
# allocate the partitions (decide where and on which disks they'll reside)
blivet.partitioning.do_partitioning(b)
vg = b.new_vg(parents=[pv, pv2])
b.create_device(vg)
# new lv with base size 5GiB and unbounded growth and an ext4 filesystem
dev = b.new_lv(fmt_type="ext4", size=Size("5GiB"), grow=True,
parents=[vg], name="unbounded")
b.create_device(dev)
# new lv with base size 5GiB and growth up to 15GiB and an ext4 filesystem
dev = b.new_lv(fmt_type="ext4", size=Size("5GiB"), grow=True,
maxsize=Size("15GiB"), parents=[vg], name="bounded")
b.create_device(dev)
    # new cached lv with a fixed size of 2GiB, an ext4 filesystem, and a 1GiB cache on pv2
cache_spec = LVMCacheRequest(size=Size("1GiB"), pvs=[pv2])
dev = b.new_lv(fmt_type="ext4", size=Size("2GiB"), parents=[vg], name="cached", cache_request=cache_spec)
b.create_device(dev)
# allocate the growable lvs
blivet.partitioning.grow_lvm(b)
print_devices(b)
# write the new partitions to disk and format them as specified
b.do_it()
print_devices(b)
input("Check the state and hit ENTER to trigger cleanup")
finally:
b.devicetree.teardown_disk_images()
os.unlink(disk1_file)
os.unlink(disk2_file)
|
Add example of LVM cache creation
|
Add example of LVM cache creation
Useful for testing as well as for users wondering how to do something like this.
|
Python
|
lgpl-2.1
|
jkonecny12/blivet,jkonecny12/blivet,rvykydal/blivet,rvykydal/blivet,vpodzime/blivet,rhinstaller/blivet,vojtechtrefny/blivet,vojtechtrefny/blivet,rhinstaller/blivet,vpodzime/blivet,AdamWill/blivet,AdamWill/blivet
|
Add example of LVM cache creation
Useful for testing as well as for users wondering how to do something like this.
|
import os
from examples.common import print_devices
import blivet
from blivet.size import Size
from blivet.util import set_up_logging, create_sparse_tempfile
from blivet.devices.lvm import LVMCacheRequest
set_up_logging()
b = blivet.Blivet() # create an instance of Blivet (don't add system devices)
# create a disk image file on which to create new devices
disk1_file = create_sparse_tempfile("disk1", Size("100GiB"))
b.config.disk_images["disk1"] = disk1_file
disk2_file = create_sparse_tempfile("disk2", Size("100GiB"))
b.config.disk_images["disk2"] = disk2_file
b.reset()
try:
disk1 = b.devicetree.get_device_by_name("disk1")
disk2 = b.devicetree.get_device_by_name("disk2")
b.initialize_disk(disk1)
b.initialize_disk(disk2)
pv = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk1])
b.create_device(pv)
pv2 = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk2])
b.create_device(pv2)
# allocate the partitions (decide where and on which disks they'll reside)
blivet.partitioning.do_partitioning(b)
vg = b.new_vg(parents=[pv, pv2])
b.create_device(vg)
# new lv with base size 5GiB and unbounded growth and an ext4 filesystem
dev = b.new_lv(fmt_type="ext4", size=Size("5GiB"), grow=True,
parents=[vg], name="unbounded")
b.create_device(dev)
# new lv with base size 5GiB and growth up to 15GiB and an ext4 filesystem
dev = b.new_lv(fmt_type="ext4", size=Size("5GiB"), grow=True,
maxsize=Size("15GiB"), parents=[vg], name="bounded")
b.create_device(dev)
    # new cached lv with a fixed size of 2GiB, an ext4 filesystem, and a 1GiB cache on pv2
cache_spec = LVMCacheRequest(size=Size("1GiB"), pvs=[pv2])
dev = b.new_lv(fmt_type="ext4", size=Size("2GiB"), parents=[vg], name="cached", cache_request=cache_spec)
b.create_device(dev)
# allocate the growable lvs
blivet.partitioning.grow_lvm(b)
print_devices(b)
# write the new partitions to disk and format them as specified
b.do_it()
print_devices(b)
input("Check the state and hit ENTER to trigger cleanup")
finally:
b.devicetree.teardown_disk_images()
os.unlink(disk1_file)
os.unlink(disk2_file)
|
<commit_before><commit_msg>Add example of LVM cache creation
Useful for testing as well as for users wondering how to do something like this.<commit_after>
|
import os
from examples.common import print_devices
import blivet
from blivet.size import Size
from blivet.util import set_up_logging, create_sparse_tempfile
from blivet.devices.lvm import LVMCacheRequest
set_up_logging()
b = blivet.Blivet() # create an instance of Blivet (don't add system devices)
# create a disk image file on which to create new devices
disk1_file = create_sparse_tempfile("disk1", Size("100GiB"))
b.config.disk_images["disk1"] = disk1_file
disk2_file = create_sparse_tempfile("disk2", Size("100GiB"))
b.config.disk_images["disk2"] = disk2_file
b.reset()
try:
disk1 = b.devicetree.get_device_by_name("disk1")
disk2 = b.devicetree.get_device_by_name("disk2")
b.initialize_disk(disk1)
b.initialize_disk(disk2)
pv = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk1])
b.create_device(pv)
pv2 = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk2])
b.create_device(pv2)
# allocate the partitions (decide where and on which disks they'll reside)
blivet.partitioning.do_partitioning(b)
vg = b.new_vg(parents=[pv, pv2])
b.create_device(vg)
# new lv with base size 5GiB and unbounded growth and an ext4 filesystem
dev = b.new_lv(fmt_type="ext4", size=Size("5GiB"), grow=True,
parents=[vg], name="unbounded")
b.create_device(dev)
# new lv with base size 5GiB and growth up to 15GiB and an ext4 filesystem
dev = b.new_lv(fmt_type="ext4", size=Size("5GiB"), grow=True,
maxsize=Size("15GiB"), parents=[vg], name="bounded")
b.create_device(dev)
    # new cached lv with a fixed size of 2GiB, an ext4 filesystem, and a 1GiB cache on pv2
cache_spec = LVMCacheRequest(size=Size("1GiB"), pvs=[pv2])
dev = b.new_lv(fmt_type="ext4", size=Size("2GiB"), parents=[vg], name="cached", cache_request=cache_spec)
b.create_device(dev)
# allocate the growable lvs
blivet.partitioning.grow_lvm(b)
print_devices(b)
# write the new partitions to disk and format them as specified
b.do_it()
print_devices(b)
input("Check the state and hit ENTER to trigger cleanup")
finally:
b.devicetree.teardown_disk_images()
os.unlink(disk1_file)
os.unlink(disk2_file)
|
Add example of LVM cache creation
Useful for testing as well as for users wondering how to do something like this.import os
from examples.common import print_devices
import blivet
from blivet.size import Size
from blivet.util import set_up_logging, create_sparse_tempfile
from blivet.devices.lvm import LVMCacheRequest
set_up_logging()
b = blivet.Blivet() # create an instance of Blivet (don't add system devices)
# create a disk image file on which to create new devices
disk1_file = create_sparse_tempfile("disk1", Size("100GiB"))
b.config.disk_images["disk1"] = disk1_file
disk2_file = create_sparse_tempfile("disk2", Size("100GiB"))
b.config.disk_images["disk2"] = disk2_file
b.reset()
try:
disk1 = b.devicetree.get_device_by_name("disk1")
disk2 = b.devicetree.get_device_by_name("disk2")
b.initialize_disk(disk1)
b.initialize_disk(disk2)
pv = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk1])
b.create_device(pv)
pv2 = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk2])
b.create_device(pv2)
# allocate the partitions (decide where and on which disks they'll reside)
blivet.partitioning.do_partitioning(b)
vg = b.new_vg(parents=[pv, pv2])
b.create_device(vg)
# new lv with base size 5GiB and unbounded growth and an ext4 filesystem
dev = b.new_lv(fmt_type="ext4", size=Size("5GiB"), grow=True,
parents=[vg], name="unbounded")
b.create_device(dev)
# new lv with base size 5GiB and growth up to 15GiB and an ext4 filesystem
dev = b.new_lv(fmt_type="ext4", size=Size("5GiB"), grow=True,
maxsize=Size("15GiB"), parents=[vg], name="bounded")
b.create_device(dev)
    # new cached lv with a fixed size of 2GiB, an ext4 filesystem, and a 1GiB cache on pv2
cache_spec = LVMCacheRequest(size=Size("1GiB"), pvs=[pv2])
dev = b.new_lv(fmt_type="ext4", size=Size("2GiB"), parents=[vg], name="cached", cache_request=cache_spec)
b.create_device(dev)
# allocate the growable lvs
blivet.partitioning.grow_lvm(b)
print_devices(b)
# write the new partitions to disk and format them as specified
b.do_it()
print_devices(b)
input("Check the state and hit ENTER to trigger cleanup")
finally:
b.devicetree.teardown_disk_images()
os.unlink(disk1_file)
os.unlink(disk2_file)
|
<commit_before><commit_msg>Add example of LVM cache creation
Useful for testing as well as for users wondering how to do something like this.<commit_after>import os
from examples.common import print_devices
import blivet
from blivet.size import Size
from blivet.util import set_up_logging, create_sparse_tempfile
from blivet.devices.lvm import LVMCacheRequest
set_up_logging()
b = blivet.Blivet() # create an instance of Blivet (don't add system devices)
# create a disk image file on which to create new devices
disk1_file = create_sparse_tempfile("disk1", Size("100GiB"))
b.config.disk_images["disk1"] = disk1_file
disk2_file = create_sparse_tempfile("disk2", Size("100GiB"))
b.config.disk_images["disk2"] = disk2_file
b.reset()
try:
disk1 = b.devicetree.get_device_by_name("disk1")
disk2 = b.devicetree.get_device_by_name("disk2")
b.initialize_disk(disk1)
b.initialize_disk(disk2)
pv = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk1])
b.create_device(pv)
pv2 = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk2])
b.create_device(pv2)
# allocate the partitions (decide where and on which disks they'll reside)
blivet.partitioning.do_partitioning(b)
vg = b.new_vg(parents=[pv, pv2])
b.create_device(vg)
# new lv with base size 5GiB and unbounded growth and an ext4 filesystem
dev = b.new_lv(fmt_type="ext4", size=Size("5GiB"), grow=True,
parents=[vg], name="unbounded")
b.create_device(dev)
# new lv with base size 5GiB and growth up to 15GiB and an ext4 filesystem
dev = b.new_lv(fmt_type="ext4", size=Size("5GiB"), grow=True,
maxsize=Size("15GiB"), parents=[vg], name="bounded")
b.create_device(dev)
    # new cached lv with a fixed size of 2GiB, an ext4 filesystem, and a 1GiB cache on pv2
cache_spec = LVMCacheRequest(size=Size("1GiB"), pvs=[pv2])
dev = b.new_lv(fmt_type="ext4", size=Size("2GiB"), parents=[vg], name="cached", cache_request=cache_spec)
b.create_device(dev)
# allocate the growable lvs
blivet.partitioning.grow_lvm(b)
print_devices(b)
# write the new partitions to disk and format them as specified
b.do_it()
print_devices(b)
input("Check the state and hit ENTER to trigger cleanup")
finally:
b.devicetree.teardown_disk_images()
os.unlink(disk1_file)
os.unlink(disk2_file)
|
|
94f59e405a63ae6f08ec66a55cd40f6817b72ab6
|
dimagi/utils/excel_importer.py
|
dimagi/utils/excel_importer.py
|
from dimagi.utils.excel import WorkbookJSONReader
from soil import DownloadBase
class ExcelImporter(object):
# TODO make sure people can't instantiate ExcelImporter directly
def __init__(self, task, file_ref_id):
self.task = task
self.progress = 0
if self.task:
DownloadBase.set_progress(self.task, 0, 100)
download_ref = DownloadBase.get(file_ref_id)
self.workbook = WorkbookJSONReader(download_ref.get_filename())
def mark_complete(self):
if self.task:
DownloadBase.set_progress(self.task, 100, 100)
def add_progress(self, count=1):
self.progress += count
if self.task:
DownloadBase.set_progress(self.task, self.progress, self.total_rows)
class SingleExcelImporter(ExcelImporter):
def __init__(self, *args, **kwargs):
super(SingleExcelImporter, self).__init__(*args, **kwargs)
self.worksheet = self.workbook.worksheets[0]
self.total_rows = self.worksheet.worksheet.get_highest_row()
class MultiExcelImporter(ExcelImporter):
def __init__(self, *args, **kwargs):
super(MultiExcelImporter, self).__init__(*args, **kwargs)
self.worksheets = self.workbook.worksheets
self.total_rows = sum(ws.worksheet.get_highest_row() for ws in self.worksheets)
|
Add helper class for excel importing
|
Add helper class for excel importing
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
|
Add helper class for excel importing
|
from dimagi.utils.excel import WorkbookJSONReader
from soil import DownloadBase
class ExcelImporter(object):
# TODO make sure people can't instantiate ExcelImporter directly
def __init__(self, task, file_ref_id):
self.task = task
self.progress = 0
if self.task:
DownloadBase.set_progress(self.task, 0, 100)
download_ref = DownloadBase.get(file_ref_id)
self.workbook = WorkbookJSONReader(download_ref.get_filename())
def mark_complete(self):
if self.task:
DownloadBase.set_progress(self.task, 100, 100)
def add_progress(self, count=1):
self.progress += count
if self.task:
DownloadBase.set_progress(self.task, self.progress, self.total_rows)
class SingleExcelImporter(ExcelImporter):
def __init__(self, *args, **kwargs):
super(SingleExcelImporter, self).__init__(*args, **kwargs)
self.worksheet = self.workbook.worksheets[0]
self.total_rows = self.worksheet.worksheet.get_highest_row()
class MultiExcelImporter(ExcelImporter):
def __init__(self, *args, **kwargs):
super(MultiExcelImporter, self).__init__(*args, **kwargs)
self.worksheets = self.workbook.worksheets
self.total_rows = sum(ws.worksheet.get_highest_row() for ws in self.worksheets)
|
<commit_before><commit_msg>Add helper class for excel importing<commit_after>
|
from dimagi.utils.excel import WorkbookJSONReader
from soil import DownloadBase
class ExcelImporter(object):
# TODO make sure people can't instantiate ExcelImporter directly
def __init__(self, task, file_ref_id):
self.task = task
self.progress = 0
if self.task:
DownloadBase.set_progress(self.task, 0, 100)
download_ref = DownloadBase.get(file_ref_id)
self.workbook = WorkbookJSONReader(download_ref.get_filename())
def mark_complete(self):
if self.task:
DownloadBase.set_progress(self.task, 100, 100)
def add_progress(self, count=1):
self.progress += count
if self.task:
DownloadBase.set_progress(self.task, self.progress, self.total_rows)
class SingleExcelImporter(ExcelImporter):
def __init__(self, *args, **kwargs):
super(SingleExcelImporter, self).__init__(*args, **kwargs)
self.worksheet = self.workbook.worksheets[0]
self.total_rows = self.worksheet.worksheet.get_highest_row()
class MultiExcelImporter(ExcelImporter):
def __init__(self, *args, **kwargs):
super(MultiExcelImporter, self).__init__(*args, **kwargs)
self.worksheets = self.workbook.worksheets
self.total_rows = sum(ws.worksheet.get_highest_row() for ws in self.worksheets)
|
Add helper class for excel importingfrom dimagi.utils.excel import WorkbookJSONReader
from soil import DownloadBase
class ExcelImporter(object):
# TODO make sure people can't instantiate ExcelImporter directly
def __init__(self, task, file_ref_id):
self.task = task
self.progress = 0
if self.task:
DownloadBase.set_progress(self.task, 0, 100)
download_ref = DownloadBase.get(file_ref_id)
self.workbook = WorkbookJSONReader(download_ref.get_filename())
def mark_complete(self):
if self.task:
DownloadBase.set_progress(self.task, 100, 100)
def add_progress(self, count=1):
self.progress += count
if self.task:
DownloadBase.set_progress(self.task, self.progress, self.total_rows)
class SingleExcelImporter(ExcelImporter):
def __init__(self, *args, **kwargs):
super(SingleExcelImporter, self).__init__(*args, **kwargs)
self.worksheet = self.workbook.worksheets[0]
self.total_rows = self.worksheet.worksheet.get_highest_row()
class MultiExcelImporter(ExcelImporter):
def __init__(self, *args, **kwargs):
super(MultiExcelImporter, self).__init__(*args, **kwargs)
self.worksheets = self.workbook.worksheets
self.total_rows = sum(ws.worksheet.get_highest_row() for ws in self.worksheets)
|
<commit_before><commit_msg>Add helper class for excel importing<commit_after>from dimagi.utils.excel import WorkbookJSONReader
from soil import DownloadBase
class ExcelImporter(object):
# TODO make sure people can't instantiate ExcelImporter directly
def __init__(self, task, file_ref_id):
self.task = task
self.progress = 0
if self.task:
DownloadBase.set_progress(self.task, 0, 100)
download_ref = DownloadBase.get(file_ref_id)
self.workbook = WorkbookJSONReader(download_ref.get_filename())
def mark_complete(self):
if self.task:
DownloadBase.set_progress(self.task, 100, 100)
def add_progress(self, count=1):
self.progress += count
if self.task:
DownloadBase.set_progress(self.task, self.progress, self.total_rows)
class SingleExcelImporter(ExcelImporter):
def __init__(self, *args, **kwargs):
super(SingleExcelImporter, self).__init__(*args, **kwargs)
self.worksheet = self.workbook.worksheets[0]
self.total_rows = self.worksheet.worksheet.get_highest_row()
class MultiExcelImporter(ExcelImporter):
def __init__(self, *args, **kwargs):
super(MultiExcelImporter, self).__init__(*args, **kwargs)
self.worksheets = self.workbook.worksheets
self.total_rows = sum(ws.worksheet.get_highest_row() for ws in self.worksheets)
|
|
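A minimal usage sketch for the importer helpers above — task, file_ref_id and process_row are assumptions standing in for the caller's own objects, and it assumes a WorkbookJSONReader worksheet can be iterated row by row:
# Hypothetical caller; task, file_ref_id and process_row come from the surrounding app.
importer = SingleExcelImporter(task, file_ref_id)
for row in importer.worksheet:      # assumption: iterating a worksheet yields row dicts
    process_row(row)                # stand-in for the concrete per-row import logic
    importer.add_progress()         # keeps the DownloadBase progress in step with rows handled
importer.mark_complete()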
c620ff34384cf15e6a236db6c306bcff6c53649f
|
Lib/test/crashers/modify_dict_attr.py
|
Lib/test/crashers/modify_dict_attr.py
|
# http://python.org/sf/1303614
class Y(object):
pass
class type_with_modifiable_dict(type, Y):
pass
class MyClass(object):
"""This class has its __dict__ attribute indirectly
exposed via the __dict__ getter/setter of Y.
"""
__metaclass__ = type_with_modifiable_dict
if __name__ == '__main__':
dictattr = Y.__dict__['__dict__']
dictattr.__delete__(MyClass) # if we set tp_dict to NULL,
print MyClass # doing anything with MyClass segfaults
|
Modify the segfaulting example to show why r53997 is not a solution to it.
|
Modify the segfaulting example to show why r53997 is not a solution to
it.
|
Python
|
mit
|
sk-/python2.7-type-annotator,sk-/python2.7-type-annotator,sk-/python2.7-type-annotator
|
Modify the segfaulting example to show why r53997 is not a solution to
it.
|
# http://python.org/sf/1303614
class Y(object):
pass
class type_with_modifiable_dict(type, Y):
pass
class MyClass(object):
"""This class has its __dict__ attribute indirectly
exposed via the __dict__ getter/setter of Y.
"""
__metaclass__ = type_with_modifiable_dict
if __name__ == '__main__':
dictattr = Y.__dict__['__dict__']
dictattr.__delete__(MyClass) # if we set tp_dict to NULL,
print MyClass # doing anything with MyClass segfaults
|
<commit_before><commit_msg>Modify the segfaulting example to show why r53997 is not a solution to
it.<commit_after>
|
# http://python.org/sf/1303614
class Y(object):
pass
class type_with_modifiable_dict(type, Y):
pass
class MyClass(object):
"""This class has its __dict__ attribute indirectly
exposed via the __dict__ getter/setter of Y.
"""
__metaclass__ = type_with_modifiable_dict
if __name__ == '__main__':
dictattr = Y.__dict__['__dict__']
dictattr.__delete__(MyClass) # if we set tp_dict to NULL,
print MyClass # doing anything with MyClass segfaults
|
Modify the segfaulting example to show why r53997 is not a solution to
it.
# http://python.org/sf/1303614
class Y(object):
pass
class type_with_modifiable_dict(type, Y):
pass
class MyClass(object):
"""This class has its __dict__ attribute indirectly
exposed via the __dict__ getter/setter of Y.
"""
__metaclass__ = type_with_modifiable_dict
if __name__ == '__main__':
dictattr = Y.__dict__['__dict__']
dictattr.__delete__(MyClass) # if we set tp_dict to NULL,
print MyClass # doing anything with MyClass segfaults
|
<commit_before><commit_msg>Modify the segfaulting example to show why r53997 is not a solution to
it.<commit_after>
# http://python.org/sf/1303614
class Y(object):
pass
class type_with_modifiable_dict(type, Y):
pass
class MyClass(object):
"""This class has its __dict__ attribute indirectly
exposed via the __dict__ getter/setter of Y.
"""
__metaclass__ = type_with_modifiable_dict
if __name__ == '__main__':
dictattr = Y.__dict__['__dict__']
dictattr.__delete__(MyClass) # if we set tp_dict to NULL,
print MyClass # doing anything with MyClass segfaults
|
|
9921774bf7b81bc21ae31b179c9a4af72f1410b9
|
oneflow/settings/obi_1flow_io.py
|
oneflow/settings/obi_1flow_io.py
|
# -*- coding: utf-8 -*-
# Settings for obi.1flow.net (test)
import os
from sparks.django.settings import include_snippets
include_snippets(
os.path.dirname(__file__), (
# no debug on OBI, we need to be in "real-life-mode".
# Sentry will help us catch errors, anyway.
'00_production',
'1flow_io',
'common',
'db_common',
'db_test',
'cache_common',
'cache_development',
'mail_production',
# But it's a preview/test environment, still.
'raven_test',
'common_test',
# Thus we get rosetta, and the Django Debug toolbar.
'rosetta',
'djdt',
),
globals()
)
# Override `1flow_io` for preview/test environment.
SITE_DOMAIN = 'obi.1flow.io'
|
# -*- coding: utf-8 -*-
# Settings for obi.1flow.net (test)
import os
from sparks.django.settings import include_snippets
include_snippets(
os.path.dirname(__file__), (
# no debug on OBI, we need to be in "real-life-mode".
# Sentry will help us catch errors, anyway.
'00_production',
'1flow_io',
'common',
'db_common',
'db_preview',
'cache_common',
'cache_development',
'mail_production',
# But it's a preview/test environment, still.
'raven_preview',
'common_preview',
# Thus we get rosetta, and the Django Debug toolbar.
'rosetta',
'djdt',
),
globals()
)
# Override `1flow_io` for preview/test environment.
SITE_DOMAIN = 'obi.1flow.io'
|
Use *_preview settings for OBI.
|
Use *_preview settings for OBI.
|
Python
|
agpl-3.0
|
WillianPaiva/1flow,WillianPaiva/1flow,1flow/1flow,1flow/1flow,WillianPaiva/1flow,1flow/1flow,WillianPaiva/1flow,WillianPaiva/1flow,1flow/1flow,1flow/1flow
|
# -*- coding: utf-8 -*-
# Settings for obi.1flow.net (test)
import os
from sparks.django.settings import include_snippets
include_snippets(
os.path.dirname(__file__), (
# no debug on OBI, we need to be in "real-life-mode".
# Sentry will help us catch errors, anyway.
'00_production',
'1flow_io',
'common',
'db_common',
'db_test',
'cache_common',
'cache_development',
'mail_production',
# But it's a preview/test environment, still.
'raven_test',
'common_test',
# Thus we get rosetta, and the Django Debug toolbar.
'rosetta',
'djdt',
),
globals()
)
# Override `1flow_io` for preview/test environment.
SITE_DOMAIN = 'obi.1flow.io'
Use *_preview settings for OBI.
|
# -*- coding: utf-8 -*-
# Settings for obi.1flow.net (test)
import os
from sparks.django.settings import include_snippets
include_snippets(
os.path.dirname(__file__), (
# no debug on OBI, we need to be in "real-life-mode".
# Sentry will help us catch errors, anyway.
'00_production',
'1flow_io',
'common',
'db_common',
'db_preview',
'cache_common',
'cache_development',
'mail_production',
# But it's a preview/test environment, still.
'raven_preview',
'common_preview',
# Thus we get rosetta, and the Django Debug toolbar.
'rosetta',
'djdt',
),
globals()
)
# Override `1flow_io` for preview/test environment.
SITE_DOMAIN = 'obi.1flow.io'
|
<commit_before># -*- coding: utf-8 -*-
# Settings for obi.1flow.net (test)
import os
from sparks.django.settings import include_snippets
include_snippets(
os.path.dirname(__file__), (
# no debug on OBI, we need to be in "real-life-mode".
# Sentry will help us catch errors, anyway.
'00_production',
'1flow_io',
'common',
'db_common',
'db_test',
'cache_common',
'cache_development',
'mail_production',
# But it's a preview/test environment, still.
'raven_test',
'common_test',
# Thus we get rosetta, and the Django Debug toolbar.
'rosetta',
'djdt',
),
globals()
)
# Override `1flow_io` for preview/test environment.
SITE_DOMAIN = 'obi.1flow.io'
<commit_msg>Use *_preview settings for OBI.<commit_after>
|
# -*- coding: utf-8 -*-
# Settings for obi.1flow.net (test)
import os
from sparks.django.settings import include_snippets
include_snippets(
os.path.dirname(__file__), (
# no debug on OBI, we need to be in "real-life-mode".
# Sentry will help us catch errors, anyway.
'00_production',
'1flow_io',
'common',
'db_common',
'db_preview',
'cache_common',
'cache_development',
'mail_production',
# But it's a preview/test environment, still.
'raven_preview',
'common_preview',
# Thus we get rosetta, and the Django Debug toolbar.
'rosetta',
'djdt',
),
globals()
)
# Override `1flow_io` for preview/test environment.
SITE_DOMAIN = 'obi.1flow.io'
|
# -*- coding: utf-8 -*-
# Settings for obi.1flow.net (test)
import os
from sparks.django.settings import include_snippets
include_snippets(
os.path.dirname(__file__), (
# no debug on OBI, we need to be in "real-life-mode".
# Sentry will help us catch errors, anyway.
'00_production',
'1flow_io',
'common',
'db_common',
'db_test',
'cache_common',
'cache_development',
'mail_production',
# But it's a preview/test environment, still.
'raven_test',
'common_test',
# Thus we get rosetta, and the Django Debug toolbar.
'rosetta',
'djdt',
),
globals()
)
# Override `1flow_io` for preview/test environment.
SITE_DOMAIN = 'obi.1flow.io'
Use *_preview settings for OBI.
# -*- coding: utf-8 -*-
# Settings for obi.1flow.net (test)
import os
from sparks.django.settings import include_snippets
include_snippets(
os.path.dirname(__file__), (
# no debug on OBI, we need to be in "real-life-mode".
# Sentry will help us catch errors, anyway.
'00_production',
'1flow_io',
'common',
'db_common',
'db_preview',
'cache_common',
'cache_development',
'mail_production',
# But it's a preview/test environment, still.
'raven_preview',
'common_preview',
# Thus we get rosetta, and the Django Debug toolbar.
'rosetta',
'djdt',
),
globals()
)
# Override `1flow_io` for preview/test environment.
SITE_DOMAIN = 'obi.1flow.io'
|
<commit_before># -*- coding: utf-8 -*-
# Settings for obi.1flow.net (test)
import os
from sparks.django.settings import include_snippets
include_snippets(
os.path.dirname(__file__), (
# no debug on OBI, we need to be in "real-life-mode".
# Sentry will help us catch errors, anyway.
'00_production',
'1flow_io',
'common',
'db_common',
'db_test',
'cache_common',
'cache_development',
'mail_production',
# But it's a preview/test environment, still.
'raven_test',
'common_test',
# Thus we get rosetta, and the Django Debug toolbar.
'rosetta',
'djdt',
),
globals()
)
# Override `1flow_io` for preview/test environment.
SITE_DOMAIN = 'obi.1flow.io'
<commit_msg>Use *_preview settings for OBI.<commit_after># -*- coding: utf-8 -*-
# Settings for obi.1flow.net (test)
import os
from sparks.django.settings import include_snippets
include_snippets(
os.path.dirname(__file__), (
# no debug on OBI, we need to be in "real-life-mode".
# Sentry will help us catch errors, anyway.
'00_production',
'1flow_io',
'common',
'db_common',
'db_preview',
'cache_common',
'cache_development',
'mail_production',
# But it's a preview/test environment, still.
'raven_preview',
'common_preview',
# Thus we get rosetta, and the Django Debug toolbar.
'rosetta',
'djdt',
),
globals()
)
# Override `1flow_io` for preview/test environment.
SITE_DOMAIN = 'obi.1flow.io'
|
edb66fa3323279bb005bf7201549fd3b6278da3c
|
src/icp/pollinator/src/pollinator/reclass/write_crop_type_json.py
|
src/icp/pollinator/src/pollinator/reclass/write_crop_type_json.py
|
from __future__ import print_function
import csv
import json
import sys
def init():
with open('../data/cdl_data_grouped.csv', mode='r') as cdl_data_grouped:
reader = csv.DictReader(cdl_data_grouped)
crop_types = {}
for row in reader:
crop_group = row['Attributes']
group_id = row['group_id']
crop_types[group_id] = crop_group
with open('./cropTypes.json', mode='w') as crop_types_json:
crop_types_json.write(json.dumps(crop_types, sort_keys=True,
separators=(',', ':'), indent=4))
if __name__ == '__main__':
msg = """
Uses ../data/cdl_data_grouped.csv to create the `cropTypes.json`
that the frontend uses as a lookup.
You may want to remove any enhancement/covercrop rows that aren't part
of the actual raster.
Takes no arguments but expects to have access to CDL data at:
../data/cdl_data_grouped.csv
"""
if len(sys.argv) > 1:
print('\nUsage:', msg)
sys.exit()
init()
|
Add script to make json crop types look up
|
Add script to make json crop types look up
- Without script if there's an update to the group_ids and crop type mapping
you have to column copy paste the csv into `cropTypes.json`
|
Python
|
apache-2.0
|
project-icp/bee-pollinator-app,project-icp/bee-pollinator-app,project-icp/bee-pollinator-app,project-icp/bee-pollinator-app
|
Add script to make json crop types look up
- Without script if there's an update to the group_ids and crop type mapping
you have to column copy paste the csv into `cropTypes.json`
|
from __future__ import print_function
import csv
import json
import sys
def init():
with open('../data/cdl_data_grouped.csv', mode='r') as cdl_data_grouped:
reader = csv.DictReader(cdl_data_grouped)
crop_types = {}
for row in reader:
crop_group = row['Attributes']
group_id = row['group_id']
crop_types[group_id] = crop_group
with open('./cropTypes.json', mode='w') as crop_types_json:
crop_types_json.write(json.dumps(crop_types, sort_keys=True,
separators=(',', ':'), indent=4))
if __name__ == '__main__':
msg = """
Uses ../data/cdl_data_grouped.csv to create the `cropTypes.json`
that the frontend uses as a lookup.
You may want to remove any enhancement/covercrop rows that aren't part
of the actual raster.
Takes no arguments but expects to have access to CDL data at:
../data/cdl_data_grouped.csv
"""
if len(sys.argv) > 1:
print('\nUsage:', msg)
sys.exit()
init()
|
<commit_before><commit_msg>Add script to make json crop types look up
- Without script if there's an update to the group_ids and crop type mapping
you have to column copy paste the csv into `cropTypes.json`<commit_after>
|
from __future__ import print_function
import csv
import json
import sys
def init():
with open('../data/cdl_data_grouped.csv', mode='r') as cdl_data_grouped:
reader = csv.DictReader(cdl_data_grouped)
crop_types = {}
for row in reader:
crop_group = row['Attributes']
group_id = row['group_id']
crop_types[group_id] = crop_group
with open('./cropTypes.json', mode='w') as crop_types_json:
crop_types_json.write(json.dumps(crop_types, sort_keys=True,
separators=(',', ':'), indent=4))
if __name__ == '__main__':
msg = """
Uses ../data/cdl_data_grouped.csv to create the `cropTypes.json`
that the frontend uses as a lookup.
You may want to remove any enhancement/covercrop rows that aren't part
of the actual raster.
Takes no arguments but expects to have access to CDL data at:
../data/cdl_data_grouped.csv
"""
if len(sys.argv) > 1:
print('\nUsage:', msg)
sys.exit()
init()
|
Add script to make json crop types look up
- Without script if there's an update to the group_ids and crop type mapping
you have to column copy paste the csv into `cropTypes.json`
from __future__ import print_function
import csv
import json
import sys
def init():
with open('../data/cdl_data_grouped.csv', mode='r') as cdl_data_grouped:
reader = csv.DictReader(cdl_data_grouped)
crop_types = {}
for row in reader:
crop_group = row['Attributes']
group_id = row['group_id']
crop_types[group_id] = crop_group
with open('./cropTypes.json', mode='w') as crop_types_json:
crop_types_json.write(json.dumps(crop_types, sort_keys=True,
separators=(',', ':'), indent=4))
if __name__ == '__main__':
msg = """
Uses ../data/cdl_data_grouped.csv to create the `cropTypes.json`
that the frontend uses as a lookup.
You may want to remove any enhancement/covercrop rows that aren't part
of the actual raster.
Takes no arguments but expects to have access to CDL data at:
../data/cdl_data_grouped.csv
"""
if len(sys.argv) > 1:
print('\nUsage:', msg)
sys.exit()
init()
|
<commit_before><commit_msg>Add script to make json crop types look up
- Without script if there's an update to the group_ids and crop type mapping
you have to column copy paste the csv into `cropTypes.json`<commit_after>from __future__ import print_function
import csv
import json
import sys
def init():
with open('../data/cdl_data_grouped.csv', mode='r') as cdl_data_grouped:
reader = csv.DictReader(cdl_data_grouped)
crop_types = {}
for row in reader:
crop_group = row['Attributes']
group_id = row['group_id']
crop_types[group_id] = crop_group
with open('./cropTypes.json', mode='w') as crop_types_json:
crop_types_json.write(json.dumps(crop_types, sort_keys=True,
separators=(',', ':'), indent=4))
if __name__ == '__main__':
msg = """
Uses ../data/cdl_data_grouped.csv to create the `cropTypes.json`
that the frontend uses as a lookup.
You may want to remove any enhancement/covercrop rows that aren't part
of the actual raster.
Takes no arguments but expects to have access to CDL data at:
../data/cdl_data_grouped.csv
"""
if len(sys.argv) > 1:
print('\nUsage:', msg)
sys.exit()
init()
|
|
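As a hedged illustration of the lookup the script above writes — the crop names and group ids here are invented, not taken from cdl_data_grouped.csv:
# Invented example rows: the script maps each row's group_id to its Attributes value.
import json
crop_types = {'1': 'Corn', '2': 'Alfalfa'}
print(json.dumps(crop_types, sort_keys=True, separators=(',', ':'), indent=4))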
99febc5ce4131d434ad861e2ecb4596b66a1be47
|
src/nodeconductor_paas_oracle/migrations/0002_ovm_iaas_support.py
|
src/nodeconductor_paas_oracle/migrations/0002_ovm_iaas_support.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('nodeconductor_paas_oracle', '0001_squashed_0007_change_support_requests'),
]
operations = [
migrations.AlterField(
model_name='deployment',
name='db_arch_size',
field=models.PositiveIntegerField(blank=True, help_text=b'Archive storage size in GB', null=True, validators=[django.core.validators.MinValueValidator(10), django.core.validators.MaxValueValidator(2048)]),
),
migrations.AlterField(
model_name='deployment',
name='db_charset',
field=models.CharField(blank=True, max_length=256, choices=[(b'AL32UTF8 - Unicode UTF-8 Universal Character Set', b'AL32UTF8 - Unicode UTF-8 Universal Character Set'), (b'AR8ISO8859P6 - ISO 8859-6 Latin/Arabic', b'AR8ISO8859P6 - ISO 8859-6 Latin/Arabic'), (b'AR8MSWIN1256 - MS Windows Code Page 1256 8-Bit Latin/Arabic', b'AR8MSWIN1256 - MS Windows Code Page 1256 8-Bit Latin/Arabic'), (b'Other - please specify in Addtional Data field.', b'Other - please specify in Addtional Data field.')]),
),
migrations.AlterField(
model_name='deployment',
name='db_template',
field=models.CharField(blank=True, max_length=256, choices=[(b'General Purpose', b'General Purpose'), (b'Data Warehouse', b'Data Warehouse')]),
),
migrations.AlterField(
model_name='deployment',
name='db_type',
field=models.PositiveSmallIntegerField(choices=[(1, b'RAC'), (2, b'Single Instance/ASM'), (3, b'Single Instance'), (4, b'No database')]),
),
migrations.AlterField(
model_name='deployment',
name='db_version',
field=models.CharField(blank=True, max_length=256, choices=[(b'11.2.0.4', b'11.2.0.4'), (b'12.1.0.2', b'12.1.0.2')]),
),
]
|
Support simplified requests for non-DB deployments
|
Support simplified requests for non-DB deployments
|
Python
|
mit
|
opennode/nodeconductor-paas-oracle
|
Support simplified requests for non-DB deployments
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('nodeconductor_paas_oracle', '0001_squashed_0007_change_support_requests'),
]
operations = [
migrations.AlterField(
model_name='deployment',
name='db_arch_size',
field=models.PositiveIntegerField(blank=True, help_text=b'Archive storage size in GB', null=True, validators=[django.core.validators.MinValueValidator(10), django.core.validators.MaxValueValidator(2048)]),
),
migrations.AlterField(
model_name='deployment',
name='db_charset',
field=models.CharField(blank=True, max_length=256, choices=[(b'AL32UTF8 - Unicode UTF-8 Universal Character Set', b'AL32UTF8 - Unicode UTF-8 Universal Character Set'), (b'AR8ISO8859P6 - ISO 8859-6 Latin/Arabic', b'AR8ISO8859P6 - ISO 8859-6 Latin/Arabic'), (b'AR8MSWIN1256 - MS Windows Code Page 1256 8-Bit Latin/Arabic', b'AR8MSWIN1256 - MS Windows Code Page 1256 8-Bit Latin/Arabic'), (b'Other - please specify in Addtional Data field.', b'Other - please specify in Addtional Data field.')]),
),
migrations.AlterField(
model_name='deployment',
name='db_template',
field=models.CharField(blank=True, max_length=256, choices=[(b'General Purpose', b'General Purpose'), (b'Data Warehouse', b'Data Warehouse')]),
),
migrations.AlterField(
model_name='deployment',
name='db_type',
field=models.PositiveSmallIntegerField(choices=[(1, b'RAC'), (2, b'Single Instance/ASM'), (3, b'Single Instance'), (4, b'No database')]),
),
migrations.AlterField(
model_name='deployment',
name='db_version',
field=models.CharField(blank=True, max_length=256, choices=[(b'11.2.0.4', b'11.2.0.4'), (b'12.1.0.2', b'12.1.0.2')]),
),
]
|
<commit_before><commit_msg>Support simplified requests for non-DB deployments<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('nodeconductor_paas_oracle', '0001_squashed_0007_change_support_requests'),
]
operations = [
migrations.AlterField(
model_name='deployment',
name='db_arch_size',
field=models.PositiveIntegerField(blank=True, help_text=b'Archive storage size in GB', null=True, validators=[django.core.validators.MinValueValidator(10), django.core.validators.MaxValueValidator(2048)]),
),
migrations.AlterField(
model_name='deployment',
name='db_charset',
field=models.CharField(blank=True, max_length=256, choices=[(b'AL32UTF8 - Unicode UTF-8 Universal Character Set', b'AL32UTF8 - Unicode UTF-8 Universal Character Set'), (b'AR8ISO8859P6 - ISO 8859-6 Latin/Arabic', b'AR8ISO8859P6 - ISO 8859-6 Latin/Arabic'), (b'AR8MSWIN1256 - MS Windows Code Page 1256 8-Bit Latin/Arabic', b'AR8MSWIN1256 - MS Windows Code Page 1256 8-Bit Latin/Arabic'), (b'Other - please specify in Addtional Data field.', b'Other - please specify in Addtional Data field.')]),
),
migrations.AlterField(
model_name='deployment',
name='db_template',
field=models.CharField(blank=True, max_length=256, choices=[(b'General Purpose', b'General Purpose'), (b'Data Warehouse', b'Data Warehouse')]),
),
migrations.AlterField(
model_name='deployment',
name='db_type',
field=models.PositiveSmallIntegerField(choices=[(1, b'RAC'), (2, b'Single Instance/ASM'), (3, b'Single Instance'), (4, b'No database')]),
),
migrations.AlterField(
model_name='deployment',
name='db_version',
field=models.CharField(blank=True, max_length=256, choices=[(b'11.2.0.4', b'11.2.0.4'), (b'12.1.0.2', b'12.1.0.2')]),
),
]
|
Support simplified requests for non-DB deployments
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('nodeconductor_paas_oracle', '0001_squashed_0007_change_support_requests'),
]
operations = [
migrations.AlterField(
model_name='deployment',
name='db_arch_size',
field=models.PositiveIntegerField(blank=True, help_text=b'Archive storage size in GB', null=True, validators=[django.core.validators.MinValueValidator(10), django.core.validators.MaxValueValidator(2048)]),
),
migrations.AlterField(
model_name='deployment',
name='db_charset',
field=models.CharField(blank=True, max_length=256, choices=[(b'AL32UTF8 - Unicode UTF-8 Universal Character Set', b'AL32UTF8 - Unicode UTF-8 Universal Character Set'), (b'AR8ISO8859P6 - ISO 8859-6 Latin/Arabic', b'AR8ISO8859P6 - ISO 8859-6 Latin/Arabic'), (b'AR8MSWIN1256 - MS Windows Code Page 1256 8-Bit Latin/Arabic', b'AR8MSWIN1256 - MS Windows Code Page 1256 8-Bit Latin/Arabic'), (b'Other - please specify in Addtional Data field.', b'Other - please specify in Addtional Data field.')]),
),
migrations.AlterField(
model_name='deployment',
name='db_template',
field=models.CharField(blank=True, max_length=256, choices=[(b'General Purpose', b'General Purpose'), (b'Data Warehouse', b'Data Warehouse')]),
),
migrations.AlterField(
model_name='deployment',
name='db_type',
field=models.PositiveSmallIntegerField(choices=[(1, b'RAC'), (2, b'Single Instance/ASM'), (3, b'Single Instance'), (4, b'No database')]),
),
migrations.AlterField(
model_name='deployment',
name='db_version',
field=models.CharField(blank=True, max_length=256, choices=[(b'11.2.0.4', b'11.2.0.4'), (b'12.1.0.2', b'12.1.0.2')]),
),
]
|
<commit_before><commit_msg>Support simplified requests for non-DB deployments<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('nodeconductor_paas_oracle', '0001_squashed_0007_change_support_requests'),
]
operations = [
migrations.AlterField(
model_name='deployment',
name='db_arch_size',
field=models.PositiveIntegerField(blank=True, help_text=b'Archive storage size in GB', null=True, validators=[django.core.validators.MinValueValidator(10), django.core.validators.MaxValueValidator(2048)]),
),
migrations.AlterField(
model_name='deployment',
name='db_charset',
field=models.CharField(blank=True, max_length=256, choices=[(b'AL32UTF8 - Unicode UTF-8 Universal Character Set', b'AL32UTF8 - Unicode UTF-8 Universal Character Set'), (b'AR8ISO8859P6 - ISO 8859-6 Latin/Arabic', b'AR8ISO8859P6 - ISO 8859-6 Latin/Arabic'), (b'AR8MSWIN1256 - MS Windows Code Page 1256 8-Bit Latin/Arabic', b'AR8MSWIN1256 - MS Windows Code Page 1256 8-Bit Latin/Arabic'), (b'Other - please specify in Addtional Data field.', b'Other - please specify in Addtional Data field.')]),
),
migrations.AlterField(
model_name='deployment',
name='db_template',
field=models.CharField(blank=True, max_length=256, choices=[(b'General Purpose', b'General Purpose'), (b'Data Warehouse', b'Data Warehouse')]),
),
migrations.AlterField(
model_name='deployment',
name='db_type',
field=models.PositiveSmallIntegerField(choices=[(1, b'RAC'), (2, b'Single Instance/ASM'), (3, b'Single Instance'), (4, b'No database')]),
),
migrations.AlterField(
model_name='deployment',
name='db_version',
field=models.CharField(blank=True, max_length=256, choices=[(b'11.2.0.4', b'11.2.0.4'), (b'12.1.0.2', b'12.1.0.2')]),
),
]
|
|
8773a5072cf968999ed3b7f3a826c513513aefbf
|
taiga/users/migrations/0018_remove_vote_issues_in_roles_permissions_field.py
|
taiga/users/migrations/0018_remove_vote_issues_in_roles_permissions_field.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-04-04 09:32
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0017_auto_20160208_1751'),
]
operations = [
migrations.RunSQL(
"UPDATE users_role SET permissions = ARRAY_REMOVE(permissions, 'vote_issues')"
),
]
|
Clean user role permissions: remove vote_issues in Role.permissions
|
[Backport] Clean user role permissions: remove vote_issues in Role.permissions
|
Python
|
agpl-3.0
|
xdevelsistemas/taiga-back-community,taigaio/taiga-back,dayatz/taiga-back,taigaio/taiga-back,dayatz/taiga-back,taigaio/taiga-back,Rademade/taiga-back,Rademade/taiga-back,Rademade/taiga-back,dayatz/taiga-back,xdevelsistemas/taiga-back-community,Rademade/taiga-back,xdevelsistemas/taiga-back-community,Rademade/taiga-back
|
[Backport] Clean user role permissions: remove vote_issues in Role.permissions
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-04-04 09:32
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0017_auto_20160208_1751'),
]
operations = [
migrations.RunSQL(
"UPDATE users_role SET permissions = ARRAY_REMOVE(permissions, 'vote_issues')"
),
]
|
<commit_before><commit_msg>[Backport] Clean user role permissions: remove vote_issues in Role.permissions<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-04-04 09:32
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0017_auto_20160208_1751'),
]
operations = [
migrations.RunSQL(
"UPDATE users_role SET permissions = ARRAY_REMOVE(permissions, 'vote_issues')"
),
]
|
[Backport] Clean user role permissions: remove vote_issues in Role.permissions
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-04-04 09:32
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0017_auto_20160208_1751'),
]
operations = [
migrations.RunSQL(
"UPDATE users_role SET permissions = ARRAY_REMOVE(permissions, 'vote_issues')"
),
]
|
<commit_before><commit_msg>[Backport] Clean user role permissions: remove vote_issues in Role.permissions<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-04-04 09:32
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0017_auto_20160208_1751'),
]
operations = [
migrations.RunSQL(
"UPDATE users_role SET permissions = ARRAY_REMOVE(permissions, 'vote_issues')"
),
]
|
|
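The RunSQL above relies on PostgreSQL's ARRAY_REMOVE; a rough ORM-side sketch of the same cleanup, assuming the Role model lives in taiga.users.models and that permissions is a postgres ArrayField, would be:
# Sketch only; the real migration executes the raw SQL above instead.
from taiga.users.models import Role

for role in Role.objects.filter(permissions__contains=['vote_issues']):
    role.permissions = [p for p in role.permissions if p != 'vote_issues']
    role.save()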
e15869a34daf577c409a773358c82a7448d03033
|
src/examples/tutorial/example_noveltycurve.py
|
src/examples/tutorial/example_noveltycurve.py
|
import sys
import essentia.standard as es
from essentia import *
import numpy
import pylab
try:
input_file = sys.argv[1]
except:
print "usage:", sys.argv[0], "<input_file>"
sys.exit()
frameSize = 2048
hopSize = 128
weight = 'hybrid'
print "Frame size:", frameSize
print "Hop size:", hopSize
print "weight:", weight
audio = es.MonoLoader(filename=input_file)()
w = es.Windowing(type='hann')
s = es.Spectrum()
freq_bands = es.FrequencyBands()
bands_energies = []
for frame in es.FrameGenerator(audio, frameSize=frameSize, hopSize=hopSize):
bands_energies.append(freq_bands(s(w(frame))))
novelty = es.NoveltyCurve(frameRate=44100./hopSize, weightCurveType=weight)(numpy.array(bands_energies))
bpm, candidates, magnitudes, tempogram, _, ticks, ticks_strength, sinusoid = es.BpmHistogram(frameRate=44100./hopSize)(novelty)
print "BPM =", bpm
#pylab.plot(novelty)
#pylab.show()
pylab.matshow(tempogram.transpose(), origin='lower', aspect='auto')
pylab.show()
|
Add python example for NoveltyCurve use
|
Add python example for NoveltyCurve use
|
Python
|
agpl-3.0
|
MTG/essentia,carthach/essentia,MTG/essentia,MTG/essentia,MTG/essentia,carthach/essentia,MTG/essentia,carthach/essentia,carthach/essentia,carthach/essentia
|
Add python example for NoveltyCurve use
|
import sys
import essentia.standard as es
from essentia import *
import numpy
import pylab
try:
input_file = sys.argv[1]
except:
print "usage:", sys.argv[0], "<input_file>"
sys.exit()
frameSize = 2048
hopSize = 128
weight = 'hybrid'
print "Frame size:", frameSize
print "Hop size:", hopSize
print "weight:", weight
audio = es.MonoLoader(filename=input_file)()
w = es.Windowing(type='hann')
s = es.Spectrum()
freq_bands = es.FrequencyBands()
bands_energies = []
for frame in es.FrameGenerator(audio, frameSize=frameSize, hopSize=hopSize):
bands_energies.append(freq_bands(s(w(frame))))
novelty = es.NoveltyCurve(frameRate=44100./hopSize, weightCurveType=weight)(numpy.array(bands_energies))
bpm, candidates, magnitudes, tempogram, _, ticks, ticks_strength, sinusoid = es.BpmHistogram(frameRate=44100./hopSize)(novelty)
print "BPM =", bpm
#pylab.plot(novelty)
#pylab.show()
pylab.matshow(tempogram.transpose(), origin='lower', aspect='auto')
pylab.show()
|
<commit_before><commit_msg>Add python example for NoveltyCurve use<commit_after>
|
import sys
import essentia.standard as es
from essentia import *
import numpy
import pylab
try:
input_file = sys.argv[1]
except:
print "usage:", sys.argv[0], "<input_file>"
sys.exit()
frameSize = 2048
hopSize = 128
weight = 'hybrid'
print "Frame size:", frameSize
print "Hop size:", hopSize
print "weight:", weight
audio = es.MonoLoader(filename=input_file)()
w = es.Windowing(type='hann')
s = es.Spectrum()
freq_bands = es.FrequencyBands()
bands_energies = []
for frame in es.FrameGenerator(audio, frameSize=frameSize, hopSize=hopSize):
bands_energies.append(freq_bands(s(w(frame))))
novelty = es.NoveltyCurve(frameRate=44100./hopSize, weightCurveType=weight)(numpy.array(bands_energies))
bpm, candidates, magnitudes, tempogram, _, ticks, ticks_strength, sinusoid = es.BpmHistogram(frameRate=44100./hopSize)(novelty)
print "BPM =", bpm
#pylab.plot(novelty)
#pylab.show()
pylab.matshow(tempogram.transpose(), origin='lower', aspect='auto')
pylab.show()
|
Add python example for NoveltyCurve use
import sys
import essentia.standard as es
from essentia import *
import numpy
import pylab
try:
input_file = sys.argv[1]
except:
print "usage:", sys.argv[0], "<input_file>"
sys.exit()
frameSize = 2048
hopSize = 128
weight = 'hybrid'
print "Frame size:", frameSize
print "Hop size:", hopSize
print "weight:", weight
audio = es.MonoLoader(filename=input_file)()
w = es.Windowing(type='hann')
s = es.Spectrum()
freq_bands = es.FrequencyBands()
bands_energies = []
for frame in es.FrameGenerator(audio, frameSize=frameSize, hopSize=hopSize):
bands_energies.append(freq_bands(s(w(frame))))
novelty = es.NoveltyCurve(frameRate=44100./hopSize, weightCurveType=weight)(numpy.array(bands_energies))
bpm, candidates, magnitudes, tempogram, _, ticks, ticks_strength, sinusoid = es.BpmHistogram(frameRate=44100./hopSize)(novelty)
print "BPM =", bpm
#pylab.plot(novelty)
#pylab.show()
pylab.matshow(tempogram.transpose(), origin='lower', aspect='auto')
pylab.show()
|
<commit_before><commit_msg>Add python example for NoveltyCurve use<commit_after>import sys
import essentia.standard as es
from essentia import *
import numpy
import pylab
try:
input_file = sys.argv[1]
except:
print "usage:", sys.argv[0], "<input_file>"
sys.exit()
frameSize = 2048
hopSize = 128
weight = 'hybrid'
print "Frame size:", frameSize
print "Hop size:", hopSize
print "weight:", weight
audio = es.MonoLoader(filename=input_file)()
w = es.Windowing(type='hann')
s = es.Spectrum()
freq_bands = es.FrequencyBands()
bands_energies = []
for frame in es.FrameGenerator(audio, frameSize=frameSize, hopSize=hopSize):
bands_energies.append(freq_bands(s(w(frame))))
novelty = es.NoveltyCurve(frameRate=44100./hopSize, weightCurveType=weight)(numpy.array(bands_energies))
bpm, candidates, magnitudes, tempogram, _, ticks, ticks_strength, sinusoid = es.BpmHistogram(frameRate=44100./hopSize)(novelty)
print "BPM =", bpm
#pylab.plot(novelty)
#pylab.show()
pylab.matshow(tempogram.transpose(), origin='lower', aspect='auto')
pylab.show()
|
|
dfe116ebf8520cdb8d575b9c9bda9c93cdc3a54c
|
scripts/migration/migrate_date_modified_for_existing_nodes.py
|
scripts/migration/migrate_date_modified_for_existing_nodes.py
|
"""
This will add a date_modified field to all nodes. Date_modified will be set equal to the date of the last log.
"""
import sys
import logging
from website import models
from website.app import init_app
logger = logging.getLogger(__name__)
def main():
init_app(routes=False)
dry_run = 'dry' in sys.argv
logger.warn('Date_modified field will be added to all nodes.')
if dry_run:
logger.warn('Dry_run mode')
for node in models.Node.find():
logger.info('Node {0} "date_modified" added'.format(node._id))
if not dry_run:
node.date_modified = node.date_updated()
node.save()
if __name__ == '__main__':
main()
|
Add migration for date_modified field.
|
Add migration for date_modified field.
|
Python
|
apache-2.0
|
jnayak1/osf.io,rdhyee/osf.io,Johnetordoff/osf.io,doublebits/osf.io,Ghalko/osf.io,alexschiller/osf.io,pattisdr/osf.io,saradbowman/osf.io,caseyrollins/osf.io,RomanZWang/osf.io,KAsante95/osf.io,amyshi188/osf.io,RomanZWang/osf.io,aaxelb/osf.io,kwierman/osf.io,sloria/osf.io,asanfilippo7/osf.io,icereval/osf.io,aaxelb/osf.io,HalcyonChimera/osf.io,chrisseto/osf.io,doublebits/osf.io,leb2dg/osf.io,sloria/osf.io,erinspace/osf.io,DanielSBrown/osf.io,felliott/osf.io,CenterForOpenScience/osf.io,binoculars/osf.io,TomHeatwole/osf.io,billyhunt/osf.io,SSJohns/osf.io,DanielSBrown/osf.io,aaxelb/osf.io,mluke93/osf.io,RomanZWang/osf.io,Ghalko/osf.io,GageGaskins/osf.io,emetsger/osf.io,billyhunt/osf.io,TomBaxter/osf.io,mluo613/osf.io,Johnetordoff/osf.io,HalcyonChimera/osf.io,cwisecarver/osf.io,doublebits/osf.io,cwisecarver/osf.io,brandonPurvis/osf.io,monikagrabowska/osf.io,GageGaskins/osf.io,cslzchen/osf.io,HalcyonChimera/osf.io,brianjgeiger/osf.io,abought/osf.io,emetsger/osf.io,chrisseto/osf.io,kch8qx/osf.io,samchrisinger/osf.io,billyhunt/osf.io,billyhunt/osf.io,felliott/osf.io,GageGaskins/osf.io,samchrisinger/osf.io,amyshi188/osf.io,adlius/osf.io,leb2dg/osf.io,caneruguz/osf.io,mluo613/osf.io,mfraezz/osf.io,mattclark/osf.io,felliott/osf.io,GageGaskins/osf.io,acshi/osf.io,zachjanicki/osf.io,mluke93/osf.io,TomHeatwole/osf.io,chrisseto/osf.io,emetsger/osf.io,SSJohns/osf.io,alexschiller/osf.io,kwierman/osf.io,zamattiac/osf.io,aaxelb/osf.io,caseyrollins/osf.io,zachjanicki/osf.io,chrisseto/osf.io,adlius/osf.io,kch8qx/osf.io,cslzchen/osf.io,jnayak1/osf.io,GageGaskins/osf.io,abought/osf.io,monikagrabowska/osf.io,wearpants/osf.io,adlius/osf.io,acshi/osf.io,crcresearch/osf.io,jnayak1/osf.io,DanielSBrown/osf.io,rdhyee/osf.io,KAsante95/osf.io,emetsger/osf.io,brandonPurvis/osf.io,baylee-d/osf.io,monikagrabowska/osf.io,SSJohns/osf.io,CenterForOpenScience/osf.io,kwierman/osf.io,KAsante95/osf.io,asanfilippo7/osf.io,KAsante95/osf.io,caneruguz/osf.io,binoculars/osf.io,cwisecarver/osf.io,hmoco/osf.io,icereval/osf.io,crcresearch/osf.io,KAsante95/osf.io,mluo613/osf.io,brandonPurvis/osf.io,amyshi188/osf.io,wearpants/osf.io,Nesiehr/osf.io,Ghalko/osf.io,TomHeatwole/osf.io,mfraezz/osf.io,mfraezz/osf.io,erinspace/osf.io,CenterForOpenScience/osf.io,binoculars/osf.io,abought/osf.io,chennan47/osf.io,hmoco/osf.io,DanielSBrown/osf.io,rdhyee/osf.io,abought/osf.io,kwierman/osf.io,monikagrabowska/osf.io,amyshi188/osf.io,crcresearch/osf.io,felliott/osf.io,cslzchen/osf.io,TomHeatwole/osf.io,wearpants/osf.io,laurenrevere/osf.io,icereval/osf.io,alexschiller/osf.io,saradbowman/osf.io,chennan47/osf.io,mluke93/osf.io,mluke93/osf.io,zachjanicki/osf.io,brianjgeiger/osf.io,pattisdr/osf.io,cwisecarver/osf.io,HalcyonChimera/osf.io,acshi/osf.io,rdhyee/osf.io,Nesiehr/osf.io,alexschiller/osf.io,doublebits/osf.io,leb2dg/osf.io,CenterForOpenScience/osf.io,leb2dg/osf.io,mluo613/osf.io,RomanZWang/osf.io,acshi/osf.io,mattclark/osf.io,caneruguz/osf.io,mattclark/osf.io,brandonPurvis/osf.io,Johnetordoff/osf.io,hmoco/osf.io,sloria/osf.io,billyhunt/osf.io,laurenrevere/osf.io,baylee-d/osf.io,caneruguz/osf.io,TomBaxter/osf.io,Nesiehr/osf.io,adlius/osf.io,TomBaxter/osf.io,caseyrollins/osf.io,asanfilippo7/osf.io,samchrisinger/osf.io,baylee-d/osf.io,Nesiehr/osf.io,pattisdr/osf.io,mfraezz/osf.io,zamattiac/osf.io,zachjanicki/osf.io,chennan47/osf.io,jnayak1/osf.io,cslzchen/osf.io,brianjgeiger/osf.io,erinspace/osf.io,hmoco/osf.io,Johnetordoff/osf.io,zamattiac/osf.io,kch8qx/osf.io,mluo613/osf.io,acshi/osf.io,monikagrabowska/osf.io,kch8qx/osf.io,doublebits/osf.io,alexschiller/osf.i
o,wearpants/osf.io,SSJohns/osf.io,zamattiac/osf.io,asanfilippo7/osf.io,RomanZWang/osf.io,laurenrevere/osf.io,samchrisinger/osf.io,brianjgeiger/osf.io,kch8qx/osf.io,brandonPurvis/osf.io,Ghalko/osf.io
|
Add migration for date_modified field.
|
"""
This will add a date_modified field to all nodes. Date_modified will be set equal to the date of the last log.
"""
import sys
import logging
from website import models
from website.app import init_app
logger = logging.getLogger(__name__)
def main():
init_app(routes=False)
dry_run = 'dry' in sys.argv
logger.warn('Date_modified field will be added to all nodes.')
if dry_run:
logger.warn('Dry_run mode')
for node in models.Node.find():
logger.info('Node {0} "date_modified" added'.format(node._id))
if not dry_run:
node.date_modified = node.date_updated()
node.save()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add migration for date_modified field.<commit_after>
|
"""
This will add a date_modified field to all nodes. Date_modified will be set equal to the date of the last log.
"""
import sys
import logging
from website import models
from website.app import init_app
logger = logging.getLogger(__name__)
def main():
init_app(routes=False)
dry_run = 'dry' in sys.argv
logger.warn('Date_modified field will be added to all nodes.')
if dry_run:
logger.warn('Dry_run mode')
for node in models.Node.find():
logger.info('Node {0} "date_modified" added'.format(node._id))
if not dry_run:
node.date_modified = node.date_updated()
node.save()
if __name__ == '__main__':
main()
|
Add migration for date_modified field.
"""
This will add a date_modified field to all nodes. Date_modified will be set equal to the date of the last log.
"""
import sys
import logging
from website import models
from website.app import init_app
logger = logging.getLogger(__name__)
def main():
init_app(routes=False)
dry_run = 'dry' in sys.argv
logger.warn('Date_modified field will be added to all nodes.')
if dry_run:
logger.warn('Dry_run mode')
for node in models.Node.find():
logger.info('Node {0} "date_modified" added'.format(node._id))
if not dry_run:
node.date_modified = node.date_updated()
node.save()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add migration for date_modified field.<commit_after>"""
This will add a date_modified field to all nodes. Date_modified will be set equal to the date of the last log.
"""
import sys
import logging
from website import models
from website.app import init_app
logger = logging.getLogger(__name__)
def main():
init_app(routes=False)
dry_run = 'dry' in sys.argv
logger.warn('Date_modified field will be added to all nodes.')
if dry_run:
logger.warn('Dry_run mode')
for node in models.Node.find():
logger.info('Node {0} "date_modified" added'.format(node._id))
if not dry_run:
node.date_modified = node.date_updated()
node.save()
if __name__ == '__main__':
main()
|
|
8d0fc0e317f6176764051eddd3d16237b65789cc
|
tests/api/test_roles.py
|
tests/api/test_roles.py
|
# -*- coding: utf-8 -*-
"""pytest Roles API wrapper tests and fixtures."""
import pytest
import ciscosparkapi
# Helper Functions
def get_list_of_roles(api, max=None):
return api.roles.list(max=max)
def get_role_by_id(api, roleId):
return api.roles.get(roleId)
def is_valid_role(obj):
return isinstance(obj, ciscosparkapi.Role) and obj.id is not None
def are_valid_roles(iterable):
return all([is_valid_role(obj) for obj in iterable])
# pytest Fixtures
@pytest.fixture(scope="session")
def roles_list(api):
return list(get_list_of_roles(api))
@pytest.fixture(scope="session")
def roles_dict(roles_list):
return {role.name: role for role in roles_list}
# Tests
class TestRolesAPI(object):
"""Test RolesAPI methods."""
def test_list_roles(self, roles_list):
assert are_valid_roles(roles_list)
def test_list_roles_with_paging(self, api):
paging_generator = get_list_of_roles(api, max=1)
roles = list(paging_generator)
assert len(roles) > 1
assert are_valid_roles(roles)
def test_get_role_by_id(self, api, roles_list):
assert len(roles_list) >= 1
role_id = roles_list[0].id
role = get_role_by_id(api, roleId=role_id)
assert is_valid_role(role)
|
Add tests and fixtures for the Roles API wrapper
|
Add tests and fixtures for the Roles API wrapper
|
Python
|
mit
|
jbogarin/ciscosparkapi
|
Add tests and fixtures for the Roles API wrapper
|
# -*- coding: utf-8 -*-
"""pytest Roles API wrapper tests and fixtures."""
import pytest
import ciscosparkapi
# Helper Functions
def get_list_of_roles(api, max=None):
return api.roles.list(max=max)
def get_role_by_id(api, roleId):
return api.roles.get(roleId)
def is_valid_role(obj):
return isinstance(obj, ciscosparkapi.Role) and obj.id is not None
def are_valid_roles(iterable):
return all([is_valid_role(obj) for obj in iterable])
# pytest Fixtures
@pytest.fixture(scope="session")
def roles_list(api):
return list(get_list_of_roles(api))
@pytest.fixture(scope="session")
def roles_dict(roles_list):
return {role.name: role for role in roles_list}
# Tests
class TestRolesAPI(object):
"""Test RolesAPI methods."""
def test_list_roles(self, roles_list):
assert are_valid_roles(roles_list)
def test_list_roles_with_paging(self, api):
paging_generator = get_list_of_roles(api, max=1)
roles = list(paging_generator)
assert len(roles) > 1
assert are_valid_roles(roles)
def test_get_role_by_id(self, api, roles_list):
assert len(roles_list) >= 1
role_id = roles_list[0].id
role = get_role_by_id(api, roleId=role_id)
assert is_valid_role(role)
|
<commit_before><commit_msg>Add tests and fixtures for the Roles API wrapper<commit_after>
|
# -*- coding: utf-8 -*-
"""pytest Roles API wrapper tests and fixtures."""
import pytest
import ciscosparkapi
# Helper Functions
def get_list_of_roles(api, max=None):
return api.roles.list(max=max)
def get_role_by_id(api, roleId):
return api.roles.get(roleId)
def is_valid_role(obj):
return isinstance(obj, ciscosparkapi.Role) and obj.id is not None
def are_valid_roles(iterable):
return all([is_valid_role(obj) for obj in iterable])
# pytest Fixtures
@pytest.fixture(scope="session")
def roles_list(api):
return list(get_list_of_roles(api))
@pytest.fixture(scope="session")
def roles_dict(roles_list):
return {role.name: role for role in roles_list}
# Tests
class TestRolesAPI(object):
"""Test RolesAPI methods."""
def test_list_roles(self, roles_list):
assert are_valid_roles(roles_list)
def test_list_roles_with_paging(self, api):
paging_generator = get_list_of_roles(api, max=1)
roles = list(paging_generator)
assert len(roles) > 1
assert are_valid_roles(roles)
def test_get_role_by_id(self, api, roles_list):
assert len(roles_list) >= 1
role_id = roles_list[0].id
role = get_role_by_id(api, roleId=role_id)
assert is_valid_role(role)
|
Add tests and fixtures for the Roles API wrapper
# -*- coding: utf-8 -*-
"""pytest Roles API wrapper tests and fixtures."""
import pytest
import ciscosparkapi
# Helper Functions
def get_list_of_roles(api, max=None):
return api.roles.list(max=max)
def get_role_by_id(api, roleId):
return api.roles.get(roleId)
def is_valid_role(obj):
return isinstance(obj, ciscosparkapi.Role) and obj.id is not None
def are_valid_roles(iterable):
return all([is_valid_role(obj) for obj in iterable])
# pytest Fixtures
@pytest.fixture(scope="session")
def roles_list(api):
return list(get_list_of_roles(api))
@pytest.fixture(scope="session")
def roles_dict(roles_list):
return {role.name: role for role in roles_list}
# Tests
class TestRolesAPI(object):
"""Test RolesAPI methods."""
def test_list_roles(self, roles_list):
assert are_valid_roles(roles_list)
def test_list_roles_with_paging(self, api):
paging_generator = get_list_of_roles(api, max=1)
roles = list(paging_generator)
assert len(roles) > 1
assert are_valid_roles(roles)
def test_get_role_by_id(self, api, roles_list):
assert len(roles_list) >= 1
role_id = roles_list[0].id
role = get_role_by_id(api, roleId=role_id)
assert is_valid_role(role)
|
<commit_before><commit_msg>Add tests and fixtures for the Roles API wrapper<commit_after># -*- coding: utf-8 -*-
"""pytest Roles API wrapper tests and fixtures."""
import pytest
import ciscosparkapi
# Helper Functions
def get_list_of_roles(api, max=None):
return api.roles.list(max=max)
def get_role_by_id(api, roleId):
return api.roles.get(roleId)
def is_valid_role(obj):
return isinstance(obj, ciscosparkapi.Role) and obj.id is not None
def are_valid_roles(iterable):
return all([is_valid_role(obj) for obj in iterable])
# pytest Fixtures
@pytest.fixture(scope="session")
def roles_list(api):
return list(get_list_of_roles(api))
@pytest.fixture(scope="session")
def roles_dict(roles_list):
return {role.name: role for role in roles_list}
# Tests
class TestRolesAPI(object):
"""Test RolesAPI methods."""
def test_list_roles(self, roles_list):
assert are_valid_roles(roles_list)
def test_list_roles_with_paging(self, api):
paging_generator = get_list_of_roles(api, max=1)
roles = list(paging_generator)
assert len(roles) > 1
assert are_valid_roles(roles)
def test_get_role_by_id(self, api, roles_list):
assert len(roles_list) >= 1
role_id = roles_list[0].id
role = get_role_by_id(api, roleId=role_id)
assert is_valid_role(role)
|
|
8fcc475f1891cf760ae3535cfbe0eb49bf367497
|
whimbrel-client-core/python2_3/test/port_forward.py
|
whimbrel-client-core/python2_3/test/port_forward.py
|
import socket
import sys
import threading
import time
SERVER_THREADS = []
def start_server(local_port, remote_host, remote_port, out_file=None):
t = threading.Thread(target=server, kwargs={
"local_port": local_port,
"remote_host": remote_host,
"remote_port": remote_port,
"out_file": out_file
})
SERVER_THREADS.append(t)
t.start()
t.join()
SERVER_THREADS.remove(t)
def server(local_port=None, remote_host=None, remote_port=None, out_file=None):
try:
dock_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
dock_socket.bind(('', local_port))
dock_socket.listen(5)
while True:
client_socket = dock_socket.accept()[0]
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.connect((remote_host, remote_port))
t1 = threading.Thread(target=forward, args=(
client_socket,
server_socket,
"[{0}->{1}:{2}] ".format(local_port, remote_host, remote_port),
out_file))
t2 = threading.Thread(target=forward, args=(
server_socket,
client_socket,
"[{1}:{2}->{0}] ".format(local_port, remote_host, remote_port),
out_file))
t1.start()
t2.start()
finally:
print("Terminated server thread {0} -> {1}:{2}".format(local_port, remote_host, remote_port))
def forward(source, destination, prefix, out_file):
string = ' '
while string:
string = source.recv(1024)
if string:
if out_file is None:
for line in string.splitlines():
print(prefix + str(line))
else:
with open(out_file, "w") as f:
for line in string.splitlines():
f.write(prefix + str(line) + "\n")
destination.sendall(string)
else:
try:
source.shutdown(socket.SHUT_RD)
except OSError:
pass
try:
destination.shutdown(socket.SHUT_WR)
except OSError:
pass
if __name__ == '__main__':
if len(sys.argv) < 4:
print("Usage: {0} local_port remote_host remote_port (out_file)".format(sys.argv[0]))
sys.exit(1)
(local_port, remote_host, remote_port) = sys.argv[1:4]
out_file = None
if len(sys.argv) > 4:
out_file = sys.argv[4]
try:
server(int(local_port), remote_host, int(remote_port), out_file)
except KeyboardInterrupt:
print("Ctrl-C pressed")
|
Test for Amazon requests via a port forwarder.
|
Test for Amazon requests via a port forwarder.
|
Python
|
apache-2.0
|
groboclown/whimbrel,groboclown/whimbrel,groboclown/whimbrel,groboclown/whimbrel
|
Test for Amazon requests via a port forwarder.
|
import socket
import sys
import threading
import time
SERVER_THREADS = []
def start_server(local_port, remote_host, remote_port, out_file=None):
t = threading.Thread(target=server, kwargs={
"local_port": local_port,
"remote_host": remote_host,
"remote_port": remote_port,
"out_file": out_file
})
SERVER_THREADS.append(t)
t.start()
t.join()
SERVER_THREADS.remove(t)
def server(local_port=None, remote_host=None, remote_port=None, out_file=None):
try:
dock_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
dock_socket.bind(('', local_port))
dock_socket.listen(5)
while True:
client_socket = dock_socket.accept()[0]
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.connect((remote_host, remote_port))
t1 = threading.Thread(target=forward, args=(
client_socket,
server_socket,
"[{0}->{1}:{2}] ".format(local_port, remote_host, remote_port),
out_file))
t2 = threading.Thread(target=forward, args=(
server_socket,
client_socket,
"[{1}:{2}->{0}] ".format(local_port, remote_host, remote_port),
out_file))
t1.start()
t2.start()
finally:
print("Terminated server thread {0} -> {1}:{2}".format(local_port, remote_host, remote_port))
def forward(source, destination, prefix, out_file):
string = ' '
while string:
string = source.recv(1024)
if string:
if out_file is None:
for line in string.splitlines():
print(prefix + str(line))
else:
with open(out_file, "w") as f:
for line in string.splitlines():
f.write(prefix + str(line) + "\n")
destination.sendall(string)
else:
try:
source.shutdown(socket.SHUT_RD)
except OSError:
pass
try:
destination.shutdown(socket.SHUT_WR)
except OSError:
pass
if __name__ == '__main__':
if len(sys.argv) < 4:
print("Usage: {0} local_port remote_host remote_port (out_file)".format(sys.argv[0]))
sys.exit(1)
(local_port, remote_host, remote_port) = sys.argv[1:4]
out_file = None
if len(sys.argv) > 4:
out_file = sys.argv[4]
try:
server(int(local_port), remote_host, int(remote_port), out_file)
except KeyboardInterrupt:
print("Ctrl-C pressed")
|
<commit_before><commit_msg>Test for Amazon requests via a port forwarder.<commit_after>
|
import socket
import sys
import threading
import time
SERVER_THREADS = []
def start_server(local_port, remote_host, remote_port, out_file=None):
t = threading.Thread(target=server, kwargs={
"local_port": local_port,
"remote_host": remote_host,
"remote_port": remote_port,
"out_file": out_file
})
SERVER_THREADS.append(t)
t.start()
t.join()
SERVER_THREADS.remove(t)
def server(local_port=None, remote_host=None, remote_port=None, out_file=None):
try:
dock_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
dock_socket.bind(('', local_port))
dock_socket.listen(5)
while True:
client_socket = dock_socket.accept()[0]
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.connect((remote_host, remote_port))
t1 = threading.Thread(target=forward, args=(
client_socket,
server_socket,
"[{0}->{1}:{2}] ".format(local_port, remote_host, remote_port),
out_file))
t2 = threading.Thread(target=forward, args=(
server_socket,
client_socket,
"[{1}:{2}->{0}] ".format(local_port, remote_host, remote_port),
out_file))
t1.start()
t2.start()
finally:
print("Terminated server thread {0} -> {1}:{2}".format(local_port, remote_host, remote_port))
def forward(source, destination, prefix, out_file):
string = ' '
while string:
string = source.recv(1024)
if string:
if out_file is None:
for line in string.splitlines():
print(prefix + str(line))
else:
with open(out_file, "w") as f:
for line in string.splitlines():
f.write(prefix + str(line) + "\n")
destination.sendall(string)
else:
try:
source.shutdown(socket.SHUT_RD)
except OSError:
pass
try:
destination.shutdown(socket.SHUT_WR)
except OSError:
pass
if __name__ == '__main__':
if len(sys.argv) < 4:
print("Usage: {0} local_port remote_host remote_port (out_file)".format(sys.argv[0]))
sys.exit(1)
(local_port, remote_host, remote_port) = sys.argv[1:4]
out_file = None
if len(sys.argv) > 4:
out_file = sys.argv[4]
try:
server(int(local_port), remote_host, int(remote_port), out_file)
except KeyboardInterrupt:
print("Ctrl-C pressed")
|
Test for Amazon requests via a port forwarder.
import socket
import sys
import threading
import time
SERVER_THREADS = []
def start_server(local_port, remote_host, remote_port, out_file=None):
t = threading.Thread(target=server, kwargs={
"local_port": local_port,
"remote_host": remote_host,
"remote_port": remote_port,
"out_file": out_file
})
SERVER_THREADS.append(t)
t.start()
t.join()
SERVER_THREADS.remove(t)
def server(local_port=None, remote_host=None, remote_port=None, out_file=None):
try:
dock_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
dock_socket.bind(('', local_port))
dock_socket.listen(5)
while True:
client_socket = dock_socket.accept()[0]
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.connect((remote_host, remote_port))
t1 = threading.Thread(target=forward, args=(
client_socket,
server_socket,
"[{0}->{1}:{2}] ".format(local_port, remote_host, remote_port),
out_file))
t2 = threading.Thread(target=forward, args=(
server_socket,
client_socket,
"[{1}:{2}->{0}] ".format(local_port, remote_host, remote_port),
out_file))
t1.start()
t2.start()
finally:
print("Terminated server thread {0} -> {1}:{2}".format(local_port, remote_host, remote_port))
def forward(source, destination, prefix, out_file):
string = ' '
while string:
string = source.recv(1024)
if string:
if out_file is None:
for line in string.splitlines():
print(prefix + str(line))
else:
with open(out_file, "w") as f:
for line in string.splitlines():
f.write(prefix + str(line) + "\n")
destination.sendall(string)
else:
try:
source.shutdown(socket.SHUT_RD)
except OSError:
pass
try:
destination.shutdown(socket.SHUT_WR)
except OSError:
pass
if __name__ == '__main__':
if len(sys.argv) < 4:
print("Usage: {0} local_port remote_host remote_port (out_file)".format(sys.argv[0]))
sys.exit(1)
(local_port, remote_host, remote_port) = sys.argv[1:4]
out_file = None
if len(sys.argv) > 4:
out_file = sys.argv[4]
try:
server(int(local_port), remote_host, int(remote_port), out_file)
except KeyboardInterrupt:
print("Ctrl-C pressed")
|
<commit_before><commit_msg>Test for Amazon requests via a port forwarder.<commit_after>import socket
import sys
import threading
import time
SERVER_THREADS = []
def start_server(local_port, remote_host, remote_port, out_file=None):
t = threading.Thread(target=server, kwargs={
"local_port": local_port,
"remote_host": remote_host,
"remote_port": remote_port,
"out_file": out_file
})
SERVER_THREADS.append(t)
t.start()
t.join()
SERVER_THREADS.remove(t)
def server(local_port=None, remote_host=None, remote_port=None, out_file=None):
try:
dock_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
dock_socket.bind(('', local_port))
dock_socket.listen(5)
while True:
client_socket = dock_socket.accept()[0]
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.connect((remote_host, remote_port))
t1 = threading.Thread(target=forward, args=(
client_socket,
server_socket,
"[{0}->{1}:{2}] ".format(local_port, remote_host, remote_port),
out_file))
t2 = threading.Thread(target=forward, args=(
server_socket,
client_socket,
"[{1}:{2}->{0}] ".format(local_port, remote_host, remote_port),
out_file))
t1.start()
t2.start()
finally:
print("Terminated server thread {0} -> {1}:{2}".format(local_port, remote_host, remote_port))
def forward(source, destination, prefix, out_file):
string = ' '
while string:
string = source.recv(1024)
if string:
if out_file is None:
for line in string.splitlines():
print(prefix + str(line))
else:
with open(out_file, "w") as f:
for line in string.splitlines():
f.write(prefix + str(line) + "\n")
destination.sendall(string)
else:
try:
source.shutdown(socket.SHUT_RD)
except OSError:
pass
try:
destination.shutdown(socket.SHUT_WR)
except OSError:
pass
if __name__ == '__main__':
if len(sys.argv) < 4:
print("Usage: {0} local_port remote_host remote_port (out_file)".format(sys.argv[0]))
sys.exit(1)
(local_port, remote_host, remote_port) = sys.argv[1:4]
out_file = None
if len(sys.argv) > 4:
out_file = sys.argv[4]
try:
server(int(local_port), remote_host, int(remote_port), out_file)
except KeyboardInterrupt:
print("Ctrl-C pressed")
|
|
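A minimal usage sketch for the forwarder above, assuming the script is importable as port_forwarder; the module name, host, ports and log file name are illustrative assumptions, not part of the commit.

# Hypothetical usage: forward localhost:8080 to example.com:80 and log traffic.
import threading
import port_forwarder  # assumed module name for the script above

t = threading.Thread(
    target=port_forwarder.server,
    kwargs={"local_port": 8080, "remote_host": "example.com",
            "remote_port": 80, "out_file": "traffic.log"},
    daemon=True,
)
t.start()
# Point an HTTP client at http://localhost:8080/ and inspect traffic.log.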
a64f9c780000785f4dc9a08109b91ec9b2dfefc4
|
tests/projects/test_models.py
|
tests/projects/test_models.py
|
import pytest
from frigg.projects.models import EnvironmentVariable, Project
@pytest.fixture
def project():
return Project(owner='frigg', name='frigg-hq')
def test_environment_variable__str__(project):
variable = EnvironmentVariable(project=project, key='PYPI_PASSWORD')
assert str(variable) == 'frigg / frigg-hq - PYPI_PASSWORD'
|
Add test for str in EnvironmentVariable
|
test: Add test for str in EnvironmentVariable
|
Python
|
mit
|
frigg/frigg-hq,frigg/frigg-hq,frigg/frigg-hq
|
test: Add test for str in EnvironmentVariable
|
import pytest
from frigg.projects.models import EnvironmentVariable, Project
@pytest.fixture
def project():
return Project(owner='frigg', name='frigg-hq')
def test_environment_variable__str__(project):
variable = EnvironmentVariable(project=project, key='PYPI_PASSWORD')
assert str(variable) == 'frigg / frigg-hq - PYPI_PASSWORD'
|
<commit_before><commit_msg>test: Add test for str in EnvironmentVariable<commit_after>
|
import pytest
from frigg.projects.models import EnvironmentVariable, Project
@pytest.fixture
def project():
return Project(owner='frigg', name='frigg-hq')
def test_environment_variable__str__(project):
variable = EnvironmentVariable(project=project, key='PYPI_PASSWORD')
assert str(variable) == 'frigg / frigg-hq - PYPI_PASSWORD'
|
test: Add test for str in EnvironmentVariable
import pytest
from frigg.projects.models import EnvironmentVariable, Project
@pytest.fixture
def project():
return Project(owner='frigg', name='frigg-hq')
def test_environment_variable__str__(project):
variable = EnvironmentVariable(project=project, key='PYPI_PASSWORD')
assert str(variable) == 'frigg / frigg-hq - PYPI_PASSWORD'
|
<commit_before><commit_msg>test: Add test for str in EnvironmentVariable<commit_after>import pytest
from frigg.projects.models import EnvironmentVariable, Project
@pytest.fixture
def project():
return Project(owner='frigg', name='frigg-hq')
def test_environment_variable__str__(project):
variable = EnvironmentVariable(project=project, key='PYPI_PASSWORD')
assert str(variable) == 'frigg / frigg-hq - PYPI_PASSWORD'
|
|
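The assertion above pins down the expected string formats. A minimal sketch of __str__ methods that would satisfy it is shown below; these are illustrative stand-ins, not the real frigg.projects.models definitions.

# Illustrative only: plain classes whose str() output matches the test above.
class Project(object):
    def __init__(self, owner, name):
        self.owner = owner
        self.name = name
    def __str__(self):
        return '{} / {}'.format(self.owner, self.name)

class EnvironmentVariable(object):
    def __init__(self, project, key):
        self.project = project
        self.key = key
    def __str__(self):
        return '{} - {}'.format(self.project, self.key)

assert str(EnvironmentVariable(Project('frigg', 'frigg-hq'), 'PYPI_PASSWORD')) == 'frigg / frigg-hq - PYPI_PASSWORD'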
90ae4c1bf85128df46cc13fa30f9b2198006282d
|
utilities/unicode_to_ascii.py
|
utilities/unicode_to_ascii.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Unicode to ASCII Utility
========================
"""
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import codecs
import os
import unicodedata
__copyright__ = 'Copyright (C) 2013-2018 - Colour Developers'
__license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-science@googlegroups.com'
__status__ = 'Production'
__all__ = ['SUBSTITUTIONS', 'unicode_to_ascii']
SUBSTITUTIONS = {
'–': '-',
'“': '"',
'”': '"',
'‘': "'",
'’': "'",
}
def unicode_to_ascii(root_directory):
"""
Recursively converts from unicode to ASCII *.py*, *.bib* and *.rst* files
in given directory.
Parameters
----------
root_directory : unicode
Directory to convert the files from unicode to ASCII.
"""
for root, dirnames, filenames in os.walk(root_directory):
for filename in filenames:
if (not filename.endswith('.py') and
not filename.endswith('.bib') and
not filename.endswith('.rst') and
filename != 'unicode_to_ascii.py'):
continue
filename = os.path.join(root, filename)
with codecs.open(filename, encoding='utf8') as file_handle:
content = file_handle.read()
with codecs.open(filename, 'w', encoding='utf8') as file_handle:
for key, value in SUBSTITUTIONS.items():
content = content.replace(key, value)
content = unicodedata.normalize('NFD', content).encode(
'ascii', 'ignore')
file_handle.write(content)
if __name__ == '__main__':
unicode_to_ascii(os.path.join('..', 'colour'))
|
Convert unicode characters to ASCII using Canonical Decomposition (NFD).
|
Convert unicode characters to ASCII using Canonical Decomposition (NFD).
|
Python
|
bsd-3-clause
|
colour-science/colour-demosaicing
|
Convert unicode characters to ASCII using Canonical Decomposition (NFD).
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Unicode to ASCII Utility
========================
"""
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import codecs
import os
import unicodedata
__copyright__ = 'Copyright (C) 2013-2018 - Colour Developers'
__license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-science@googlegroups.com'
__status__ = 'Production'
__all__ = ['SUBSTITUTIONS', 'unicode_to_ascii']
SUBSTITUTIONS = {
'–': '-',
'“': '"',
'”': '"',
'‘': "'",
'’': "'",
}
def unicode_to_ascii(root_directory):
"""
Recursively converts from unicode to ASCII *.py*, *.bib* and *.rst* files
in given directory.
Parameters
----------
root_directory : unicode
Directory to convert the files from unicode to ASCII.
"""
for root, dirnames, filenames in os.walk(root_directory):
for filename in filenames:
if (not filename.endswith('.py') and
not filename.endswith('.bib') and
not filename.endswith('.rst') and
filename != 'unicode_to_ascii.py'):
continue
filename = os.path.join(root, filename)
with codecs.open(filename, encoding='utf8') as file_handle:
content = file_handle.read()
with codecs.open(filename, 'w', encoding='utf8') as file_handle:
for key, value in SUBSTITUTIONS.items():
content = content.replace(key, value)
content = unicodedata.normalize('NFD', content).encode(
'ascii', 'ignore')
file_handle.write(content)
if __name__ == '__main__':
unicode_to_ascii(os.path.join('..', 'colour'))
|
<commit_before><commit_msg>Convert unicode characters to ASCII using Canonical Decomposition (NFD).<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Unicode to ASCII Utility
========================
"""
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import codecs
import os
import unicodedata
__copyright__ = 'Copyright (C) 2013-2018 - Colour Developers'
__license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-science@googlegroups.com'
__status__ = 'Production'
__all__ = ['SUBSTITUTIONS', 'unicode_to_ascii']
SUBSTITUTIONS = {
'–': '-',
'“': '"',
'”': '"',
'‘': "'",
'’': "'",
}
def unicode_to_ascii(root_directory):
"""
Recursively converts from unicode to ASCII *.py*, *.bib* and *.rst* files
in given directory.
Parameters
----------
root_directory : unicode
Directory to convert the files from unicode to ASCII.
"""
for root, dirnames, filenames in os.walk(root_directory):
for filename in filenames:
if (not filename.endswith('.py') and
not filename.endswith('.bib') and
not filename.endswith('.rst') and
filename != 'unicode_to_ascii.py'):
continue
filename = os.path.join(root, filename)
with codecs.open(filename, encoding='utf8') as file_handle:
content = file_handle.read()
with codecs.open(filename, 'w', encoding='utf8') as file_handle:
for key, value in SUBSTITUTIONS.items():
content = content.replace(key, value)
content = unicodedata.normalize('NFD', content).encode(
'ascii', 'ignore')
file_handle.write(content)
if __name__ == '__main__':
unicode_to_ascii(os.path.join('..', 'colour'))
|
Convert unicode characters to ASCII using Canonical Decomposition (NFD).
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Unicode to ASCII Utility
========================
"""
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import codecs
import os
import unicodedata
__copyright__ = 'Copyright (C) 2013-2018 - Colour Developers'
__license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-science@googlegroups.com'
__status__ = 'Production'
__all__ = ['SUBSTITUTIONS', 'unicode_to_ascii']
SUBSTITUTIONS = {
'–': '-',
'“': '"',
'”': '"',
'‘': "'",
'’': "'",
}
def unicode_to_ascii(root_directory):
"""
Recursively converts from unicode to ASCII *.py*, *.bib* and *.rst* files
in given directory.
Parameters
----------
root_directory : unicode
Directory to convert the files from unicode to ASCII.
"""
for root, dirnames, filenames in os.walk(root_directory):
for filename in filenames:
if (not filename.endswith('.py') and
not filename.endswith('.bib') and
not filename.endswith('.rst') and
filename != 'unicode_to_ascii.py'):
continue
filename = os.path.join(root, filename)
with codecs.open(filename, encoding='utf8') as file_handle:
content = file_handle.read()
with codecs.open(filename, 'w', encoding='utf8') as file_handle:
for key, value in SUBSTITUTIONS.items():
content = content.replace(key, value)
content = unicodedata.normalize('NFD', content).encode(
'ascii', 'ignore')
file_handle.write(content)
if __name__ == '__main__':
unicode_to_ascii(os.path.join('..', 'colour'))
|
<commit_before><commit_msg>Convert unicode characters to ASCII using Canonical Decomposition (NFD).<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Unicode to ASCII Utility
========================
"""
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import codecs
import os
import unicodedata
__copyright__ = 'Copyright (C) 2013-2018 - Colour Developers'
__license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-science@googlegroups.com'
__status__ = 'Production'
__all__ = ['SUBSTITUTIONS', 'unicode_to_ascii']
SUBSTITUTIONS = {
'–': '-',
'“': '"',
'”': '"',
'‘': "'",
'’': "'",
}
def unicode_to_ascii(root_directory):
"""
Recursively converts from unicode to ASCII *.py*, *.bib* and *.rst* files
in given directory.
Parameters
----------
root_directory : unicode
Directory to convert the files from unicode to ASCII.
"""
for root, dirnames, filenames in os.walk(root_directory):
for filename in filenames:
if (not filename.endswith('.py') and
not filename.endswith('.bib') and
not filename.endswith('.rst') and
filename != 'unicode_to_ascii.py'):
continue
filename = os.path.join(root, filename)
with codecs.open(filename, encoding='utf8') as file_handle:
content = file_handle.read()
with codecs.open(filename, 'w', encoding='utf8') as file_handle:
for key, value in SUBSTITUTIONS.items():
content = content.replace(key, value)
content = unicodedata.normalize('NFD', content).encode(
'ascii', 'ignore')
file_handle.write(content)
if __name__ == '__main__':
unicode_to_ascii(os.path.join('..', 'colour'))
|
|
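A small illustration of the substitution plus NFD/ASCII step performed above; the sample string is made up for the example.

# Illustration only: what the normalisation above does to a sample string.
import unicodedata
sample = 'Café – “quotes”'
for key, value in {'–': '-', '“': '"', '”': '"'}.items():
    sample = sample.replace(key, value)
print(unicodedata.normalize('NFD', sample).encode('ascii', 'ignore'))
# prints b'Cafe - "quotes"'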
5700368356c0d3209cd7a60a67c471a936d645f9
|
gitignore-update.py
|
gitignore-update.py
|
# encoding: utf-8
import os
import sys
from sh import git, pwd, sh
from workflow import Workflow, ICON_SYNC, web
def main(wf):
return_value = 0
if not repo_exists():
return_value = clone_repo()
else:
return_value = pull_repo()
if return_value:
print "ERROR. Templates could not be downloaded."
else:
print "Templates have been successfully updated."
def clone_repo():
return_value = 0
try:
return_value = git.clone("https://github.com/github/gitignore.git")
except:
return_value = -1
return return_value
def pull_repo():
return_value = 0
try:
os.chdir("./gitignore")
return_code = git.pull()
except:
return_value = -1
return return_value
def repo_exists():
return os.path.isdir("./gitignore")
if __name__ == u"__main__":
wf = Workflow()
sys.exit(wf.run(main))
|
Add script that downloads templates
|
Add script that downloads templates
A script has been added to download the templates from GitHub. The
templates are checked out as a Git repository, making it really easy to
download changes later on.
If the repository does not exist yet, "git clone" is executed to create
a local copy of it. If it exists, "git pull" is invoked to get the
latest version of the templates.
|
Python
|
mit
|
jdno/alfred2-gitignore
|
Add script that downloads templates
A script has been added to download the templates from GitHub. The
templates are checked out as a Git repository, making it really easy to
download changes later on.
If the repository does not exist yet, "git clone" is executed to create
a local copy of it. If it exists, "git pull" is invoked to get the
latest version of the templates.
|
# encoding: utf-8
import os
import sys
from sh import git, pwd, sh
from workflow import Workflow, ICON_SYNC, web
def main(wf):
return_value = 0
if not repo_exists():
return_value = clone_repo()
else:
return_value = pull_repo()
if return_value:
print "ERROR. Templates could not be downloaded."
else:
print "Templates have been successfully updated."
def clone_repo():
return_value = 0
try:
return_value = git.clone("https://github.com/github/gitignore.git")
except:
return_value = -1
return return_value
def pull_repo():
return_value = 0
try:
os.chdir("./gitignore")
return_code = git.pull()
except:
return_value = -1
return return_value
def repo_exists():
return os.path.isdir("./gitignore")
if __name__ == u"__main__":
wf = Workflow()
sys.exit(wf.run(main))
|
<commit_before><commit_msg>Add script that downloads templates
A script has been added to download the templates from GitHub. The
templates are checked out as a Git repository, making it really easy to
download changes later on.
If the repository does not exist yet, "git clone" is executed to create
a local copy of it. If it exists, "git pull" is invoked to get the
latest version of the templates.<commit_after>
|
# encoding: utf-8
import os
import sys
from sh import git, pwd, sh
from workflow import Workflow, ICON_SYNC, web
def main(wf):
return_value = 0
if not repo_exists():
return_value = clone_repo()
else:
return_value = pull_repo()
if return_value:
print "ERROR. Templates could not be downloaded."
else:
print "Templates have been successfully updated."
def clone_repo():
return_value = 0
try:
return_value = git.clone("https://github.com/github/gitignore.git")
except:
return_value = -1
return return_value
def pull_repo():
return_value = 0
try:
os.chdir("./gitignore")
return_code = git.pull()
except:
return_value = -1
return return_value
def repo_exists():
return os.path.isdir("./gitignore")
if __name__ == u"__main__":
wf = Workflow()
sys.exit(wf.run(main))
|
Add script that downloads templates
A script has been added to download the templates from GitHub. The
templates are checked out as a Git repository, making it really easy to
download changes later on.
If the repository does not exist yet, "git clone" is executed to create
a local copy of it. If it exists, "git pull" is invoked to get the
latest version of the templates.
# encoding: utf-8
import os
import sys
from sh import git, pwd, sh
from workflow import Workflow, ICON_SYNC, web
def main(wf):
return_value = 0
if not repo_exists():
return_value = clone_repo()
else:
return_value = pull_repo()
if return_value:
print "ERROR. Templates could not be downloaded."
else:
print "Templates have been successfully updated."
def clone_repo():
return_value = 0
try:
return_value = git.clone("https://github.com/github/gitignore.git")
except:
return_value = -1
return return_value
def pull_repo():
return_value = 0
try:
os.chdir("./gitignore")
return_code = git.pull()
except:
return_value = -1
return return_value
def repo_exists():
return os.path.isdir("./gitignore")
if __name__ == u"__main__":
wf = Workflow()
sys.exit(wf.run(main))
|
<commit_before><commit_msg>Add script that downloads templates
A script has been added to download the templates from GitHub. The
templates are checked out as a Git repository, making it really easy to
download changes later on.
If the repository does not exist yet, "git clone" is executed to create
a local copy of it. If it exists, "git pull" is invoked to get the
latest version of the templates.<commit_after># encoding: utf-8
import os
import sys
from sh import git, pwd, sh
from workflow import Workflow, ICON_SYNC, web
def main(wf):
return_value = 0
if not repo_exists():
return_value = clone_repo()
else:
return_value = pull_repo()
if return_value:
print "ERROR. Templates could not be downloaded."
else:
print "Templates have been successfully updated."
def clone_repo():
return_value = 0
try:
return_value = git.clone("https://github.com/github/gitignore.git")
except:
return_value = -1
return return_value
def pull_repo():
return_value = 0
try:
os.chdir("./gitignore")
return_code = git.pull()
except:
return_value = -1
return return_value
def repo_exists():
return os.path.isdir("./gitignore")
if __name__ == u"__main__":
wf = Workflow()
sys.exit(wf.run(main))
|
|
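The clone-or-pull decision described in the message can also be written with only the standard library. The following is a rough sketch, not part of the workflow; the path and URL are copied from the script above.

# Sketch: the same clone-or-pull logic using subprocess instead of sh.
import os
import subprocess

def update_templates(repo_dir="./gitignore",
                     url="https://github.com/github/gitignore.git"):
    if os.path.isdir(repo_dir):
        return subprocess.call(["git", "-C", repo_dir, "pull"])
    return subprocess.call(["git", "clone", url, repo_dir])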
a554be928f6123ad78d97c0e988bf78c2978e877
|
geotrek/infrastructure/migrations/0028_infrastructure_published_translation.py
|
geotrek/infrastructure/migrations/0028_infrastructure_published_translation.py
|
# Generated by Django 3.1.14 on 2022-03-11 15:06
from django.conf import settings
from django.db import migrations
def forward(apps, schema_editor):
with schema_editor.connection.cursor() as cursor:
cursor.execute(
f"SELECT 1 FROM information_schema.columns WHERE table_name='infrastructure_infrastructure' AND column_name='published'"
)
if cursor.fetchone():
for lang in settings.MODELTRANSLATION_LANGUAGES:
cursor.execute(
f"SELECT 1 FROM information_schema.columns WHERE table_name='infrastructure_infrastructure' AND column_name='published_{lang}'"
)
if not cursor.fetchone():
cursor.execute(
f"ALTER TABLE infrastructure_infrastructure ADD published_{lang} Boolean DEFAULT True;"
)
def backward(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('infrastructure', '0027_infrastructure_accessibility'),
]
operations = [
migrations.RunPython(forward, backward),
]
|
Add migration fix published language
|
Add migration fix published language
|
Python
|
bsd-2-clause
|
makinacorpus/Geotrek,makinacorpus/Geotrek,GeotrekCE/Geotrek-admin,GeotrekCE/Geotrek-admin,GeotrekCE/Geotrek-admin,makinacorpus/Geotrek,GeotrekCE/Geotrek-admin,makinacorpus/Geotrek
|
Add migration fix published language
|
# Generated by Django 3.1.14 on 2022-03-11 15:06
from django.conf import settings
from django.db import migrations
def forward(apps, schema_editor):
with schema_editor.connection.cursor() as cursor:
cursor.execute(
f"SELECT 1 FROM information_schema.columns WHERE table_name='infrastructure_infrastructure' AND column_name='published'"
)
if cursor.fetchone():
for lang in settings.MODELTRANSLATION_LANGUAGES:
cursor.execute(
f"SELECT 1 FROM information_schema.columns WHERE table_name='infrastructure_infrastructure' AND column_name='published_{lang}'"
)
if not cursor.fetchone():
cursor.execute(
f"ALTER TABLE infrastructure_infrastructure ADD published_{lang} Boolean DEFAULT True;"
)
def backward(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('infrastructure', '0027_infrastructure_accessibility'),
]
operations = [
migrations.RunPython(forward, backward),
]
|
<commit_before><commit_msg>Add migration fix published language<commit_after>
|
# Generated by Django 3.1.14 on 2022-03-11 15:06
from django.conf import settings
from django.db import migrations
def forward(apps, schema_editor):
with schema_editor.connection.cursor() as cursor:
cursor.execute(
f"SELECT 1 FROM information_schema.columns WHERE table_name='infrastructure_infrastructure' AND column_name='published'"
)
if cursor.fetchone():
for lang in settings.MODELTRANSLATION_LANGUAGES:
cursor.execute(
f"SELECT 1 FROM information_schema.columns WHERE table_name='infrastructure_infrastructure' AND column_name='published_{lang}'"
)
if not cursor.fetchone():
cursor.execute(
f"ALTER TABLE infrastructure_infrastructure ADD published_{lang} Boolean DEFAULT True;"
)
def backward(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('infrastructure', '0027_infrastructure_accessibility'),
]
operations = [
migrations.RunPython(forward, backward),
]
|
Add migration fix published language
# Generated by Django 3.1.14 on 2022-03-11 15:06
from django.conf import settings
from django.db import migrations
def forward(apps, schema_editor):
with schema_editor.connection.cursor() as cursor:
cursor.execute(
f"SELECT 1 FROM information_schema.columns WHERE table_name='infrastructure_infrastructure' AND column_name='published'"
)
if cursor.fetchone():
for lang in settings.MODELTRANSLATION_LANGUAGES:
cursor.execute(
f"SELECT 1 FROM information_schema.columns WHERE table_name='infrastructure_infrastructure' AND column_name='published_{lang}'"
)
if not cursor.fetchone():
cursor.execute(
f"ALTER TABLE infrastructure_infrastructure ADD published_{lang} Boolean DEFAULT True;"
)
def backward(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('infrastructure', '0027_infrastructure_accessibility'),
]
operations = [
migrations.RunPython(forward, backward),
]
|
<commit_before><commit_msg>Add migration fix published language<commit_after># Generated by Django 3.1.14 on 2022-03-11 15:06
from django.conf import settings
from django.db import migrations
def forward(apps, schema_editor):
with schema_editor.connection.cursor() as cursor:
cursor.execute(
f"SELECT 1 FROM information_schema.columns WHERE table_name='infrastructure_infrastructure' AND column_name='published'"
)
if cursor.fetchone():
for lang in settings.MODELTRANSLATION_LANGUAGES:
cursor.execute(
f"SELECT 1 FROM information_schema.columns WHERE table_name='infrastructure_infrastructure' AND column_name='published_{lang}'"
)
if not cursor.fetchone():
cursor.execute(
f"ALTER TABLE infrastructure_infrastructure ADD published_{lang} Boolean DEFAULT True;"
)
def backward(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('infrastructure', '0027_infrastructure_accessibility'),
]
operations = [
migrations.RunPython(forward, backward),
]
|
|
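For reference, a hypothetical settings value showing what drives the loop in forward(); the languages configured in a real deployment will differ.

# Hypothetical example only.
MODELTRANSLATION_LANGUAGES = ('en', 'fr', 'it')
# With this setting, forward() would add published_en, published_fr and
# published_it to infrastructure_infrastructure where they do not already exist.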
33e5b3ea0567100d3145a566bd86f95ac6eb7d95
|
survey/tests/test_pre_commit_normalization.py
|
survey/tests/test_pre_commit_normalization.py
|
import subprocess
import unittest
class TestPreCommitNormalization(unittest.TestCase):
def test_normalization(self):
""" We test if the code was properly formatted with pre-commit. """
pre_commit_command = ["pre-commit", "run", "--all-files"]
try:
subprocess.check_call(pre_commit_command)
except subprocess.CalledProcessError:
msg = (
"You did not apply pre-commit hook to your code, or you did not fix all the problems. "
"We launched pre-commit during tests but there might still be some warnings or errors"
"to silence with pragma."
)
self.fail(msg)
self.assertTrue(True)
|
Test - Add a test for pre-commit compliance
|
Test - Add a test for pre-commit compliance
|
Python
|
agpl-3.0
|
Pierre-Sassoulas/django-survey,Pierre-Sassoulas/django-survey,Pierre-Sassoulas/django-survey
|
Test - Add a test for pre-commit compliance
|
import subprocess
import unittest
class TestPreCommitNormalization(unittest.TestCase):
def test_normalization(self):
""" We test if the code was properly formatted with pre-commit. """
pre_commit_command = ["pre-commit", "run", "--all-files"]
try:
subprocess.check_call(pre_commit_command)
except subprocess.CalledProcessError:
msg = (
"You did not apply pre-commit hook to your code, or you did not fix all the problems. "
"We launched pre-commit during tests but there might still be some warnings or errors"
"to silence with pragma."
)
self.fail(msg)
self.assertTrue(True)
|
<commit_before><commit_msg>Test - Add a test for pre-commit compliance<commit_after>
|
import subprocess
import unittest
class TestPreCommitNormalization(unittest.TestCase):
def test_normalization(self):
""" We test if the code was properly formatted with pre-commit. """
pre_commit_command = ["pre-commit", "run", "--all-files"]
try:
subprocess.check_call(pre_commit_command)
except subprocess.CalledProcessError:
msg = (
"You did not apply pre-commit hook to your code, or you did not fix all the problems. "
"We launched pre-commit during tests but there might still be some warnings or errors"
"to silence with pragma."
)
self.fail(msg)
self.assertTrue(True)
|
Test - Add a test for pre-commit compliance
import subprocess
import unittest
class TestPreCommitNormalization(unittest.TestCase):
def test_normalization(self):
""" We test if the code was properly formatted with pre-commit. """
pre_commit_command = ["pre-commit", "run", "--all-files"]
try:
subprocess.check_call(pre_commit_command)
except subprocess.CalledProcessError:
msg = (
"You did not apply pre-commit hook to your code, or you did not fix all the problems. "
"We launched pre-commit during tests but there might still be some warnings or errors"
"to silence with pragma."
)
self.fail(msg)
self.assertTrue(True)
|
<commit_before><commit_msg>Test - Add a test for pre-commit compliance<commit_after>import subprocess
import unittest
class TestPreCommitNormalization(unittest.TestCase):
def test_normalization(self):
""" We test if the code was properly formatted with pre-commit. """
pre_commit_command = ["pre-commit", "run", "--all-files"]
try:
subprocess.check_call(pre_commit_command)
except subprocess.CalledProcessError:
msg = (
"You did not apply pre-commit hook to your code, or you did not fix all the problems. "
"We launched pre-commit during tests but there might still be some warnings or errors"
"to silence with pragma."
)
self.fail(msg)
self.assertTrue(True)
|
|
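The test above relies on subprocess.check_call raising CalledProcessError on a non-zero exit status. A tiny stand-alone illustration of that behaviour, assuming a Unix-like system that provides the false command:

# Illustration only: check_call raises when the command exits non-zero.
import subprocess
try:
    subprocess.check_call(["false"])  # always exits with status 1
except subprocess.CalledProcessError as err:
    print("command failed with exit code", err.returncode)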
2c4154f17af584dfa019ea9c2e71f0cc53389901
|
pombola/south_africa/management/commands/south_africa_import_bios_from_json.py
|
pombola/south_africa/management/commands/south_africa_import_bios_from_json.py
|
from warnings import warn
import json
from django.core.management.base import LabelCommand, CommandError
from pombola.core.models import Person
# Expected JSON is an array of entries:
#
# [
# {
# "name": "John Smith",
# "bio": "Blah blah"
# },
# ....
# ]
class Command(LabelCommand):
help = 'Set profiles (in summary field)'
args = '<profile JSON file>'
def handle_label(self, input_filename, **options):
input_entries = json.loads( open(input_filename, 'r').read() )
for entry in input_entries:
try:
person = Person.objects.get(legal_name=entry['name'])
except Person.DoesNotExist:
warn("Could not find person matching '%s'" % entry['name'])
continue
print "Setting summary for '%s'" % person
bio = entry['bio']
bio = bio.replace('\n', '\n\n')
person.summary = bio
person.save()
|
Add import script to read in profile json.
|
Add import script to read in profile json.
|
Python
|
agpl-3.0
|
mysociety/pombola,geoffkilpin/pombola,geoffkilpin/pombola,patricmutwiri/pombola,patricmutwiri/pombola,hzj123/56th,ken-muturi/pombola,geoffkilpin/pombola,mysociety/pombola,hzj123/56th,hzj123/56th,mysociety/pombola,geoffkilpin/pombola,patricmutwiri/pombola,geoffkilpin/pombola,ken-muturi/pombola,hzj123/56th,mysociety/pombola,mysociety/pombola,ken-muturi/pombola,patricmutwiri/pombola,geoffkilpin/pombola,hzj123/56th,hzj123/56th,patricmutwiri/pombola,ken-muturi/pombola,mysociety/pombola,ken-muturi/pombola,ken-muturi/pombola,patricmutwiri/pombola
|
Add import script to read in profile json.
|
from warnings import warn
import json
from django.core.management.base import LabelCommand, CommandError
from pombola.core.models import Person
# Expected JSON is an array of entries:
#
# [
# {
# "name": "John Smith",
# "bio": "Blah blah"
# },
# ....
# ]
class Command(LabelCommand):
help = 'Set profiles (in summary field)'
args = '<profile JSON file>'
def handle_label(self, input_filename, **options):
input_entries = json.loads( open(input_filename, 'r').read() )
for entry in input_entries:
try:
person = Person.objects.get(legal_name=entry['name'])
except Person.DoesNotExist:
warn("Could not find person matching '%s'" % entry['name'])
continue
print "Setting summary for '%s'" % person
bio = entry['bio']
bio = bio.replace('\n', '\n\n')
person.summary = bio
person.save()
|
<commit_before><commit_msg>Add import script to read in profile json.<commit_after>
|
from warnings import warn
import json
from django.core.management.base import LabelCommand, CommandError
from pombola.core.models import Person
# Expected JSON is an array of entries:
#
# [
# {
# "name": "John Smith",
# "bio": "Blah blah"
# },
# ....
# ]
class Command(LabelCommand):
help = 'Set profiles (in summary field)'
args = '<profile JSON file>'
def handle_label(self, input_filename, **options):
input_entries = json.loads( open(input_filename, 'r').read() )
for entry in input_entries:
try:
person = Person.objects.get(legal_name=entry['name'])
except Person.DoesNotExist:
warn("Could not find person matching '%s'" % entry['name'])
continue
print "Setting summary for '%s'" % person
bio = entry['bio']
bio = bio.replace('\n', '\n\n')
person.summary = bio
person.save()
|
Add import script to read in profile json.
from warnings import warn
import json
from django.core.management.base import LabelCommand, CommandError
from pombola.core.models import Person
# Expected JSON is an array of entries:
#
# [
# {
# "name": "John Smith",
# "bio": "Blah blah"
# },
# ....
# ]
class Command(LabelCommand):
help = 'Set profiles (in summary field)'
args = '<profile JSON file>'
def handle_label(self, input_filename, **options):
input_entries = json.loads( open(input_filename, 'r').read() )
for entry in input_entries:
try:
person = Person.objects.get(legal_name=entry['name'])
except Person.DoesNotExist:
warn("Could not find person matching '%s'" % entry['name'])
continue
print "Setting summary for '%s'" % person
bio = entry['bio']
bio = bio.replace('\n', '\n\n')
person.summary = bio
person.save()
|
<commit_before><commit_msg>Add import script to read in profile json.<commit_after>from warnings import warn
import json
from django.core.management.base import LabelCommand, CommandError
from pombola.core.models import Person
# Expected JSON is an array of entries:
#
# [
# {
# "name": "John Smith",
# "bio": "Blah blah"
# },
# ....
# ]
class Command(LabelCommand):
help = 'Set profiles (in summary field)'
args = '<profile JSON file>'
def handle_label(self, input_filename, **options):
input_entries = json.loads( open(input_filename, 'r').read() )
for entry in input_entries:
try:
person = Person.objects.get(legal_name=entry['name'])
except Person.DoesNotExist:
warn("Could not find person matching '%s'" % entry['name'])
continue
print "Setting summary for '%s'" % person
bio = entry['bio']
bio = bio.replace('\n', '\n\n')
person.summary = bio
person.save()
|
|
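An example of preparing an input file in the shape the command expects; the file name and entries are illustrative, and the command name is taken from the module path above.

# Illustration only: build a profiles.json accepted by the command above.
import json
entries = [{"name": "John Smith", "bio": "First paragraph.\nSecond paragraph."}]
with open("profiles.json", "w") as f:
    json.dump(entries, f)
# Then run: python manage.py south_africa_import_bios_from_json profiles.json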
f1fc422ef4d300fc1f4e47f26e03e762c7f7102d
|
meltingpot/python/utils/substrates/map_helpers.py
|
meltingpot/python/utils/substrates/map_helpers.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools to help with parsing and procedurally generating ascii maps."""
from collections.abc import Mapping, Sequence
from typing import Any, Union
def a_or_b_with_odds(a_descriptor: Union[str, Mapping[str, Any]],
b_descriptor: Union[str, Mapping[str, Any]],
odds: Sequence[int]) -> Mapping[str, Any]:
"""Return a versus b with specified odds.
Args:
a_descriptor: One possibility. May be either a string or a dict that can
be read by the map parser.
b_descriptor: The other possibility. May be either a string or a dict that
can be read by the map parser.
odds: odds[0] is the number of outcomes where a is returned. odds[1] is
the number of outcomes where b is returned. Thus the probability of
returning a is odds[0] / sum(odds) and the probability of returning
b is odds[1] / sum(odds).
Returns:
The dict descriptor that can be used with the map parser to sample either
a or b at the specified odds.
"""
a_odds, b_odds = odds
choices = [a_descriptor] * a_odds + [b_descriptor] * b_odds
return {"type": "choice", "list": choices}
|
Add map helpers for use in future melting pot version.
|
Add map helpers for use in future melting pot version.
PiperOrigin-RevId: 485877335
Change-Id: I852247fa319f3aefb0cf82a914df674254001e3b
|
Python
|
apache-2.0
|
deepmind/meltingpot,deepmind/meltingpot
|
Add map helpers for use in future melting pot version.
PiperOrigin-RevId: 485877335
Change-Id: I852247fa319f3aefb0cf82a914df674254001e3b
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools to help with parsing and procedurally generating ascii maps."""
from collections.abc import Mapping, Sequence
from typing import Any, Union
def a_or_b_with_odds(a_descriptor: Union[str, Mapping[str, Any]],
b_descriptor: Union[str, Mapping[str, Any]],
odds: Sequence[int]) -> Mapping[str, Any]:
"""Return a versus b with specified odds.
Args:
a_descriptor: One possibility. May be either a string or a dict that can
be read by the map parser.
b_descriptor: The other possibility. May be either a string or a dict that
can be read by the map parser.
odds: odds[0] is the number of outcomes where a is returned. odds[1] is
the number of outcomes where b is returned. Thus the probability of
returning a is odds[0] / sum(odds) and the probability of returning
b is odds[1] / sum(odds).
Returns:
The dict descriptor that can be used with the map parser to sample either
a or b at the specified odds.
"""
a_odds, b_odds = odds
choices = [a_descriptor] * a_odds + [b_descriptor] * b_odds
return {"type": "choice", "list": choices}
|
<commit_before><commit_msg>Add map helpers for use in future melting pot version.
PiperOrigin-RevId: 485877335
Change-Id: I852247fa319f3aefb0cf82a914df674254001e3b<commit_after>
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools to help with parsing and procedurally generating ascii maps."""
from collections.abc import Mapping, Sequence
from typing import Any, Union
def a_or_b_with_odds(a_descriptor: Union[str, Mapping[str, Any]],
b_descriptor: Union[str, Mapping[str, Any]],
odds: Sequence[int]) -> Mapping[str, Any]:
"""Return a versus b with specified odds.
Args:
a_descriptor: One possibility. May be either a string or a dict that can
be read by the map parser.
b_descriptor: The other possibility. May be either a string or a dict that
can be read by the map parser.
odds: odds[0] is the number of outcomes where a is returned. odds[1] is
the number of outcomes where b is returned. Thus the probability of
returning a is odds[0] / sum(odds) and the probability of returning
b is odds[1] / sum(odds).
Returns:
The dict descriptor that can be used with the map parser to sample either
a or b at the specified odds.
"""
a_odds, b_odds = odds
choices = [a_descriptor] * a_odds + [b_descriptor] * b_odds
return {"type": "choice", "list": choices}
|
Add map helpers for use in future melting pot version.
PiperOrigin-RevId: 485877335
Change-Id: I852247fa319f3aefb0cf82a914df674254001e3b
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools to help with parsing and procedurally generating ascii maps."""
from collections.abc import Mapping, Sequence
from typing import Any, Union
def a_or_b_with_odds(a_descriptor: Union[str, Mapping[str, Any]],
b_descriptor: Union[str, Mapping[str, Any]],
odds: Sequence[int]) -> Mapping[str, Any]:
"""Return a versus b with specified odds.
Args:
a_descriptor: One possibility. May be either a string or a dict that can
be read by the map parser.
b_descriptor: The other possibility. May be either a string or a dict that
can be read by the map parser.
odds: odds[0] is the number of outcomes where a is returned. odds[1] is
the number of outcomes where b is returned. Thus the probability of
returning a is odds[0] / sum(odds) and the probability of returning
b is odds[1] / sum(odds).
Returns:
The dict descriptor that can be used with the map parser to sample either
a or b at the specified odds.
"""
a_odds, b_odds = odds
choices = [a_descriptor] * a_odds + [b_descriptor] * b_odds
return {"type": "choice", "list": choices}
|
<commit_before><commit_msg>Add map helpers for use in future melting pot version.
PiperOrigin-RevId: 485877335
Change-Id: I852247fa319f3aefb0cf82a914df674254001e3b<commit_after># Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools to help with parsing and procedurally generating ascii maps."""
from collections.abc import Mapping, Sequence
from typing import Any, Union
def a_or_b_with_odds(a_descriptor: Union[str, Mapping[str, Any]],
b_descriptor: Union[str, Mapping[str, Any]],
odds: Sequence[int]) -> Mapping[str, Any]:
"""Return a versus b with specified odds.
Args:
a_descriptor: One possibility. May be either a string or a dict that can
be read by the map parser.
b_descriptor: The other possibility. May be either a string or a dict that
can be read by the map parser.
odds: odds[0] is the number of outcomes where a is returned. odds[1] is
the number of outcomes where b is returned. Thus the probability of
returning a is odds[0] / sum(odds) and the probability of returning
b is odds[1] / sum(odds).
Returns:
The dict descriptor that can be used with the map parser to sample either
a or b at the specified odds.
"""
a_odds, b_odds = odds
choices = [a_descriptor] * a_odds + [b_descriptor] * b_odds
return {"type": "choice", "list": choices}
|
|
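A concrete call of the helper defined above; "A" and "G" are placeholder descriptors, not real map symbols.

# Illustration only.
a_or_b_with_odds("A", "G", odds=(1, 3))
# returns {"type": "choice", "list": ["A", "G", "G", "G"]},
# i.e. "A" is sampled with probability 1/4 and "G" with probability 3/4.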
3667a7c6f79228086968ccc4785dfaebd6f1928d
|
data/file_level_aggregation_test.py
|
data/file_level_aggregation_test.py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from data.file_level_aggregation import *
import numpy as np
class FileLevelAggregationTest(unittest.TestCase):
def test_flatten_lst(self):
mock_lst = ["[1,2,3]", "[]", "[4,5]"]
flattened_lst = []
for lst in mock_lst:
if not pd.isna(lst):
for e in eval(lst):
flattened_lst.append(e)
expected_results = [1, 2, 3, 4, 5]
self.assertEqual(flattened_lst, expected_results)
def test_remove_nan(self):
mock_lst = [1, 2, None, np.nan, 5]
results = []
for e in mock_lst:
if not pd.isna(e):
results.append(e)
expected_results = [1, 2, 5]
self.assertEqual(results, expected_results)
def test_date_calculation(self):
date_time = "2020-08-01T00:52:38"
previous_date = datetime.fromisoformat(date_time) - timedelta(days=1)
previous_date_str = previous_date.strftime("%Y-%m-%d")
self.assertEqual(previous_date_str, "2020-07-31")
def test_date_range(self):
start_date = datetime.fromisoformat("2020-07-28T00:00:00")
end_date = datetime.fromisoformat("2020-08-01T00:00:00")
date_range = pd.date_range(start=start_date.strftime("%Y-%m-%d"),
end=end_date.strftime("%Y-%m-%d"))\
.to_pydatetime().tolist()
expected_results = ["2020-07-28", "2020-07-29", "2020-07-30",
"2020-07-31", "2020-08-01"]
for i in range(len(date_range)):
date = date_range[i]
date_str = date.strftime("%Y-%m-%d")
self.assertEqual(date_str, expected_results[i])
if __name__ == '__main__':
unittest.main()
|
Add file level aggregation tests
|
Add file level aggregation tests
|
Python
|
apache-2.0
|
googleinterns/cl_analysis
|
Add file level aggregation tests
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from data.file_level_aggregation import *
import numpy as np
class FileLevelAggregationTest(unittest.TestCase):
def test_flatten_lst(self):
mock_lst = ["[1,2,3]", "[]", "[4,5]"]
flattened_lst = []
for lst in mock_lst:
if not pd.isna(lst):
for e in eval(lst):
flattened_lst.append(e)
expected_results = [1, 2, 3, 4, 5]
self.assertEqual(flattened_lst, expected_results)
def test_remove_nan(self):
mock_lst = [1, 2, None, np.nan, 5]
results = []
for e in mock_lst:
if not pd.isna(e):
results.append(e)
expected_results = [1, 2, 5]
self.assertEqual(results, expected_results)
def test_date_calculation(self):
date_time = "2020-08-01T00:52:38"
previous_date = datetime.fromisoformat(date_time) - timedelta(days=1)
previous_date_str = previous_date.strftime("%Y-%m-%d")
self.assertEqual(previous_date_str, "2020-07-31")
def test_date_range(self):
start_date = datetime.fromisoformat("2020-07-28T00:00:00")
end_date = datetime.fromisoformat("2020-08-01T00:00:00")
date_range = pd.date_range(start=start_date.strftime("%Y-%m-%d"),
end=end_date.strftime("%Y-%m-%d"))\
.to_pydatetime().tolist()
expected_results = ["2020-07-28", "2020-07-29", "2020-07-30",
"2020-07-31", "2020-08-01"]
for i in range(len(date_range)):
date = date_range[i]
date_str = date.strftime("%Y-%m-%d")
self.assertEqual(date_str, expected_results[i])
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add file level aggregation tests<commit_after>
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from data.file_level_aggregation import *
import numpy as np
class FileLevelAggregationTest(unittest.TestCase):
def test_flatten_lst(self):
mock_lst = ["[1,2,3]", "[]", "[4,5]"]
flattened_lst = []
for lst in mock_lst:
if not pd.isna(lst):
for e in eval(lst):
flattened_lst.append(e)
expected_results = [1, 2, 3, 4, 5]
self.assertEqual(flattened_lst, expected_results)
def test_remove_nan(self):
mock_lst = [1, 2, None, np.nan, 5]
results = []
for e in mock_lst:
if not pd.isna(e):
results.append(e)
expected_results = [1, 2, 5]
self.assertEqual(results, expected_results)
def test_date_calculation(self):
date_time = "2020-08-01T00:52:38"
previous_date = datetime.fromisoformat(date_time) - timedelta(days=1)
previous_date_str = previous_date.strftime("%Y-%m-%d")
self.assertEqual(previous_date_str, "2020-07-31")
def test_date_range(self):
start_date = datetime.fromisoformat("2020-07-28T00:00:00")
end_date = datetime.fromisoformat("2020-08-01T00:00:00")
date_range = pd.date_range(start=start_date.strftime("%Y-%m-%d"),
end=end_date.strftime("%Y-%m-%d"))\
.to_pydatetime().tolist()
expected_results = ["2020-07-28", "2020-07-29", "2020-07-30",
"2020-07-31", "2020-08-01"]
for i in range(len(date_range)):
date = date_range[i]
date_str = date.strftime("%Y-%m-%d")
self.assertEqual(date_str, expected_results[i])
if __name__ == '__main__':
unittest.main()
|
Add file level aggregation tests
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from data.file_level_aggregation import *
import numpy as np
class FileLevelAggregationTest(unittest.TestCase):
def test_flatten_lst(self):
mock_lst = ["[1,2,3]", "[]", "[4,5]"]
flattened_lst = []
for lst in mock_lst:
if not pd.isna(lst):
for e in eval(lst):
flattened_lst.append(e)
expected_results = [1, 2, 3, 4, 5]
self.assertEqual(flattened_lst, expected_results)
def test_remove_nan(self):
mock_lst = [1, 2, None, np.nan, 5]
results = []
for e in mock_lst:
if not pd.isna(e):
results.append(e)
expected_results = [1, 2, 5]
self.assertEqual(results, expected_results)
def test_date_calculation(self):
date_time = "2020-08-01T00:52:38"
previous_date = datetime.fromisoformat(date_time) - timedelta(days=1)
previous_date_str = previous_date.strftime("%Y-%m-%d")
self.assertEqual(previous_date_str, "2020-07-31")
def test_date_range(self):
start_date = datetime.fromisoformat("2020-07-28T00:00:00")
end_date = datetime.fromisoformat("2020-08-01T00:00:00")
date_range = pd.date_range(start=start_date.strftime("%Y-%m-%d"),
end=end_date.strftime("%Y-%m-%d"))\
.to_pydatetime().tolist()
expected_results = ["2020-07-28", "2020-07-29", "2020-07-30",
"2020-07-31", "2020-08-01"]
for i in range(len(date_range)):
date = date_range[i]
date_str = date.strftime("%Y-%m-%d")
self.assertEqual(date_str, expected_results[i])
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add file level aggregation tests<commit_after># Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from data.file_level_aggregation import *
import numpy as np
class FileLevelAggregationTest(unittest.TestCase):
def test_flatten_lst(self):
mock_lst = ["[1,2,3]", "[]", "[4,5]"]
flattened_lst = []
for lst in mock_lst:
if not pd.isna(lst):
for e in eval(lst):
flattened_lst.append(e)
expected_results = [1, 2, 3, 4, 5]
self.assertEqual(flattened_lst, expected_results)
def test_remove_nan(self):
mock_lst = [1, 2, None, np.nan, 5]
results = []
for e in mock_lst:
if not pd.isna(e):
results.append(e)
expected_results = [1, 2, 5]
self.assertEqual(results, expected_results)
def test_date_calculation(self):
date_time = "2020-08-01T00:52:38"
previous_date = datetime.fromisoformat(date_time) - timedelta(days=1)
previous_date_str = previous_date.strftime("%Y-%m-%d")
self.assertEqual(previous_date_str, "2020-07-31")
def test_date_range(self):
start_date = datetime.fromisoformat("2020-07-28T00:00:00")
end_date = datetime.fromisoformat("2020-08-01T00:00:00")
date_range = pd.date_range(start=start_date.strftime("%Y-%m-%d"),
end=end_date.strftime("%Y-%m-%d"))\
.to_pydatetime().tolist()
expected_results = ["2020-07-28", "2020-07-29", "2020-07-30",
"2020-07-31", "2020-08-01"]
for i in range(len(date_range)):
date = date_range[i]
date_str = date.strftime("%Y-%m-%d")
self.assertEqual(date_str, expected_results[i])
if __name__ == '__main__':
unittest.main()
|
|
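The flattening idiom exercised by test_flatten_lst, written as a stand-alone helper for clarity. This is a sketch; the real logic lives in data/file_level_aggregation.py and may differ.

# Sketch only: flatten a column of stringified lists, skipping NaN entries.
import pandas as pd

def flatten_serialized_lists(column):
    flattened = []
    for lst in column:
        if not pd.isna(lst):
            flattened.extend(eval(lst))  # entries are list literals stored as strings
    return flattened

assert flatten_serialized_lists(["[1,2,3]", "[]", "[4,5]"]) == [1, 2, 3, 4, 5]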
60524d1681725a84491fd1f0de79d7557c4412ae
|
tests/data_layer/test_model_loader.py
|
tests/data_layer/test_model_loader.py
|
from pytest import raises
from smif.data_layer.model_loader import ModelLoader
def test_path_not_found():
"""Should error if module file is missing at path
"""
loader = ModelLoader()
with raises(FileNotFoundError) as ex:
loader.load({
'name': 'test',
'path': '/path/to/model.py',
'classname': 'WaterSupplySectorModel'
})
msg = "Cannot find '/path/to/model.py' for the 'test' model"
assert msg in str(ex)
|
Test FileNotFound error from ModelLoader
|
Test FileNotFound error from ModelLoader
|
Python
|
mit
|
tomalrussell/smif,willu47/smif,willu47/smif,nismod/smif,tomalrussell/smif,nismod/smif,nismod/smif,nismod/smif,tomalrussell/smif,willu47/smif,willu47/smif,tomalrussell/smif
|
Test FileNotFound error from ModelLoader
|
from pytest import raises
from smif.data_layer.model_loader import ModelLoader
def test_path_not_found():
"""Should error if module file is missing at path
"""
loader = ModelLoader()
with raises(FileNotFoundError) as ex:
loader.load({
'name': 'test',
'path': '/path/to/model.py',
'classname': 'WaterSupplySectorModel'
})
msg = "Cannot find '/path/to/model.py' for the 'test' model"
assert msg in str(ex)
|
<commit_before><commit_msg>Test FileNotFound error from ModelLoader<commit_after>
|
from pytest import raises
from smif.data_layer.model_loader import ModelLoader
def test_path_not_found():
"""Should error if module file is missing at path
"""
loader = ModelLoader()
with raises(FileNotFoundError) as ex:
loader.load({
'name': 'test',
'path': '/path/to/model.py',
'classname': 'WaterSupplySectorModel'
})
msg = "Cannot find '/path/to/model.py' for the 'test' model"
assert msg in str(ex)
|
Test FileNotFound error from ModelLoader
from pytest import raises
from smif.data_layer.model_loader import ModelLoader
def test_path_not_found():
"""Should error if module file is missing at path
"""
loader = ModelLoader()
with raises(FileNotFoundError) as ex:
loader.load({
'name': 'test',
'path': '/path/to/model.py',
'classname': 'WaterSupplySectorModel'
})
msg = "Cannot find '/path/to/model.py' for the 'test' model"
assert msg in str(ex)
|
<commit_before><commit_msg>Test FileNotFound error from ModelLoader<commit_after>from pytest import raises
from smif.data_layer.model_loader import ModelLoader
def test_path_not_found():
"""Should error if module file is missing at path
"""
loader = ModelLoader()
with raises(FileNotFoundError) as ex:
loader.load({
'name': 'test',
'path': '/path/to/model.py',
'classname': 'WaterSupplySectorModel'
})
msg = "Cannot find '/path/to/model.py' for the 'test' model"
assert msg in str(ex)
|
|
02d1c7a49b0813de4d2f7e7533f81d70f094175e
|
billjobs/tests/tests_user_admin_api.py
|
billjobs/tests/tests_user_admin_api.py
|
from django.test import TestCase
from django.contrib.auth.models import User
from rest_framework import status
from rest_framework.test import APIClient, APIRequestFactory, \
force_authenticate
from billjobs.views import UserAdmin
class UserAdminAPI(TestCase):
""" Test User Admin API REST endpoint """
fixtures=['account_test.yaml']
def setUp(self):
self.client = APIClient()
self.factory = APIRequestFactory()
self.admin = User.objects.get(pk=1)
def test_admin_list_user(self):
request = self.factory.get('/billjobs/users/')
force_authenticate(request, user=self.admin)
view = UserAdmin.as_view()
response = view(request)
self.assertEqual(response.status_code, status.HTTP_200_OK)
|
Add test for user admin api
|
Add test for user admin api
|
Python
|
mit
|
ioO/billjobs
|
Add test for user admin api
|
from django.test import TestCase
from django.contrib.auth.models import User
from rest_framework import status
from rest_framework.test import APIClient, APIRequestFactory, \
force_authenticate
from billjobs.views import UserAdmin
class UserAdminAPI(TestCase):
""" Test User Admin API REST endpoint """
fixtures=['account_test.yaml']
def setUp(self):
self.client = APIClient()
self.factory = APIRequestFactory()
self.admin = User.objects.get(pk=1)
def test_admin_list_user(self):
request = self.factory.get('/billjobs/users/')
force_authenticate(request, user=self.admin)
view = UserAdmin.as_view()
response = view(request)
self.assertEqual(response.status_code, status.HTTP_200_OK)
|
<commit_before><commit_msg>Add test for user admin api<commit_after>
|
from django.test import TestCase
from django.contrib.auth.models import User
from rest_framework import status
from rest_framework.test import APIClient, APIRequestFactory, \
force_authenticate
from billjobs.views import UserAdmin
class UserAdminAPI(TestCase):
""" Test User Admin API REST endpoint """
fixtures=['account_test.yaml']
def setUp(self):
self.client = APIClient()
self.factory = APIRequestFactory()
self.admin = User.objects.get(pk=1)
def test_admin_list_user(self):
request = self.factory.get('/billjobs/users/')
force_authenticate(request, user=self.admin)
view = UserAdmin.as_view()
response = view(request)
self.assertEqual(response.status_code, status.HTTP_200_OK)
|
Add test for user admin api
from django.test import TestCase
from django.contrib.auth.models import User
from rest_framework import status
from rest_framework.test import APIClient, APIRequestFactory, \
force_authenticate
from billjobs.views import UserAdmin
class UserAdminAPI(TestCase):
""" Test User Admin API REST endpoint """
fixtures=['account_test.yaml']
def setUp(self):
self.client = APIClient()
self.factory = APIRequestFactory()
self.admin = User.objects.get(pk=1)
def test_admin_list_user(self):
request = self.factory.get('/billjobs/users/')
force_authenticate(request, user=self.admin)
view = UserAdmin.as_view()
response = view(request)
self.assertEqual(response.status_code, status.HTTP_200_OK)
|
<commit_before><commit_msg>Add test for user admin api<commit_after>from django.test import TestCase
from django.contrib.auth.models import User
from rest_framework import status
from rest_framework.test import APIClient, APIRequestFactory, \
force_authenticate
from billjobs.views import UserAdmin
class UserAdminAPI(TestCase):
""" Test User Admin API REST endpoint """
fixtures=['account_test.yaml']
def setUp(self):
self.client = APIClient()
self.factory = APIRequestFactory()
self.admin = User.objects.get(pk=1)
def test_admin_list_user(self):
request = self.factory.get('/billjobs/users/')
force_authenticate(request, user=self.admin)
view = UserAdmin.as_view()
response = view(request)
self.assertEqual(response.status_code, status.HTTP_200_OK)
|
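Editor's note (not part of the record above): a natural companion to the admin test is checking that the same endpoint rejects unauthenticated callers. The sketch below reuses only names visible in the test (UserAdmin, APIRequestFactory, status); whether DRF answers 401 or 403 depends on the view's authentication and permission classes, which are not shown, so the assertion is an assumption.
# Hedged sketch: anonymous request against the same endpoint.
from rest_framework import status
from rest_framework.test import APIRequestFactory
from billjobs.views import UserAdmin

def test_anonymous_cannot_list_users():
    factory = APIRequestFactory()
    request = factory.get('/billjobs/users/')   # no force_authenticate here
    response = UserAdmin.as_view()(request)
    # 401 or 403, depending on the configured auth/permission classes.
    assert response.status_code in (status.HTTP_401_UNAUTHORIZED,
                                    status.HTTP_403_FORBIDDEN)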
|
5711ff4b8cf17dabc5c0a35fa8ccc52f8e4a6322
|
tools/find-iam-user-for-access-key.py
|
tools/find-iam-user-for-access-key.py
|
import boto.iam
import sys
TARGET_ACCESS_KEY = sys.argv[1]
iam = boto.connect_iam()
users = iam.get_all_users('/')['list_users_response']['list_users_result']['users']
def find_key():
for user in users:
for key_result in iam.get_all_access_keys(user['user_name'])['list_access_keys_response']['list_access_keys_result']['access_key_metadata']:
aws_access_key = key_result['access_key_id']
if aws_access_key == TARGET_ACCESS_KEY:
print 'Target key belongs to:'
print 'user : ' + user['user_name']
return True
return False
if not find_key():
print 'Did not find access key (' + TARGET_ACCESS_KEY + ') in ' + str(len(users)) + ' IAM users.'
|
Add tool to find users for compromised keys on aws
|
Add tool to find users for compromised keys on aws
|
Python
|
agpl-3.0
|
cyplo/dotfiles,cyplo/dotfiles
|
Add tool to find users for compromised keys on aws
|
import boto.iam
import sys
TARGET_ACCESS_KEY = sys.argv[1]
iam = boto.connect_iam()
users = iam.get_all_users('/')['list_users_response']['list_users_result']['users']
def find_key():
for user in users:
for key_result in iam.get_all_access_keys(user['user_name'])['list_access_keys_response']['list_access_keys_result']['access_key_metadata']:
aws_access_key = key_result['access_key_id']
if aws_access_key == TARGET_ACCESS_KEY:
print 'Target key belongs to:'
print 'user : ' + user['user_name']
return True
return False
if not find_key():
print 'Did not find access key (' + TARGET_ACCESS_KEY + ') in ' + str(len(users)) + ' IAM users.'
|
<commit_before><commit_msg>Add tool to find users for compromised keys on aws<commit_after>
|
import boto.iam
import sys
TARGET_ACCESS_KEY = sys.argv[1]
iam = boto.connect_iam()
users = iam.get_all_users('/')['list_users_response']['list_users_result']['users']
def find_key():
for user in users:
for key_result in iam.get_all_access_keys(user['user_name'])['list_access_keys_response']['list_access_keys_result']['access_key_metadata']:
aws_access_key = key_result['access_key_id']
if aws_access_key == TARGET_ACCESS_KEY:
print 'Target key belongs to:'
print 'user : ' + user['user_name']
return True
return False
if not find_key():
print 'Did not find access key (' + TARGET_ACCESS_KEY + ') in ' + str(len(users)) + ' IAM users.'
|
Add tool to find users for compromised keys on awsimport boto.iam
import sys
TARGET_ACCESS_KEY = sys.argv[1]
iam = boto.connect_iam()
users = iam.get_all_users('/')['list_users_response']['list_users_result']['users']
def find_key():
for user in users:
for key_result in iam.get_all_access_keys(user['user_name'])['list_access_keys_response']['list_access_keys_result']['access_key_metadata']:
aws_access_key = key_result['access_key_id']
if aws_access_key == TARGET_ACCESS_KEY:
print 'Target key belongs to:'
print 'user : ' + user['user_name']
return True
return False
if not find_key():
print 'Did not find access key (' + TARGET_ACCESS_KEY + ') in ' + str(len(users)) + ' IAM users.'
|
<commit_before><commit_msg>Add tool to find users for compromised keys on aws<commit_after>import boto.iam
import sys
TARGET_ACCESS_KEY = sys.argv[1]
iam = boto.connect_iam()
users = iam.get_all_users('/')['list_users_response']['list_users_result']['users']
def find_key():
for user in users:
for key_result in iam.get_all_access_keys(user['user_name'])['list_access_keys_response']['list_access_keys_result']['access_key_metadata']:
aws_access_key = key_result['access_key_id']
if aws_access_key == TARGET_ACCESS_KEY:
print 'Target key belongs to:'
print 'user : ' + user['user_name']
return True
return False
if not find_key():
print 'Did not find access key (' + TARGET_ACCESS_KEY + ') in ' + str(len(users)) + ' IAM users.'
|
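Editor's note (not part of the record above): the script targets the legacy boto package and uses Python 2 print statements. A rough boto3 equivalent is sketched below for readers on current tooling; the IAM client calls (get_paginator('list_users'), list_access_keys) and field names are boto3's, while the surrounding script structure is assumed.
# Hedged sketch: boto3 version of the same lookup.
import sys
import boto3

target_key = sys.argv[1]
iam = boto3.client('iam')

def find_key():
    for page in iam.get_paginator('list_users').paginate():
        for user in page['Users']:
            listed = iam.list_access_keys(UserName=user['UserName'])
            for key in listed['AccessKeyMetadata']:
                if key['AccessKeyId'] == target_key:
                    print('Target key belongs to user: ' + user['UserName'])
                    return True
    return False

if not find_key():
    print('Did not find access key ' + target_key)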
|
d6f6d41665f58e68833b57d8b0d04d113f2c86a9
|
ideascube/conf/idb_jor_zaatari.py
|
ideascube/conf/idb_jor_zaatari.py
|
"""Ideaxbox for Zaatari, Jordan"""
from .idb import * # noqa
from django.utils.translation import ugettext_lazy as _
IDEASCUBE_PLACE_NAME = _("city")
COUNTRIES_FIRST = ['SY', 'JO']
TIME_ZONE = 'Asia/Amman'
LANGUAGE_CODE = 'ar'
LOAN_DURATION = 14
MONITORING_ENTRY_EXPORT_FIELDS = ['serial', 'user_id', 'birth_year', 'gender']
USER_FORM_FIELDS = (
('Ideasbox', ['serial', 'box_awareness']),
(_('Personal informations'), ['short_name', 'full_name', 'birth_year', 'gender', 'id_card_number']), # noqa
(_('Family'), ['marital_status', 'family_status', 'children_under_12', 'children_under_18', 'children_above_18']), # noqa
(_('In the town'), ['current_occupation', 'school_level']),
(_('Language skills'), ['en_level']),
)
|
"""Ideaxbox for Zaatari, Jordan"""
from .idb_jor_azraq import * # noqa
ENTRY_ACTIVITY_CHOICES = []
|
Make zaatari import from azraq
|
Make zaatari import from azraq
|
Python
|
agpl-3.0
|
ideascube/ideascube,ideascube/ideascube,ideascube/ideascube,ideascube/ideascube
|
"""Ideaxbox for Zaatari, Jordan"""
from .idb import * # noqa
from django.utils.translation import ugettext_lazy as _
IDEASCUBE_PLACE_NAME = _("city")
COUNTRIES_FIRST = ['SY', 'JO']
TIME_ZONE = 'Asia/Amman'
LANGUAGE_CODE = 'ar'
LOAN_DURATION = 14
MONITORING_ENTRY_EXPORT_FIELDS = ['serial', 'user_id', 'birth_year', 'gender']
USER_FORM_FIELDS = (
('Ideasbox', ['serial', 'box_awareness']),
(_('Personal informations'), ['short_name', 'full_name', 'birth_year', 'gender', 'id_card_number']), # noqa
(_('Family'), ['marital_status', 'family_status', 'children_under_12', 'children_under_18', 'children_above_18']), # noqa
(_('In the town'), ['current_occupation', 'school_level']),
(_('Language skills'), ['en_level']),
)
Make zaatari import from azraq
|
"""Ideaxbox for Zaatari, Jordan"""
from .idb_jor_azraq import * # noqa
ENTRY_ACTIVITY_CHOICES = []
|
<commit_before>"""Ideaxbox for Zaatari, Jordan"""
from .idb import * # noqa
from django.utils.translation import ugettext_lazy as _
IDEASCUBE_PLACE_NAME = _("city")
COUNTRIES_FIRST = ['SY', 'JO']
TIME_ZONE = 'Asia/Amman'
LANGUAGE_CODE = 'ar'
LOAN_DURATION = 14
MONITORING_ENTRY_EXPORT_FIELDS = ['serial', 'user_id', 'birth_year', 'gender']
USER_FORM_FIELDS = (
('Ideasbox', ['serial', 'box_awareness']),
(_('Personal informations'), ['short_name', 'full_name', 'birth_year', 'gender', 'id_card_number']), # noqa
(_('Family'), ['marital_status', 'family_status', 'children_under_12', 'children_under_18', 'children_above_18']), # noqa
(_('In the town'), ['current_occupation', 'school_level']),
(_('Language skills'), ['en_level']),
)
<commit_msg>Make zaatari import from azraq<commit_after>
|
"""Ideaxbox for Zaatari, Jordan"""
from .idb_jor_azraq import * # noqa
ENTRY_ACTIVITY_CHOICES = []
|
"""Ideaxbox for Zaatari, Jordan"""
from .idb import * # noqa
from django.utils.translation import ugettext_lazy as _
IDEASCUBE_PLACE_NAME = _("city")
COUNTRIES_FIRST = ['SY', 'JO']
TIME_ZONE = 'Asia/Amman'
LANGUAGE_CODE = 'ar'
LOAN_DURATION = 14
MONITORING_ENTRY_EXPORT_FIELDS = ['serial', 'user_id', 'birth_year', 'gender']
USER_FORM_FIELDS = (
('Ideasbox', ['serial', 'box_awareness']),
(_('Personal informations'), ['short_name', 'full_name', 'birth_year', 'gender', 'id_card_number']), # noqa
(_('Family'), ['marital_status', 'family_status', 'children_under_12', 'children_under_18', 'children_above_18']), # noqa
(_('In the town'), ['current_occupation', 'school_level']),
(_('Language skills'), ['en_level']),
)
Make zaatari import from azraq"""Ideaxbox for Zaatari, Jordan"""
from .idb_jor_azraq import * # noqa
ENTRY_ACTIVITY_CHOICES = []
|
<commit_before>"""Ideaxbox for Zaatari, Jordan"""
from .idb import * # noqa
from django.utils.translation import ugettext_lazy as _
IDEASCUBE_PLACE_NAME = _("city")
COUNTRIES_FIRST = ['SY', 'JO']
TIME_ZONE = 'Asia/Amman'
LANGUAGE_CODE = 'ar'
LOAN_DURATION = 14
MONITORING_ENTRY_EXPORT_FIELDS = ['serial', 'user_id', 'birth_year', 'gender']
USER_FORM_FIELDS = (
('Ideasbox', ['serial', 'box_awareness']),
(_('Personal informations'), ['short_name', 'full_name', 'birth_year', 'gender', 'id_card_number']), # noqa
(_('Family'), ['marital_status', 'family_status', 'children_under_12', 'children_under_18', 'children_above_18']), # noqa
(_('In the town'), ['current_occupation', 'school_level']),
(_('Language skills'), ['en_level']),
)
<commit_msg>Make zaatari import from azraq<commit_after>"""Ideaxbox for Zaatari, Jordan"""
from .idb_jor_azraq import * # noqa
ENTRY_ACTIVITY_CHOICES = []
|
af5c2b786f83532f1df8f72cc5074ce998ba69cb
|
examples/mayavi/spherical_harmonics.py
|
examples/mayavi/spherical_harmonics.py
|
from enthought.mayavi import mlab
import numpy as np
from scipy.special import sph_harm
# Create a sphere
r = 0.3
pi = np.pi
cos = np.cos
sin = np.sin
phi, theta = np.mgrid[0:pi:101j, 0:2*pi:101j]
x = r*sin(phi)*cos(theta)
y = r*sin(phi)*sin(theta)
z = r*cos(phi)
mlab.figure(1, bgcolor=(1, 1, 1), fgcolor=(0, 0, 0), size=(400, 300))
mlab.clf()
# Represent spherical harmonics on the surface of the sphere
for n in range(1, 6):
for m in range(n):
s = sph_harm(m, n, theta, phi).real
mlab.mesh(x-m, y-n, z, scalars=s, colormap='jet')
s[s<0] *= 0.97
s /= s.max()
mlab.mesh(s*x-m, s*y-n, s*z+1.3, scalars=s, colormap='Spectral')
mlab.view(90, 70, 6.2, (-1.3, -2.9, 0.25))
mlab.draw()
|
Add a spherical harmonics example
|
Add a spherical harmonics example
|
Python
|
bsd-3-clause
|
liulion/mayavi,dmsurti/mayavi,alexandreleroux/mayavi,alexandreleroux/mayavi,dmsurti/mayavi,liulion/mayavi
|
Add a spherical harmonics example
|
from enthought.mayavi import mlab
import numpy as np
from scipy.special import sph_harm
# Create a sphere
r = 0.3
pi = np.pi
cos = np.cos
sin = np.sin
phi, theta = np.mgrid[0:pi:101j, 0:2*pi:101j]
x = r*sin(phi)*cos(theta)
y = r*sin(phi)*sin(theta)
z = r*cos(phi)
mlab.figure(1, bgcolor=(1, 1, 1), fgcolor=(0, 0, 0), size=(400, 300))
mlab.clf()
# Represent spherical harmonics on the surface of the sphere
for n in range(1, 6):
for m in range(n):
s = sph_harm(m, n, theta, phi).real
mlab.mesh(x-m, y-n, z, scalars=s, colormap='jet')
s[s<0] *= 0.97
s /= s.max()
mlab.mesh(s*x-m, s*y-n, s*z+1.3, scalars=s, colormap='Spectral')
mlab.view(90, 70, 6.2, (-1.3, -2.9, 0.25))
mlab.draw()
|
<commit_before><commit_msg>Add a spherical harmonics example<commit_after>
|
from enthought.mayavi import mlab
import numpy as np
from scipy.special import sph_harm
# Create a sphere
r = 0.3
pi = np.pi
cos = np.cos
sin = np.sin
phi, theta = np.mgrid[0:pi:101j, 0:2*pi:101j]
x = r*sin(phi)*cos(theta)
y = r*sin(phi)*sin(theta)
z = r*cos(phi)
mlab.figure(1, bgcolor=(1, 1, 1), fgcolor=(0, 0, 0), size=(400, 300))
mlab.clf()
# Represent spherical harmonics on the surface of the sphere
for n in range(1, 6):
for m in range(n):
s = sph_harm(m, n, theta, phi).real
mlab.mesh(x-m, y-n, z, scalars=s, colormap='jet')
s[s<0] *= 0.97
s /= s.max()
mlab.mesh(s*x-m, s*y-n, s*z+1.3, scalars=s, colormap='Spectral')
mlab.view(90, 70, 6.2, (-1.3, -2.9, 0.25))
mlab.draw()
|
Add a spherical harmonics example
from enthought.mayavi import mlab
import numpy as np
from scipy.special import sph_harm
# Create a sphere
r = 0.3
pi = np.pi
cos = np.cos
sin = np.sin
phi, theta = np.mgrid[0:pi:101j, 0:2*pi:101j]
x = r*sin(phi)*cos(theta)
y = r*sin(phi)*sin(theta)
z = r*cos(phi)
mlab.figure(1, bgcolor=(1, 1, 1), fgcolor=(0, 0, 0), size=(400, 300))
mlab.clf()
# Represent spherical harmonics on the surface of the sphere
for n in range(1, 6):
for m in range(n):
s = sph_harm(m, n, theta, phi).real
mlab.mesh(x-m, y-n, z, scalars=s, colormap='jet')
s[s<0] *= 0.97
s /= s.max()
mlab.mesh(s*x-m, s*y-n, s*z+1.3, scalars=s, colormap='Spectral')
mlab.view(90, 70, 6.2, (-1.3, -2.9, 0.25))
mlab.draw()
|
<commit_before><commit_msg>Add a spherical harmonics example<commit_after>
from enthought.mayavi import mlab
import numpy as np
from scipy.special import sph_harm
# Create a sphere
r = 0.3
pi = np.pi
cos = np.cos
sin = np.sin
phi, theta = np.mgrid[0:pi:101j, 0:2*pi:101j]
x = r*sin(phi)*cos(theta)
y = r*sin(phi)*sin(theta)
z = r*cos(phi)
mlab.figure(1, bgcolor=(1, 1, 1), fgcolor=(0, 0, 0), size=(400, 300))
mlab.clf()
# Represent spherical harmonics on the surface of the sphere
for n in range(1, 6):
for m in range(n):
s = sph_harm(m, n, theta, phi).real
mlab.mesh(x-m, y-n, z, scalars=s, colormap='jet')
s[s<0] *= 0.97
s /= s.max()
mlab.mesh(s*x-m, s*y-n, s*z+1.3, scalars=s, colormap='Spectral')
mlab.view(90, 70, 6.2, (-1.3, -2.9, 0.25))
mlab.draw()
|
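Editor's note (not part of the record above): the grids follow SciPy's sph_harm(m, n, theta, phi) convention, with theta azimuthal on [0, 2*pi] and phi polar on [0, pi]. A quick, Mayavi-free sanity check of that convention is the constant harmonic Y_0^0 = 1/(2*sqrt(pi)); the snippet is illustrative only.
# Hedged sanity check: Y_0^0 is constant and real.
import numpy as np
from scipy.special import sph_harm

value = sph_harm(0, 0, 0.7, 1.2)            # any angles give the same value
expected = 1.0 / (2.0 * np.sqrt(np.pi))     # ~0.2820947918
assert np.isclose(value.real, expected) and np.isclose(value.imag, 0.0)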
|
df1a411c7bd818b741e05edc9d0a94187c3ea102
|
numba/tests/test_array_manipulation.py
|
numba/tests/test_array_manipulation.py
|
from __future__ import print_function
import unittest
import numpy as np
from numba.compiler import compile_isolated, Flags
from numba import types, utils
from numba.tests import usecases
enable_pyobj_flags = Flags()
enable_pyobj_flags.set("enable_pyobject")
force_pyobj_flags = Flags()
force_pyobj_flags.set("force_pyobject")
def reshape_array(a, control):
return (a.reshape(3, 3) == control).all()
def flatten_array(a, control):
return (a.flatten() == control).all()
def transpose_array(a, control):
return (a.transpose() == control).all()
def convert_array(a, control):
return (a.astype(dtype='f4') == control).all()
class TestArrayManipulation(unittest.TestCase):
def test_reshape_array(self):
pyfunc = reshape_array
arraytype1 = types.Array(types.int32, 1, 'C')
arraytype2 = types.Array(types.int32, 2, 'C')
cr = compile_isolated(pyfunc, (arraytype1, arraytype2))
cfunc = cr.entry_point
a = np.arange(9)
control = np.arange(9).reshape(3, 3)
self.assertTrue(cfunc(a, control))
def test_flatten_array(self):
pyfunc = flatten_array
arraytype1 = types.Array(types.int32, 2, 'C')
arraytype2 = types.Array(types.int32, 1, 'C')
cr = compile_isolated(pyfunc, (arraytype1, arraytype2))
cfunc = cr.entry_point
a = np.arange(9).reshape(3, 3)
control = np.arange(9).reshape(3, 3).flatten()
self.assertTrue(cfunc(a, control))
def test_transpose_array(self):
pyfunc = transpose_array
arraytype1 = types.Array(types.int32, 2, 'C')
arraytype2 = types.Array(types.int32, 2, 'C')
cr = compile_isolated(pyfunc, (arraytype1, arraytype2))
cfunc = cr.entry_point
a = np.arange(9).reshape(3, 3)
control = np.arange(9).reshape(3, 3).transpose()
self.assertTrue(cfunc(a, control))
def test_convert_array(self):
pyfunc = convert_array
arraytype1 = types.Array(types.int32, 1, 'C')
arraytype2 = types.Array(types.float32, 1, 'C')
cr = compile_isolated(pyfunc, (arraytype1, arraytype2))
cfunc = cr.entry_point
a = np.arange(9, dtype='i4')
control = np.arange(9, dtype='f4')
self.assertTrue(cfunc(a, control))
if __name__ == '__main__':
unittest.main()
|
Add tests for numpy array manipulation
|
Add tests for numpy array manipulation
|
Python
|
bsd-2-clause
|
GaZ3ll3/numba,pombredanne/numba,jriehl/numba,pitrou/numba,pitrou/numba,GaZ3ll3/numba,pitrou/numba,sklam/numba,seibert/numba,numba/numba,sklam/numba,gdementen/numba,ssarangi/numba,sklam/numba,IntelLabs/numba,gmarkall/numba,jriehl/numba,stuartarchibald/numba,ssarangi/numba,ssarangi/numba,stonebig/numba,cpcloud/numba,ssarangi/numba,pombredanne/numba,ssarangi/numba,numba/numba,stonebig/numba,stefanseefeld/numba,cpcloud/numba,gdementen/numba,stefanseefeld/numba,IntelLabs/numba,sklam/numba,gmarkall/numba,GaZ3ll3/numba,stuartarchibald/numba,gmarkall/numba,sklam/numba,pombredanne/numba,seibert/numba,jriehl/numba,seibert/numba,IntelLabs/numba,gdementen/numba,pitrou/numba,gmarkall/numba,GaZ3ll3/numba,stonebig/numba,stefanseefeld/numba,cpcloud/numba,IntelLabs/numba,seibert/numba,stonebig/numba,pombredanne/numba,jriehl/numba,numba/numba,jriehl/numba,numba/numba,stonebig/numba,cpcloud/numba,stuartarchibald/numba,IntelLabs/numba,gdementen/numba,gmarkall/numba,GaZ3ll3/numba,stefanseefeld/numba,seibert/numba,numba/numba,cpcloud/numba,pitrou/numba,stefanseefeld/numba,gdementen/numba,pombredanne/numba,stuartarchibald/numba,stuartarchibald/numba
|
Add tests for numpy array manipulation
|
from __future__ import print_function
import unittest
import numpy as np
from numba.compiler import compile_isolated, Flags
from numba import types, utils
from numba.tests import usecases
enable_pyobj_flags = Flags()
enable_pyobj_flags.set("enable_pyobject")
force_pyobj_flags = Flags()
force_pyobj_flags.set("force_pyobject")
def reshape_array(a, control):
return (a.reshape(3, 3) == control).all()
def flatten_array(a, control):
return (a.flatten() == control).all()
def transpose_array(a, control):
return (a.transpose() == control).all()
def convert_array(a, control):
return (a.astype(dtype='f4') == control).all()
class TestArrayManipulation(unittest.TestCase):
def test_reshape_array(self):
pyfunc = reshape_array
arraytype1 = types.Array(types.int32, 1, 'C')
arraytype2 = types.Array(types.int32, 2, 'C')
cr = compile_isolated(pyfunc, (arraytype1, arraytype2))
cfunc = cr.entry_point
a = np.arange(9)
control = np.arange(9).reshape(3, 3)
self.assertTrue(cfunc(a, control))
def test_flatten_array(self):
pyfunc = flatten_array
arraytype1 = types.Array(types.int32, 2, 'C')
arraytype2 = types.Array(types.int32, 1, 'C')
cr = compile_isolated(pyfunc, (arraytype1, arraytype2))
cfunc = cr.entry_point
a = np.arange(9).reshape(3, 3)
control = np.arange(9).reshape(3, 3).flatten()
self.assertTrue(cfunc(a, control))
def test_transpose_array(self):
pyfunc = transpose_array
arraytype1 = types.Array(types.int32, 2, 'C')
arraytype2 = types.Array(types.int32, 2, 'C')
cr = compile_isolated(pyfunc, (arraytype1, arraytype2))
cfunc = cr.entry_point
a = np.arange(9).reshape(3, 3)
control = np.arange(9).reshape(3, 3).transpose()
self.assertTrue(cfunc(a, control))
def test_convert_array(self):
pyfunc = convert_array
arraytype1 = types.Array(types.int32, 1, 'C')
arraytype2 = types.Array(types.float32, 1, 'C')
cr = compile_isolated(pyfunc, (arraytype1, arraytype2))
cfunc = cr.entry_point
a = np.arange(9, dtype='i4')
control = np.arange(9, dtype='f4')
self.assertTrue(cfunc(a, control))
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add tests for numpy array manipulation<commit_after>
|
from __future__ import print_function
import unittest
import numpy as np
from numba.compiler import compile_isolated, Flags
from numba import types, utils
from numba.tests import usecases
enable_pyobj_flags = Flags()
enable_pyobj_flags.set("enable_pyobject")
force_pyobj_flags = Flags()
force_pyobj_flags.set("force_pyobject")
def reshape_array(a, control):
return (a.reshape(3, 3) == control).all()
def flatten_array(a, control):
return (a.flatten() == control).all()
def transpose_array(a, control):
return (a.transpose() == control).all()
def convert_array(a, control):
return (a.astype(dtype='f4') == control).all()
class TestArrayManipulation(unittest.TestCase):
def test_reshape_array(self):
pyfunc = reshape_array
arraytype1 = types.Array(types.int32, 1, 'C')
arraytype2 = types.Array(types.int32, 2, 'C')
cr = compile_isolated(pyfunc, (arraytype1, arraytype2))
cfunc = cr.entry_point
a = np.arange(9)
control = np.arange(9).reshape(3, 3)
self.assertTrue(cfunc(a, control))
def test_flatten_array(self):
pyfunc = flatten_array
arraytype1 = types.Array(types.int32, 2, 'C')
arraytype2 = types.Array(types.int32, 1, 'C')
cr = compile_isolated(pyfunc, (arraytype1, arraytype2))
cfunc = cr.entry_point
a = np.arange(9).reshape(3, 3)
control = np.arange(9).reshape(3, 3).flatten()
self.assertTrue(cfunc(a, control))
def test_transpose_array(self):
pyfunc = transpose_array
arraytype1 = types.Array(types.int32, 2, 'C')
arraytype2 = types.Array(types.int32, 2, 'C')
cr = compile_isolated(pyfunc, (arraytype1, arraytype2))
cfunc = cr.entry_point
a = np.arange(9).reshape(3, 3)
control = np.arange(9).reshape(3, 3).transpose()
self.assertTrue(cfunc(a, control))
def test_convert_array(self):
pyfunc = convert_array
arraytype1 = types.Array(types.int32, 1, 'C')
arraytype2 = types.Array(types.float32, 1, 'C')
cr = compile_isolated(pyfunc, (arraytype1, arraytype2))
cfunc = cr.entry_point
a = np.arange(9, dtype='i4')
control = np.arange(9, dtype='f4')
self.assertTrue(cfunc(a, control))
if __name__ == '__main__':
unittest.main()
|
Add tests for numpy array manipulationfrom __future__ import print_function
import unittest
import numpy as np
from numba.compiler import compile_isolated, Flags
from numba import types, utils
from numba.tests import usecases
enable_pyobj_flags = Flags()
enable_pyobj_flags.set("enable_pyobject")
force_pyobj_flags = Flags()
force_pyobj_flags.set("force_pyobject")
def reshape_array(a, control):
return (a.reshape(3, 3) == control).all()
def flatten_array(a, control):
return (a.flatten() == control).all()
def transpose_array(a, control):
return (a.transpose() == control).all()
def convert_array(a, control):
return (a.astype(dtype='f4') == control).all()
class TestArrayManipulation(unittest.TestCase):
def test_reshape_array(self):
pyfunc = reshape_array
arraytype1 = types.Array(types.int32, 1, 'C')
arraytype2 = types.Array(types.int32, 2, 'C')
cr = compile_isolated(pyfunc, (arraytype1, arraytype2))
cfunc = cr.entry_point
a = np.arange(9)
control = np.arange(9).reshape(3, 3)
self.assertTrue(cfunc(a, control))
def test_flatten_array(self):
pyfunc = flatten_array
arraytype1 = types.Array(types.int32, 2, 'C')
arraytype2 = types.Array(types.int32, 1, 'C')
cr = compile_isolated(pyfunc, (arraytype1, arraytype2))
cfunc = cr.entry_point
a = np.arange(9).reshape(3, 3)
control = np.arange(9).reshape(3, 3).flatten()
self.assertTrue(cfunc(a, control))
def test_transpose_array(self):
pyfunc = transpose_array
arraytype1 = types.Array(types.int32, 2, 'C')
arraytype2 = types.Array(types.int32, 2, 'C')
cr = compile_isolated(pyfunc, (arraytype1, arraytype2))
cfunc = cr.entry_point
a = np.arange(9).reshape(3, 3)
control = np.arange(9).reshape(3, 3).transpose()
self.assertTrue(cfunc(a, control))
def test_convert_array(self):
pyfunc = convert_array
arraytype1 = types.Array(types.int32, 1, 'C')
arraytype2 = types.Array(types.float32, 1, 'C')
cr = compile_isolated(pyfunc, (arraytype1, arraytype2))
cfunc = cr.entry_point
a = np.arange(9, dtype='i4')
control = np.arange(9, dtype='f4')
self.assertTrue(cfunc(a, control))
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add tests for numpy array manipulation<commit_after>from __future__ import print_function
import unittest
import numpy as np
from numba.compiler import compile_isolated, Flags
from numba import types, utils
from numba.tests import usecases
enable_pyobj_flags = Flags()
enable_pyobj_flags.set("enable_pyobject")
force_pyobj_flags = Flags()
force_pyobj_flags.set("force_pyobject")
def reshape_array(a, control):
return (a.reshape(3, 3) == control).all()
def flatten_array(a, control):
return (a.flatten() == control).all()
def transpose_array(a, control):
return (a.transpose() == control).all()
def convert_array(a, control):
return (a.astype(dtype='f4') == control).all()
class TestArrayManipulation(unittest.TestCase):
def test_reshape_array(self):
pyfunc = reshape_array
arraytype1 = types.Array(types.int32, 1, 'C')
arraytype2 = types.Array(types.int32, 2, 'C')
cr = compile_isolated(pyfunc, (arraytype1, arraytype2))
cfunc = cr.entry_point
a = np.arange(9)
control = np.arange(9).reshape(3, 3)
self.assertTrue(cfunc(a, control))
def test_flatten_array(self):
pyfunc = flatten_array
arraytype1 = types.Array(types.int32, 2, 'C')
arraytype2 = types.Array(types.int32, 1, 'C')
cr = compile_isolated(pyfunc, (arraytype1, arraytype2))
cfunc = cr.entry_point
a = np.arange(9).reshape(3, 3)
control = np.arange(9).reshape(3, 3).flatten()
self.assertTrue(cfunc(a, control))
def test_transpose_array(self):
pyfunc = transpose_array
arraytype1 = types.Array(types.int32, 2, 'C')
arraytype2 = types.Array(types.int32, 2, 'C')
cr = compile_isolated(pyfunc, (arraytype1, arraytype2))
cfunc = cr.entry_point
a = np.arange(9).reshape(3, 3)
control = np.arange(9).reshape(3, 3).transpose()
self.assertTrue(cfunc(a, control))
def test_convert_array(self):
pyfunc = convert_array
arraytype1 = types.Array(types.int32, 1, 'C')
arraytype2 = types.Array(types.float32, 1, 'C')
cr = compile_isolated(pyfunc, (arraytype1, arraytype2))
cfunc = cr.entry_point
a = np.arange(9, dtype='i4')
control = np.arange(9, dtype='f4')
self.assertTrue(cfunc(a, control))
if __name__ == '__main__':
unittest.main()
|
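Editor's note (not part of the record above): the tests drive reshape/flatten/transpose/astype through compile_isolated, an internal compiler entry point. For the same kind of operation on the public API, a minimal sketch with @njit follows; it assumes a reasonably recent Numba where these array methods are supported in nopython mode.
# Hedged sketch: the same kind of check via the public @njit decorator.
import numpy as np
from numba import njit

@njit
def reshape_then_sum(a):
    return a.reshape((3, 3)).sum()

assert reshape_then_sum(np.arange(9)) == 36   # 0 + 1 + ... + 8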
|
a8bae714090e6cfdc7541ea46a61b8802eba5d2b
|
numba/tests/test_create_arrays.py
|
numba/tests/test_create_arrays.py
|
from __future__ import print_function
import unittest
import numpy as np
from numba.compiler import compile_isolated, Flags
from numba import types, utils
from numba.tests import usecases
enable_pyobj_flags = Flags()
enable_pyobj_flags.set("enable_pyobject")
force_pyobj_flags = Flags()
force_pyobj_flags.set("force_pyobject")
def create_array(control):
return (np.array([1,2,3]) == control).all()
def create_empty_array(control):
return (np.array([]) == control).all()
def create_arange(control):
return (np.arange(10) == control).all()
def create_empty(control):
return (np.empty(10) == control).all()
def create_ones(control):
return (np.ones(10) == control).all()
def create_zeros(control):
return (np.zeros(10) == control).all()
class TestArray(unittest.TestCase):
def test_create_arrays(self):
arraytype = types.Array(types.int32, 1, 'C')
pyfunc = create_array
cr = compile_isolated(pyfunc, (arraytype,))
cfunc = cr.entry_point
control = np.array([1,2,3])
self.assertTrue(cfunc(control))
pyfunc = create_empty_array
cr = compile_isolated(pyfunc, (arraytype,))
cfunc = cr.entry_point
control = np.array([])
self.assertTrue(cfunc(control))
pyfunc = create_arange
cr = compile_isolated(pyfunc, (arraytype,))
cfunc = cr.entry_point
control = np.arange(10)
self.assertTrue(cfunc(control))
pyfunc = create_empty
cr = compile_isolated(pyfunc, (arraytype,))
cfunc = cr.entry_point
control = np.empty(10)
self.assertTrue(cfunc(control))
pyfunc = create_ones
cr = compile_isolated(pyfunc, (arraytype,))
cfunc = cr.entry_point
control = np.ones(10)
self.assertTrue(cfunc(control))
pyfunc = create_zeros
cr = compile_isolated(pyfunc, (arraytype,))
cfunc = cr.entry_point
control = np.zeros(10)
self.assertTrue(cfunc(control))
if __name__ == '__main__':
unittest.main()
|
Add tests for numpy array creation
|
Add tests for numpy array creation
|
Python
|
bsd-2-clause
|
stuartarchibald/numba,sklam/numba,pombredanne/numba,gdementen/numba,gdementen/numba,seibert/numba,ssarangi/numba,sklam/numba,ssarangi/numba,gmarkall/numba,IntelLabs/numba,stuartarchibald/numba,IntelLabs/numba,seibert/numba,jriehl/numba,stonebig/numba,IntelLabs/numba,pombredanne/numba,cpcloud/numba,stonebig/numba,cpcloud/numba,numba/numba,numba/numba,gmarkall/numba,ssarangi/numba,GaZ3ll3/numba,numba/numba,seibert/numba,stonebig/numba,stefanseefeld/numba,cpcloud/numba,stefanseefeld/numba,gdementen/numba,pombredanne/numba,pombredanne/numba,GaZ3ll3/numba,GaZ3ll3/numba,GaZ3ll3/numba,stuartarchibald/numba,pitrou/numba,gmarkall/numba,cpcloud/numba,ssarangi/numba,sklam/numba,gdementen/numba,stefanseefeld/numba,stuartarchibald/numba,gdementen/numba,seibert/numba,jriehl/numba,sklam/numba,stefanseefeld/numba,gmarkall/numba,pitrou/numba,jriehl/numba,stuartarchibald/numba,sklam/numba,stonebig/numba,ssarangi/numba,pitrou/numba,IntelLabs/numba,IntelLabs/numba,jriehl/numba,pitrou/numba,stonebig/numba,seibert/numba,cpcloud/numba,jriehl/numba,numba/numba,stefanseefeld/numba,numba/numba,gmarkall/numba,GaZ3ll3/numba,pitrou/numba,pombredanne/numba
|
Add tests for numpy array creation
|
from __future__ import print_function
import unittest
import numpy as np
from numba.compiler import compile_isolated, Flags
from numba import types, utils
from numba.tests import usecases
enable_pyobj_flags = Flags()
enable_pyobj_flags.set("enable_pyobject")
force_pyobj_flags = Flags()
force_pyobj_flags.set("force_pyobject")
def create_array(control):
return (np.array([1,2,3]) == control).all()
def create_empty_array(control):
return (np.array([]) == control).all()
def create_arange(control):
return (np.arange(10) == control).all()
def create_empty(control):
return (np.empty(10) == control).all()
def create_ones(control):
return (np.ones(10) == control).all()
def create_zeros(control):
return (np.zeros(10) == control).all()
class TestArray(unittest.TestCase):
def test_create_arrays(self):
arraytype = types.Array(types.int32, 1, 'C')
pyfunc = create_array
cr = compile_isolated(pyfunc, (arraytype,))
cfunc = cr.entry_point
control = np.array([1,2,3])
self.assertTrue(cfunc(control))
pyfunc = create_empty_array
cr = compile_isolated(pyfunc, (arraytype,))
cfunc = cr.entry_point
control = np.array([])
self.assertTrue(cfunc(control))
pyfunc = create_arange
cr = compile_isolated(pyfunc, (arraytype,))
cfunc = cr.entry_point
control = np.arange(10)
self.assertTrue(cfunc(control))
pyfunc = create_empty
cr = compile_isolated(pyfunc, (arraytype,))
cfunc = cr.entry_point
control = np.empty(10)
self.assertTrue(cfunc(control))
pyfunc = create_ones
cr = compile_isolated(pyfunc, (arraytype,))
cfunc = cr.entry_point
control = np.ones(10)
self.assertTrue(cfunc(control))
pyfunc = create_zeros
cr = compile_isolated(pyfunc, (arraytype,))
cfunc = cr.entry_point
control = np.zeros(10)
self.assertTrue(cfunc(control))
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add tests for numpy array creation<commit_after>
|
from __future__ import print_function
import unittest
import numpy as np
from numba.compiler import compile_isolated, Flags
from numba import types, utils
from numba.tests import usecases
enable_pyobj_flags = Flags()
enable_pyobj_flags.set("enable_pyobject")
force_pyobj_flags = Flags()
force_pyobj_flags.set("force_pyobject")
def create_array(control):
return (np.array([1,2,3]) == control).all()
def create_empty_array(control):
return (np.array([]) == control).all()
def create_arange(control):
return (np.arange(10) == control).all()
def create_empty(control):
return (np.empty(10) == control).all()
def create_ones(control):
return (np.ones(10) == control).all()
def create_zeros(control):
return (np.zeros(10) == control).all()
class TestArray(unittest.TestCase):
def test_create_arrays(self):
arraytype = types.Array(types.int32, 1, 'C')
pyfunc = create_array
cr = compile_isolated(pyfunc, (arraytype,))
cfunc = cr.entry_point
control = np.array([1,2,3])
self.assertTrue(cfunc(control))
pyfunc = create_empty_array
cr = compile_isolated(pyfunc, (arraytype,))
cfunc = cr.entry_point
control = np.array([])
self.assertTrue(cfunc(control))
pyfunc = create_arange
cr = compile_isolated(pyfunc, (arraytype,))
cfunc = cr.entry_point
control = np.arange(10)
self.assertTrue(cfunc(control))
pyfunc = create_empty
cr = compile_isolated(pyfunc, (arraytype,))
cfunc = cr.entry_point
control = np.empty(10)
self.assertTrue(cfunc(control))
pyfunc = create_ones
cr = compile_isolated(pyfunc, (arraytype,))
cfunc = cr.entry_point
control = np.ones(10)
self.assertTrue(cfunc(control))
pyfunc = create_zeros
cr = compile_isolated(pyfunc, (arraytype,))
cfunc = cr.entry_point
control = np.zeros(10)
self.assertTrue(cfunc(control))
if __name__ == '__main__':
unittest.main()
|
Add tests for numpy array creationfrom __future__ import print_function
import unittest
import numpy as np
from numba.compiler import compile_isolated, Flags
from numba import types, utils
from numba.tests import usecases
enable_pyobj_flags = Flags()
enable_pyobj_flags.set("enable_pyobject")
force_pyobj_flags = Flags()
force_pyobj_flags.set("force_pyobject")
def create_array(control):
return (np.array([1,2,3]) == control).all()
def create_empty_array(control):
return (np.array([]) == control).all()
def create_arange(control):
return (np.arange(10) == control).all()
def create_empty(control):
return (np.empty(10) == control).all()
def create_ones(control):
return (np.ones(10) == control).all()
def create_zeros(control):
return (np.zeros(10) == control).all()
class TestArray(unittest.TestCase):
def test_create_arrays(self):
arraytype = types.Array(types.int32, 1, 'C')
pyfunc = create_array
cr = compile_isolated(pyfunc, (arraytype,))
cfunc = cr.entry_point
control = np.array([1,2,3])
self.assertTrue(cfunc(control))
pyfunc = create_empty_array
cr = compile_isolated(pyfunc, (arraytype,))
cfunc = cr.entry_point
control = np.array([])
self.assertTrue(cfunc(control))
pyfunc = create_arange
cr = compile_isolated(pyfunc, (arraytype,))
cfunc = cr.entry_point
control = np.arange(10)
self.assertTrue(cfunc(control))
pyfunc = create_empty
cr = compile_isolated(pyfunc, (arraytype,))
cfunc = cr.entry_point
control = np.empty(10)
self.assertTrue(cfunc(control))
pyfunc = create_ones
cr = compile_isolated(pyfunc, (arraytype,))
cfunc = cr.entry_point
control = np.ones(10)
self.assertTrue(cfunc(control))
pyfunc = create_zeros
cr = compile_isolated(pyfunc, (arraytype,))
cfunc = cr.entry_point
control = np.zeros(10)
self.assertTrue(cfunc(control))
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add tests for numpy array creation<commit_after>from __future__ import print_function
import unittest
import numpy as np
from numba.compiler import compile_isolated, Flags
from numba import types, utils
from numba.tests import usecases
enable_pyobj_flags = Flags()
enable_pyobj_flags.set("enable_pyobject")
force_pyobj_flags = Flags()
force_pyobj_flags.set("force_pyobject")
def create_array(control):
return (np.array([1,2,3]) == control).all()
def create_empty_array(control):
return (np.array([]) == control).all()
def create_arange(control):
return (np.arange(10) == control).all()
def create_empty(control):
return (np.empty(10) == control).all()
def create_ones(control):
return (np.ones(10) == control).all()
def create_zeros(control):
return (np.zeros(10) == control).all()
class TestArray(unittest.TestCase):
def test_create_arrays(self):
arraytype = types.Array(types.int32, 1, 'C')
pyfunc = create_array
cr = compile_isolated(pyfunc, (arraytype,))
cfunc = cr.entry_point
control = np.array([1,2,3])
self.assertTrue(cfunc(control))
pyfunc = create_empty_array
cr = compile_isolated(pyfunc, (arraytype,))
cfunc = cr.entry_point
control = np.array([])
self.assertTrue(cfunc(control))
pyfunc = create_arange
cr = compile_isolated(pyfunc, (arraytype,))
cfunc = cr.entry_point
control = np.arange(10)
self.assertTrue(cfunc(control))
pyfunc = create_empty
cr = compile_isolated(pyfunc, (arraytype,))
cfunc = cr.entry_point
control = np.empty(10)
self.assertTrue(cfunc(control))
pyfunc = create_ones
cr = compile_isolated(pyfunc, (arraytype,))
cfunc = cr.entry_point
control = np.ones(10)
self.assertTrue(cfunc(control))
pyfunc = create_zeros
cr = compile_isolated(pyfunc, (arraytype,))
cfunc = cr.entry_point
control = np.zeros(10)
self.assertTrue(cfunc(control))
if __name__ == '__main__':
unittest.main()
|
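Editor's note (not part of the record above): np.empty returns uninitialized memory, so the create_empty case compares two arrays whose contents are arbitrary; that equality check can only pass incidentally and would fail outright if the leftover bytes decode to NaN. A deterministic test would build the control with zeros, ones or arange instead.
# Hedged illustration of why comparing two np.empty arrays is unreliable.
import numpy as np

a = np.empty(10)
b = np.empty(10)
# (a == b).all() may be True or False depending on leftover memory,
# and NaN values never compare equal to anything.
print((a == b).all())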
|
8065ca1d6ff59272d23e71de3cd7c07c4bbd1a39
|
examples/call_later.py
|
examples/call_later.py
|
import random
import transaction
from inevitable.core import Reactor
def callback():
for i in xrange(10000):
answer = i ** 3
if random.random() > 0.00001:
reactor.call_later(seconds=0, callback=callback)
reactor = Reactor(clock=transaction)
reactor.call_later(seconds=0, callback=callback)
reactor.call_later(seconds=0, callback=callback)
reactor.run_until_idle()
|
Add a time-based example for the functionality that exists so far.
|
Add a time-based example for the functionality that exists so far.
|
Python
|
mit
|
Julian/Inevitable
|
Add a time-based example for the functionality that exists so far.
|
import random
import transaction
from inevitable.core import Reactor
def callback():
for i in xrange(10000):
answer = i ** 3
if random.random() > 0.00001:
reactor.call_later(seconds=0, callback=callback)
reactor = Reactor(clock=transaction)
reactor.call_later(seconds=0, callback=callback)
reactor.call_later(seconds=0, callback=callback)
reactor.run_until_idle()
|
<commit_before><commit_msg>Add a time-based example for the functionality that exists so far.<commit_after>
|
import random
import transaction
from inevitable.core import Reactor
def callback():
for i in xrange(10000):
answer = i ** 3
if random.random() > 0.00001:
reactor.call_later(seconds=0, callback=callback)
reactor = Reactor(clock=transaction)
reactor.call_later(seconds=0, callback=callback)
reactor.call_later(seconds=0, callback=callback)
reactor.run_until_idle()
|
Add a time-based example for the functionality that exists so far.import random
import transaction
from inevitable.core import Reactor
def callback():
for i in xrange(10000):
answer = i ** 3
if random.random() > 0.00001:
reactor.call_later(seconds=0, callback=callback)
reactor = Reactor(clock=transaction)
reactor.call_later(seconds=0, callback=callback)
reactor.call_later(seconds=0, callback=callback)
reactor.run_until_idle()
|
<commit_before><commit_msg>Add a time-based example for the functionality that exists so far.<commit_after>import random
import transaction
from inevitable.core import Reactor
def callback():
for i in xrange(10000):
answer = i ** 3
if random.random() > 0.00001:
reactor.call_later(seconds=0, callback=callback)
reactor = Reactor(clock=transaction)
reactor.call_later(seconds=0, callback=callback)
reactor.call_later(seconds=0, callback=callback)
reactor.run_until_idle()
|
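Editor's note (not part of the record above): the callback reschedules itself with a random stopping rule, so run time varies between runs. A deterministic variant using only the Reactor API visible in the example (call_later, run_until_idle) is sketched below; the hop count is an arbitrary choice.
# Hedged sketch: a self-rescheduling callback with a fixed number of hops.
import transaction
from inevitable.core import Reactor

remaining = [5]

def countdown():
    remaining[0] -= 1
    if remaining[0] > 0:
        reactor.call_later(seconds=0, callback=countdown)

reactor = Reactor(clock=transaction)
reactor.call_later(seconds=0, callback=countdown)
reactor.run_until_idle()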
|
5efcdd6f74b842992a6479ec5d7978d84d2544a7
|
jinja2_for_django.py
|
jinja2_for_django.py
|
"""
A Jinja2 template loader for Django 1.2+
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Add the following to your settings.py file. (The comma is important!)
TEMPLATE_LOADERS = ('jinja2_for_django.Loader',)
Now use your templates as usual (with render_to_response, generic views or
anything else that uses Django templates), and they will actually be
rendered by Jinja2.
See http://exyr.org/2010/Jinja-in-Django/
Author: Simon Sapin
License: BSD
"""
from django.template.loader import BaseLoader
from django.template.loaders.app_directories import app_template_dirs
from django.template import TemplateDoesNotExist
from django.core import urlresolvers
from django.conf import settings
import jinja2
class Template(jinja2.Template):
def render(self, context):
# flatten the Django Context into a single dictionary.
context_dict = {}
for d in context.dicts:
context_dict.update(d)
return super(Template, self).render(context_dict)
class Loader(BaseLoader):
is_usable = True
env = jinja2.Environment(loader=jinja2.FileSystemLoader(app_template_dirs))
env.template_class = Template
# These are available to all templates.
env.globals['url_for'] = urlresolvers.reverse
env.globals['MEDIA_URL'] = settings.MEDIA_URL
#env.globals['STATIC_URL'] = settings.STATIC_URL
def load_template(self, template_name, template_dirs=None):
try:
template = self.env.get_template(template_name)
except jinja2.TemplateNotFound:
raise TemplateDoesNotExist(template_name)
return template, template.filename
|
Add Jinja2 template loader for Django.
|
Add Jinja2 template loader for Django.
|
Python
|
bsd-3-clause
|
SimonSapin/snippets,SimonSapin/snippets
|
Add Jinja2 template loader for Django.
|
"""
A Jinja2 template loader for Django 1.2+
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Add the following to your settings.py file. (The comma is important!)
TEMPLATE_LOADERS = ('jinja2_for_django.Loader',)
Now use your templates as usual (with render_to_response, generic views or
anything else that uses Django templates), and they will actually be
rendered by Jinja2.
See http://exyr.org/2010/Jinja-in-Django/
Author: Simon Sapin
License: BSD
"""
from django.template.loader import BaseLoader
from django.template.loaders.app_directories import app_template_dirs
from django.template import TemplateDoesNotExist
from django.core import urlresolvers
from django.conf import settings
import jinja2
class Template(jinja2.Template):
def render(self, context):
# flatten the Django Context into a single dictionary.
context_dict = {}
for d in context.dicts:
context_dict.update(d)
return super(Template, self).render(context_dict)
class Loader(BaseLoader):
is_usable = True
env = jinja2.Environment(loader=jinja2.FileSystemLoader(app_template_dirs))
env.template_class = Template
# These are available to all templates.
env.globals['url_for'] = urlresolvers.reverse
env.globals['MEDIA_URL'] = settings.MEDIA_URL
#env.globals['STATIC_URL'] = settings.STATIC_URL
def load_template(self, template_name, template_dirs=None):
try:
template = self.env.get_template(template_name)
except jinja2.TemplateNotFound:
raise TemplateDoesNotExist(template_name)
return template, template.filename
|
<commit_before><commit_msg>Add Jinja2 template loader for Django.<commit_after>
|
"""
A Jinja2 template loader for Django 1.2+
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Add the following to your settings.py file. (The comma is important!)
TEMPLATE_LOADERS = ('jinja2_for_django.Loader',)
Now use your templates as usual (with render_to_response, generic views or
anything else that uses Django templates), and they will actually be
rendered by Jinja2.
See http://exyr.org/2010/Jinja-in-Django/
Author: Simon Sapin
License: BSD
"""
from django.template.loader import BaseLoader
from django.template.loaders.app_directories import app_template_dirs
from django.template import TemplateDoesNotExist
from django.core import urlresolvers
from django.conf import settings
import jinja2
class Template(jinja2.Template):
def render(self, context):
# flatten the Django Context into a single dictionary.
context_dict = {}
for d in context.dicts:
context_dict.update(d)
return super(Template, self).render(context_dict)
class Loader(BaseLoader):
is_usable = True
env = jinja2.Environment(loader=jinja2.FileSystemLoader(app_template_dirs))
env.template_class = Template
# These are available to all templates.
env.globals['url_for'] = urlresolvers.reverse
env.globals['MEDIA_URL'] = settings.MEDIA_URL
#env.globals['STATIC_URL'] = settings.STATIC_URL
def load_template(self, template_name, template_dirs=None):
try:
template = self.env.get_template(template_name)
except jinja2.TemplateNotFound:
raise TemplateDoesNotExist(template_name)
return template, template.filename
|
Add Jinja2 template loader for Django."""
A Jinja2 template loader for Django 1.2+
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Add the following to your settings.py file. (The comma is important!)
TEMPLATE_LOADERS = ('jinja2_for_django.Loader',)
Now use your templates as usual (with render_to_response, generic views or
anything else that uses Django templates), and they will actually be
rendered by Jinja2.
See http://exyr.org/2010/Jinja-in-Django/
Author: Simon Sapin
License: BSD
"""
from django.template.loader import BaseLoader
from django.template.loaders.app_directories import app_template_dirs
from django.template import TemplateDoesNotExist
from django.core import urlresolvers
from django.conf import settings
import jinja2
class Template(jinja2.Template):
def render(self, context):
# flatten the Django Context into a single dictionary.
context_dict = {}
for d in context.dicts:
context_dict.update(d)
return super(Template, self).render(context_dict)
class Loader(BaseLoader):
is_usable = True
env = jinja2.Environment(loader=jinja2.FileSystemLoader(app_template_dirs))
env.template_class = Template
# These are available to all templates.
env.globals['url_for'] = urlresolvers.reverse
env.globals['MEDIA_URL'] = settings.MEDIA_URL
#env.globals['STATIC_URL'] = settings.STATIC_URL
def load_template(self, template_name, template_dirs=None):
try:
template = self.env.get_template(template_name)
except jinja2.TemplateNotFound:
raise TemplateDoesNotExist(template_name)
return template, template.filename
|
<commit_before><commit_msg>Add Jinja2 template loader for Django.<commit_after>"""
A Jinja2 template loader for Django 1.2+
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Add the following to your settings.py file. (The comma is important!)
TEMPLATE_LOADERS = ('jinja2_for_django.Loader',)
Now use your templates as usual (with render_to_response, generic views or
anything else that uses Django templates), and they will actually be
rendered by Jinja2.
See http://exyr.org/2010/Jinja-in-Django/
Author: Simon Sapin
License: BSD
"""
from django.template.loader import BaseLoader
from django.template.loaders.app_directories import app_template_dirs
from django.template import TemplateDoesNotExist
from django.core import urlresolvers
from django.conf import settings
import jinja2
class Template(jinja2.Template):
def render(self, context):
# flatten the Django Context into a single dictionary.
context_dict = {}
for d in context.dicts:
context_dict.update(d)
return super(Template, self).render(context_dict)
class Loader(BaseLoader):
is_usable = True
env = jinja2.Environment(loader=jinja2.FileSystemLoader(app_template_dirs))
env.template_class = Template
# These are available to all templates.
env.globals['url_for'] = urlresolvers.reverse
env.globals['MEDIA_URL'] = settings.MEDIA_URL
#env.globals['STATIC_URL'] = settings.STATIC_URL
def load_template(self, template_name, template_dirs=None):
try:
template = self.env.get_template(template_name)
except jinja2.TemplateNotFound:
raise TemplateDoesNotExist(template_name)
return template, template.filename
|
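Editor's note (not part of the record above): a hedged usage sketch for the loader. The view, URL name and template file are invented for illustration; only url_for and MEDIA_URL come from the environment globals registered above. Inside home.html, expressions such as {{ url_for('home') }} and {{ MEDIA_URL }}style.css work because of those globals.
# views.py -- hypothetical project with TEMPLATE_LOADERS = ('jinja2_for_django.Loader',)
from django.shortcuts import render_to_response

def home(request):
    # 'home.html' is located via app_template_dirs and rendered by Jinja2.
    return render_to_response('home.html', {'user_name': 'Ada'})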
|
0851e5b9ae19da9f97aadfdcdbf6eac1d67abf5a
|
solvebio/test/test_beacon.py
|
solvebio/test/test_beacon.py
|
from __future__ import absolute_import
from solvebio.resource import Dataset
from .helper import SolveBioTestCase
class BeaconTests(SolveBioTestCase):
def test_beacon_request(self):
"""
Check that current Clinvar/Variants returns correct
fields for beacon
"""
dataset = Dataset.retrieve('ClinVar/Variants')
beacon = dataset.beacon(genome_build='GRCh37',
chromosome='6',
coordinate=50432798,
allele='G')
check_fields = ['query', 'exist', 'total']
for f in check_fields:
self.assertTrue(f in beacon)
"""
Check that Clinvar/Variants version 3.7.0-2015-12-06
returns true for specific case
"""
dataset = Dataset.retrieve('ClinVar/3.7.0-2015-12-06/Variants')
beacontwo = dataset.beacon(genome_build='GRCh37',
chromosome='13',
coordinate=113803460,
allele='T')
final_beacon = {'query': {'coordinate': '113803460',
'allele': 'T',
'genome_build': 'GRCh37',
'chromosome': '13'},
'total': 1,
'exist': True}
self.assertTrue(final_beacon == beacontwo)
|
Test cases for beacon requests
|
Test cases for beacon requests
|
Python
|
mit
|
solvebio/solvebio-python,solvebio/solvebio-python,solvebio/solvebio-python
|
Test cases for beacon requests
|
from __future__ import absolute_import
from solvebio.resource import Dataset
from .helper import SolveBioTestCase
class BeaconTests(SolveBioTestCase):
def test_beacon_request(self):
"""
Check that current Clinvar/Variants returns correct
fields for beacon
"""
dataset = Dataset.retrieve('ClinVar/Variants')
beacon = dataset.beacon(genome_build='GRCh37',
chromosome='6',
coordinate=50432798,
allele='G')
check_fields = ['query', 'exist', 'total']
for f in check_fields:
self.assertTrue(f in beacon)
"""
Check that Clinvar/Variants version 3.7.0-2015-12-06
returns true for specific case
"""
dataset = Dataset.retrieve('ClinVar/3.7.0-2015-12-06/Variants')
beacontwo = dataset.beacon(genome_build='GRCh37',
chromosome='13',
coordinate=113803460,
allele='T')
final_beacon = {'query': {'coordinate': '113803460',
'allele': 'T',
'genome_build': 'GRCh37',
'chromosome': '13'},
'total': 1,
'exist': True}
self.assertTrue(final_beacon == beacontwo)
|
<commit_before><commit_msg>Test cases for beacon requests<commit_after>
|
from __future__ import absolute_import
from solvebio.resource import Dataset
from .helper import SolveBioTestCase
class BeaconTests(SolveBioTestCase):
def test_beacon_request(self):
"""
Check that current Clinvar/Variants returns correct
fields for beacon
"""
dataset = Dataset.retrieve('ClinVar/Variants')
beacon = dataset.beacon(genome_build='GRCh37',
chromosome='6',
coordinate=50432798,
allele='G')
check_fields = ['query', 'exist', 'total']
for f in check_fields:
self.assertTrue(f in beacon)
"""
Check that Clinvar/Variants version 3.7.0-2015-12-06
returns true for specific case
"""
dataset = Dataset.retrieve('ClinVar/3.7.0-2015-12-06/Variants')
beacontwo = dataset.beacon(genome_build='GRCh37',
chromosome='13',
coordinate=113803460,
allele='T')
final_beacon = {'query': {'coordinate': '113803460',
'allele': 'T',
'genome_build': 'GRCh37',
'chromosome': '13'},
'total': 1,
'exist': True}
self.assertTrue(final_beacon == beacontwo)
|
Test cases for beacon requestsfrom __future__ import absolute_import
from solvebio.resource import Dataset
from .helper import SolveBioTestCase
class BeaconTests(SolveBioTestCase):
def test_beacon_request(self):
"""
Check that current Clinvar/Variants returns correct
fields for beacon
"""
dataset = Dataset.retrieve('ClinVar/Variants')
beacon = dataset.beacon(genome_build='GRCh37',
chromosome='6',
coordinate=50432798,
allele='G')
check_fields = ['query', 'exist', 'total']
for f in check_fields:
self.assertTrue(f in beacon)
"""
Check that Clinvar/Variants version 3.7.0-2015-12-06
returns true for specific case
"""
dataset = Dataset.retrieve('ClinVar/3.7.0-2015-12-06/Variants')
beacontwo = dataset.beacon(genome_build='GRCh37',
chromosome='13',
coordinate=113803460,
allele='T')
final_beacon = {'query': {'coordinate': '113803460',
'allele': 'T',
'genome_build': 'GRCh37',
'chromosome': '13'},
'total': 1,
'exist': True}
self.assertTrue(final_beacon == beacontwo)
|
<commit_before><commit_msg>Test cases for beacon requests<commit_after>from __future__ import absolute_import
from solvebio.resource import Dataset
from .helper import SolveBioTestCase
class BeaconTests(SolveBioTestCase):
def test_beacon_request(self):
"""
Check that current Clinvar/Variants returns correct
fields for beacon
"""
dataset = Dataset.retrieve('ClinVar/Variants')
beacon = dataset.beacon(genome_build='GRCh37',
chromosome='6',
coordinate=50432798,
allele='G')
check_fields = ['query', 'exist', 'total']
for f in check_fields:
self.assertTrue(f in beacon)
"""
Check that Clinvar/Variants version 3.7.0-2015-12-06
returns true for specific case
"""
dataset = Dataset.retrieve('ClinVar/3.7.0-2015-12-06/Variants')
beacontwo = dataset.beacon(genome_build='GRCh37',
chromosome='13',
coordinate=113803460,
allele='T')
final_beacon = {'query': {'coordinate': '113803460',
'allele': 'T',
'genome_build': 'GRCh37',
'chromosome': '13'},
'total': 1,
'exist': True}
self.assertTrue(final_beacon == beacontwo)
|
|
f22113f925052eba881599aae62b79fe4dd6d715
|
indra/databases/phosphosite_client.py
|
indra/databases/phosphosite_client.py
|
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
from os.path import dirname, abspath, join
from collections import namedtuple, defaultdict
from indra.util import read_unicode_csv
PhosphoSite = namedtuple('PhosphoSite',
['GENE', 'PROTEIN', 'ACC_ID', 'HU_CHR_LOC', 'MOD_RSD',
'SITE_GRP_ID', 'ORGANISM', 'MW_kD', 'DOMAIN',
'SITE_7_AA', 'LT_LIT', 'MS_LIT', 'MS_CST', 'CST_CAT'])
_data_by_up = None
_data_by_site_grp = None
def _read_phospho_site_dataset():
global _data_by_up
global _data_by_site_grp
if _data_by_up is None or _data_by_site_grp is None:
phosphosite_data_file = join(dirname(abspath(__file__)),
'../resources/Phosphorylation_site_dataset.tsv')
reader = read_unicode_csv(phosphosite_data_file, delimiter='\t',
skiprows=4)
# Build up a dict by protein
data_by_up = {}
data_by_site_grp = defaultdict(list)
for row in reader:
site = PhosphoSite(*row)
data_by_up[site.PROTEIN] = site
data_by_site_grp[site.SITE_GRP_ID].append(site)
_data_by_up = data_by_up
_data_by_site_grp = data_by_site_grp
return (_data_by_up, _data_by_site_grp)
if __name__ == '__main__':
(data_by_up, data_by_site_grp) = _read_phospho_site_dataset()
|
Load phosphosite data from dataset .tsv file
|
Load phosphosite data from dataset .tsv file
|
Python
|
bsd-2-clause
|
sorgerlab/indra,bgyori/indra,pvtodorov/indra,johnbachman/belpy,pvtodorov/indra,bgyori/indra,pvtodorov/indra,johnbachman/indra,sorgerlab/indra,sorgerlab/belpy,pvtodorov/indra,johnbachman/indra,sorgerlab/belpy,johnbachman/indra,sorgerlab/belpy,sorgerlab/indra,johnbachman/belpy,bgyori/indra,johnbachman/belpy
|
Load phosphosite data from dataset .tsv file
|
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
from os.path import dirname, abspath, join
from collections import namedtuple, defaultdict
from indra.util import read_unicode_csv
PhosphoSite = namedtuple('PhosphoSite',
['GENE', 'PROTEIN', 'ACC_ID', 'HU_CHR_LOC', 'MOD_RSD',
'SITE_GRP_ID', 'ORGANISM', 'MW_kD', 'DOMAIN',
'SITE_7_AA', 'LT_LIT', 'MS_LIT', 'MS_CST', 'CST_CAT'])
_data_by_up = None
_data_by_site_grp = None
def _read_phospho_site_dataset():
global _data_by_up
global _data_by_site_grp
if _data_by_up is None or _data_by_site_grp is None:
phosphosite_data_file = join(dirname(abspath(__file__)),
'../resources/Phosphorylation_site_dataset.tsv')
reader = read_unicode_csv(phosphosite_data_file, delimiter='\t',
skiprows=4)
# Build up a dict by protein
data_by_up = {}
data_by_site_grp = defaultdict(list)
for row in reader:
site = PhosphoSite(*row)
data_by_up[site.PROTEIN] = site
data_by_site_grp[site.SITE_GRP_ID].append(site)
_data_by_up = data_by_up
_data_by_site_grp = data_by_site_grp
return (_data_by_up, _data_by_site_grp)
if __name__ == '__main__':
(data_by_up, data_by_site_grp) = _read_phospho_site_dataset()
|
<commit_before><commit_msg>Load phosphosite data from dataset .tsv file<commit_after>
|
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
from os.path import dirname, abspath, join
from collections import namedtuple, defaultdict
from indra.util import read_unicode_csv
PhosphoSite = namedtuple('PhosphoSite',
['GENE', 'PROTEIN', 'ACC_ID', 'HU_CHR_LOC', 'MOD_RSD',
'SITE_GRP_ID', 'ORGANISM', 'MW_kD', 'DOMAIN',
'SITE_7_AA', 'LT_LIT', 'MS_LIT', 'MS_CST', 'CST_CAT'])
_data_by_up = None
_data_by_site_grp = None
def _read_phospho_site_dataset():
global _data_by_up
global _data_by_site_grp
if _data_by_up is None or _data_by_site_grp is None:
phosphosite_data_file = join(dirname(abspath(__file__)),
'../resources/Phosphorylation_site_dataset.tsv')
reader = read_unicode_csv(phosphosite_data_file, delimiter='\t',
skiprows=4)
# Build up a dict by protein
data_by_up = {}
data_by_site_grp = defaultdict(list)
for row in reader:
site = PhosphoSite(*row)
data_by_up[site.PROTEIN] = site
data_by_site_grp[site.SITE_GRP_ID].append(site)
_data_by_up = data_by_up
_data_by_site_grp = data_by_site_grp
return (_data_by_up, _data_by_site_grp)
if __name__ == '__main__':
(data_by_up, data_by_site_grp) = _read_phospho_site_dataset()
|
Load phosphosite data from dataset .tsv filefrom __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
from os.path import dirname, abspath, join
from collections import namedtuple, defaultdict
from indra.util import read_unicode_csv
PhosphoSite = namedtuple('PhosphoSite',
['GENE', 'PROTEIN', 'ACC_ID', 'HU_CHR_LOC', 'MOD_RSD',
'SITE_GRP_ID', 'ORGANISM', 'MW_kD', 'DOMAIN',
'SITE_7_AA', 'LT_LIT', 'MS_LIT', 'MS_CST', 'CST_CAT'])
_data_by_up = None
_data_by_site_grp = None
def _read_phospho_site_dataset():
global _data_by_up
global _data_by_site_grp
if _data_by_up is None or _data_by_site_grp is None:
phosphosite_data_file = join(dirname(abspath(__file__)),
'../resources/Phosphorylation_site_dataset.tsv')
reader = read_unicode_csv(phosphosite_data_file, delimiter='\t',
skiprows=4)
# Build up a dict by protein
data_by_up = {}
data_by_site_grp = defaultdict(list)
for row in reader:
site = PhosphoSite(*row)
data_by_up[site.PROTEIN] = site
data_by_site_grp[site.SITE_GRP_ID].append(site)
_data_by_up = data_by_up
_data_by_site_grp = data_by_site_grp
return (_data_by_up, _data_by_site_grp)
if __name__ == '__main__':
(data_by_up, data_by_site_grp) = _read_phospho_site_dataset()
|
<commit_before><commit_msg>Load phosphosite data from dataset .tsv file<commit_after>from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
from os.path import dirname, abspath, join
from collections import namedtuple, defaultdict
from indra.util import read_unicode_csv
PhosphoSite = namedtuple('PhosphoSite',
['GENE', 'PROTEIN', 'ACC_ID', 'HU_CHR_LOC', 'MOD_RSD',
'SITE_GRP_ID', 'ORGANISM', 'MW_kD', 'DOMAIN',
'SITE_7_AA', 'LT_LIT', 'MS_LIT', 'MS_CST', 'CST_CAT'])
_data_by_up = None
_data_by_site_grp = None
def _read_phospho_site_dataset():
global _data_by_up
global _data_by_site_grp
if _data_by_up is None or _data_by_site_grp is None:
phosphosite_data_file = join(dirname(abspath(__file__)),
'../resources/Phosphorylation_site_dataset.tsv')
reader = read_unicode_csv(phosphosite_data_file, delimiter='\t',
skiprows=4)
# Build up a dict by protein
data_by_up = {}
data_by_site_grp = defaultdict(list)
for row in reader:
site = PhosphoSite(*row)
data_by_up[site.PROTEIN] = site
data_by_site_grp[site.SITE_GRP_ID].append(site)
_data_by_up = data_by_up
_data_by_site_grp = data_by_site_grp
return (_data_by_up, _data_by_site_grp)
if __name__ == '__main__':
(data_by_up, data_by_site_grp) = _read_phospho_site_dataset()
|
|
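A minimal usage sketch for the loader above (illustrative, not part of the commit); the lookup key 'TP53' and the printed fields are assumptions about what the dataset contains.
# Hedged usage sketch: 'TP53' is a hypothetical PROTEIN key, not a guaranteed entry.
data_by_up, data_by_site_grp = _read_phospho_site_dataset()
site = data_by_up.get('TP53')
if site is not None:
    # Print the accession and modified residue, then every site sharing the group id.
    print(site.ACC_ID, site.MOD_RSD)
    for grouped_site in data_by_site_grp[site.SITE_GRP_ID]:
        print(grouped_site.GENE, grouped_site.ORGANISM)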
8676c850b7bf011d3844d7fceb146d70f21dd218
|
nameless/lambda_calculus_ast.py
|
nameless/lambda_calculus_ast.py
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
lexer.py
@author ejnp
"""
class Expression(object):
"""Abstract class for any lambda calculus expression."""
def children(self):
"""Returns a list of Expression objects."""
pass
class Variable(Expression):
"""Encapsulates a lambda calculus variable.
Attributes:
name (str): The variable's ID
"""
def __init__(self, name):
self.name = name
def children(self):
return []
class Application(Expression):
"""Encapsulates a lambda calculus function call.
Attributes:
left_expression (Expression): A function to be evaluated
right_expression (Expression): The argument that's applied
"""
def __init__(self, left_expression, right_expression):
self.left_expression = left_expression
self.right_expression = right_expression
def children(self):
return [self.left_expression, self.right_expression]
class Abstraction(Expression):
"""Encapsulates a function in lambda calculus.
Attributes:
parameter (Variable): The argument variable
body (Expression): The scope of the function
"""
def __init__(self, parameter, body):
self.parameter = parameter
self.body = body
def children(self):
return [self.parameter, self.body]
|
Implement an object-oriented lambda calculus AST
|
Implement an object-oriented lambda calculus AST
|
Python
|
mit
|
ElliotPenson/nameless
|
Implement an object-oriented lambda calculus AST
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
lexer.py
@author ejnp
"""
class Expression(object):
"""Abstract class for any lambda calculus expression."""
def children(self):
"""Returns a list of Expression objects."""
pass
class Variable(Expression):
"""Encapsulates a lambda calculus variable.
Attributes:
name (str): The variable's ID
"""
def __init__(self, name):
self.name = name
def children(self):
return []
class Application(Expression):
"""Encapsulates a lambda calculus function call.
Attributes:
left_expression (Expression): A function to be evaluated
right_expression (Expression): The argument that's applied
"""
def __init__(self, left_expression, right_expression):
self.left_expression = left_expression
self.right_expression = right_expression
def children(self):
return [self.left_expression, self.right_expression]
class Abstraction(Expression):
"""Encapsulates a function in lambda calculus.
Attributes:
parameter (Variable): The argument variable
body (Expression): The scope of the function
"""
def __init__(self, parameter, body):
self.parameter = parameter
self.body = body
def children(self):
return [self.parameter, self.body]
|
<commit_before><commit_msg>Implement an object-oriented lambda calculus AST<commit_after>
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
lexer.py
@author ejnp
"""
class Expression(object):
"""Abstract class for any lambda calculus expression."""
def children(self):
"""Returns a list of Expression objects."""
pass
class Variable(Expression):
"""Encapsulates a lambda calculus variable.
Attributes:
name (str): The variable's ID
"""
def __init__(self, name):
self.name = name
def children(self):
return []
class Application(Expression):
"""Encapsulates a lambda calculus function call.
Attributes:
left_expression (Expression): A function to be evaluated
right_expression (Expression): The argument that's applied
"""
def __init__(self, left_expression, right_expression):
self.left_expression = left_expression
self.right_expression = right_expression
def children(self):
return [self.left_expression, self.right_expression]
class Abstraction(Expression):
"""Encapsulates a function in lambda calculus.
Attributes:
parameter (Variable): The argument variable
body (Expression): The scope of the function
"""
def __init__(self, parameter, body):
self.parameter = parameter
self.body = body
def children(self):
return [self.parameter, self.body]
|
Implement an object-oriented lambda calculus AST
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
lexer.py
@author ejnp
"""
class Expression(object):
"""Abstract class for any lambda calculus expression."""
def children(self):
"""Returns a list of Expression objects."""
pass
class Variable(Expression):
"""Encapsulates a lambda calculus variable.
Attributes:
name (str): The variable's ID
"""
def __init__(self, name):
self.name = name
def children(self):
return []
class Application(Expression):
"""Encapsulates a lambda calculus function call.
Attributes:
left_expression (Expression): A function to be evaluated
right_expression (Expression): The argument that's applied
"""
def __init__(self, left_expression, right_expression):
self.left_expression = left_expression
self.right_expression = right_expression
def children(self):
return [self.left_expression, self.right_expression]
class Abstraction(Expression):
"""Encapsulates a function in lambda calculus.
Attributes:
parameter (Variable): The argument variable
body (Expression): The scope of the function
"""
def __init__(self, parameter, body):
self.parameter = parameter
self.body = body
def children(self):
return [self.parameter, self.body]
|
<commit_before><commit_msg>Implement an object-oriented lambda calculus AST<commit_after>#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
lexer.py
@author ejnp
"""
class Expression(object):
"""Abstract class for any lambda calculus expression."""
def children(self):
"""Returns a list of Expression objects."""
pass
class Variable(Expression):
"""Encapsulates a lambda calculus variable.
Attributes:
name (str): The variable's ID
"""
def __init__(self, name):
self.name = name
def children(self):
return []
class Application(Expression):
"""Encapsulates a lambda calculus function call.
Attributes:
left_expression (Expression): A function to be evaluated
right_expression (Expression): The argument that's applied
"""
def __init__(self, left_expression, right_expression):
self.left_expression = left_expression
self.right_expression = right_expression
def children(self):
return [self.left_expression, self.right_expression]
class Abstraction(Expression):
"""Encapsulates a function in lambda calculus.
Attributes:
parameter (Variable): The argument variable
body (Expression): The scope of the function
"""
def __init__(self, parameter, body):
self.parameter = parameter
self.body = body
def children(self):
return [self.parameter, self.body]
|
|
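A short usage sketch for the AST classes above (illustrative, not part of the commit): it builds the self-application term λx. x x and prints the node types with a simple preorder walk.
# Build (λx. x x) using the Variable / Application / Abstraction classes above.
term = Abstraction(Variable('x'), Application(Variable('x'), Variable('x')))

def walk(expression, depth=0):
    # Preorder traversal driven by the children() protocol.
    print('  ' * depth + type(expression).__name__)
    for child in expression.children():
        walk(child, depth + 1)

walk(term)  # prints Abstraction, then Variable, Application, Variable, Variable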
a8834c9ed115e227b9d995685776dc5eda2991ee
|
luminoth/models/fasterrcnn/rcnn_target_test.py
|
luminoth/models/fasterrcnn/rcnn_target_test.py
|
import tensorflow as tf
import numpy as np
from easydict import EasyDict
from luminoth.models.fasterrcnn.rcnn_target import RCNNTarget
class RCNNTargetTest(tf.test.TestCase):
def setUp(self):
super(RCNNTargetTest, self).setUp()
# We don't care about the class labels or the batch number in most of these tests.
self._num_classes = 5
self._placeholder_label = 3.
self._batch_number = 1
self._image_size = (800, 600)
self._config = EasyDict({
'allowed_border': 0,
'clobber_positives': False,
'foreground_threshold': 0.7,
'background_threshold_high': 0.5,
'background_threshold_low': 0.1,
'foreground_fraction': 0.5,
'minibatch_size': 2,
})
def testBasic(self):
"""Tests a basic case.
We have one ground truth box and three proposals. One should be background, one foreground,
and one should be an ignored background (i.e. less IoU than whatever value is set as
config.background_threshold_low).
"""
model = RCNNTarget(self._num_classes, self._config)
gt_boxes = tf.constant([(20, 20, 80, 100, self._placeholder_label)])
proposed_boxes = tf.constant([
(self._batch_number, 55, 75, 85, 105), # Background box
(self._batch_number, 25, 21, 85, 105), # Foreground box
(self._batch_number, 78, 98, 99, 135), # Ignored box
])
rcnn_target_net = model(proposed_boxes, gt_boxes)
proposals_label = []
bbox_targets = []
with self.test_session() as sess:
(proposals_label, bbox_targets) = sess.run(rcnn_target_net)
# We test that all values are 'close' (up to 1e-03 distance) to avoid failing due to a
# floating point rounding error.
# We sum 1 to the placeholder label because rcnn_target does the same due to the fact that
# it uses 0 to signal 'background'.
self.assertAllClose(proposals_label, np.array([0., self._placeholder_label + 1, -1.]),
atol=1e-03)
if __name__ == '__main__':
tf.test.main()
|
Add a basic test for RCNNTarget
|
Add a basic test for RCNNTarget
|
Python
|
bsd-3-clause
|
tryolabs/luminoth,tryolabs/luminoth,tryolabs/luminoth
|
Add a basic test for RCNNTarget
|
import tensorflow as tf
import numpy as np
from easydict import EasyDict
from luminoth.models.fasterrcnn.rcnn_target import RCNNTarget
class RCNNTargetTest(tf.test.TestCase):
def setUp(self):
super(RCNNTargetTest, self).setUp()
# We don't care about the class labels or the batch number in most of these tests.
self._num_classes = 5
self._placeholder_label = 3.
self._batch_number = 1
self._image_size = (800, 600)
self._config = EasyDict({
'allowed_border': 0,
'clobber_positives': False,
'foreground_threshold': 0.7,
'background_threshold_high': 0.5,
'background_threshold_low': 0.1,
'foreground_fraction': 0.5,
'minibatch_size': 2,
})
def testBasic(self):
"""Tests a basic case.
We have one ground truth box and three proposals. One should be background, one foreground,
and one should be an ignored background (i.e. less IoU than whatever value is set as
config.background_threshold_low).
"""
model = RCNNTarget(self._num_classes, self._config)
gt_boxes = tf.constant([(20, 20, 80, 100, self._placeholder_label)])
proposed_boxes = tf.constant([
(self._batch_number, 55, 75, 85, 105), # Background box
(self._batch_number, 25, 21, 85, 105), # Foreground box
(self._batch_number, 78, 98, 99, 135), # Ignored box
])
rcnn_target_net = model(proposed_boxes, gt_boxes)
proposals_label = []
bbox_targets = []
with self.test_session() as sess:
(proposals_label, bbox_targets) = sess.run(rcnn_target_net)
# We test that all values are 'close' (up to 1e-03 distance) to avoid failing due to a
# floating point rounding error.
# We sum 1 to the placeholder label because rcnn_target does the same due to the fact that
# it uses 0 to signal 'background'.
self.assertAllClose(proposals_label, np.array([0., self._placeholder_label + 1, -1.]),
atol=1e-03)
if __name__ == '__main__':
tf.test.main()
|
<commit_before><commit_msg>Add a basic test for RCNNTarget<commit_after>
|
import tensorflow as tf
import numpy as np
from easydict import EasyDict
from luminoth.models.fasterrcnn.rcnn_target import RCNNTarget
class RCNNTargetTest(tf.test.TestCase):
def setUp(self):
super(RCNNTargetTest, self).setUp()
# We don't care about the class labels or the batch number in most of these tests.
self._num_classes = 5
self._placeholder_label = 3.
self._batch_number = 1
self._image_size = (800, 600)
self._config = EasyDict({
'allowed_border': 0,
'clobber_positives': False,
'foreground_threshold': 0.7,
'background_threshold_high': 0.5,
'background_threshold_low': 0.1,
'foreground_fraction': 0.5,
'minibatch_size': 2,
})
def testBasic(self):
"""Tests a basic case.
We have one ground truth box and three proposals. One should be background, one foreground,
and one should be an ignored background (i.e. less IoU than whatever value is set as
config.background_threshold_low).
"""
model = RCNNTarget(self._num_classes, self._config)
gt_boxes = tf.constant([(20, 20, 80, 100, self._placeholder_label)])
proposed_boxes = tf.constant([
(self._batch_number, 55, 75, 85, 105), # Background box
(self._batch_number, 25, 21, 85, 105), # Foreground box
(self._batch_number, 78, 98, 99, 135), # Ignored box
])
rcnn_target_net = model(proposed_boxes, gt_boxes)
proposals_label = []
bbox_targets = []
with self.test_session() as sess:
(proposals_label, bbox_targets) = sess.run(rcnn_target_net)
# We test that all values are 'close' (up to 1e-03 distance) to avoid failing due to a
# floating point rounding error.
# We sum 1 to the placeholder label because rcnn_target does the same due to the fact that
# it uses 0 to signal 'background'.
self.assertAllClose(proposals_label, np.array([0., self._placeholder_label + 1, -1.]),
atol=1e-03)
if __name__ == '__main__':
tf.test.main()
|
Add a basic test for RCNNTarget
import tensorflow as tf
import numpy as np
from easydict import EasyDict
from luminoth.models.fasterrcnn.rcnn_target import RCNNTarget
class RCNNTargetTest(tf.test.TestCase):
def setUp(self):
super(RCNNTargetTest, self).setUp()
# We don't care about the class labels or the batch number in most of these tests.
self._num_classes = 5
self._placeholder_label = 3.
self._batch_number = 1
self._image_size = (800, 600)
self._config = EasyDict({
'allowed_border': 0,
'clobber_positives': False,
'foreground_threshold': 0.7,
'background_threshold_high': 0.5,
'background_threshold_low': 0.1,
'foreground_fraction': 0.5,
'minibatch_size': 2,
})
def testBasic(self):
"""Tests a basic case.
We have one ground truth box and three proposals. One should be background, one foreground,
and one should be an ignored background (i.e. less IoU than whatever value is set as
config.background_threshold_low).
"""
model = RCNNTarget(self._num_classes, self._config)
gt_boxes = tf.constant([(20, 20, 80, 100, self._placeholder_label)])
proposed_boxes = tf.constant([
(self._batch_number, 55, 75, 85, 105), # Background box
(self._batch_number, 25, 21, 85, 105), # Foreground box
(self._batch_number, 78, 98, 99, 135), # Ignored box
])
rcnn_target_net = model(proposed_boxes, gt_boxes)
proposals_label = []
bbox_targets = []
with self.test_session() as sess:
(proposals_label, bbox_targets) = sess.run(rcnn_target_net)
# We test that all values are 'close' (up to 1e-03 distance) to avoid failing due to a
# floating point rounding error.
# We sum 1 to the placeholder label because rcnn_target does the same due to the fact that
# it uses 0 to signal 'background'.
self.assertAllClose(proposals_label, np.array([0., self._placeholder_label + 1, -1.]),
atol=1e-03)
if __name__ == '__main__':
tf.test.main()
|
<commit_before><commit_msg>Add a basic test for RCNNTarget<commit_after>import tensorflow as tf
import numpy as np
from easydict import EasyDict
from luminoth.models.fasterrcnn.rcnn_target import RCNNTarget
class RCNNTargetTest(tf.test.TestCase):
def setUp(self):
super(RCNNTargetTest, self).setUp()
# We don't care about the class labels or the batch number in most of these tests.
self._num_classes = 5
self._placeholder_label = 3.
self._batch_number = 1
self._image_size = (800, 600)
self._config = EasyDict({
'allowed_border': 0,
'clobber_positives': False,
'foreground_threshold': 0.7,
'background_threshold_high': 0.5,
'background_threshold_low': 0.1,
'foreground_fraction': 0.5,
'minibatch_size': 2,
})
def testBasic(self):
"""Tests a basic case.
We have one ground truth box and three proposals. One should be background, one foreground,
and one should be an ignored background (i.e. less IoU than whatever value is set as
config.background_threshold_low).
"""
model = RCNNTarget(self._num_classes, self._config)
gt_boxes = tf.constant([(20, 20, 80, 100, self._placeholder_label)])
proposed_boxes = tf.constant([
(self._batch_number, 55, 75, 85, 105), # Background box
(self._batch_number, 25, 21, 85, 105), # Foreground box
(self._batch_number, 78, 98, 99, 135), # Ignored box
])
rcnn_target_net = model(proposed_boxes, gt_boxes)
proposals_label = []
bbox_targets = []
with self.test_session() as sess:
(proposals_label, bbox_targets) = sess.run(rcnn_target_net)
# We test that all values are 'close' (up to 1e-03 distance) to avoid failing due to a
# floating point rounding error.
# We sum 1 to the placeholder label because rcnn_target does the same due to the fact that
# it uses 0 to signal 'background'.
self.assertAllClose(proposals_label, np.array([0., self._placeholder_label + 1, -1.]),
atol=1e-03)
if __name__ == '__main__':
tf.test.main()
|
|
fd114dbd5036735a9c3bcbd49fd8d31c2e750a8a
|
nailgun/nailgun/test/unit/test_requirements.py
|
nailgun/nailgun/test/unit/test_requirements.py
|
# -*- coding: utf-8 -*-
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from pkg_resources import require
def test_check_requirements_conflicts():
require('nailgun')
|
Check if nailgun's requirements do not conflict
|
Check if nailgun's requirements do not conflict
Added simple test that will try to parse nailgun's
requirements and see if there are any problems with them.
Change-Id: I342eda0a3b019780e0d452455734591aab91f6e9
Closes-Bug: #1462281
|
Python
|
apache-2.0
|
huntxu/fuel-web,stackforge/fuel-web,eayunstack/fuel-web,prmtl/fuel-web,stackforge/fuel-web,prmtl/fuel-web,SmartInfrastructures/fuel-web-dev,prmtl/fuel-web,nebril/fuel-web,huntxu/fuel-web,eayunstack/fuel-web,eayunstack/fuel-web,SmartInfrastructures/fuel-web-dev,prmtl/fuel-web,eayunstack/fuel-web,SmartInfrastructures/fuel-web-dev,huntxu/fuel-web,nebril/fuel-web,nebril/fuel-web,eayunstack/fuel-web,huntxu/fuel-web,stackforge/fuel-web,nebril/fuel-web,nebril/fuel-web,SmartInfrastructures/fuel-web-dev,huntxu/fuel-web,prmtl/fuel-web,SmartInfrastructures/fuel-web-dev
|
Check if nailgun's requirements do not conflict
Added simple test that will try to parse nailgun's
requirements and see if there are any problems with them.
Change-Id: I342eda0a3b019780e0d452455734591aab91f6e9
Closes-Bug: #1462281
|
# -*- coding: utf-8 -*-
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from pkg_resources import require
def test_check_requirements_conflicts():
require('nailgun')
|
<commit_before><commit_msg>Check if nailgun's requirements do not conflict
Added simple test that will try to parse nailgun's
requirements and see if there are any problems with them.
Change-Id: I342eda0a3b019780e0d452455734591aab91f6e9
Closes-Bug: #1462281<commit_after>
|
# -*- coding: utf-8 -*-
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from pkg_resources import require
def test_check_requirements_conflicts():
require('nailgun')
|
Check if nailgun's requirements do not conflict
Added simple test that will try to parse nailgun's
requirements and see if there are any problems with them.
Change-Id: I342eda0a3b019780e0d452455734591aab91f6e9
Closes-Bug: #1462281
# -*- coding: utf-8 -*-
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from pkg_resources import require
def test_check_requirements_conflicts():
require('nailgun')
|
<commit_before><commit_msg>Check if nailgun's requirements do not conflict
Added simple test that will try to parse nailgun's
requirements and see if there are any problems with them.
Change-Id: I342eda0a3b019780e0d452455734591aab91f6e9
Closes-Bug: #1462281<commit_after># -*- coding: utf-8 -*-
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from pkg_resources import require
def test_check_requirements_conflicts():
require('nailgun')
|
|
cdd4357dbacdb16774916f746f129e534bc09866
|
utils/AccessDump.py
|
utils/AccessDump.py
|
#!/usr/bin/env python
#
# AccessDump.py
# A simple script to dump the contents of a Microsoft Access Database.
# It depends upon the mdbtools suite:
# http://sourceforge.net/projects/mdbtools/
import string, sys, subprocess # the subprocess module is new in python v 2.4
DATABASE = sys.argv[1]
# Get the list of table names with "mdb-tables"
table_names = subprocess.Popen(["mdb-tables", "-1", DATABASE],
stdout=subprocess.PIPE).communicate()[0]
tables = table_names.split('\n')
# Dump each table as a CSV file using "mdb-export",
# converting " " in table names to "_" for the CSV filenames.
for table in tables:
if table != '':
filename = table.replace(" ","_") + ".csv"
file = open(filename, 'w')
print("Dumping " + table)
contents = subprocess.Popen(["mdb-export", "-D", "%F", DATABASE, table],
stdout=subprocess.PIPE).communicate()[0]
file.write(string.replace(contents, '\r', ''))
file.close()
|
Add util script for dumping Access tables to .csv
|
Add util script for dumping Access tables to .csv
|
Python
|
bsd-3-clause
|
uq-eresearch/uqam,uq-eresearch/uqam,uq-eresearch/uqam,uq-eresearch/uqam
|
Add util script for dumping Access tables to .csv
|
#!/usr/bin/env python
#
# AccessDump.py
# A simple script to dump the contents of a Microsoft Access Database.
# It depends upon the mdbtools suite:
# http://sourceforge.net/projects/mdbtools/
import string, sys, subprocess # the subprocess module is new in python v 2.4
DATABASE = sys.argv[1]
# Get the list of table names with "mdb-tables"
table_names = subprocess.Popen(["mdb-tables", "-1", DATABASE],
stdout=subprocess.PIPE).communicate()[0]
tables = table_names.split('\n')
# Dump each table as a CSV file using "mdb-export",
# converting " " in table names to "_" for the CSV filenames.
for table in tables:
if table != '':
filename = table.replace(" ","_") + ".csv"
file = open(filename, 'w')
print("Dumping " + table)
contents = subprocess.Popen(["mdb-export", "-D", "%F", DATABASE, table],
stdout=subprocess.PIPE).communicate()[0]
file.write(string.replace(contents, '\r', ''))
file.close()
|
<commit_before><commit_msg>Add util script for dumping Access tables to .csv<commit_after>
|
#!/usr/bin/env python
#
# AccessDump.py
# A simple script to dump the contents of a Microsoft Access Database.
# It depends upon the mdbtools suite:
# http://sourceforge.net/projects/mdbtools/
import string, sys, subprocess # the subprocess module is new in python v 2.4
DATABASE = sys.argv[1]
# Get the list of table names with "mdb-tables"
table_names = subprocess.Popen(["mdb-tables", "-1", DATABASE],
stdout=subprocess.PIPE).communicate()[0]
tables = table_names.split('\n')
# Dump each table as a CSV file using "mdb-export",
# converting " " in table names to "_" for the CSV filenames.
for table in tables:
if table != '':
filename = table.replace(" ","_") + ".csv"
file = open(filename, 'w')
print("Dumping " + table)
contents = subprocess.Popen(["mdb-export", "-D", "%F", DATABASE, table],
stdout=subprocess.PIPE).communicate()[0]
file.write(string.replace(contents, '\r', ''))
file.close()
|
Add util script for dumping Access tables to .csv
#!/usr/bin/env python
#
# AccessDump.py
# A simple script to dump the contents of a Microsoft Access Database.
# It depends upon the mdbtools suite:
# http://sourceforge.net/projects/mdbtools/
import string, sys, subprocess # the subprocess module is new in python v 2.4
DATABASE = sys.argv[1]
# Get the list of table names with "mdb-tables"
table_names = subprocess.Popen(["mdb-tables", "-1", DATABASE],
stdout=subprocess.PIPE).communicate()[0]
tables = table_names.split('\n')
# Dump each table as a CSV file using "mdb-export",
# converting " " in table names to "_" for the CSV filenames.
for table in tables:
if table != '':
filename = table.replace(" ","_") + ".csv"
file = open(filename, 'w')
print("Dumping " + table)
contents = subprocess.Popen(["mdb-export", "-D", "%F", DATABASE, table],
stdout=subprocess.PIPE).communicate()[0]
file.write(string.replace(contents, '\r', ''))
file.close()
|
<commit_before><commit_msg>Add util script for dumping Access tables to .csv<commit_after>#!/usr/bin/env python
#
# AccessDump.py
# A simple script to dump the contents of a Microsoft Access Database.
# It depends upon the mdbtools suite:
# http://sourceforge.net/projects/mdbtools/
import string, sys, subprocess # the subprocess module is new in python v 2.4
DATABASE = sys.argv[1]
# Get the list of table names with "mdb-tables"
table_names = subprocess.Popen(["mdb-tables", "-1", DATABASE],
stdout=subprocess.PIPE).communicate()[0]
tables = table_names.split('\n')
# Dump each table as a CSV file using "mdb-export",
# converting " " in table names to "_" for the CSV filenames.
for table in tables:
if table != '':
filename = table.replace(" ","_") + ".csv"
file = open(filename, 'w')
print("Dumping " + table)
contents = subprocess.Popen(["mdb-export", "-D", "%F", DATABASE, table],
stdout=subprocess.PIPE).communicate()[0]
file.write(string.replace(contents, '\r', ''))
file.close()
|
|
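The dump script above relies on Python 2 behaviour (the string module's replace and byte-string pipes); a rough Python 3 sketch of the same export loop follows, assuming the mdb-tables / mdb-export command-line interfaces are unchanged.
# Hedged Python 3 sketch of the same mdb-export loop (illustrative only).
import subprocess, sys

database = sys.argv[1]
tables = subprocess.run(["mdb-tables", "-1", database],
                        capture_output=True, text=True).stdout.split('\n')
for table in tables:
    if table:
        contents = subprocess.run(["mdb-export", "-D", "%F", database, table],
                                  capture_output=True, text=True).stdout
        with open(table.replace(" ", "_") + ".csv", "w") as csv_file:
            csv_file.write(contents.replace('\r', ''))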
60e7beefe0c20e2da232d83efac853ec6a9f40d9
|
l10n_it_pec/partner.py
|
l10n_it_pec/partner.py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
class product_product(osv.Model):
_inherit = "res.partner"
_columns = {
'pec_mail': fields.char(
'PEC Mail'
),
}
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, orm
class ResPartner(orm.Model):
_inherit = "res.partner"
_columns = {
'pec_mail': fields.char(
'PEC Mail'
),
}
|
Fix CamelCase on PEC module
|
[Fix] Fix CamelCase on PEC module
|
Python
|
agpl-3.0
|
luca-vercelli/l10n-italy,alessandrocamilli/l10n-italy,scigghia/l10n-italy,OpenCode/l10n-italy,odoo-isa/l10n-italy,hurrinico/l10n-italy,abstract-open-solutions/l10n-italy,maxhome1/l10n-italy,linkitspa/l10n-italy,linkitspa/l10n-italy,ApuliaSoftware/l10n-italy,andrea4ever/l10n-italy,linkitspa/l10n-italy,yvaucher/l10n-italy
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
class product_product(osv.Model):
_inherit = "res.partner"
_columns = {
'pec_mail': fields.char(
'PEC Mail'
),
}
[Fix] Fix CamelCase on PEC module
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, orm
class ResPartner(orm.Model):
_inherit = "res.partner"
_columns = {
'pec_mail': fields.char(
'PEC Mail'
),
}
|
<commit_before># -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
class product_product(osv.Model):
_inherit = "res.partner"
_columns = {
'pec_mail': fields.char(
'PEC Mail'
),
}
<commit_msg>[Fix] Fix CamelCase on PEC module<commit_after>
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, orm
class ResPartner(orm.Model):
_inherit = "res.partner"
_columns = {
'pec_mail': fields.char(
'PEC Mail'
),
}
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
class product_product(osv.Model):
_inherit = "res.partner"
_columns = {
'pec_mail': fields.char(
'PEC Mail'
),
}
[Fix] Fix CamelCase on PEC module
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, orm
class ResPartner(orm.Model):
_inherit = "res.partner"
_columns = {
'pec_mail': fields.char(
'PEC Mail'
),
}
|
<commit_before># -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
class product_product(osv.Model):
_inherit = "res.partner"
_columns = {
'pec_mail': fields.char(
'PEC Mail'
),
}
<commit_msg>[Fix] Fix CamelCase on PEC module<commit_after># -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, orm
class ResPartner(orm.Model):
_inherit = "res.partner"
_columns = {
'pec_mail': fields.char(
'PEC Mail'
),
}
|
50182bf8bcfe5884820a61639924fbfa96aecccb
|
src/ggrc/migrations/versions/20151112145524_35e5344803b4_add_missing_constraints.py
|
src/ggrc/migrations/versions/20151112145524_35e5344803b4_add_missing_constraints.py
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com
from alembic import op
from ggrc.migrations.utils import resolve_duplicates
from ggrc.models import Vendor
"""Add missing constraints
Revision ID: 35e5344803b4
Revises: 27684e5f313a
Create Date: 2015-11-12 14:55:24.420680
"""
# revision identifiers, used by Alembic.
revision = '35e5344803b4'
down_revision = '27684e5f313a'
def upgrade():
resolve_duplicates(Vendor, "slug")
op.create_unique_constraint('uq_slug_vendors', 'vendors', ['slug'])
def downgrade():
op.drop_constraint('uq_slug_vendors', 'vendors', 'unique')
|
Add unique constraint to vendor slug
|
Add unique constraint to vendor slug
|
Python
|
apache-2.0
|
kr41/ggrc-core,andrei-karalionak/ggrc-core,j0gurt/ggrc-core,j0gurt/ggrc-core,NejcZupec/ggrc-core,AleksNeStu/ggrc-core,plamut/ggrc-core,selahssea/ggrc-core,kr41/ggrc-core,prasannav7/ggrc-core,jmakov/ggrc-core,NejcZupec/ggrc-core,josthkko/ggrc-core,jmakov/ggrc-core,j0gurt/ggrc-core,andrei-karalionak/ggrc-core,selahssea/ggrc-core,NejcZupec/ggrc-core,VinnieJohns/ggrc-core,edofic/ggrc-core,selahssea/ggrc-core,j0gurt/ggrc-core,edofic/ggrc-core,VinnieJohns/ggrc-core,andrei-karalionak/ggrc-core,selahssea/ggrc-core,edofic/ggrc-core,edofic/ggrc-core,josthkko/ggrc-core,prasannav7/ggrc-core,AleksNeStu/ggrc-core,andrei-karalionak/ggrc-core,VinnieJohns/ggrc-core,jmakov/ggrc-core,plamut/ggrc-core,AleksNeStu/ggrc-core,prasannav7/ggrc-core,jmakov/ggrc-core,kr41/ggrc-core,kr41/ggrc-core,AleksNeStu/ggrc-core,VinnieJohns/ggrc-core,NejcZupec/ggrc-core,prasannav7/ggrc-core,plamut/ggrc-core,plamut/ggrc-core,josthkko/ggrc-core,jmakov/ggrc-core,josthkko/ggrc-core
|
Add unique constraint to vendor slug
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com
from alembic import op
from ggrc.migrations.utils import resolve_duplicates
from ggrc.models import Vendor
"""Add missing constraints
Revision ID: 35e5344803b4
Revises: 27684e5f313a
Create Date: 2015-11-12 14:55:24.420680
"""
# revision identifiers, used by Alembic.
revision = '35e5344803b4'
down_revision = '27684e5f313a'
def upgrade():
resolve_duplicates(Vendor, "slug")
op.create_unique_constraint('uq_slug_vendors', 'vendors', ['slug'])
def downgrade():
op.drop_constraint('uq_slug_vendors', 'vendors', 'unique')
|
<commit_before><commit_msg>Add unique constraint to vendor slug<commit_after>
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com
from alembic import op
from ggrc.migrations.utils import resolve_duplicates
from ggrc.models import Vendor
"""Add missing constraints
Revision ID: 35e5344803b4
Revises: 27684e5f313a
Create Date: 2015-11-12 14:55:24.420680
"""
# revision identifiers, used by Alembic.
revision = '35e5344803b4'
down_revision = '27684e5f313a'
def upgrade():
resolve_duplicates(Vendor, "slug")
op.create_unique_constraint('uq_slug_vendors', 'vendors', ['slug'])
def downgrade():
op.drop_constraint('uq_slug_vendors', 'vendors', 'unique')
|
Add unique constraint to vendor slug
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com
from alembic import op
from ggrc.migrations.utils import resolve_duplicates
from ggrc.models import Vendor
"""Add missing constraints
Revision ID: 35e5344803b4
Revises: 27684e5f313a
Create Date: 2015-11-12 14:55:24.420680
"""
# revision identifiers, used by Alembic.
revision = '35e5344803b4'
down_revision = '27684e5f313a'
def upgrade():
resolve_duplicates(Vendor, "slug")
op.create_unique_constraint('uq_slug_vendors', 'vendors', ['slug'])
def downgrade():
op.drop_constraint('uq_slug_vendors', 'vendors', 'unique')
|
<commit_before><commit_msg>Add unique constraint to vendor slug<commit_after># Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com
from alembic import op
from ggrc.migrations.utils import resolve_duplicates
from ggrc.models import Vendor
"""Add missing constraints
Revision ID: 35e5344803b4
Revises: 27684e5f313a
Create Date: 2015-11-12 14:55:24.420680
"""
# revision identifiers, used by Alembic.
revision = '35e5344803b4'
down_revision = '27684e5f313a'
def upgrade():
resolve_duplicates(Vendor, "slug")
op.create_unique_constraint('uq_slug_vendors', 'vendors', ['slug'])
def downgrade():
op.drop_constraint('uq_slug_vendors', 'vendors', 'unique')
|
|
584f13955cda8c1b92c1246417881a7a4458155c
|
account_product_fiscal_classification_test/tests/test_multicompany.py
|
account_product_fiscal_classification_test/tests/test_multicompany.py
|
# Copyright (C) 2020 David BEAL @ Akretion
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo.tests.common import TransactionCase
class MulticompanyTests(TransactionCase):
def setUp(self):
super().setUp()
def test_classif_in_multicompany(self):
prd = self.env.ref(
"account_product_fiscal_classification.product_template_all_cpnies"
)
self.env.user.write({"company_id": self.env.ref("base.main_company").id})
classif1 = self.env.ref(
"account_product_fiscal_classification.global_fiscal_classification_1"
)
classif2 = self.env.ref(
"account_product_fiscal_classification.global_fiscal_classification_2"
)
def check_sale_purchase_tax(classif, write_classif=True):
if write_classif:
prd.write({"fiscal_classification_id": classif.id})
self.assertEqual(prd.sudo().taxes_id, classif.sudo().sale_tax_ids)
self.assertEqual(
prd.sudo().supplier_taxes_id, classif.sudo().purchase_tax_ids
)
# test write with 2 classifications on the same company
check_sale_purchase_tax(classif1)
check_sale_purchase_tax(classif2)
# test the same data but from another company point of view
self.env.user.write(
{
"company_id": self.env.ref(
"account_product_fiscal_classification."
"cpny_onlyshare_classification"
).id
}
)
# check with same data
check_sale_purchase_tax(classif2, write_classif=False)
check_sale_purchase_tax(classif1)
|
FIX prd_classif: set taxes on all companies sharing classif
|
FIX prd_classif: set taxes on all companies sharing classif
|
Python
|
agpl-3.0
|
OCA/account-fiscal-rule,OCA/account-fiscal-rule
|
FIX prd_classif: set taxes on all companies sharing classif
|
# Copyright (C) 2020 David BEAL @ Akretion
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo.tests.common import TransactionCase
class MulticompanyTests(TransactionCase):
def setUp(self):
super().setUp()
def test_classif_in_multicompany(self):
prd = self.env.ref(
"account_product_fiscal_classification.product_template_all_cpnies"
)
self.env.user.write({"company_id": self.env.ref("base.main_company").id})
classif1 = self.env.ref(
"account_product_fiscal_classification.global_fiscal_classification_1"
)
classif2 = self.env.ref(
"account_product_fiscal_classification.global_fiscal_classification_2"
)
def check_sale_purchase_tax(classif, write_classif=True):
if write_classif:
prd.write({"fiscal_classification_id": classif.id})
self.assertEqual(prd.sudo().taxes_id, classif.sudo().sale_tax_ids)
self.assertEqual(
prd.sudo().supplier_taxes_id, classif.sudo().purchase_tax_ids
)
# test write with 2 classifications on the same company
check_sale_purchase_tax(classif1)
check_sale_purchase_tax(classif2)
# test the same data but from another company point of view
self.env.user.write(
{
"company_id": self.env.ref(
"account_product_fiscal_classification."
"cpny_onlyshare_classification"
).id
}
)
# check with same data
check_sale_purchase_tax(classif2, write_classif=False)
check_sale_purchase_tax(classif1)
|
<commit_before><commit_msg>FIX prd_classif: set taxes on all companies sharing classif<commit_after>
|
# Copyright (C) 2020 David BEAL @ Akretion
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo.tests.common import TransactionCase
class MulticompanyTests(TransactionCase):
def setUp(self):
super().setUp()
def test_classif_in_multicompany(self):
prd = self.env.ref(
"account_product_fiscal_classification.product_template_all_cpnies"
)
self.env.user.write({"company_id": self.env.ref("base.main_company").id})
classif1 = self.env.ref(
"account_product_fiscal_classification.global_fiscal_classification_1"
)
classif2 = self.env.ref(
"account_product_fiscal_classification.global_fiscal_classification_2"
)
def check_sale_purchase_tax(classif, write_classif=True):
if write_classif:
prd.write({"fiscal_classification_id": classif.id})
self.assertEqual(prd.sudo().taxes_id, classif.sudo().sale_tax_ids)
self.assertEqual(
prd.sudo().supplier_taxes_id, classif.sudo().purchase_tax_ids
)
# test write with 2 classifications on the same company
check_sale_purchase_tax(classif1)
check_sale_purchase_tax(classif2)
# test the same data but from another company point of view
self.env.user.write(
{
"company_id": self.env.ref(
"account_product_fiscal_classification."
"cpny_onlyshare_classification"
).id
}
)
# check with same data
check_sale_purchase_tax(classif2, write_classif=False)
check_sale_purchase_tax(classif1)
|
FIX prd_classif: set taxes on all companies sharing classif
# Copyright (C) 2020 David BEAL @ Akretion
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo.tests.common import TransactionCase
class MulticompanyTests(TransactionCase):
def setUp(self):
super().setUp()
def test_classif_in_multicompany(self):
prd = self.env.ref(
"account_product_fiscal_classification.product_template_all_cpnies"
)
self.env.user.write({"company_id": self.env.ref("base.main_company").id})
classif1 = self.env.ref(
"account_product_fiscal_classification.global_fiscal_classification_1"
)
classif2 = self.env.ref(
"account_product_fiscal_classification.global_fiscal_classification_2"
)
def check_sale_purchase_tax(classif, write_classif=True):
if write_classif:
prd.write({"fiscal_classification_id": classif.id})
self.assertEqual(prd.sudo().taxes_id, classif.sudo().sale_tax_ids)
self.assertEqual(
prd.sudo().supplier_taxes_id, classif.sudo().purchase_tax_ids
)
# test write with 2 classifications on the same company
check_sale_purchase_tax(classif1)
check_sale_purchase_tax(classif2)
# test the same data but from another company point of view
self.env.user.write(
{
"company_id": self.env.ref(
"account_product_fiscal_classification."
"cpny_onlyshare_classification"
).id
}
)
# check with same data
check_sale_purchase_tax(classif2, write_classif=False)
check_sale_purchase_tax(classif1)
|
<commit_before><commit_msg>FIX prd_classif: set taxes on all companies sharing classif<commit_after># Copyright (C) 2020 David BEAL @ Akretion
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo.tests.common import TransactionCase
class MulticompanyTests(TransactionCase):
def setUp(self):
super().setUp()
def test_classif_in_multicompany(self):
prd = self.env.ref(
"account_product_fiscal_classification.product_template_all_cpnies"
)
self.env.user.write({"company_id": self.env.ref("base.main_company").id})
classif1 = self.env.ref(
"account_product_fiscal_classification.global_fiscal_classification_1"
)
classif2 = self.env.ref(
"account_product_fiscal_classification.global_fiscal_classification_2"
)
def check_sale_purchase_tax(classif, write_classif=True):
if write_classif:
prd.write({"fiscal_classification_id": classif.id})
self.assertEqual(prd.sudo().taxes_id, classif.sudo().sale_tax_ids)
self.assertEqual(
prd.sudo().supplier_taxes_id, classif.sudo().purchase_tax_ids
)
# test write with 2 classifications on the same company
check_sale_purchase_tax(classif1)
check_sale_purchase_tax(classif2)
# test the same data but from another company point of view
self.env.user.write(
{
"company_id": self.env.ref(
"account_product_fiscal_classification."
"cpny_onlyshare_classification"
).id
}
)
# check with same data
check_sale_purchase_tax(classif2, write_classif=False)
check_sale_purchase_tax(classif1)
|
|
3c948669c63be48c6762f4606c9bdae54328fdbc
|
education/migrations/0015_mark_implausible_responses.py
|
education/migrations/0015_mark_implausible_responses.py
|
# -*- coding: utf-8 -*-
from south.v2 import DataMigration
from education.models import Response
class Migration(DataMigration):
def forwards(self, orm):
implausibles = Response.objects.filter(has_errors=False,
message__direction='I',
eav_values__value_float__gt=5000) \
.update(has_errors=True)
def backwards(self, orm):
originals = Response.objects.filter(has_errors = True,
message__direction='I',
eav_values__value_float__gt = 5000) \
.update(has_errors=False)
|
Mark all implausibly large responses as having errors.
|
Mark all implausibly large responses as having errors.
|
Python
|
bsd-3-clause
|
unicefuganda/edtrac,unicefuganda/edtrac,unicefuganda/edtrac
|
Mark all implausibly large responses as having errors.
|
# -*- coding: utf-8 -*-
from south.v2 import DataMigration
from education.models import Response
class Migration(DataMigration):
def forwards(self, orm):
implausibles = Response.objects.filter(has_errors=False,
message__direction='I',
eav_values__value_float__gt=5000) \
.update(has_errors=True)
def backwards(self, orm):
originals = Response.objects.filter(has_errors = True,
message__direction='I',
eav_values__value_float__gt = 5000) \
.update(has_errors=False)
|
<commit_before><commit_msg>Mark all implausibly large responses as having errors.<commit_after>
|
# -*- coding: utf-8 -*-
from south.v2 import DataMigration
from education.models import Response
class Migration(DataMigration):
def forwards(self, orm):
implausibles = Response.objects.filter(has_errors=False,
message__direction='I',
eav_values__value_float__gt=5000) \
.update(has_errors=True)
def backwards(self, orm):
originals = Response.objects.filter(has_errors = True,
message__direction='I',
eav_values__value_float__gt = 5000) \
.update(has_errors=False)
|
Mark all implausibly large responses as having errors.
# -*- coding: utf-8 -*-
from south.v2 import DataMigration
from education.models import Response
class Migration(DataMigration):
def forwards(self, orm):
implausibles = Response.objects.filter(has_errors=False,
message__direction='I',
eav_values__value_float__gt=5000) \
.update(has_errors=True)
def backwards(self, orm):
originals = Response.objects.filter(has_errors = True,
message__direction='I',
eav_values__value_float__gt = 5000) \
.update(has_errors=False)
|
<commit_before><commit_msg>Mark all implausibly large responses as having errors.<commit_after># -*- coding: utf-8 -*-
from south.v2 import DataMigration
from education.models import Response
class Migration(DataMigration):
def forwards(self, orm):
implausibles = Response.objects.filter(has_errors=False,
message__direction='I',
eav_values__value_float__gt=5000) \
.update(has_errors=True)
def backwards(self, orm):
originals = Response.objects.filter(has_errors = True,
message__direction='I',
eav_values__value_float__gt = 5000) \
.update(has_errors=False)
|
|
4ff83e0ee299dc521599ce38ef4eee27dcb15c3c
|
check_overcloud_controller_settings.py
|
check_overcloud_controller_settings.py
|
#!/usr/bin/env python
import os
import ConfigParser
MARIADB_MAX_CONNECTIONS_MIN = 4096
def find_mariadb_config_file():
potential_locations = [
'/etc/my.cnf.d/galera.cnf',
'/etc/my.cnf.d/server.cnf',
'/etc/my.cnf',
]
for filepath in potential_locations:
if os.access(filepath, os.R_OK):
return filepath
raise Exception(
"Can't find mariadb config at %s" %
potential_locations
)
def check_mariadb_config():
config_file = find_mariadb_config_file()
config = ConfigParser.SafeConfigParser()
config.read(config_file)
print "Checking settings in {}".format(config_file)
if not config.has_option('mysqld', 'max_connections'):
print "WARNING max_connections is unset, it should be at least {}" \
.format(MARIADB_MAX_CONNECTIONS_MIN)
elif config.getint('mysqld', 'max_connections') < MARIADB_MAX_CONNECTIONS_MIN:
print "WARNING max_connections is {}, it should be at least {}".format(
config.getint('mysqld', 'max_connections'),
MARIADB_MAX_CONNECTIONS_MIN)
check_mariadb_config()
|
Validate mariadb config on the overcloud controller
|
Validate mariadb config on the overcloud controller
The script is meant to grow to validate more settings on the overcloud
controller.
|
Python
|
apache-2.0
|
coolsvap/clapper,coolsvap/clapper,rthallisey/clapper,coolsvap/clapper,rthallisey/clapper
|
Validate mariadb config on the overcloud controller
The script is meant to grow to validate more settings on the overcloud
controller.
|
#!/usr/bin/env python
import os
import ConfigParser
MARIADB_MAX_CONNECTIONS_MIN = 4096
def find_mariadb_config_file():
potential_locations = [
'/etc/my.cnf.d/galera.cnf',
'/etc/my.cnf.d/server.cnf',
'/etc/my.cnf',
]
for filepath in potential_locations:
if os.access(filepath, os.R_OK):
return filepath
raise Exception(
"Can't find mariadb config at %s" %
potential_locations
)
def check_mariadb_config():
config_file = find_mariadb_config_file()
config = ConfigParser.SafeConfigParser()
config.read(config_file)
print "Checking settings in {}".format(config_file)
if not config.has_option('mysqld', 'max_connections'):
print "WARNING max_connections is unset, it should be at least {}" \
.format(MARIADB_MAX_CONNECTIONS_MIN)
elif config.getint('mysqld', 'max_connections') < MARIADB_MAX_CONNECTIONS_MIN:
print "WARNING max_connections is {}, it should be at least {}".format(
config.getint('mysqld', 'max_connections'),
MARIADB_MAX_CONNECTIONS_MIN)
check_mariadb_config()
|
<commit_before><commit_msg>Validate mariadb config on the overcloud controller
The script is meant to grow to validate more settings on the overcloud
controller.<commit_after>
|
#!/usr/bin/env python
import os
import ConfigParser
MARIADB_MAX_CONNECTIONS_MIN = 4096
def find_mariadb_config_file():
potential_locations = [
'/etc/my.cnf.d/galera.cnf',
'/etc/my.cnf.d/server.cnf',
'/etc/my.cnf',
]
for filepath in potential_locations:
if os.access(filepath, os.R_OK):
return filepath
raise Exception(
"Can't find mariadb config at %s" %
potential_locations
)
def check_mariadb_config():
config_file = find_mariadb_config_file()
config = ConfigParser.SafeConfigParser()
config.read(config_file)
print "Checking settings in {}".format(config_file)
if not config.has_option('mysqld', 'max_connections'):
print "WARNING max_connections is unset, it should be at least {}" \
.format(MARIADB_MAX_CONNECTIONS_MIN)
elif config.getint('mysqld', 'max_connections') < MARIADB_MAX_CONNECTIONS_MIN:
print "WARNING max_connections is {}, it should be at least {}".format(
config.getint('mysqld', 'max_connections'),
MARIADB_MAX_CONNECTIONS_MIN)
check_mariadb_config()
|
Validate mariadb config on the overcloud controller
The script is meant to grow to validate more settings on the overcloud
controller.#!/usr/bin/env python
import os
import ConfigParser
MARIADB_MAX_CONNECTIONS_MIN = 4096
def find_mariadb_config_file():
potential_locations = [
'/etc/my.cnf.d/galera.cnf',
'/etc/my.cnf.d/server.cnf',
'/etc/my.cnf',
]
for filepath in potential_locations:
if os.access(filepath, os.R_OK):
return filepath
raise Exception(
"Can't find mariadb config at %s" %
potential_locations
)
def check_mariadb_config():
config_file = find_mariadb_config_file()
config = ConfigParser.SafeConfigParser()
config.read(config_file)
print "Checking settings in {}".format(config_file)
if not config.has_option('mysqld', 'max_connections'):
print "WARNING max_connections is unset, it should be at least {}" \
.format(MARIADB_MAX_CONNECTIONS_MIN)
elif config.getint('mysqld', 'max_connections') < MARIADB_MAX_CONNECTIONS_MIN:
print "WARNING max_connections is {}, it should be at least {}".format(
config.getint('mysqld', 'max_connections'),
MARIADB_MAX_CONNECTIONS_MIN)
check_mariadb_config()
|
<commit_before><commit_msg>Validate mariadb config on the overcloud controller
The script is meant to grow to validate more settings on the overcloud
controller.<commit_after>#!/usr/bin/env python
import os
import ConfigParser
MARIADB_MAX_CONNECTIONS_MIN = 4096
def find_mariadb_config_file():
potential_locations = [
'/etc/my.cnf.d/galera.cnf',
'/etc/my.cnf.d/server.cnf',
'/etc/my.cnf',
]
for filepath in potential_locations:
if os.access(filepath, os.R_OK):
return filepath
raise Exception(
"Can't find mariadb config at %s" %
potential_locations
)
def check_mariadb_config():
config_file = find_mariadb_config_file()
config = ConfigParser.SafeConfigParser()
config.read(config_file)
print "Checking settings in {}".format(config_file)
if not config.has_option('mysqld', 'max_connections'):
print "WARNING max_connections is unset, it should be at least {}" \
.format(MARIADB_MAX_CONNECTIONS_MIN)
elif config.getint('mysqld', 'max_connections') < MARIADB_MAX_CONNECTIONS_MIN:
print "WARNING max_connections is {}, it should be at least {}".format(
config.getint('mysqld', 'max_connections'),
MARIADB_MAX_CONNECTIONS_MIN)
check_mariadb_config()
|
|
f44c98a837c9dbd48f32ba698c59ed6a6369c293
|
pygraphc/preprocess/CreateGraphModel.py
|
pygraphc/preprocess/CreateGraphModel.py
|
from pygraphc.preprocess.ParallelPreprocess import ParallelPreprocess
from pygraphc.similarity.JaroWinkler import JaroWinkler
import networkx as nx
from time import time
class CreateGraphModel(object):
def __init__(self, log_file):
self.log_file = log_file
self.unique_events = []
self.unique_events_length = 0
self.distances = []
self.graph = nx.MultiGraph()
def __get_nodes(self):
pp = ParallelPreprocess(self.log_file)
self.unique_events = pp.get_unique_events()
self.unique_events_length = pp.unique_events_length
self.event_attributes = pp.event_attributes
def __get_distances(self):
jw = JaroWinkler(self.event_attributes, self.unique_events_length)
self.distances = jw.get_jarowinkler()
def create_graph(self):
self.__get_nodes()
self.__get_distances()
self.graph.add_nodes_from(self.unique_events)
self.graph.add_weighted_edges_from(self.distances)
return self.graph
# open file
start = time()
logfile = '/home/hudan/Git/labeled-authlog/dataset/SecRepo/auth-perday/dec-1.log'
# preprocess
cgm = CreateGraphModel(logfile)
graph = cgm.create_graph()
nx.write_dot(graph, 'test.dot')
# print runtime
duration = time() - start
minute, second = divmod(duration, 60)
hour, minute = divmod(minute, 60)
print "Runtime: %d:%02d:%02d" % (hour, minute, second)
|
Add create graph class based on parallel preprocess
|
Add create graph class based on parallel preprocess
|
Python
|
mit
|
studiawan/pygraphc
|
Add create graph class based on parallel preprocess
|
from pygraphc.preprocess.ParallelPreprocess import ParallelPreprocess
from pygraphc.similarity.JaroWinkler import JaroWinkler
import networkx as nx
from time import time
class CreateGraphModel(object):
def __init__(self, log_file):
self.log_file = log_file
self.unique_events = []
self.unique_events_length = 0
self.distances = []
self.graph = nx.MultiGraph()
def __get_nodes(self):
pp = ParallelPreprocess(self.log_file)
self.unique_events = pp.get_unique_events()
self.unique_events_length = pp.unique_events_length
self.event_attributes = pp.event_attributes
def __get_distances(self):
jw = JaroWinkler(self.event_attributes, self.unique_events_length)
self.distances = jw.get_jarowinkler()
def create_graph(self):
self.__get_nodes()
self.__get_distances()
self.graph.add_nodes_from(self.unique_events)
self.graph.add_weighted_edges_from(self.distances)
return self.graph
# open file
start = time()
logfile = '/home/hudan/Git/labeled-authlog/dataset/SecRepo/auth-perday/dec-1.log'
# preprocess
cgm = CreateGraphModel(logfile)
graph = cgm.create_graph()
nx.write_dot(graph, 'test.dot')
# print runtime
duration = time() - start
minute, second = divmod(duration, 60)
hour, minute = divmod(minute, 60)
print "Runtime: %d:%02d:%02d" % (hour, minute, second)
|
<commit_before><commit_msg>Add create graph class based on parallel preprocess<commit_after>
|
from pygraphc.preprocess.ParallelPreprocess import ParallelPreprocess
from pygraphc.similarity.JaroWinkler import JaroWinkler
import networkx as nx
from time import time
class CreateGraphModel(object):
def __init__(self, log_file):
self.log_file = log_file
self.unique_events = []
self.unique_events_length = 0
self.distances = []
self.graph = nx.MultiGraph()
def __get_nodes(self):
pp = ParallelPreprocess(self.log_file)
self.unique_events = pp.get_unique_events()
self.unique_events_length = pp.unique_events_length
self.event_attributes = pp.event_attributes
def __get_distances(self):
jw = JaroWinkler(self.event_attributes, self.unique_events_length)
self.distances = jw.get_jarowinkler()
def create_graph(self):
self.__get_nodes()
self.__get_distances()
self.graph.add_nodes_from(self.unique_events)
self.graph.add_weighted_edges_from(self.distances)
return self.graph
# open file
start = time()
logfile = '/home/hudan/Git/labeled-authlog/dataset/SecRepo/auth-perday/dec-1.log'
# preprocess
cgm = CreateGraphModel(logfile)
graph = cgm.create_graph()
nx.write_dot(graph, 'test.dot')
# print runtime
duration = time() - start
minute, second = divmod(duration, 60)
hour, minute = divmod(minute, 60)
print "Runtime: %d:%02d:%02d" % (hour, minute, second)
|
Add create graph class based on parallel preprocessfrom pygraphc.preprocess.ParallelPreprocess import ParallelPreprocess
from pygraphc.similarity.JaroWinkler import JaroWinkler
import networkx as nx
from time import time
class CreateGraphModel(object):
def __init__(self, log_file):
self.log_file = log_file
self.unique_events = []
self.unique_events_length = 0
self.distances = []
self.graph = nx.MultiGraph()
def __get_nodes(self):
pp = ParallelPreprocess(self.log_file)
self.unique_events = pp.get_unique_events()
self.unique_events_length = pp.unique_events_length
self.event_attributes = pp.event_attributes
def __get_distances(self):
jw = JaroWinkler(self.event_attributes, self.unique_events_length)
self.distances = jw.get_jarowinkler()
def create_graph(self):
self.__get_nodes()
self.__get_distances()
self.graph.add_nodes_from(self.unique_events)
self.graph.add_weighted_edges_from(self.distances)
return self.graph
# open file
start = time()
logfile = '/home/hudan/Git/labeled-authlog/dataset/SecRepo/auth-perday/dec-1.log'
# preprocess
cgm = CreateGraphModel(logfile)
graph = cgm.create_graph()
nx.write_dot(graph, 'test.dot')
# print runtime
duration = time() - start
minute, second = divmod(duration, 60)
hour, minute = divmod(minute, 60)
print "Runtime: %d:%02d:%02d" % (hour, minute, second)
|
<commit_before><commit_msg>Add create graph class based on parallel preprocess<commit_after>from pygraphc.preprocess.ParallelPreprocess import ParallelPreprocess
from pygraphc.similarity.JaroWinkler import JaroWinkler
import networkx as nx
from time import time
class CreateGraphModel(object):
def __init__(self, log_file):
self.log_file = log_file
self.unique_events = []
self.unique_events_length = 0
self.distances = []
self.graph = nx.MultiGraph()
def __get_nodes(self):
pp = ParallelPreprocess(self.log_file)
self.unique_events = pp.get_unique_events()
self.unique_events_length = pp.unique_events_length
self.event_attributes = pp.event_attributes
def __get_distances(self):
jw = JaroWinkler(self.event_attributes, self.unique_events_length)
self.distances = jw.get_jarowinkler()
def create_graph(self):
self.__get_nodes()
self.__get_distances()
self.graph.add_nodes_from(self.unique_events)
self.graph.add_weighted_edges_from(self.distances)
return self.graph
# open file
start = time()
logfile = '/home/hudan/Git/labeled-authlog/dataset/SecRepo/auth-perday/dec-1.log'
# preprocess
cgm = CreateGraphModel(logfile)
graph = cgm.create_graph()
nx.write_dot(graph, 'test.dot')
# print runtime
duration = time() - start
minute, second = divmod(duration, 60)
hour, minute = divmod(minute, 60)
print "Runtime: %d:%02d:%02d" % (hour, minute, second)
|
|
8330636554c907054b171de55c9d5fe09a49d875
|
pyheufybot/module_interface.py
|
pyheufybot/module_interface.py
|
from pyheufybot.message import IRCMessage
from pyheufybot.serverinfo import ServerInfo
from pyheufybot.heufybot import HeufyBot
class Module(object):
def __init__(self):
self.trigger = ""
self.messageTypes = []
self.helpText = "No help available for this module"
def excecute(self, message=IRCMessage, serverInfo=ServerInfo):
pass
def onModuleLoaded(self):
pass
def onModuleUnloaded(self):
pass
class ModuleInterface(object):
def __init__(self, bot=HeufyBot):
self.bot = bot
self.modules = []
def loadModule(self, moduleName):
pass
def unloadModule(self, moduleName):
pass
def shouldExecute(self, module=Module, message=IRCMessage):
pass
def handleMessage(self, message=IRCMessage):
pass
|
Set up a proof of concept for the module API
|
Set up a proof of concept for the module API
|
Python
|
mit
|
Heufneutje/PyHeufyBot,Heufneutje/PyHeufyBot
|
Set up a proof of concept for the module API
|
from pyheufybot.message import IRCMessage
from pyheufybot.serverinfo import ServerInfo
from pyheufybot.heufybot import HeufyBot
class Module(object):
def __init__(self):
self.trigger = ""
self.messageTypes = []
self.helpText = "No help available for this module"
def excecute(self, message=IRCMessage, serverInfo=ServerInfo):
pass
def onModuleLoaded(self):
pass
def onModuleUnloaded(self):
pass
class ModuleInterface(object):
def __init__(self, bot=HeufyBot):
self.bot = bot
self.modules = []
def loadModule(self, moduleName):
pass
def unloadModule(self, moduleName):
pass
def shouldExecute(self, module=Module, message=IRCMessage):
pass
def handleMessage(self, message=IRCMessage):
pass
|
<commit_before><commit_msg>Set up a proof of concept for the module API<commit_after>
|
from pyheufybot.message import IRCMessage
from pyheufybot.serverinfo import ServerInfo
from pyheufybot.heufybot import HeufyBot
class Module(object):
def __init__(self):
self.trigger = ""
self.messageTypes = []
self.helpText = "No help available for this module"
def excecute(self, message=IRCMessage, serverInfo=ServerInfo):
pass
def onModuleLoaded(self):
pass
def onModuleUnloaded(self):
pass
class ModuleInterface(object):
def __init__(self, bot=HeufyBot):
self.bot = bot
self.modules = []
def loadModule(self, moduleName):
pass
def unloadModule(self, moduleName):
pass
def shouldExecute(self, module=Module, message=IRCMessage):
pass
def handleMessage(self, message=IRCMessage):
pass
|
Set up a proof of concept for the module APIfrom pyheufybot.message import IRCMessage
from pyheufybot.serverinfo import ServerInfo
from pyheufybot.heufybot import HeufyBot
class Module(object):
def __init__(self):
self.trigger = ""
self.messageTypes = []
self.helpText = "No help available for this module"
def excecute(self, message=IRCMessage, serverInfo=ServerInfo):
pass
def onModuleLoaded(self):
pass
def onModuleUnloaded(self):
pass
class ModuleInterface(object):
def __init__(self, bot=HeufyBot):
self.bot = bot
self.modules = []
def loadModule(self, moduleName):
pass
def unloadModule(self, moduleName):
pass
def shouldExecute(self, module=Module, message=IRCMessage):
pass
def handleMessage(self, message=IRCMessage):
pass
|
<commit_before><commit_msg>Set up a proof of concept for the module API<commit_after>from pyheufybot.message import IRCMessage
from pyheufybot.serverinfo import ServerInfo
from pyheufybot.heufybot import HeufyBot
class Module(object):
def __init__(self):
self.trigger = ""
self.messageTypes = []
self.helpText = "No help available for this module"
def excecute(self, message=IRCMessage, serverInfo=ServerInfo):
pass
def onModuleLoaded(self):
pass
def onModuleUnloaded(self):
pass
class ModuleInterface(object):
def __init__(self, bot=HeufyBot):
self.bot = bot
self.modules = []
def loadModule(self, moduleName):
pass
def unloadModule(self, moduleName):
pass
def shouldExecute(self, module=Module, message=IRCMessage):
pass
def handleMessage(self, message=IRCMessage):
pass
|
|
c5e370e2c226897c3a9863749f259a3735d2989a
|
apps/users/admin.py
|
apps/users/admin.py
|
from django.contrib import admin
from users.models import UserBan
class UserBanAdmin(admin.ModelAdmin):
fields = ('user', 'by', 'reason', 'is_active')
list_display = ('user', 'by', 'reason')
list_filter = ('is_active',)
raw_id_fields = ('user',)
search_fields = ('user__username', 'reason')
admin.site.register(UserBan, UserBanAdmin)
|
from django.contrib import admin
from users.models import UserBan
class UserBanAdmin(admin.ModelAdmin):
fields = ('user', 'by', 'reason', 'is_active')
list_display = ('user', 'by', 'reason')
list_filter = ('is_active', 'by')
raw_id_fields = ('user', 'by')
search_fields = ('user__username', 'reason')
admin.site.register(UserBan, UserBanAdmin)
|
Add some more use of raw_id_fields for user bans, and more filter options.
|
Add some more use of raw_id_fields for user bans, and more filter options.
|
Python
|
mpl-2.0
|
surajssd/kuma,davidyezsetz/kuma,anaran/kuma,FrankBian/kuma,surajssd/kuma,a2sheppy/kuma,whip112/Whip112,darkwing/kuma,robhudson/kuma,bluemini/kuma,FrankBian/kuma,FrankBian/kuma,jgmize/kuma,chirilo/kuma,surajssd/kuma,darkwing/kuma,chirilo/kuma,yfdyh000/kuma,hoosteeno/kuma,openjck/kuma,utkbansal/kuma,varunkamra/kuma,anaran/kuma,biswajitsahu/kuma,YOTOV-LIMITED/kuma,davidyezsetz/kuma,scrollback/kuma,biswajitsahu/kuma,Elchi3/kuma,RanadeepPolavarapu/kuma,RanadeepPolavarapu/kuma,SphinxKnight/kuma,ronakkhunt/kuma,tximikel/kuma,davehunt/kuma,escattone/kuma,carnell69/kuma,jgmize/kuma,ollie314/kuma,jezdez/kuma,safwanrahman/kuma,davehunt/kuma,biswajitsahu/kuma,openjck/kuma,cindyyu/kuma,chirilo/kuma,tximikel/kuma,openjck/kuma,ollie314/kuma,mastizada/kuma,varunkamra/kuma,carnell69/kuma,a2sheppy/kuma,darkwing/kuma,mozilla/kuma,groovecoder/kuma,SphinxKnight/kuma,YOTOV-LIMITED/kuma,YOTOV-LIMITED/kuma,davidyezsetz/kuma,varunkamra/kuma,nhenezi/kuma,yfdyh000/kuma,ronakkhunt/kuma,carnell69/kuma,groovecoder/kuma,robhudson/kuma,hoosteeno/kuma,nhenezi/kuma,escattone/kuma,jwhitlock/kuma,MenZil/kuma,tximikel/kuma,whip112/Whip112,darkwing/kuma,FrankBian/kuma,safwanrahman/kuma,RanadeepPolavarapu/kuma,chirilo/kuma,nhenezi/kuma,bluemini/kuma,davehunt/kuma,MenZil/kuma,anaran/kuma,mozilla/kuma,nhenezi/kuma,FrankBian/kuma,bluemini/kuma,YOTOV-LIMITED/kuma,mozilla/kuma,utkbansal/kuma,scrollback/kuma,cindyyu/kuma,davidyezsetz/kuma,anaran/kuma,MenZil/kuma,jwhitlock/kuma,mozilla/kuma,jezdez/kuma,varunkamra/kuma,varunkamra/kuma,mastizada/kuma,whip112/Whip112,SphinxKnight/kuma,groovecoder/kuma,mastizada/kuma,yfdyh000/kuma,robhudson/kuma,tximikel/kuma,surajssd/kuma,RanadeepPolavarapu/kuma,tximikel/kuma,Elchi3/kuma,groovecoder/kuma,cindyyu/kuma,ronakkhunt/kuma,jgmize/kuma,groovecoder/kuma,anaran/kuma,carnell69/kuma,SphinxKnight/kuma,tximikel/kuma,ollie314/kuma,chirilo/kuma,jezdez/kuma,whip112/Whip112,jezdez/kuma,MenZil/kuma,bluemini/kuma,surajssd/kuma,scrollback/kuma,RanadeepPolavarapu/kuma,openjck/kuma,jwhitlock/kuma,bluemini/kuma,MenZil/kuma,jgmize/kuma,hoosteeno/kuma,robhudson/kuma,biswajitsahu/kuma,davidyezsetz/kuma,ronakkhunt/kuma,utkbansal/kuma,ollie314/kuma,jgmize/kuma,safwanrahman/kuma,utkbansal/kuma,ollie314/kuma,jwhitlock/kuma,utkbansal/kuma,whip112/Whip112,safwanrahman/kuma,yfdyh000/kuma,scrollback/kuma,carnell69/kuma,bluemini/kuma,cindyyu/kuma,whip112/Whip112,SphinxKnight/kuma,YOTOV-LIMITED/kuma,hoosteeno/kuma,davehunt/kuma,Elchi3/kuma,escattone/kuma,cindyyu/kuma,chirilo/kuma,groovecoder/kuma,hoosteeno/kuma,darkwing/kuma,darkwing/kuma,Elchi3/kuma,biswajitsahu/kuma,anaran/kuma,RanadeepPolavarapu/kuma,safwanrahman/kuma,utkbansal/kuma,a2sheppy/kuma,ronakkhunt/kuma,ronakkhunt/kuma,yfdyh000/kuma,jwhitlock/kuma,openjck/kuma,ollie314/kuma,mozilla/kuma,a2sheppy/kuma,hoosteeno/kuma,davehunt/kuma,jezdez/kuma,jezdez/kuma,a2sheppy/kuma,robhudson/kuma,carnell69/kuma,nhenezi/kuma,SphinxKnight/kuma,robhudson/kuma,safwanrahman/kuma,Elchi3/kuma,davehunt/kuma,YOTOV-LIMITED/kuma,mastizada/kuma,jgmize/kuma,biswajitsahu/kuma,varunkamra/kuma,surajssd/kuma,scrollback/kuma,openjck/kuma,cindyyu/kuma,MenZil/kuma,yfdyh000/kuma
|
from django.contrib import admin
from users.models import UserBan
class UserBanAdmin(admin.ModelAdmin):
fields = ('user', 'by', 'reason', 'is_active')
list_display = ('user', 'by', 'reason')
list_filter = ('is_active',)
raw_id_fields = ('user',)
search_fields = ('user__username', 'reason')
admin.site.register(UserBan, UserBanAdmin)
Add some more use of raw_id_fields for user bans, and more filter options.
|
from django.contrib import admin
from users.models import UserBan
class UserBanAdmin(admin.ModelAdmin):
fields = ('user', 'by', 'reason', 'is_active')
list_display = ('user', 'by', 'reason')
list_filter = ('is_active', 'by')
raw_id_fields = ('user', 'by')
search_fields = ('user__username', 'reason')
admin.site.register(UserBan, UserBanAdmin)
|
<commit_before>from django.contrib import admin
from users.models import UserBan
class UserBanAdmin(admin.ModelAdmin):
fields = ('user', 'by', 'reason', 'is_active')
list_display = ('user', 'by', 'reason')
list_filter = ('is_active',)
raw_id_fields = ('user',)
search_fields = ('user__username', 'reason')
admin.site.register(UserBan, UserBanAdmin)
<commit_msg>Add some more use of raw_id_fields for user bans, and more filter options.<commit_after>
|
from django.contrib import admin
from users.models import UserBan
class UserBanAdmin(admin.ModelAdmin):
fields = ('user', 'by', 'reason', 'is_active')
list_display = ('user', 'by', 'reason')
list_filter = ('is_active', 'by')
raw_id_fields = ('user', 'by')
search_fields = ('user__username', 'reason')
admin.site.register(UserBan, UserBanAdmin)
|
from django.contrib import admin
from users.models import UserBan
class UserBanAdmin(admin.ModelAdmin):
fields = ('user', 'by', 'reason', 'is_active')
list_display = ('user', 'by', 'reason')
list_filter = ('is_active',)
raw_id_fields = ('user',)
search_fields = ('user__username', 'reason')
admin.site.register(UserBan, UserBanAdmin)
Add some more use of raw_id_fields for user bans, and more filter options.from django.contrib import admin
from users.models import UserBan
class UserBanAdmin(admin.ModelAdmin):
fields = ('user', 'by', 'reason', 'is_active')
list_display = ('user', 'by', 'reason')
list_filter = ('is_active', 'by')
raw_id_fields = ('user', 'by')
search_fields = ('user__username', 'reason')
admin.site.register(UserBan, UserBanAdmin)
|
<commit_before>from django.contrib import admin
from users.models import UserBan
class UserBanAdmin(admin.ModelAdmin):
fields = ('user', 'by', 'reason', 'is_active')
list_display = ('user', 'by', 'reason')
list_filter = ('is_active',)
raw_id_fields = ('user',)
search_fields = ('user__username', 'reason')
admin.site.register(UserBan, UserBanAdmin)
<commit_msg>Add some more use of raw_id_fields for user bans, and more filter options.<commit_after>from django.contrib import admin
from users.models import UserBan
class UserBanAdmin(admin.ModelAdmin):
fields = ('user', 'by', 'reason', 'is_active')
list_display = ('user', 'by', 'reason')
list_filter = ('is_active', 'by')
raw_id_fields = ('user', 'by')
search_fields = ('user__username', 'reason')
admin.site.register(UserBan, UserBanAdmin)
|
2c1ff4b06fb2e851231e27bccfea97a64e5659f5
|
senlin/tests/test_common_serializers.py
|
senlin/tests/test_common_serializers.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import webob
from senlin.common import serializers
from senlin.tests.common import base
class JSONResponseSerializerTest(base.SenlinTestCase):
def test_to_json(self):
fixture = {"key": "value"}
expected = '{"key": "value"}'
actual = serializers.JSONResponseSerializer().to_json(fixture)
self.assertEqual(expected, actual)
def test_to_json_with_date_format_value(self):
fixture = {"date": datetime.datetime(1, 3, 8, 2)}
expected = '{"date": "0001-03-08T02:00:00"}'
actual = serializers.JSONResponseSerializer().to_json(fixture)
self.assertEqual(expected, actual)
def test_to_json_with_more_deep_format(self):
fixture = {"is_public": True, "name": [{"name1": "test"}]}
expected = '{"is_public": true, "name": [{"name1": "test"}]}'
actual = serializers.JSONResponseSerializer().to_json(fixture)
self.assertEqual(expected, actual)
def test_default(self):
fixture = {"key": "value"}
response = webob.Response()
serializers.JSONResponseSerializer().default(response, fixture)
self.assertEqual(200, response.status_int)
content_types = filter(lambda h: h[0] == 'Content-Type',
response.headerlist)
self.assertEqual(1, len(content_types))
self.assertEqual('application/json', response.content_type)
self.assertEqual('{"key": "value"}', response.body)
|
Test case for JSON serializer
|
Test case for JSON serializer
|
Python
|
apache-2.0
|
Alzon/senlin,stackforge/senlin,tengqm/senlin,tengqm/senlin-container,tengqm/senlin,openstack/senlin,Alzon/senlin,stackforge/senlin,tengqm/senlin-container,openstack/senlin,openstack/senlin
|
Test case for JSON serializer
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import webob
from senlin.common import serializers
from senlin.tests.common import base
class JSONResponseSerializerTest(base.SenlinTestCase):
def test_to_json(self):
fixture = {"key": "value"}
expected = '{"key": "value"}'
actual = serializers.JSONResponseSerializer().to_json(fixture)
self.assertEqual(expected, actual)
def test_to_json_with_date_format_value(self):
fixture = {"date": datetime.datetime(1, 3, 8, 2)}
expected = '{"date": "0001-03-08T02:00:00"}'
actual = serializers.JSONResponseSerializer().to_json(fixture)
self.assertEqual(expected, actual)
def test_to_json_with_more_deep_format(self):
fixture = {"is_public": True, "name": [{"name1": "test"}]}
expected = '{"is_public": true, "name": [{"name1": "test"}]}'
actual = serializers.JSONResponseSerializer().to_json(fixture)
self.assertEqual(expected, actual)
def test_default(self):
fixture = {"key": "value"}
response = webob.Response()
serializers.JSONResponseSerializer().default(response, fixture)
self.assertEqual(200, response.status_int)
content_types = filter(lambda h: h[0] == 'Content-Type',
response.headerlist)
self.assertEqual(1, len(content_types))
self.assertEqual('application/json', response.content_type)
self.assertEqual('{"key": "value"}', response.body)
|
<commit_before><commit_msg>Test case for JSON serializer<commit_after>
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import webob
from senlin.common import serializers
from senlin.tests.common import base
class JSONResponseSerializerTest(base.SenlinTestCase):
def test_to_json(self):
fixture = {"key": "value"}
expected = '{"key": "value"}'
actual = serializers.JSONResponseSerializer().to_json(fixture)
self.assertEqual(expected, actual)
def test_to_json_with_date_format_value(self):
fixture = {"date": datetime.datetime(1, 3, 8, 2)}
expected = '{"date": "0001-03-08T02:00:00"}'
actual = serializers.JSONResponseSerializer().to_json(fixture)
self.assertEqual(expected, actual)
def test_to_json_with_more_deep_format(self):
fixture = {"is_public": True, "name": [{"name1": "test"}]}
expected = '{"is_public": true, "name": [{"name1": "test"}]}'
actual = serializers.JSONResponseSerializer().to_json(fixture)
self.assertEqual(expected, actual)
def test_default(self):
fixture = {"key": "value"}
response = webob.Response()
serializers.JSONResponseSerializer().default(response, fixture)
self.assertEqual(200, response.status_int)
content_types = filter(lambda h: h[0] == 'Content-Type',
response.headerlist)
self.assertEqual(1, len(content_types))
self.assertEqual('application/json', response.content_type)
self.assertEqual('{"key": "value"}', response.body)
|
Test case for JSON serializer# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import webob
from senlin.common import serializers
from senlin.tests.common import base
class JSONResponseSerializerTest(base.SenlinTestCase):
def test_to_json(self):
fixture = {"key": "value"}
expected = '{"key": "value"}'
actual = serializers.JSONResponseSerializer().to_json(fixture)
self.assertEqual(expected, actual)
def test_to_json_with_date_format_value(self):
fixture = {"date": datetime.datetime(1, 3, 8, 2)}
expected = '{"date": "0001-03-08T02:00:00"}'
actual = serializers.JSONResponseSerializer().to_json(fixture)
self.assertEqual(expected, actual)
def test_to_json_with_more_deep_format(self):
fixture = {"is_public": True, "name": [{"name1": "test"}]}
expected = '{"is_public": true, "name": [{"name1": "test"}]}'
actual = serializers.JSONResponseSerializer().to_json(fixture)
self.assertEqual(expected, actual)
def test_default(self):
fixture = {"key": "value"}
response = webob.Response()
serializers.JSONResponseSerializer().default(response, fixture)
self.assertEqual(200, response.status_int)
content_types = filter(lambda h: h[0] == 'Content-Type',
response.headerlist)
self.assertEqual(1, len(content_types))
self.assertEqual('application/json', response.content_type)
self.assertEqual('{"key": "value"}', response.body)
|
<commit_before><commit_msg>Test case for JSON serializer<commit_after># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import webob
from senlin.common import serializers
from senlin.tests.common import base
class JSONResponseSerializerTest(base.SenlinTestCase):
def test_to_json(self):
fixture = {"key": "value"}
expected = '{"key": "value"}'
actual = serializers.JSONResponseSerializer().to_json(fixture)
self.assertEqual(expected, actual)
def test_to_json_with_date_format_value(self):
fixture = {"date": datetime.datetime(1, 3, 8, 2)}
expected = '{"date": "0001-03-08T02:00:00"}'
actual = serializers.JSONResponseSerializer().to_json(fixture)
self.assertEqual(expected, actual)
def test_to_json_with_more_deep_format(self):
fixture = {"is_public": True, "name": [{"name1": "test"}]}
expected = '{"is_public": true, "name": [{"name1": "test"}]}'
actual = serializers.JSONResponseSerializer().to_json(fixture)
self.assertEqual(expected, actual)
def test_default(self):
fixture = {"key": "value"}
response = webob.Response()
serializers.JSONResponseSerializer().default(response, fixture)
self.assertEqual(200, response.status_int)
content_types = filter(lambda h: h[0] == 'Content-Type',
response.headerlist)
self.assertEqual(1, len(content_types))
self.assertEqual('application/json', response.content_type)
self.assertEqual('{"key": "value"}', response.body)
|
|
be8762886ae2157506f2960a42589ef3828f021e
|
numba/tests/test_itanium_mangler.py
|
numba/tests/test_itanium_mangler.py
|
from __future__ import print_function, absolute_import
from numba import unittest_support as unittest
from numba import itanium_mangler
from numba import int32, int64, uint32, uint64, float32, float64
from numba.types import range_iter32_type
class TestItaniumManager(unittest.TestCase):
def test_ident(self):
got = itanium_mangler.mangle_identifier("apple")
expect = "5apple"
self.assertEqual(expect, got)
got = itanium_mangler.mangle_identifier("ap_ple")
expect = "6ap_ple"
self.assertEqual(expect, got)
got = itanium_mangler.mangle_identifier("apple213")
expect = "8apple213"
self.assertEqual(expect, got)
def test_types(self):
got = itanium_mangler.mangle_type(int32)
expect = "i"
self.assertEqual(expect, got)
got = itanium_mangler.mangle_type(int64)
expect = "x"
self.assertEqual(expect, got)
got = itanium_mangler.mangle_type(uint32)
expect = "j"
self.assertEqual(expect, got)
got = itanium_mangler.mangle_type(uint64)
expect = "y"
self.assertEqual(expect, got)
got = itanium_mangler.mangle_type(float32)
expect = "f"
self.assertEqual(expect, got)
got = itanium_mangler.mangle_type(float64)
expect = "d"
self.assertEqual(expect, got)
def test_function(self):
got = itanium_mangler.mangle("what", [int32, float32])
expect = "_Z4whatif"
self.assertEqual(expect, got)
got = itanium_mangler.mangle("a_little_brown_fox", [uint64,
uint32,
float64])
expect = "_Z18a_little_brown_foxyjd"
self.assertEqual(expect, got)
def test_custom_type(self):
got = itanium_mangler.mangle_type(range_iter32_type)
name = str(range_iter32_type)
expect = "u{n}{name}".format(n=len(name), name=name)
self.assertEqual(expect, got)
if __name__ == '__main__':
unittest.main()
|
Add test for itanium mangler
|
Add test for itanium mangler
|
Python
|
bsd-2-clause
|
stuartarchibald/numba,cpcloud/numba,GaZ3ll3/numba,seibert/numba,stefanseefeld/numba,cpcloud/numba,stonebig/numba,stefanseefeld/numba,ssarangi/numba,GaZ3ll3/numba,stefanseefeld/numba,jriehl/numba,gmarkall/numba,gmarkall/numba,sklam/numba,cpcloud/numba,IntelLabs/numba,numba/numba,IntelLabs/numba,ssarangi/numba,ssarangi/numba,pombredanne/numba,pombredanne/numba,gdementen/numba,numba/numba,ssarangi/numba,pombredanne/numba,jriehl/numba,seibert/numba,seibert/numba,ssarangi/numba,numba/numba,pitrou/numba,stonebig/numba,stonebig/numba,cpcloud/numba,pitrou/numba,pombredanne/numba,gmarkall/numba,sklam/numba,gdementen/numba,GaZ3ll3/numba,seibert/numba,jriehl/numba,IntelLabs/numba,gmarkall/numba,sklam/numba,numba/numba,stefanseefeld/numba,stuartarchibald/numba,stuartarchibald/numba,pitrou/numba,stefanseefeld/numba,gmarkall/numba,cpcloud/numba,jriehl/numba,numba/numba,pombredanne/numba,gdementen/numba,gdementen/numba,seibert/numba,pitrou/numba,stuartarchibald/numba,GaZ3ll3/numba,IntelLabs/numba,IntelLabs/numba,stonebig/numba,stuartarchibald/numba,stonebig/numba,sklam/numba,gdementen/numba,jriehl/numba,sklam/numba,pitrou/numba,GaZ3ll3/numba
|
Add test for itanium mangler
|
from __future__ import print_function, absolute_import
from numba import unittest_support as unittest
from numba import itanium_mangler
from numba import int32, int64, uint32, uint64, float32, float64
from numba.types import range_iter32_type
class TestItaniumManager(unittest.TestCase):
def test_ident(self):
got = itanium_mangler.mangle_identifier("apple")
expect = "5apple"
self.assertEqual(expect, got)
got = itanium_mangler.mangle_identifier("ap_ple")
expect = "6ap_ple"
self.assertEqual(expect, got)
got = itanium_mangler.mangle_identifier("apple213")
expect = "8apple213"
self.assertEqual(expect, got)
def test_types(self):
got = itanium_mangler.mangle_type(int32)
expect = "i"
self.assertEqual(expect, got)
got = itanium_mangler.mangle_type(int64)
expect = "x"
self.assertEqual(expect, got)
got = itanium_mangler.mangle_type(uint32)
expect = "j"
self.assertEqual(expect, got)
got = itanium_mangler.mangle_type(uint64)
expect = "y"
self.assertEqual(expect, got)
got = itanium_mangler.mangle_type(float32)
expect = "f"
self.assertEqual(expect, got)
got = itanium_mangler.mangle_type(float64)
expect = "d"
self.assertEqual(expect, got)
def test_function(self):
got = itanium_mangler.mangle("what", [int32, float32])
expect = "_Z4whatif"
self.assertEqual(expect, got)
got = itanium_mangler.mangle("a_little_brown_fox", [uint64,
uint32,
float64])
expect = "_Z18a_little_brown_foxyjd"
self.assertEqual(expect, got)
def test_custom_type(self):
got = itanium_mangler.mangle_type(range_iter32_type)
name = str(range_iter32_type)
expect = "u{n}{name}".format(n=len(name), name=name)
self.assertEqual(expect, got)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test for itanium mangler<commit_after>
|
from __future__ import print_function, absolute_import
from numba import unittest_support as unittest
from numba import itanium_mangler
from numba import int32, int64, uint32, uint64, float32, float64
from numba.types import range_iter32_type
class TestItaniumManager(unittest.TestCase):
def test_ident(self):
got = itanium_mangler.mangle_identifier("apple")
expect = "5apple"
self.assertEqual(expect, got)
got = itanium_mangler.mangle_identifier("ap_ple")
expect = "6ap_ple"
self.assertEqual(expect, got)
got = itanium_mangler.mangle_identifier("apple213")
expect = "8apple213"
self.assertEqual(expect, got)
def test_types(self):
got = itanium_mangler.mangle_type(int32)
expect = "i"
self.assertEqual(expect, got)
got = itanium_mangler.mangle_type(int64)
expect = "x"
self.assertEqual(expect, got)
got = itanium_mangler.mangle_type(uint32)
expect = "j"
self.assertEqual(expect, got)
got = itanium_mangler.mangle_type(uint64)
expect = "y"
self.assertEqual(expect, got)
got = itanium_mangler.mangle_type(float32)
expect = "f"
self.assertEqual(expect, got)
got = itanium_mangler.mangle_type(float64)
expect = "d"
self.assertEqual(expect, got)
def test_function(self):
got = itanium_mangler.mangle("what", [int32, float32])
expect = "_Z4whatif"
self.assertEqual(expect, got)
got = itanium_mangler.mangle("a_little_brown_fox", [uint64,
uint32,
float64])
expect = "_Z18a_little_brown_foxyjd"
self.assertEqual(expect, got)
def test_custom_type(self):
got = itanium_mangler.mangle_type(range_iter32_type)
name = str(range_iter32_type)
expect = "u{n}{name}".format(n=len(name), name=name)
self.assertEqual(expect, got)
if __name__ == '__main__':
unittest.main()
|
Add test for itanium manglerfrom __future__ import print_function, absolute_import
from numba import unittest_support as unittest
from numba import itanium_mangler
from numba import int32, int64, uint32, uint64, float32, float64
from numba.types import range_iter32_type
class TestItaniumManager(unittest.TestCase):
def test_ident(self):
got = itanium_mangler.mangle_identifier("apple")
expect = "5apple"
self.assertEqual(expect, got)
got = itanium_mangler.mangle_identifier("ap_ple")
expect = "6ap_ple"
self.assertEqual(expect, got)
got = itanium_mangler.mangle_identifier("apple213")
expect = "8apple213"
self.assertEqual(expect, got)
def test_types(self):
got = itanium_mangler.mangle_type(int32)
expect = "i"
self.assertEqual(expect, got)
got = itanium_mangler.mangle_type(int64)
expect = "x"
self.assertEqual(expect, got)
got = itanium_mangler.mangle_type(uint32)
expect = "j"
self.assertEqual(expect, got)
got = itanium_mangler.mangle_type(uint64)
expect = "y"
self.assertEqual(expect, got)
got = itanium_mangler.mangle_type(float32)
expect = "f"
self.assertEqual(expect, got)
got = itanium_mangler.mangle_type(float64)
expect = "d"
self.assertEqual(expect, got)
def test_function(self):
got = itanium_mangler.mangle("what", [int32, float32])
expect = "_Z4whatif"
self.assertEqual(expect, got)
got = itanium_mangler.mangle("a_little_brown_fox", [uint64,
uint32,
float64])
expect = "_Z18a_little_brown_foxyjd"
self.assertEqual(expect, got)
def test_custom_type(self):
got = itanium_mangler.mangle_type(range_iter32_type)
name = str(range_iter32_type)
expect = "u{n}{name}".format(n=len(name), name=name)
self.assertEqual(expect, got)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test for itanium mangler<commit_after>from __future__ import print_function, absolute_import
from numba import unittest_support as unittest
from numba import itanium_mangler
from numba import int32, int64, uint32, uint64, float32, float64
from numba.types import range_iter32_type
class TestItaniumManager(unittest.TestCase):
def test_ident(self):
got = itanium_mangler.mangle_identifier("apple")
expect = "5apple"
self.assertEqual(expect, got)
got = itanium_mangler.mangle_identifier("ap_ple")
expect = "6ap_ple"
self.assertEqual(expect, got)
got = itanium_mangler.mangle_identifier("apple213")
expect = "8apple213"
self.assertEqual(expect, got)
def test_types(self):
got = itanium_mangler.mangle_type(int32)
expect = "i"
self.assertEqual(expect, got)
got = itanium_mangler.mangle_type(int64)
expect = "x"
self.assertEqual(expect, got)
got = itanium_mangler.mangle_type(uint32)
expect = "j"
self.assertEqual(expect, got)
got = itanium_mangler.mangle_type(uint64)
expect = "y"
self.assertEqual(expect, got)
got = itanium_mangler.mangle_type(float32)
expect = "f"
self.assertEqual(expect, got)
got = itanium_mangler.mangle_type(float64)
expect = "d"
self.assertEqual(expect, got)
def test_function(self):
got = itanium_mangler.mangle("what", [int32, float32])
expect = "_Z4whatif"
self.assertEqual(expect, got)
got = itanium_mangler.mangle("a_little_brown_fox", [uint64,
uint32,
float64])
expect = "_Z18a_little_brown_foxyjd"
self.assertEqual(expect, got)
def test_custom_type(self):
got = itanium_mangler.mangle_type(range_iter32_type)
name = str(range_iter32_type)
expect = "u{n}{name}".format(n=len(name), name=name)
self.assertEqual(expect, got)
if __name__ == '__main__':
unittest.main()
|
|
c8e70d820dde964361b804811d6ec15ef0ede10f
|
packaging/osx/trim.py
|
packaging/osx/trim.py
|
import shutil
import os
"""
Instructions
------------
# Build the app
# Look at the timestamps in the generated app directory
# Wait a bit
pushd pai/dist/Content/MacOS
# Find all the files that haven't been accessed since they were created
find . ! -atime -4m > unused.txt
# Don't remove any of the files that we know we'll need
grep -v pentai unused.txt | grep -v media | grep -v Droid > unused.txt
mkdir bak
# Then run this script
python ../../../trim.py
# And check that ./pai runs
"""
dest = "bak"
for fn in file("unused3.txt"):
fn = fn.strip()
try:
shutil.move(fn, dest)
except:
print "Could not move %s, removing" % fn
try:
os.remove(fn)
except:
pass
|
Trim the package before it is turned into a .dmg file
|
Trim the package before it is turned into a .dmg file
|
Python
|
mit
|
cropleyb/pentai,cropleyb/pentai,cropleyb/pentai
|
Trim the package before it is turned into a .dmg file
|
import shutil
import os
"""
Instructions
------------
# Build the app
# Look at the timestamps in the generated app directory
# Wait a bit
pushd pai/dist/Content/MacOS
# Find all the files that haven't been accessed since they were created
find . ! -atime -4m > unused.txt
# Don't remove any of the files that we know we'll need
grep -v pentai unused.txt | grep -v media | grep -v Droid > unused.txt
mkdir bak
# Then run this script
python ../../../trim.py
# And check that ./pai runs
"""
dest = "bak"
for fn in file("unused3.txt"):
fn = fn.strip()
try:
shutil.move(fn, dest)
except:
print "Could not move %s, removing" % fn
try:
os.remove(fn)
except:
pass
|
<commit_before><commit_msg>Trim the package before it is turned into a .dmg file<commit_after>
|
import shutil
import os
"""
Instructions
------------
# Build the app
# Look at the timestamps in the generated app directory
# Wait a bit
pushd pai/dist/Content/MacOS
# Find all the files that haven't been accessed since they were created
find . ! -atime -4m > unused.txt
# Don't remove any of the files that we know we'll need
grep -v pentai unused.txt | grep -v media | grep -v Droid > unused.txt
mkdir bak
# Then run this script
python ../../../trim.py
# And check that ./pai runs
"""
dest = "bak"
for fn in file("unused3.txt"):
fn = fn.strip()
try:
shutil.move(fn, dest)
except:
print "Could not move %s, removing" % fn
try:
os.remove(fn)
except:
pass
|
Trim the package before it is turned into a .dmg fileimport shutil
import os
"""
Instructions
------------
# Build the app
# Look at the timestamps in the generated app directory
# Wait a bit
pushd pai/dist/Content/MacOS
# Find all the files that haven't been accessed since they were created
find . ! -atime -4m > unused.txt
# Don't remove any of the files that we know we'll need
grep -v pentai unused.txt | grep -v media | grep -v Droid > unused.txt
mkdir bak
# Then run this script
python ../../../trim.py
# And check that ./pai runs
"""
dest = "bak"
for fn in file("unused3.txt"):
fn = fn.strip()
try:
shutil.move(fn, dest)
except:
print "Could not move %s, removing" % fn
try:
os.remove(fn)
except:
pass
|
<commit_before><commit_msg>Trim the package before it is turned into a .dmg file<commit_after>import shutil
import os
"""
Instructions
------------
# Build the app
# Look at the timestamps in the generated app directory
# Wait a bit
pushd pai/dist/Content/MacOS
# Find all the files that haven't been accessed since they were created
find . ! -atime -4m > unused.txt
# Don't remove any of the files that we know we'll need
grep -v pentai unused.txt | grep -v media | grep -v Droid > unused.txt
mkdir bak
# Then run this script
python ../../../trim.py
# And check that ./pai runs
"""
dest = "bak"
for fn in file("unused3.txt"):
fn = fn.strip()
try:
shutil.move(fn, dest)
except:
print "Could not move %s, removing" % fn
try:
os.remove(fn)
except:
pass
|
|
64ccc46c9b48346aee20bf99f4ff0a3862947f70
|
syutil/crypto/signing_key.py
|
syutil/crypto/signing_key.py
|
from syutil.base64util import encode_base64, decode_base64
import nacl.signing
NACL_ED25519 = "ed25519"
def generate_singing_key(version):
"""Generate a new signing key
Args:
version (str): Identifies this key out the keys for this entity.
Returns:
A SigningKey object.
"""
key = nacl.signing.SigningKey.generate()
key.version = version
key.alg = NACL_ED25519
return key
def decode_signing_key_base64(algorithm, version, key_base64):
"""Decode a base64 encoded signing key
Args:
algorithm (str): The algorithm the key is for (currently "ed25519").
version (str): Identifies this key out of the keys for this entity.
key_base64 (str): Base64 encoded bytes of the key.
Returns:
A SigningKey object.
"""
if algorithm == NACL_ED25519:
key_bytes = decode_base64(key_base64)
key = nacl.signing.SigningKey(key_bytes)
key.version = version
key.alg = NACL_ED25519
return key
else:
raise ValueError("Unsupported algorithm %s" % (algorithm,))
def encode_signing_key_base64(key):
"""Encode a signing key as base64
Args:
key (SigningKey): A signing key to encode.
Returns:
base64 encoded string.
"""
return encode_base64(key.encode())
def read_signing_keys(stream):
"""Reads a list of keys from a stream
Args:
stream : A stream to iterate for keys.
Returns:
list of SigningKey objects.
"""
keys = []
for line in stream:
algorithm, version, key_base64 = line.split()
keys.append(decode_signing_key_base64(algorithm, version, key_base64))
return keys
def write_signing_keys(stream, keys):
"""Writes a list of keys to a stream.
Args:
stream: Stream to write keys to.
keys: List of SigningKey objects.
"""
for key in keys:
key_base64 = encode_signing_key_base64(key)
stream.write("%s %s %s\n" % (key.alg, key.version, key_base64,))
|
Add utils for generating, reading and writing signing keys
|
Add utils for generating, reading and writing signing keys
|
Python
|
apache-2.0
|
matrix-org/syutil
|
Add utils for generating, reading and writing signing keys
|
from syutil.base64util import encode_base64, decode_base64
import nacl.signing
NACL_ED25519 = "ed25519"
def generate_singing_key(version):
"""Generate a new signing key
Args:
version (str): Identifies this key out the keys for this entity.
Returns:
A SigningKey object.
"""
key = nacl.signing.SigningKey.generate()
key.version = version
key.alg = NACL_ED25519
return key
def decode_signing_key_base64(algorithm, version, key_base64):
"""Decode a base64 encoded signing key
Args:
algorithm (str): The algorithm the key is for (currently "ed25519").
version (str): Identifies this key out of the keys for this entity.
key_base64 (str): Base64 encoded bytes of the key.
Returns:
A SigningKey object.
"""
if algorithm == NACL_ED25519:
key_bytes = decode_base64(key_base64)
key = nacl.signing.SigningKey(key_bytes)
key.version = version
key.alg = NACL_ED25519
return key
else:
raise ValueError("Unsupported algorithm %s" % (algorithm,))
def encode_signing_key_base64(key):
"""Encode a signing key as base64
Args:
key (SigningKey): A signing key to encode.
Returns:
base64 encoded string.
"""
return encode_base64(key.encode())
def read_signing_keys(stream):
"""Reads a list of keys from a stream
Args:
stream : A stream to iterate for keys.
Returns:
list of SigningKey objects.
"""
keys = []
for line in stream:
algorithm, version, key_base64 = line.split()
keys.append(decode_signing_key_base64(algorithm, version, key_base64))
return keys
def write_signing_keys(stream, keys):
"""Writes a list of keys to a stream.
Args:
stream: Stream to write keys to.
keys: List of SigningKey objects.
"""
for key in keys:
key_base64 = encode_signing_key_base64(key)
stream.write("%s %s %s\n" % (key.alg, key.version, key_base64,))
|
<commit_before><commit_msg>Add utils for generating, reading and writing signing keys<commit_after>
|
from syutil.base64util import encode_base64, decode_base64
import nacl.signing
NACL_ED25519 = "ed25519"
def generate_singing_key(version):
"""Generate a new signing key
Args:
version (str): Identifies this key out the keys for this entity.
Returns:
A SigningKey object.
"""
key = nacl.signing.SigningKey.generate()
key.version = version
key.alg = NACL_ED25519
return key
def decode_signing_key_base64(algorithm, version, key_base64):
"""Decode a base64 encoded signing key
Args:
algorithm (str): The algorithm the key is for (currently "ed25519").
version (str): Identifies this key out of the keys for this entity.
key_base64 (str): Base64 encoded bytes of the key.
Returns:
A SigningKey object.
"""
if algorithm == NACL_ED25519:
key_bytes = decode_base64(key_base64)
key = nacl.signing.SigningKey(key_bytes)
key.version = version
key.alg = NACL_ED25519
return key
else:
raise ValueError("Unsupported algorithm %s" % (algorithm,))
def encode_signing_key_base64(key):
"""Encode a signing key as base64
Args:
key (SigningKey): A signing key to encode.
Returns:
base64 encoded string.
"""
return encode_base64(key.encode())
def read_signing_keys(stream):
"""Reads a list of keys from a stream
Args:
stream : A stream to iterate for keys.
Returns:
list of SigningKey objects.
"""
keys = []
for line in stream:
algorithm, version, key_base64 = line.split()
keys.append(decode_signing_key_base64(algorithm, version, key_base64))
return keys
def write_signing_keys(stream, keys):
"""Writes a list of keys to a stream.
Args:
stream: Stream to write keys to.
keys: List of SigningKey objects.
"""
for key in keys:
key_base64 = encode_signing_key_base64(key)
stream.write("%s %s %s\n" % (key.alg, key.version, key_base64,))
|
Add utils for generating, reading and writing signing keysfrom syutil.base64util import encode_base64, decode_base64
import nacl.signing
NACL_ED25519 = "ed25519"
def generate_singing_key(version):
"""Generate a new signing key
Args:
version (str): Identifies this key out the keys for this entity.
Returns:
A SigningKey object.
"""
key = nacl.signing.SigningKey.generate()
key.version = version
key.alg = NACL_ED25519
return key
def decode_signing_key_base64(algorithm, version, key_base64):
"""Decode a base64 encoded signing key
Args:
algorithm (str): The algorithm the key is for (currently "ed25519").
version (str): Identifies this key out of the keys for this entity.
key_base64 (str): Base64 encoded bytes of the key.
Returns:
A SigningKey object.
"""
if algorithm == NACL_ED25519:
key_bytes = decode_base64(key_base64)
key = nacl.signing.SigningKey(key_bytes)
key.version = version
key.alg = NACL_ED25519
return key
else:
raise ValueError("Unsupported algorithm %s" % (algorithm,))
def encode_signing_key_base64(key):
"""Encode a signing key as base64
Args:
key (SigningKey): A signing key to encode.
Returns:
base64 encoded string.
"""
return encode_base64(key.encode())
def read_signing_keys(stream):
"""Reads a list of keys from a stream
Args:
stream : A stream to iterate for keys.
Returns:
list of SigningKey objects.
"""
keys = []
for line in stream:
algorithm, version, key_base64 = line.split()
keys.append(decode_signing_key_base64(algorithm, version, key_base64))
return keys
def write_signing_keys(stream, keys):
"""Writes a list of keys to a stream.
Args:
stream: Stream to write keys to.
keys: List of SigningKey objects.
"""
for key in keys:
key_base64 = encode_signing_key_base64(key)
stream.write("%s %s %s\n" % (key.alg, key.version, key_base64,))
|
<commit_before><commit_msg>Add utils for generating, reading and writing signing keys<commit_after>from syutil.base64util import encode_base64, decode_base64
import nacl.signing
NACL_ED25519 = "ed25519"
def generate_singing_key(version):
"""Generate a new signing key
Args:
version (str): Identifies this key out the keys for this entity.
Returns:
A SigningKey object.
"""
key = nacl.signing.SigningKey.generate()
key.version = version
key.alg = NACL_ED25519
return key
def decode_signing_key_base64(algorithm, version, key_base64):
"""Decode a base64 encoded signing key
Args:
algorithm (str): The algorithm the key is for (currently "ed25519").
version (str): Identifies this key out of the keys for this entity.
key_base64 (str): Base64 encoded bytes of the key.
Returns:
A SigningKey object.
"""
if algorithm == NACL_ED25519:
key_bytes = decode_base64(key_base64)
key = nacl.signing.SigningKey(key_bytes)
key.version = version
key.alg = NACL_ED25519
return key
else:
raise ValueError("Unsupported algorithm %s" % (algorithm,))
def encode_signing_key_base64(key):
"""Encode a signing key as base64
Args:
key (SigningKey): A signing key to encode.
Returns:
base64 encoded string.
"""
return encode_base64(key.encode())
def read_signing_keys(stream):
"""Reads a list of keys from a stream
Args:
stream : A stream to iterate for keys.
Returns:
list of SigningKey objects.
"""
keys = []
for line in stream:
algorithm, version, key_base64 = line.split()
keys.append(decode_signing_key_base64(algorithm, version, key_base64))
return keys
def write_signing_keys(stream, keys):
"""Writes a list of keys to a stream.
Args:
stream: Stream to write keys to.
keys: List of SigningKey objects.
"""
for key in keys:
key_base64 = encode_signing_key_base64(key)
stream.write("%s %s %s\n" % (key.alg, key.version, key_base64,))
|
|
a97b70874144b0fc2ae1a794951729ef07d87c77
|
set1/challenge-1.py
|
set1/challenge-1.py
|
import base64
def hex_to_base64(hex_string):
return base64.b64encode(base64.b16decode(hex_string, True))
if __name__ == '__main__':
hex_string = raw_input("> ")
print hex_to_base64(hex_string)
|
Add solution to challenge 1.
|
Add solution to challenge 1.
|
Python
|
mit
|
ericnorris/cryptopals-solutions
|
Add solution to challenge 1.
|
import base64
def hex_to_base64(hex_string):
return base64.b64encode(base64.b16decode(hex_string, True))
if __name__ == '__main__':
hex_string = raw_input("> ")
print hex_to_base64(hex_string)
|
<commit_before><commit_msg>Add solution to challenge 1.<commit_after>
|
import base64
def hex_to_base64(hex_string):
return base64.b64encode(base64.b16decode(hex_string, True))
if __name__ == '__main__':
hex_string = raw_input("> ")
print hex_to_base64(hex_string)
|
Add solution to challenge 1.import base64
def hex_to_base64(hex_string):
return base64.b64encode(base64.b16decode(hex_string, True))
if __name__ == '__main__':
hex_string = raw_input("> ")
print hex_to_base64(hex_string)
|
<commit_before><commit_msg>Add solution to challenge 1.<commit_after>import base64
def hex_to_base64(hex_string):
return base64.b64encode(base64.b16decode(hex_string, True))
if __name__ == '__main__':
hex_string = raw_input("> ")
print hex_to_base64(hex_string)
|
|
d1436ac95bfa1276b49fb2f7a5d7139fda622af4
|
scripts/smoke-test.py
|
scripts/smoke-test.py
|
#!/usr/bin/env python
import sys
import urllib2
USER_AGENTS={
# iPhone 3 - gets smart styling
'smart': 'Mozilla/5.0 (iPhone; U; CPU iPhone OS 3_0 like Mac OS X; en-us) AppleWebKit/528.18 (KHTML, like Gecko) Version/4.0 Mobile/7A341 Safari/528.16',
# iPaq - gets dumb styling
'dumb': 'Mozilla/4.0 (compatible; MSIE 4.01; Windows CE; PPC; 240x320; HP iPAQ h6300)',
}
URLS=[
'/',
'/contact/',
'/contact/results/',
'/service-status/',
'/weather/',
'/desktop/',
'/feature-suggestions/',
'/feedback/',
'/library/',
]
def verify_200(url, ua):
try:
request = urllib2.Request(url, headers={
'User-Agent': ua,
})
file = urllib2.urlopen(url)
if file.geturl() != url:
# Redirected
return file, 300
else:
return file, 200
except urllib2.HTTPError, e:
return None, e.code
def smoke_test(base_url):
tests = 0
status = 0
print "MOLLY SMOKER"
print "------------"
for type, ua in USER_AGENTS.items():
print
print "Simulating", type
for url in URLS:
tests += 1
file, code = verify_200(base_url + url, ua)
if code != 200:
status += 1
print "FAIL", code, url
else:
print " OK ", code, url
print "SUMMARY"
print "-------"
print
print "Ran", tests, "tests"
if status == 0:
print "All passed - well done"
else:
print status, "tests failed"
return status
if __name__ == '__main__':
if len(sys.argv) > 1:
base_url = sys.argv[1]
else:
base_url = "http://localhost:8000"
sys.exit(smoke_test(base_url))
|
Add a smoke testing script (incomplete)
|
Add a smoke testing script (incomplete)
|
Python
|
apache-2.0
|
mollyproject/mollyproject,mollyproject/mollyproject,mollyproject/mollyproject
|
Add a smoke testing script (incomplete)
|
#!/usr/bin/env python
import sys
import urllib2
USER_AGENTS={
# iPhone 3 - gets smart styling
'smart': 'Mozilla/5.0 (iPhone; U; CPU iPhone OS 3_0 like Mac OS X; en-us) AppleWebKit/528.18 (KHTML, like Gecko) Version/4.0 Mobile/7A341 Safari/528.16',
# iPaq - gets dumb styling
'dumb': 'Mozilla/4.0 (compatible; MSIE 4.01; Windows CE; PPC; 240x320; HP iPAQ h6300)',
}
URLS=[
'/',
'/contact/',
'/contact/results/',
'/service-status/',
'/weather/',
'/desktop/',
'/feature-suggestions/',
'/feedback/',
'/library/',
]
def verify_200(url, ua):
try:
request = urllib2.Request(url, headers={
'User-Agent': ua,
})
        file = urllib2.urlopen(request)
if file.geturl() != url:
# Redirected
return file, 300
else:
return file, 200
except urllib2.HTTPError, e:
return None, e.code
def smoke_test(base_url):
tests = 0
status = 0
print "MOLLY SMOKER"
print "------------"
for type, ua in USER_AGENTS.items():
print
print "Simulating", type
for url in URLS:
tests += 1
file, code = verify_200(base_url + url, ua)
if code != 200:
status += 1
print "FAIL", code, url
else:
print " OK ", code, url
print "SUMMARY"
print "-------"
print
print "Ran", tests, "tests"
if status == 0:
print "All passed - well done"
else:
print status, "tests failed"
return status
if __name__ == '__main__':
if len(sys.argv) > 1:
base_url = sys.argv[1]
else:
base_url = "http://localhost:8000"
sys.exit(smoke_test(base_url))
|
<commit_before><commit_msg>Add a smoke testing script (incomplete)<commit_after>
|
#!/usr/bin/env python
import sys
import urllib2
USER_AGENTS={
# iPhone 3 - gets smart styling
'smart': 'Mozilla/5.0 (iPhone; U; CPU iPhone OS 3_0 like Mac OS X; en-us) AppleWebKit/528.18 (KHTML, like Gecko) Version/4.0 Mobile/7A341 Safari/528.16',
# iPaq - gets dumb styling
'dumb': 'Mozilla/4.0 (compatible; MSIE 4.01; Windows CE; PPC; 240x320; HP iPAQ h6300)',
}
URLS=[
'/',
'/contact/',
'/contact/results/',
'/service-status/',
'/weather/',
'/desktop/',
'/feature-suggestions/',
'/feedback/',
'/library/',
]
def verify_200(url, ua):
try:
request = urllib2.Request(url, headers={
'User-Agent': ua,
})
        file = urllib2.urlopen(request)
if file.geturl() != url:
# Redirected
return file, 300
else:
return file, 200
except urllib2.HTTPError, e:
return None, e.code
def smoke_test(base_url):
tests = 0
status = 0
print "MOLLY SMOKER"
print "------------"
for type, ua in USER_AGENTS.items():
print
print "Simulating", type
for url in URLS:
tests += 1
file, code = verify_200(base_url + url, ua)
if code != 200:
status += 1
print "FAIL", code, url
else:
print " OK ", code, url
print "SUMMARY"
print "-------"
print
print "Ran", tests, "tests"
if status == 0:
print "All passed - well done"
else:
print status, "tests failed"
return status
if __name__ == '__main__':
if len(sys.argv) > 1:
base_url = sys.argv[1]
else:
base_url = "http://localhost:8000"
sys.exit(smoke_test(base_url))
|
Add a smoke testing script (incomplete)#!/usr/bin/env python
import sys
import urllib2
USER_AGENTS={
# iPhone 3 - gets smart styling
'smart': 'Mozilla/5.0 (iPhone; U; CPU iPhone OS 3_0 like Mac OS X; en-us) AppleWebKit/528.18 (KHTML, like Gecko) Version/4.0 Mobile/7A341 Safari/528.16',
# iPaq - gets dumb styling
'dumb': 'Mozilla/4.0 (compatible; MSIE 4.01; Windows CE; PPC; 240x320; HP iPAQ h6300)',
}
URLS=[
'/',
'/contact/',
'/contact/results/',
'/service-status/',
'/weather/',
'/desktop/',
'/feature-suggestions/',
'/feedback/',
'/library/',
]
def verify_200(url, ua):
try:
request = urllib2.Request(url, headers={
'User-Agent': ua,
})
        file = urllib2.urlopen(request)
if file.geturl() != url:
# Redirected
return file, 300
else:
return file, 200
except urllib2.HTTPError, e:
return None, e.code
def smoke_test(base_url):
tests = 0
status = 0
print "MOLLY SMOKER"
print "------------"
for type, ua in USER_AGENTS.items():
print
print "Simulating", type
for url in URLS:
tests += 1
file, code = verify_200(base_url + url, ua)
if code != 200:
status += 1
print "FAIL", code, url
else:
print " OK ", code, url
print "SUMMARY"
print "-------"
print
print "Ran", tests, "tests"
if status == 0:
print "All passed - well done"
else:
print status, "tests failed"
return status
if __name__ == '__main__':
if len(sys.argv) > 1:
base_url = sys.argv[1]
else:
base_url = "http://localhost:8000"
sys.exit(smoke_test(base_url))
|
<commit_before><commit_msg>Add a smoke testing script (incomplete)<commit_after>#!/usr/bin/env python
import sys
import urllib2
USER_AGENTS={
# iPhone 3 - gets smart styling
'smart': 'Mozilla/5.0 (iPhone; U; CPU iPhone OS 3_0 like Mac OS X; en-us) AppleWebKit/528.18 (KHTML, like Gecko) Version/4.0 Mobile/7A341 Safari/528.16',
# iPaq - gets dumb styling
'dumb': 'Mozilla/4.0 (compatible; MSIE 4.01; Windows CE; PPC; 240x320; HP iPAQ h6300)',
}
URLS=[
'/',
'/contact/',
'/contact/results/',
'/service-status/',
'/weather/',
'/desktop/',
'/feature-suggestions/',
'/feedback/',
'/library/',
]
def verify_200(url, ua):
try:
request = urllib2.Request(url, headers={
'User-Agent': ua,
})
        file = urllib2.urlopen(request)
if file.geturl() != url:
# Redirected
return file, 300
else:
return file, 200
except urllib2.HTTPError, e:
return None, e.code
def smoke_test(base_url):
tests = 0
status = 0
print "MOLLY SMOKER"
print "------------"
for type, ua in USER_AGENTS.items():
print
print "Simulating", type
for url in URLS:
tests += 1
file, code = verify_200(base_url + url, ua)
if code != 200:
status += 1
print "FAIL", code, url
else:
print " OK ", code, url
print "SUMMARY"
print "-------"
print
print "Ran", tests, "tests"
if status == 0:
print "All passed - well done"
else:
print status, "tests failed"
return status
if __name__ == '__main__':
if len(sys.argv) > 1:
base_url = sys.argv[1]
else:
base_url = "http://localhost:8000"
sys.exit(smoke_test(base_url))
|
|
8420a10364176f1b5fbb15774f72497b55b44d81
|
flaskapp/tests/test_getters_setters.py
|
flaskapp/tests/test_getters_setters.py
|
import pytest
from appname.models import db, Tag
@pytest.mark.usefixtures("testapp")
class TestModels:
def test_Tag(self, testapp):
t = Tag('tagName')
assert t.namex == 'tagName'
t.namex = 'blah'
assert t.namex == 'blah'
assert t.idx == t.id
|
Add get/set for Tags, and test
|
Add get/set for Tags, and test
|
Python
|
bsd-3-clause
|
ikinsella/squall,ikinsella/squall,ikinsella/squall,ikinsella/squall
|
Add get/set for Tags, and test
|
import pytest
from appname.models import db, Tag
@pytest.mark.usefixtures("testapp")
class TestModels:
def test_Tag(self, testapp):
t = Tag('tagName')
assert t.namex == 'tagName'
t.namex = 'blah'
assert t.namex == 'blah'
assert t.idx == t.id
|
<commit_before><commit_msg>Add get/set for Tags, and test<commit_after>
|
import pytest
from appname.models import db, Tag
@pytest.mark.usefixtures("testapp")
class TestModels:
def test_Tag(self, testapp):
t = Tag('tagName')
assert t.namex == 'tagName'
t.namex = 'blah'
assert t.namex == 'blah'
assert t.idx == t.id
|
Add get/set for Tags, and testimport pytest
from appname.models import db, Tag
@pytest.mark.usefixtures("testapp")
class TestModels:
def test_Tag(self, testapp):
t = Tag('tagName')
assert t.namex == 'tagName'
t.namex = 'blah'
assert t.namex == 'blah'
assert t.idx == t.id
|
<commit_before><commit_msg>Add get/set for Tags, and test<commit_after>import pytest
from appname.models import db, Tag
@pytest.mark.usefixtures("testapp")
class TestModels:
def test_Tag(self, testapp):
t = Tag('tagName')
assert t.namex == 'tagName'
t.namex = 'blah'
assert t.namex == 'blah'
assert t.idx == t.id
|
|
7268e528a39dafb28fe31e36b54af79b6fed4f93
|
py/test/unit/selenium/webdriver/wpewebkit/wpewebkit_options_tests.py
|
py/test/unit/selenium/webdriver/wpewebkit/wpewebkit_options_tests.py
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from selenium.webdriver.wpewebkit.options import Options
@pytest.fixture
def options():
return Options()
def test_set_binary_location(options):
options.binary_location = '/foo/bar'
assert options._binary_location == '/foo/bar'
def test_get_binary_location(options):
options._binary_location = '/foo/bar'
assert options.binary_location == '/foo/bar'
def test_creates_capabilities(options):
options._arguments = ['foo']
options._binary_location = '/bar'
caps = options.to_capabilities()
opts = caps.get(Options.KEY)
assert opts
assert 'foo' in opts['args']
assert opts['binary'] == '/bar'
def test_is_a_baseoptions(options):
from selenium.webdriver.common.options import BaseOptions
assert isinstance(options, BaseOptions)
|
Add WPEWebKit options test case
|
[py] Add WPEWebKit options test case
|
Python
|
apache-2.0
|
joshmgrant/selenium,valfirst/selenium,SeleniumHQ/selenium,joshmgrant/selenium,Ardesco/selenium,titusfortner/selenium,valfirst/selenium,SeleniumHQ/selenium,titusfortner/selenium,HtmlUnit/selenium,HtmlUnit/selenium,SeleniumHQ/selenium,SeleniumHQ/selenium,joshmgrant/selenium,valfirst/selenium,SeleniumHQ/selenium,HtmlUnit/selenium,valfirst/selenium,joshmgrant/selenium,SeleniumHQ/selenium,Ardesco/selenium,HtmlUnit/selenium,valfirst/selenium,Ardesco/selenium,valfirst/selenium,HtmlUnit/selenium,titusfortner/selenium,HtmlUnit/selenium,titusfortner/selenium,valfirst/selenium,SeleniumHQ/selenium,titusfortner/selenium,Ardesco/selenium,joshmgrant/selenium,HtmlUnit/selenium,Ardesco/selenium,SeleniumHQ/selenium,titusfortner/selenium,SeleniumHQ/selenium,titusfortner/selenium,Ardesco/selenium,joshmgrant/selenium,valfirst/selenium,titusfortner/selenium,titusfortner/selenium,Ardesco/selenium,valfirst/selenium,titusfortner/selenium,Ardesco/selenium,joshmgrant/selenium,HtmlUnit/selenium,joshmgrant/selenium,Ardesco/selenium,SeleniumHQ/selenium,HtmlUnit/selenium,SeleniumHQ/selenium,joshmgrant/selenium,valfirst/selenium,joshmgrant/selenium,joshmgrant/selenium,valfirst/selenium,HtmlUnit/selenium,titusfortner/selenium
|
[py] Add WPEWebKit options test case
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from selenium.webdriver.wpewebkit.options import Options
@pytest.fixture
def options():
return Options()
def test_set_binary_location(options):
options.binary_location = '/foo/bar'
assert options._binary_location == '/foo/bar'
def test_get_binary_location(options):
options._binary_location = '/foo/bar'
assert options.binary_location == '/foo/bar'
def test_creates_capabilities(options):
options._arguments = ['foo']
options._binary_location = '/bar'
caps = options.to_capabilities()
opts = caps.get(Options.KEY)
assert opts
assert 'foo' in opts['args']
assert opts['binary'] == '/bar'
def test_is_a_baseoptions(options):
from selenium.webdriver.common.options import BaseOptions
assert isinstance(options, BaseOptions)
|
<commit_before><commit_msg>[py] Add WPEWebKit options test case<commit_after>
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from selenium.webdriver.wpewebkit.options import Options
@pytest.fixture
def options():
return Options()
def test_set_binary_location(options):
options.binary_location = '/foo/bar'
assert options._binary_location == '/foo/bar'
def test_get_binary_location(options):
options._binary_location = '/foo/bar'
assert options.binary_location == '/foo/bar'
def test_creates_capabilities(options):
options._arguments = ['foo']
options._binary_location = '/bar'
caps = options.to_capabilities()
opts = caps.get(Options.KEY)
assert opts
assert 'foo' in opts['args']
assert opts['binary'] == '/bar'
def test_is_a_baseoptions(options):
from selenium.webdriver.common.options import BaseOptions
assert isinstance(options, BaseOptions)
|
[py] Add WPEWebKit options test case# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from selenium.webdriver.wpewebkit.options import Options
@pytest.fixture
def options():
return Options()
def test_set_binary_location(options):
options.binary_location = '/foo/bar'
assert options._binary_location == '/foo/bar'
def test_get_binary_location(options):
options._binary_location = '/foo/bar'
assert options.binary_location == '/foo/bar'
def test_creates_capabilities(options):
options._arguments = ['foo']
options._binary_location = '/bar'
caps = options.to_capabilities()
opts = caps.get(Options.KEY)
assert opts
assert 'foo' in opts['args']
assert opts['binary'] == '/bar'
def test_is_a_baseoptions(options):
from selenium.webdriver.common.options import BaseOptions
assert isinstance(options, BaseOptions)
|
<commit_before><commit_msg>[py] Add WPEWebKit options test case<commit_after># Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from selenium.webdriver.wpewebkit.options import Options
@pytest.fixture
def options():
return Options()
def test_set_binary_location(options):
options.binary_location = '/foo/bar'
assert options._binary_location == '/foo/bar'
def test_get_binary_location(options):
options._binary_location = '/foo/bar'
assert options.binary_location == '/foo/bar'
def test_creates_capabilities(options):
options._arguments = ['foo']
options._binary_location = '/bar'
caps = options.to_capabilities()
opts = caps.get(Options.KEY)
assert opts
assert 'foo' in opts['args']
assert opts['binary'] == '/bar'
def test_is_a_baseoptions(options):
from selenium.webdriver.common.options import BaseOptions
assert isinstance(options, BaseOptions)
|
|
79578c3ad23dfba9ffc1a15015cf6718c067b03a
|
perspective_reddit_bot/get_top_subreddits.py
|
perspective_reddit_bot/get_top_subreddits.py
|
"""Small tool for scraping http://redditlist.com/"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from bs4 import BeautifulSoup
import requests
# Only getting SFW subreddits.
BASE = 'http://redditlist.com/sfw?page='
def fetch_page(page_num):
print('fetching page', page_num)
url = BASE + str(page_num)
return BeautifulSoup(requests.get(url).text, 'html.parser')
def get_hot_list(soup):
# First ".span4.listing" element gets the "Recent Activity" list.
# The 2nd list is by subscribers, and the 3rd is by 24h growth.
items = soup.select('#listing-parent .span4.listing')[0].select('.listing-item')
return [i.get('data-target-subreddit') for i in items]
def main():
outfile = sys.argv[1]
all_subs = []
# There are actually only 34 pages of results at the moment, but redditlist.com
# doesn't throw errors, it just serves empty pages.
for i in xrange(40):
i += 1
p = fetch_page(i)
hots = get_hot_list(p)
print('got', len(hots)) # should be 125 per page, except the last
all_subs.extend(hots)
print('got', len(all_subs), 'total')
with open(outfile, 'w') as f:
f.write('\n'.join(all_subs))
print('done')
if __name__ == '__main__':
main()
|
Add script for getting top subreddits.
|
Add script for getting top subreddits.
|
Python
|
apache-2.0
|
conversationai/conversationai-moderator-reddit,conversationai/conversationai-moderator-reddit,conversationai/conversationai-moderator-reddit,conversationai/conversationai-moderator-reddit
|
Add script for getting top subreddits.
|
"""Small tool for scraping http://redditlist.com/"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from bs4 import BeautifulSoup
import requests
# Only getting SFW subreddits.
BASE = 'http://redditlist.com/sfw?page='
def fetch_page(page_num):
print('fetching page', page_num)
url = BASE + str(page_num)
return BeautifulSoup(requests.get(url).text, 'html.parser')
def get_hot_list(soup):
# First ".span4.listing" element gets the "Recent Activity" list.
# The 2nd list is by subscribers, and the 3rd is by 24h growth.
items = soup.select('#listing-parent .span4.listing')[0].select('.listing-item')
return [i.get('data-target-subreddit') for i in items]
def main():
outfile = sys.argv[1]
all_subs = []
# There are actually only 34 pages of results at the moment, but redditlist.com
# doesn't throw errors, it just serves empty pages.
for i in xrange(40):
i += 1
p = fetch_page(i)
hots = get_hot_list(p)
print('got', len(hots)) # should be 125 per page, except the last
all_subs.extend(hots)
print('got', len(all_subs), 'total')
with open(outfile, 'w') as f:
f.write('\n'.join(all_subs))
print('done')
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script for getting top subreddits.<commit_after>
|
"""Small tool for scraping http://redditlist.com/"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from bs4 import BeautifulSoup
import requests
# Only getting SFW subreddits.
BASE = 'http://redditlist.com/sfw?page='
def fetch_page(page_num):
print('fetching page', page_num)
url = BASE + str(page_num)
return BeautifulSoup(requests.get(url).text, 'html.parser')
def get_hot_list(soup):
# First ".span4.listing" element gets the "Recent Activity" list.
# The 2nd list is by subscribers, and the 3rd is by 24h growth.
items = soup.select('#listing-parent .span4.listing')[0].select('.listing-item')
return [i.get('data-target-subreddit') for i in items]
def main():
outfile = sys.argv[1]
all_subs = []
# There are actually only 34 pages of results at the moment, but redditlist.com
# doesn't throw errors, it just serves empty pages.
for i in xrange(40):
i += 1
p = fetch_page(i)
hots = get_hot_list(p)
print('got', len(hots)) # should be 125 per page, except the last
all_subs.extend(hots)
print('got', len(all_subs), 'total')
with open(outfile, 'w') as f:
f.write('\n'.join(all_subs))
print('done')
if __name__ == '__main__':
main()
|
Add script for getting top subreddits."""Small tool for scraping http://redditlist.com/"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from bs4 import BeautifulSoup
import requests
# Only getting SFW subreddits.
BASE = 'http://redditlist.com/sfw?page='
def fetch_page(page_num):
print('fetching page', page_num)
url = BASE + str(page_num)
return BeautifulSoup(requests.get(url).text, 'html.parser')
def get_hot_list(soup):
# First ".span4.listing" element gets the "Recent Activity" list.
# The 2nd list is by subscribers, and the 3rd is by 24h growth.
items = soup.select('#listing-parent .span4.listing')[0].select('.listing-item')
return [i.get('data-target-subreddit') for i in items]
def main():
outfile = sys.argv[1]
all_subs = []
# There are actually only 34 pages of results at the moment, but redditlist.com
# doesn't throw errors, it just serves empty pages.
for i in xrange(40):
i += 1
p = fetch_page(i)
hots = get_hot_list(p)
print('got', len(hots)) # should be 125 per page, except the last
all_subs.extend(hots)
print('got', len(all_subs), 'total')
with open(outfile, 'w') as f:
f.write('\n'.join(all_subs))
print('done')
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script for getting top subreddits.<commit_after>"""Small tool for scraping http://redditlist.com/"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from bs4 import BeautifulSoup
import requests
# Only getting SFW subreddits.
BASE = 'http://redditlist.com/sfw?page='
def fetch_page(page_num):
print('fetching page', page_num)
url = BASE + str(page_num)
return BeautifulSoup(requests.get(url).text, 'html.parser')
def get_hot_list(soup):
# First ".span4.listing" element gets the "Recent Activity" list.
# The 2nd list is by subscribers, and the 3rd is by 24h growth.
items = soup.select('#listing-parent .span4.listing')[0].select('.listing-item')
return [i.get('data-target-subreddit') for i in items]
def main():
outfile = sys.argv[1]
all_subs = []
# There are actually only 34 pages of results at the moment, but redditlist.com
# doesn't throw errors, it just serves empty pages.
for i in xrange(40):
i += 1
p = fetch_page(i)
hots = get_hot_list(p)
print('got', len(hots)) # should be 125 per page, except the last
all_subs.extend(hots)
print('got', len(all_subs), 'total')
with open(outfile, 'w') as f:
f.write('\n'.join(all_subs))
print('done')
if __name__ == '__main__':
main()
|
|
f01c3d82a5db95a7723232edfa4a5437f0d992cf
|
tf_agents/experimental/distributed/__init__.py
|
tf_agents/experimental/distributed/__init__.py
|
# coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
Add reverb_variable_container as a module.
|
Add reverb_variable_container as a module.
PiperOrigin-RevId: 332960465
Change-Id: I5ab58ad8529db359891f3296274176a002791436
|
Python
|
apache-2.0
|
tensorflow/agents,tensorflow/agents
|
Add reverb_variable_container as a module.
PiperOrigin-RevId: 332960465
Change-Id: I5ab58ad8529db359891f3296274176a002791436
|
# coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
<commit_before><commit_msg>Add reverb_variable_container as a module.
PiperOrigin-RevId: 332960465
Change-Id: I5ab58ad8529db359891f3296274176a002791436<commit_after>
|
# coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
Add reverb_variable_container as a module.
PiperOrigin-RevId: 332960465
Change-Id: I5ab58ad8529db359891f3296274176a002791436# coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
<commit_before><commit_msg>Add reverb_variable_container as a module.
PiperOrigin-RevId: 332960465
Change-Id: I5ab58ad8529db359891f3296274176a002791436<commit_after># coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
|
bec56f40e6ed8114b34363383787507a64aead79
|
ReadResults.py
|
ReadResults.py
|
def read(filename):
f = open(filename)
line = f.readline()
print(line)
folds_res = line.split(',')
f.close()
return folds_res[1:11]
for i in range(1, 13):
filename = 'output/psl/auc_all_all_dataset' + str(i) + '.txt'
folds_res = read(filename)
lines = []
write_line = ''
for item in folds_res:
write_line = write_line + item + '\t'
write_line = write_line + '\n'
lines.append(write_line)
write_file = 'output.txt'
with open(write_file, 'a') as f:
f.writelines(lines)
|
Read results and write to a txt file
|
Read results and write to a txt file
|
Python
|
mit
|
HaoboGu/Structure-Similarity
|
Read results and write to a txt file
|
def read(filename):
f = open(filename)
line = f.readline()
print(line)
folds_res = line.split(',')
f.close()
return folds_res[1:11]
for i in range(1, 13):
filename = 'output/psl/auc_all_all_dataset' + str(i) + '.txt'
folds_res = read(filename)
lines = []
write_line = ''
for item in folds_res:
write_line = write_line + item + '\t'
write_line = write_line + '\n'
lines.append(write_line)
write_file = 'output.txt'
with open(write_file, 'a') as f:
f.writelines(lines)
|
<commit_before><commit_msg>Read results and write to a txt file<commit_after>
|
def read(filename):
f = open(filename)
line = f.readline()
print(line)
folds_res = line.split(',')
f.close()
return folds_res[1:11]
for i in range(1, 13):
filename = 'output/psl/auc_all_all_dataset' + str(i) + '.txt'
folds_res = read(filename)
lines = []
write_line = ''
for item in folds_res:
write_line = write_line + item + '\t'
write_line = write_line + '\n'
lines.append(write_line)
write_file = 'output.txt'
with open(write_file, 'a') as f:
f.writelines(lines)
|
Read results and write to a txt file
def read(filename):
f = open(filename)
line = f.readline()
print(line)
folds_res = line.split(',')
f.close()
return folds_res[1:11]
for i in range(1, 13):
filename = 'output/psl/auc_all_all_dataset' + str(i) + '.txt'
folds_res = read(filename)
lines = []
write_line = ''
for item in folds_res:
write_line = write_line + item + '\t'
write_line = write_line + '\n'
lines.append(write_line)
write_file = 'output.txt'
with open(write_file, 'a') as f:
f.writelines(lines)
|
<commit_before><commit_msg>Read results and write to a txt file<commit_after>
def read(filename):
f = open(filename)
line = f.readline()
print(line)
folds_res = line.split(',')
f.close()
return folds_res[1:11]
for i in range(1, 13):
filename = 'output/psl/auc_all_all_dataset' + str(i) + '.txt'
folds_res = read(filename)
lines = []
write_line = ''
for item in folds_res:
write_line = write_line + item + '\t'
write_line = write_line + '\n'
lines.append(write_line)
write_file = 'output.txt'
with open(write_file, 'a') as f:
f.writelines(lines)
|
|
6ccccd125f3ca438b15903a1153d19d4bb869a4f
|
unittests/test_logging.py
|
unittests/test_logging.py
|
pytest_plugins = "logwatch"
def test_logwatch_source(testdir):
testdir.makeconftest("""
import mock
import pytest
pytest_plugins = 'logwatch'
mocksource = mock.Mock()
mocksource.capture = mock.Mock(
return_value=[('test_logwatch.txt', 'Error: This is a test error.')]
)
@pytest.hookimpl
def pytest_lab_log_watch(logmanager):
logmanager.register(mocksource)
mocksource.prepare.assert_called_once_with()
mocksource.prepare.reset_mock()
@pytest.hookimpl
def pytest_lab_process_logs(config, item, logs):
mocksource.prepare.assert_called_once_with()
mocksource.capture.assert_called_once_with()
assert item.name == 'test_example'
assert len(logs) == 1
assert mocksource.ctl in logs.keys()
mocklogs = logs[mocksource.ctl]
assert len(mocklogs) == 1
assert mocklogs == {
'test_logwatch.txt': 'Error: This is a test error.'
}
""")
testdir.makepyfile("""
def test_example():
pass
""")
result = testdir.runpytest()
assert result.ret == 0
|
Add a simple unittest for logging support
|
Add a simple unittest for logging support
|
Python
|
mpl-2.0
|
sangoma/pytestlab
|
Add a simple unittest for logging support
|
pytest_plugins = "logwatch"
def test_logwatch_source(testdir):
testdir.makeconftest("""
import mock
import pytest
pytest_plugins = 'logwatch'
mocksource = mock.Mock()
mocksource.capture = mock.Mock(
return_value=[('test_logwatch.txt', 'Error: This is a test error.')]
)
@pytest.hookimpl
def pytest_lab_log_watch(logmanager):
logmanager.register(mocksource)
mocksource.prepare.assert_called_once_with()
mocksource.prepare.reset_mock()
@pytest.hookimpl
def pytest_lab_process_logs(config, item, logs):
mocksource.prepare.assert_called_once_with()
mocksource.capture.assert_called_once_with()
assert item.name == 'test_example'
assert len(logs) == 1
assert mocksource.ctl in logs.keys()
mocklogs = logs[mocksource.ctl]
assert len(mocklogs) == 1
assert mocklogs == {
'test_logwatch.txt': 'Error: This is a test error.'
}
""")
testdir.makepyfile("""
def test_example():
pass
""")
result = testdir.runpytest()
assert result.ret == 0
|
<commit_before><commit_msg>Add a simple unittest for logging support<commit_after>
|
pytest_plugins = "logwatch"
def test_logwatch_source(testdir):
testdir.makeconftest("""
import mock
import pytest
pytest_plugins = 'logwatch'
mocksource = mock.Mock()
mocksource.capture = mock.Mock(
return_value=[('test_logwatch.txt', 'Error: This is a test error.')]
)
@pytest.hookimpl
def pytest_lab_log_watch(logmanager):
logmanager.register(mocksource)
mocksource.prepare.assert_called_once_with()
mocksource.prepare.reset_mock()
@pytest.hookimpl
def pytest_lab_process_logs(config, item, logs):
mocksource.prepare.assert_called_once_with()
mocksource.capture.assert_called_once_with()
assert item.name == 'test_example'
assert len(logs) == 1
assert mocksource.ctl in logs.keys()
mocklogs = logs[mocksource.ctl]
assert len(mocklogs) == 1
assert mocklogs == {
'test_logwatch.txt': 'Error: This is a test error.'
}
""")
testdir.makepyfile("""
def test_example():
pass
""")
result = testdir.runpytest()
assert result.ret == 0
|
Add a simple unittest for logging supportpytest_plugins = "logwatch"
def test_logwatch_source(testdir):
testdir.makeconftest("""
import mock
import pytest
pytest_plugins = 'logwatch'
mocksource = mock.Mock()
mocksource.capture = mock.Mock(
return_value=[('test_logwatch.txt', 'Error: This is a test error.')]
)
@pytest.hookimpl
def pytest_lab_log_watch(logmanager):
logmanager.register(mocksource)
mocksource.prepare.assert_called_once_with()
mocksource.prepare.reset_mock()
@pytest.hookimpl
def pytest_lab_process_logs(config, item, logs):
mocksource.prepare.assert_called_once_with()
mocksource.capture.assert_called_once_with()
assert item.name == 'test_example'
assert len(logs) == 1
assert mocksource.ctl in logs.keys()
mocklogs = logs[mocksource.ctl]
assert len(mocklogs) == 1
assert mocklogs == {
'test_logwatch.txt': 'Error: This is a test error.'
}
""")
testdir.makepyfile("""
def test_example():
pass
""")
result = testdir.runpytest()
assert result.ret == 0
|
<commit_before><commit_msg>Add a simple unittest for logging support<commit_after>pytest_plugins = "logwatch"
def test_logwatch_source(testdir):
testdir.makeconftest("""
import mock
import pytest
pytest_plugins = 'logwatch'
mocksource = mock.Mock()
mocksource.capture = mock.Mock(
return_value=[('test_logwatch.txt', 'Error: This is a test error.')]
)
@pytest.hookimpl
def pytest_lab_log_watch(logmanager):
logmanager.register(mocksource)
mocksource.prepare.assert_called_once_with()
mocksource.prepare.reset_mock()
@pytest.hookimpl
def pytest_lab_process_logs(config, item, logs):
mocksource.prepare.assert_called_once_with()
mocksource.capture.assert_called_once_with()
assert item.name == 'test_example'
assert len(logs) == 1
assert mocksource.ctl in logs.keys()
mocklogs = logs[mocksource.ctl]
assert len(mocklogs) == 1
assert mocklogs == {
'test_logwatch.txt': 'Error: This is a test error.'
}
""")
testdir.makepyfile("""
def test_example():
pass
""")
result = testdir.runpytest()
assert result.ret == 0
|
|
c6523a03f5a000a5b04724e8750fc1b19d1dc13d
|
scripts/message_prs_in_range.py
|
scripts/message_prs_in_range.py
|
"""
Command-line script to message pull requests in a range
"""
from os import path
import sys
import logging
import click
# Add top-level module path to sys.path before importing tubular code.
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from tubular.github_api import GitHubAPI # pylint: disable=wrong-import-position
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
LOG = logging.getLogger(__name__)
@click.command()
@click.option(
u'--org',
help=u'Org from the GitHub repository URL of https://github.com/<org>/<repo>',
default=u'edx'
)
@click.option(
u'--repo',
required=True,
help=u'Repo name from the GitHub repository URL of https://github.com/<org>/<repo>'
)
@click.option(
u'--token',
envvar=u'GIT_TOKEN',
required=True,
help=u'The github access token, see https://help.github.com/articles/creating-an-access-token-for-command-line-use/'
)
@click.option(
u'--base_sha',
required=True,
help=u'The BASE SHA of the range',
)
@click.option(
u'--head_sha',
required=True,
help=u'The HEAD SHA of the range',
)
@click.option(
u'--release_stage', u'message_type', flag_value=u'stage'
)
@click.option(
u'--release_prod', u'message_type', flag_value=u'prod'
)
@click.option(
u'--release_rollback', u'message_type', flag_value=u'rollback'
)
def message_pull_requests(org,
repo,
token,
base_sha,
head_sha,
message_type):
u"""
Message a range of Pull requests between the BASE and HEAD SHA specified.
Message can be one of 3 types:
- PR on stage
- PR on prod
- Release canceled
Args:
org (str): The github organization
repo (str): The github repository
token (str): The authentication token
base_sha (str): The starting SHA
head_sha (str): The ending SHA
message_type (str): type of message to send
Returns:
None
"""
methods = {
u'stage': u'message_pr_deployed_stage',
u'prod': u'message_pr_deployed_prod',
u'rollback': u'message_pr_release_canceled'
}
api = GitHubAPI(org, repo, token)
for pull_request in api.get_pr_range(base_sha, head_sha):
getattr(api, methods[message_type])(pull_request.number)
if __name__ == u"__main__":
message_pull_requests() # pylint: disable=no-value-for-parameter
|
Add script to message PRs in a range
|
Add script to message PRs in a range
|
Python
|
agpl-3.0
|
eltoncarr/tubular,eltoncarr/tubular
|
Add script to message PRs in a range
|
"""
Command-line script to message pull requests in a range
"""
from os import path
import sys
import logging
import click
# Add top-level module path to sys.path before importing tubular code.
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from tubular.github_api import GitHubAPI # pylint: disable=wrong-import-position
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
LOG = logging.getLogger(__name__)
@click.command()
@click.option(
u'--org',
help=u'Org from the GitHub repository URL of https://github.com/<org>/<repo>',
default=u'edx'
)
@click.option(
u'--repo',
required=True,
help=u'Repo name from the GitHub repository URL of https://github.com/<org>/<repo>'
)
@click.option(
u'--token',
envvar=u'GIT_TOKEN',
required=True,
help=u'The github access token, see https://help.github.com/articles/creating-an-access-token-for-command-line-use/'
)
@click.option(
u'--base_sha',
required=True,
help=u'The BASE SHA of the range',
)
@click.option(
u'--head_sha',
required=True,
help=u'The HEAD SHA of the range',
)
@click.option(
u'--release_stage', u'message_type', flag_value=u'stage'
)
@click.option(
u'--release_prod', u'message_type', flag_value=u'prod'
)
@click.option(
u'--release_rollback', u'message_type', flag_value=u'rollback'
)
def message_pull_requests(org,
repo,
token,
base_sha,
head_sha,
message_type):
u"""
Message a range of Pull requests between the BASE and HEAD SHA specified.
Message can be one of 3 types:
- PR on stage
- PR on prod
- Release canceled
Args:
org (str): The github organization
repo (str): The github repository
token (str): The authentication token
base_sha (str): The starting SHA
head_sha (str): The ending SHA
message_type (str): type of message to send
Returns:
None
"""
methods = {
u'stage': u'message_pr_deployed_stage',
u'prod': u'message_pr_deployed_prod',
u'rollback': u'message_pr_release_canceled'
}
api = GitHubAPI(org, repo, token)
for pull_request in api.get_pr_range(base_sha, head_sha):
getattr(api, methods[message_type])(pull_request.number)
if __name__ == u"__main__":
message_pull_requests() # pylint: disable=no-value-for-parameter
|
<commit_before><commit_msg>Add script to message PRs in a range<commit_after>
|
"""
Command-line script to message pull requests in a range
"""
from os import path
import sys
import logging
import click
# Add top-level module path to sys.path before importing tubular code.
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from tubular.github_api import GitHubAPI # pylint: disable=wrong-import-position
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
LOG = logging.getLogger(__name__)
@click.command()
@click.option(
u'--org',
help=u'Org from the GitHub repository URL of https://github.com/<org>/<repo>',
default=u'edx'
)
@click.option(
u'--repo',
required=True,
help=u'Repo name from the GitHub repository URL of https://github.com/<org>/<repo>'
)
@click.option(
u'--token',
envvar=u'GIT_TOKEN',
required=True,
help=u'The github access token, see https://help.github.com/articles/creating-an-access-token-for-command-line-use/'
)
@click.option(
u'--base_sha',
required=True,
help=u'The BASE SHA of the range',
)
@click.option(
u'--head_sha',
required=True,
help=u'The HEAD SHA of the range',
)
@click.option(
u'--release_stage', u'message_type', flag_value=u'stage'
)
@click.option(
u'--release_prod', u'message_type', flag_value=u'prod'
)
@click.option(
u'--release_rollback', u'message_type', flag_value=u'rollback'
)
def message_pull_requests(org,
repo,
token,
base_sha,
head_sha,
message_type):
u"""
Message a range of Pull requests between the BASE and HEAD SHA specified.
Message can be one of 3 types:
- PR on stage
- PR on prod
- Release canceled
Args:
org (str): The github organization
repo (str): The github repository
token (str): The authentication token
base_sha (str): The starting SHA
head_sha (str): The ending SHA
message_type (str): type of message to send
Returns:
None
"""
methods = {
u'stage': u'message_pr_deployed_stage',
u'prod': u'message_pr_deployed_prod',
u'rollback': u'message_pr_release_canceled'
}
api = GitHubAPI(org, repo, token)
for pull_request in api.get_pr_range(base_sha, head_sha):
getattr(api, methods[message_type])(pull_request.number)
if __name__ == u"__main__":
message_pull_requests() # pylint: disable=no-value-for-parameter
|
Add script to message PRs in a range"""
Command-line script to message pull requests in a range
"""
from os import path
import sys
import logging
import click
# Add top-level module path to sys.path before importing tubular code.
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from tubular.github_api import GitHubAPI # pylint: disable=wrong-import-position
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
LOG = logging.getLogger(__name__)
@click.command()
@click.option(
u'--org',
help=u'Org from the GitHub repository URL of https://github.com/<org>/<repo>',
default=u'edx'
)
@click.option(
u'--repo',
required=True,
help=u'Repo name from the GitHub repository URL of https://github.com/<org>/<repo>'
)
@click.option(
u'--token',
envvar=u'GIT_TOKEN',
required=True,
help=u'The github access token, see https://help.github.com/articles/creating-an-access-token-for-command-line-use/'
)
@click.option(
u'--base_sha',
required=True,
help=u'The BASE SHA of the range',
)
@click.option(
u'--head_sha',
required=True,
help=u'The HEAD SHA of the range',
)
@click.option(
u'--release_stage', u'message_type', flag_value=u'stage'
)
@click.option(
u'--release_prod', u'message_type', flag_value=u'prod'
)
@click.option(
u'--release_rollback', u'message_type', flag_value=u'rollback'
)
def message_pull_requests(org,
repo,
token,
base_sha,
head_sha,
message_type):
u"""
Message a range of Pull requests between the BASE and HEAD SHA specified.
Message can be one of 3 types:
- PR on stage
- PR on prod
- Release canceled
Args:
org (str): The github organization
repo (str): The github repository
token (str): The authentication token
base_sha (str): The starting SHA
head_sha (str): The ending SHA
message_type (str): type of message to send
Returns:
None
"""
methods = {
u'stage': u'message_pr_deployed_stage',
u'prod': u'message_pr_deployed_prod',
u'rollback': u'message_pr_release_canceled'
}
api = GitHubAPI(org, repo, token)
for pull_request in api.get_pr_range(base_sha, head_sha):
getattr(api, methods[message_type])(pull_request.number)
if __name__ == u"__main__":
message_pull_requests() # pylint: disable=no-value-for-parameter
|
<commit_before><commit_msg>Add script to message PRs in a range<commit_after>"""
Command-line script to message pull requests in a range
"""
from os import path
import sys
import logging
import click
# Add top-level module path to sys.path before importing tubular code.
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from tubular.github_api import GitHubAPI # pylint: disable=wrong-import-position
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
LOG = logging.getLogger(__name__)
@click.command()
@click.option(
u'--org',
help=u'Org from the GitHub repository URL of https://github.com/<org>/<repo>',
default=u'edx'
)
@click.option(
u'--repo',
required=True,
help=u'Repo name from the GitHub repository URL of https://github.com/<org>/<repo>'
)
@click.option(
u'--token',
envvar=u'GIT_TOKEN',
required=True,
help=u'The github access token, see https://help.github.com/articles/creating-an-access-token-for-command-line-use/'
)
@click.option(
u'--base_sha',
required=True,
help=u'The BASE SHA of the range',
)
@click.option(
u'--head_sha',
required=True,
help=u'The HEAD SHA of the range',
)
@click.option(
u'--release_stage', u'message_type', flag_value=u'stage'
)
@click.option(
u'--release_prod', u'message_type', flag_value=u'prod'
)
@click.option(
u'--release_rollback', u'message_type', flag_value=u'rollback'
)
def message_pull_requests(org,
repo,
token,
base_sha,
head_sha,
message_type):
u"""
Message a range of Pull requests between the BASE and HEAD SHA specified.
Message can be one of 3 types:
- PR on stage
- PR on prod
- Release canceled
Args:
org (str): The github organization
repo (str): The github repository
token (str): The authentication token
base_sha (str): The starting SHA
head_sha (str): The ending SHA
message_type (str): type of message to send
Returns:
None
"""
methods = {
u'stage': u'message_pr_deployed_stage',
u'prod': u'message_pr_deployed_prod',
u'rollback': u'message_pr_release_canceled'
}
api = GitHubAPI(org, repo, token)
for pull_request in api.get_pr_range(base_sha, head_sha):
getattr(api, methods[message_type])(pull_request.number)
if __name__ == u"__main__":
message_pull_requests() # pylint: disable=no-value-for-parameter
|
|
13f271c7d6d259e8a478657fa05822f9584090c4
|
tests/keras/utils/generic_utils_test.py
|
tests/keras/utils/generic_utils_test.py
|
import pytest
from keras.utils.generic_utils import custom_object_scope
from keras import activations
from keras import regularizers
def test_custom_objects_scope():
def custom_fn():
pass
class CustomClass(object):
pass
with custom_object_scope({'CustomClass': CustomClass,
'custom_fn': custom_fn}):
act = activations.get('custom_fn')
assert act == custom_fn
cl = regularizers.get('CustomClass')
assert cl.__class__ == CustomClass
if __name__ == '__main__':
pytest.main([__file__])
|
Add unit test for custom objects scope.
|
Add unit test for custom objects scope.
|
Python
|
apache-2.0
|
keras-team/keras,keras-team/keras
|
Add unit test for custom objects scope.
|
import pytest
from keras.utils.generic_utils import custom_object_scope
from keras import activations
from keras import regularizers
def test_custom_objects_scope():
def custom_fn():
pass
class CustomClass(object):
pass
with custom_object_scope({'CustomClass': CustomClass,
'custom_fn': custom_fn}):
act = activations.get('custom_fn')
assert act == custom_fn
cl = regularizers.get('CustomClass')
assert cl.__class__ == CustomClass
if __name__ == '__main__':
pytest.main([__file__])
|
<commit_before><commit_msg>Add unit test for custom objects scope.<commit_after>
|
import pytest
from keras.utils.generic_utils import custom_object_scope
from keras import activations
from keras import regularizers
def test_custom_objects_scope():
def custom_fn():
pass
class CustomClass(object):
pass
with custom_object_scope({'CustomClass': CustomClass,
'custom_fn': custom_fn}):
act = activations.get('custom_fn')
assert act == custom_fn
cl = regularizers.get('CustomClass')
assert cl.__class__ == CustomClass
if __name__ == '__main__':
pytest.main([__file__])
|
Add unit test for custom objects scope.import pytest
from keras.utils.generic_utils import custom_object_scope
from keras import activations
from keras import regularizers
def test_custom_objects_scope():
def custom_fn():
pass
class CustomClass(object):
pass
with custom_object_scope({'CustomClass': CustomClass,
'custom_fn': custom_fn}):
act = activations.get('custom_fn')
assert act == custom_fn
cl = regularizers.get('CustomClass')
assert cl.__class__ == CustomClass
if __name__ == '__main__':
pytest.main([__file__])
|
<commit_before><commit_msg>Add unit test for custom objects scope.<commit_after>import pytest
from keras.utils.generic_utils import custom_object_scope
from keras import activations
from keras import regularizers
def test_custom_objects_scope():
def custom_fn():
pass
class CustomClass(object):
pass
with custom_object_scope({'CustomClass': CustomClass,
'custom_fn': custom_fn}):
act = activations.get('custom_fn')
assert act == custom_fn
cl = regularizers.get('CustomClass')
assert cl.__class__ == CustomClass
if __name__ == '__main__':
pytest.main([__file__])
|
|
2d59af0c67eecf95c00c36cf93e1d563a489b390
|
contentcuration/contentcuration/migrations/0091_auto_20180724_2243.py
|
contentcuration/contentcuration/migrations/0091_auto_20180724_2243.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2018-07-24 22:43
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('contentcuration', '0090_auto_20180724_1625'),
]
operations = [
migrations.AlterField(
model_name='channel',
name='content_defaults',
field=django.contrib.postgres.fields.jsonb.JSONField(default=dict),
),
migrations.AlterField(
model_name='user',
name='content_defaults',
field=django.contrib.postgres.fields.jsonb.JSONField(default=dict),
),
]
|
Add migration for content defaults.
|
Add migration for content defaults.
|
Python
|
mit
|
jayoshih/content-curation,fle-internal/content-curation,DXCanas/content-curation,jayoshih/content-curation,jayoshih/content-curation,DXCanas/content-curation,fle-internal/content-curation,DXCanas/content-curation,fle-internal/content-curation,fle-internal/content-curation,jayoshih/content-curation,DXCanas/content-curation
|
Add migration for content defaults.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2018-07-24 22:43
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('contentcuration', '0090_auto_20180724_1625'),
]
operations = [
migrations.AlterField(
model_name='channel',
name='content_defaults',
field=django.contrib.postgres.fields.jsonb.JSONField(default=dict),
),
migrations.AlterField(
model_name='user',
name='content_defaults',
field=django.contrib.postgres.fields.jsonb.JSONField(default=dict),
),
]
|
<commit_before><commit_msg>Add migration for content defaults.<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2018-07-24 22:43
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('contentcuration', '0090_auto_20180724_1625'),
]
operations = [
migrations.AlterField(
model_name='channel',
name='content_defaults',
field=django.contrib.postgres.fields.jsonb.JSONField(default=dict),
),
migrations.AlterField(
model_name='user',
name='content_defaults',
field=django.contrib.postgres.fields.jsonb.JSONField(default=dict),
),
]
|
Add migration for content defaults.# -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2018-07-24 22:43
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('contentcuration', '0090_auto_20180724_1625'),
]
operations = [
migrations.AlterField(
model_name='channel',
name='content_defaults',
field=django.contrib.postgres.fields.jsonb.JSONField(default=dict),
),
migrations.AlterField(
model_name='user',
name='content_defaults',
field=django.contrib.postgres.fields.jsonb.JSONField(default=dict),
),
]
|
<commit_before><commit_msg>Add migration for content defaults.<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2018-07-24 22:43
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('contentcuration', '0090_auto_20180724_1625'),
]
operations = [
migrations.AlterField(
model_name='channel',
name='content_defaults',
field=django.contrib.postgres.fields.jsonb.JSONField(default=dict),
),
migrations.AlterField(
model_name='user',
name='content_defaults',
field=django.contrib.postgres.fields.jsonb.JSONField(default=dict),
),
]
|
|
06830b84693bb79375d61883f087dab503538709
|
apps/jobs/cms_app.py
|
apps/jobs/cms_app.py
|
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from django.utils.translation import ugettext_lazy as _
class JobsAppHook(CMSApp):
name = _("Jobs")
urls = ["apps.jobs.urls"]
apphook_pool.register(JobsAppHook)
|
Allow app to be bound to CMS pages
|
Allow app to be bound to CMS pages
|
Python
|
mit
|
MjAbuz/foundation,okfn/foundation,okfn/website,MjAbuz/foundation,MjAbuz/foundation,MjAbuz/foundation,okfn/foundation,okfn/website,okfn/foundation,okfn/website,okfn/website,okfn/foundation
|
Allow app to be bound to CMS pages
|
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from django.utils.translation import ugettext_lazy as _
class JobsAppHook(CMSApp):
name = _("Jobs")
urls = ["apps.jobs.urls"]
apphook_pool.register(JobsAppHook)
|
<commit_before><commit_msg>Allow app to be bound to CMS pages<commit_after>
|
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from django.utils.translation import ugettext_lazy as _
class JobsAppHook(CMSApp):
name = _("Jobs")
urls = ["apps.jobs.urls"]
apphook_pool.register(JobsAppHook)
|
Allow app to be bound to CMS pagesfrom cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from django.utils.translation import ugettext_lazy as _
class JobsAppHook(CMSApp):
name = _("Jobs")
urls = ["apps.jobs.urls"]
apphook_pool.register(JobsAppHook)
|
<commit_before><commit_msg>Allow app to be bound to CMS pages<commit_after>from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from django.utils.translation import ugettext_lazy as _
class JobsAppHook(CMSApp):
name = _("Jobs")
urls = ["apps.jobs.urls"]
apphook_pool.register(JobsAppHook)
|
|
b4e3a38910202d077c21bff53febe07001c7848b
|
exercises/chapter_15/exercise_15_01/exercise_15_01.py
|
exercises/chapter_15/exercise_15_01/exercise_15_01.py
|
# 15-1. Cubes
import matplotlib.pyplot as plt
def plot_cubes(min_x, max_x):
"""Plots the cube of range of numbers"""
x_values = list(range(min_x, max_x))
y_values = [x**3 for x in x_values]
plt.scatter(x_values, y_values, s = 40)
plt.show()
start_x = 1
stop_x = 6
plot_cubes(start_x, stop_x)
start_x = 1
stop_x = 5001
plot_cubes(start_x, stop_x)
|
Add solution to exercise 15.1.
|
Add solution to exercise 15.1.
|
Python
|
mit
|
HenrikSamuelsson/python-crash-course
|
Add solution to exercise 15.1.
|
# 15-1. Cubes
import matplotlib.pyplot as plt
def plot_cubes(min_x, max_x):
"""Plots the cube of range of numbers"""
x_values = list(range(min_x, max_x))
y_values = [x**3 for x in x_values]
plt.scatter(x_values, y_values, s = 40)
plt.show()
start_x = 1
stop_x = 6
plot_cubes(start_x, stop_x)
start_x = 1
stop_x = 5001
plot_cubes(start_x, stop_x)
|
<commit_before><commit_msg>Add solution to exercise 15.1.<commit_after>
|
# 15-1. Cubes
import matplotlib.pyplot as plt
def plot_cubes(min_x, max_x):
"""Plots the cube of range of numbers"""
x_values = list(range(min_x, max_x))
y_values = [x**3 for x in x_values]
plt.scatter(x_values, y_values, s = 40)
plt.show()
start_x = 1
stop_x = 6
plot_cubes(start_x, stop_x)
start_x = 1
stop_x = 5001
plot_cubes(start_x, stop_x)
|
Add solution to exercise 15.1.# 15-1. Cubes
import matplotlib.pyplot as plt
def plot_cubes(min_x, max_x):
"""Plots the cube of range of numbers"""
x_values = list(range(min_x, max_x))
y_values = [x**3 for x in x_values]
plt.scatter(x_values, y_values, s = 40)
plt.show()
start_x = 1
stop_x = 6
plot_cubes(start_x, stop_x)
start_x = 1
stop_x = 5001
plot_cubes(start_x, stop_x)
|
<commit_before><commit_msg>Add solution to exercise 15.1.<commit_after># 15-1. Cubes
import matplotlib.pyplot as plt
def plot_cubes(min_x, max_x):
"""Plots the cube of range of numbers"""
x_values = list(range(min_x, max_x))
y_values = [x**3 for x in x_values]
plt.scatter(x_values, y_values, s = 40)
plt.show()
start_x = 1
stop_x = 6
plot_cubes(start_x, stop_x)
start_x = 1
stop_x = 5001
plot_cubes(start_x, stop_x)
|
|
91242064c3e98d28ec05662dd0fddfc0fadecad5
|
Communication/tcpClient.py
|
Communication/tcpClient.py
|
#!/usr/bin/env python
# This script reads data from a serial port forwarded to tcp with
# socat with the following command:
# sudo socat tcp-l:1234,reuseaddr,fork file:/dev/ttyACM0,nonblock,raw,echo=0,waitlock=/var/run/ttyACM0.lock,b9600
import socket
TCP_IP = '127.0.0.1'
TCP_PORT = 1234
BUFFER_SIZE = 1024
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((TCP_IP, TCP_PORT))
data = ""
while True :
#s.send(MESSAGE)
data = data + s.recv(BUFFER_SIZE)
data_split = data.split("\r\n")
if len(data_split)>1:
print data_split[-2] # Before the last (which is empty if no more messages)
data = data_split[-1]
s.close()
|
Add a tcp client to read from tcp
|
Add a tcp client to read from tcp
|
Python
|
mit
|
baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite
|
Add a tcp client to read from tcp
|
#!/usr/bin/env python
# This script reads data from a serial port forwarded to tcp with
# socat with the following command:
# sudo socat tcp-l:1234,reuseaddr,fork file:/dev/ttyACM0,nonblock,raw,echo=0,waitlock=/var/run/ttyACM0.lock,b9600
import socket
TCP_IP = '127.0.0.1'
TCP_PORT = 1234
BUFFER_SIZE = 1024
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((TCP_IP, TCP_PORT))
data = ""
while True :
#s.send(MESSAGE)
data = data + s.recv(BUFFER_SIZE)
data_split = data.split("\r\n")
if len(data_split)>1:
print data_split[-2] # Before the last (which is empty if no more messages)
data = data_split[-1]
s.close()
|
<commit_before><commit_msg>Add a tcp client to read from tcp<commit_after>
|
#!/usr/bin/env python
# This script reads data from a serial port forwarded to tcp with
# socat with the following command:
# sudo socat tcp-l:1234,reuseaddr,fork file:/dev/ttyACM0,nonblock,raw,echo=0,waitlock=/var/run/ttyACM0.lock,b9600
import socket
TCP_IP = '127.0.0.1'
TCP_PORT = 1234
BUFFER_SIZE = 1024
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((TCP_IP, TCP_PORT))
data = ""
while True :
#s.send(MESSAGE)
data = data + s.recv(BUFFER_SIZE)
data_split = data.split("\r\n")
if len(data_split)>1:
print data_split[-2] # Before the last (which is empty if no more messages)
data = data_split[-1]
s.close()
|
Add a tcp client to read from tcp#!/usr/bin/env python
# This script reads data from a serial port forwarded to tcp with
# socat with the following command:
# sudo socat tcp-l:1234,reuseaddr,fork file:/dev/ttyACM0,nonblock,raw,echo=0,waitlock=/var/run/ttyACM0.lock,b9600
import socket
TCP_IP = '127.0.0.1'
TCP_PORT = 1234
BUFFER_SIZE = 1024
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((TCP_IP, TCP_PORT))
data = ""
while True :
#s.send(MESSAGE)
data = data + s.recv(BUFFER_SIZE)
data_split = data.split("\r\n")
if len(data_split)>1:
print data_split[-2] # Before the last (which is empty if no more messages)
data = data_split[-1]
s.close()
|
<commit_before><commit_msg>Add a tcp client to read from tcp<commit_after>#!/usr/bin/env python
# This script reads data from a serial port forwarded to tcp with
# socat with the following command:
# sudo socat tcp-l:1234,reuseaddr,fork file:/dev/ttyACM0,nonblock,raw,echo=0,waitlock=/var/run/ttyACM0.lock,b9600
import socket
TCP_IP = '127.0.0.1'
TCP_PORT = 1234
BUFFER_SIZE = 1024
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((TCP_IP, TCP_PORT))
data = ""
while True :
#s.send(MESSAGE)
data = data + s.recv(BUFFER_SIZE)
data_split = data.split("\r\n")
if len(data_split)>1:
print data_split[-2] # Before the last (which is empty if no more messages)
data = data_split[-1]
s.close()
|
|
faf35e328ffbea9d0a391b16a2aa8bb60589bce8
|
altair/examples/scatter_alternate_axes_scale.py
|
altair/examples/scatter_alternate_axes_scale.py
|
"""
Scatter Plots with alternate Y axis scale
---------------------------------
A few examples that make use of alternate Y axis scales.
"""
# category: simple charts
import altair as alt
import numpy as np
import pandas as pd
chart_df = pd.DataFrame(
{
'x': list(range(0,20)),
'y': [2** x for x in range(0,20)],
}
)
base_chart = alt.Chart(chart_df).mark_line().encode(
x=alt.X('x', type='quantitative'),
).properties(
height=200,
width=200,
)
chart1 = base_chart.encode(
y=alt.Y('y', type='quantitative'),
).properties(title='linear')
chart2 = base_chart.encode(
y=alt.Y('y', type='quantitative', scale=alt.Scale(type='log', base=2)),
).properties(title='log base 2')
chart3 = base_chart.encode(
y=alt.Y('y', type='quantitative', scale=alt.Scale(type='log', base=np.e)),
).properties(title='log base e')
chart4 = base_chart.encode(
y=alt.Y('y', type='quantitative', scale=alt.Scale(type='log')),
).properties(title='log base 10')
chart1 | chart2 | chart3 | chart4
|
Add example for alternate axes scale.
|
Add example for alternate axes scale.
|
Python
|
bsd-3-clause
|
jakevdp/altair,altair-viz/altair
|
Add example for alternate axes scale.
|
"""
Scatter Plots with alternate Y axis scale
---------------------------------
A few examples that make use of alternate Y axis scales.
"""
# category: simple charts
import altair as alt
import numpy as np
import pandas as pd
chart_df = pd.DataFrame(
{
'x': list(range(0,20)),
'y': [2** x for x in range(0,20)],
}
)
base_chart = alt.Chart(chart_df).mark_line().encode(
x=alt.X('x', type='quantitative'),
).properties(
height=200,
width=200,
)
chart1 = base_chart.encode(
y=alt.Y('y', type='quantitative'),
).properties(title='linear')
chart2 = base_chart.encode(
y=alt.Y('y', type='quantitative', scale=alt.Scale(type='log', base=2)),
).properties(title='log base 2')
chart3 = base_chart.encode(
y=alt.Y('y', type='quantitative', scale=alt.Scale(type='log', base=np.e)),
).properties(title='log base e')
chart4 = base_chart.encode(
y=alt.Y('y', type='quantitative', scale=alt.Scale(type='log')),
).properties(title='log base 10')
chart1 | chart2 | chart3 | chart4
|
<commit_before><commit_msg>Add example for alternate axes scale.<commit_after>
|
"""
Scatter Plots with alternate Y axis scale
---------------------------------
A few examples that make use of alternate Y axis scales.
"""
# category: simple charts
import altair as alt
import numpy as np
import pandas as pd
chart_df = pd.DataFrame(
{
'x': list(range(0,20)),
'y': [2** x for x in range(0,20)],
}
)
base_chart = alt.Chart(chart_df).mark_line().encode(
x=alt.X('x', type='quantitative'),
).properties(
height=200,
width=200,
)
chart1 = base_chart.encode(
y=alt.Y('y', type='quantitative'),
).properties(title='linear')
chart2 = base_chart.encode(
y=alt.Y('y', type='quantitative', scale=alt.Scale(type='log', base=2)),
).properties(title='log base 2')
chart3 = base_chart.encode(
y=alt.Y('y', type='quantitative', scale=alt.Scale(type='log', base=np.e)),
).properties(title='log base e')
chart4 = base_chart.encode(
y=alt.Y('y', type='quantitative', scale=alt.Scale(type='log')),
).properties(title='log base 10')
chart1 | chart2 | chart3 | chart4
|
Add example for alternate axes scale."""
Scatter Plots with alternate Y axis scale
---------------------------------
A few examples that make use of alternate Y axis scales.
"""
# category: simple charts
import altair as alt
import numpy as np
import pandas as pd
chart_df = pd.DataFrame(
{
'x': list(range(0,20)),
'y': [2** x for x in range(0,20)],
}
)
base_chart = alt.Chart(chart_df).mark_line().encode(
x=alt.X('x', type='quantitative'),
).properties(
height=200,
width=200,
)
chart1 = base_chart.encode(
y=alt.Y('y', type='quantitative'),
).properties(title='linear')
chart2 = base_chart.encode(
y=alt.Y('y', type='quantitative', scale=alt.Scale(type='log', base=2)),
).properties(title='log base 2')
chart3 = base_chart.encode(
y=alt.Y('y', type='quantitative', scale=alt.Scale(type='log', base=np.e)),
).properties(title='log base e')
chart4 = base_chart.encode(
y=alt.Y('y', type='quantitative', scale=alt.Scale(type='log')),
).properties(title='log base 10')
display(chart1 | chart2 | chart3 | chart4)
|
<commit_before><commit_msg>Add example for alternate axes scale.<commit_after>"""
Scatter Plots with alternate Y axis scale
---------------------------------
A few examples that make use of alternate Y axis scales.
"""
# category: simple charts
import altair as alt
import numpy as np
import pandas as pd
chart_df = pd.DataFrame(
{
'x': list(range(0,20)),
'y': [2** x for x in range(0,20)],
}
)
base_chart = alt.Chart(chart_df).mark_line().encode(
x=alt.X('x', type='quantitative'),
).properties(
height=200,
width=200,
)
chart1 = base_chart.encode(
y=alt.Y('y', type='quantitative'),
).properties(title='linear')
chart2 = base_chart.encode(
y=alt.Y('y', type='quantitative', scale=alt.Scale(type='log', base=2)),
).properties(title='log base 2')
chart3 = base_chart.encode(
y=alt.Y('y', type='quantitative', scale=alt.Scale(type='log', base=np.e)),
).properties(title='log base e')
chart4 = base_chart.encode(
y=alt.Y('y', type='quantitative', scale=alt.Scale(type='log')),
).properties(title='log base 10')
chart1 | chart2 | chart3 | chart4
|
|
9ed1ee608b132f618358e2d16829c431ff46bd9b
|
mysite/search/tests.py
|
mysite/search/tests.py
|
import django.test
from search.models import Project
class NonJavascriptSearch(django.test.TestCase):
fixtures = ['bugs-for-two-projects.json']
def testSearch(self):
response = self.client.get('/search/')
for n in range(1, 11):
self.assertContains(response, 'Title #%d' % n)
self.assertContains(response, 'Description #%d' % n)
|
import django.test
from search.models import Project
class NonJavascriptSearch(django.test.TestCase):
fixtures = ['bugs-for-two-projects.json']
def testSearch(self):
response = self.client.get('/search/')
for n in range(1, 11):
self.assertContains(response, 'Title #%d' % n)
self.assertContains(response, 'Description #%d' % n)
def testMatchingBugsFromMtoN(self):
response = self.client.get('/search/')
self.assertContains(response, '1 to 10')
|
Add a failing test (for a usability issue)
|
Add a failing test (for a usability issue)
|
Python
|
agpl-3.0
|
moijes12/oh-mainline,willingc/oh-mainline,sudheesh001/oh-mainline,campbe13/openhatch,vipul-sharma20/oh-mainline,nirmeshk/oh-mainline,SnappleCap/oh-mainline,onceuponatimeforever/oh-mainline,onceuponatimeforever/oh-mainline,onceuponatimeforever/oh-mainline,vipul-sharma20/oh-mainline,openhatch/oh-mainline,jledbetter/openhatch,nirmeshk/oh-mainline,ehashman/oh-mainline,waseem18/oh-mainline,ehashman/oh-mainline,willingc/oh-mainline,openhatch/oh-mainline,heeraj123/oh-mainline,vipul-sharma20/oh-mainline,ehashman/oh-mainline,nirmeshk/oh-mainline,ehashman/oh-mainline,onceuponatimeforever/oh-mainline,heeraj123/oh-mainline,waseem18/oh-mainline,willingc/oh-mainline,waseem18/oh-mainline,jledbetter/openhatch,campbe13/openhatch,willingc/oh-mainline,Changaco/oh-mainline,vipul-sharma20/oh-mainline,nirmeshk/oh-mainline,willingc/oh-mainline,mzdaniel/oh-mainline,jledbetter/openhatch,heeraj123/oh-mainline,openhatch/oh-mainline,jledbetter/openhatch,campbe13/openhatch,sudheesh001/oh-mainline,eeshangarg/oh-mainline,ojengwa/oh-mainline,nirmeshk/oh-mainline,openhatch/oh-mainline,Changaco/oh-mainline,moijes12/oh-mainline,vipul-sharma20/oh-mainline,Changaco/oh-mainline,Changaco/oh-mainline,heeraj123/oh-mainline,sudheesh001/oh-mainline,campbe13/openhatch,SnappleCap/oh-mainline,eeshangarg/oh-mainline,heeraj123/oh-mainline,jledbetter/openhatch,eeshangarg/oh-mainline,ojengwa/oh-mainline,SnappleCap/oh-mainline,ojengwa/oh-mainline,onceuponatimeforever/oh-mainline,waseem18/oh-mainline,SnappleCap/oh-mainline,mzdaniel/oh-mainline,eeshangarg/oh-mainline,mzdaniel/oh-mainline,moijes12/oh-mainline,ehashman/oh-mainline,moijes12/oh-mainline,ojengwa/oh-mainline,Changaco/oh-mainline,mzdaniel/oh-mainline,eeshangarg/oh-mainline,sudheesh001/oh-mainline,mzdaniel/oh-mainline,ojengwa/oh-mainline,SnappleCap/oh-mainline,sudheesh001/oh-mainline,mzdaniel/oh-mainline,campbe13/openhatch,openhatch/oh-mainline,moijes12/oh-mainline,mzdaniel/oh-mainline,waseem18/oh-mainline
|
import django.test
from search.models import Project
class NonJavascriptSearch(django.test.TestCase):
fixtures = ['bugs-for-two-projects.json']
def testSearch(self):
response = self.client.get('/search/')
for n in range(1, 11):
self.assertContains(response, 'Title #%d' % n)
self.assertContains(response, 'Description #%d' % n)
Add a failing test (for a usability issue)
|
import django.test
from search.models import Project
class NonJavascriptSearch(django.test.TestCase):
fixtures = ['bugs-for-two-projects.json']
def testSearch(self):
response = self.client.get('/search/')
for n in range(1, 11):
self.assertContains(response, 'Title #%d' % n)
self.assertContains(response, 'Description #%d' % n)
def testMatchingBugsFromMtoN(self):
response = self.client.get('/search/')
self.assertContains(response, '1 to 10')
|
<commit_before>import django.test
from search.models import Project
class NonJavascriptSearch(django.test.TestCase):
fixtures = ['bugs-for-two-projects.json']
def testSearch(self):
response = self.client.get('/search/')
for n in range(1, 11):
self.assertContains(response, 'Title #%d' % n)
self.assertContains(response, 'Description #%d' % n)
<commit_msg>Add a failing test (for a usability issue)<commit_after>
|
import django.test
from search.models import Project
class NonJavascriptSearch(django.test.TestCase):
fixtures = ['bugs-for-two-projects.json']
def testSearch(self):
response = self.client.get('/search/')
for n in range(1, 11):
self.assertContains(response, 'Title #%d' % n)
self.assertContains(response, 'Description #%d' % n)
def testMatchingBugsFromMtoN(self):
response = self.client.get('/search/')
self.assertContains(response, '1 to 10')
|
import django.test
from search.models import Project
class NonJavascriptSearch(django.test.TestCase):
fixtures = ['bugs-for-two-projects.json']
def testSearch(self):
response = self.client.get('/search/')
for n in range(1, 11):
self.assertContains(response, 'Title #%d' % n)
self.assertContains(response, 'Description #%d' % n)
Add a failing test (for a usability issue)import django.test
from search.models import Project
class NonJavascriptSearch(django.test.TestCase):
fixtures = ['bugs-for-two-projects.json']
def testSearch(self):
response = self.client.get('/search/')
for n in range(1, 11):
self.assertContains(response, 'Title #%d' % n)
self.assertContains(response, 'Description #%d' % n)
def testMatchingBugsFromMtoN(self):
response = self.client.get('/search/')
self.assertContains(response, '1 to 10')
|
<commit_before>import django.test
from search.models import Project
class NonJavascriptSearch(django.test.TestCase):
fixtures = ['bugs-for-two-projects.json']
def testSearch(self):
response = self.client.get('/search/')
for n in range(1, 11):
self.assertContains(response, 'Title #%d' % n)
self.assertContains(response, 'Description #%d' % n)
<commit_msg>Add a failing test (for a usability issue)<commit_after>import django.test
from search.models import Project
class NonJavascriptSearch(django.test.TestCase):
fixtures = ['bugs-for-two-projects.json']
def testSearch(self):
response = self.client.get('/search/')
for n in range(1, 11):
self.assertContains(response, 'Title #%d' % n)
self.assertContains(response, 'Description #%d' % n)
def testMatchingBugsFromMtoN(self):
response = self.client.get('/search/')
self.assertContains(response, '1 to 10')
|
be9c03b7903c1493fd7dc2721aa676d2b6c8ae31
|
bin/stock_dashboard.py
|
bin/stock_dashboard.py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import print_function
from grs import RealtimeTWSE
def main():
stock = RealtimeTWSE('0050')
data = stock.data['0050']
print("{0} {1}".format(
data['info']['name'].encode('utf-8'),
data['price']))
if __name__ == "__main__":
main()
|
Add script to display stock for conky.
|
Add script to display stock for conky.
|
Python
|
apache-2.0
|
elleryq/oh-my-home,elleryq/oh-my-home,elleryq/oh-my-home
|
Add script to display stock for conky.
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import print_function
from grs import RealtimeTWSE
def main():
stock = RealtimeTWSE('0050')
data = stock.data['0050']
print("{0} {1}".format(
data['info']['name'].encode('utf-8'),
data['price']))
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add script to display stock for conky.<commit_after>
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import print_function
from grs import RealtimeTWSE
def main():
stock = RealtimeTWSE('0050')
data = stock.data['0050']
print("{0} {1}".format(
data['info']['name'].encode('utf-8'),
data['price']))
if __name__ == "__main__":
main()
|
Add script to display stock for conky.#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import print_function
from grs import RealtimeTWSE
def main():
stock = RealtimeTWSE('0050')
data = stock.data['0050']
print("{0} {1}".format(
data['info']['name'].encode('utf-8'),
data['price']))
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add script to display stock for conky.<commit_after>#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import print_function
from grs import RealtimeTWSE
def main():
stock = RealtimeTWSE('0050')
data = stock.data['0050']
print("{0} {1}".format(
data['info']['name'].encode('utf-8'),
data['price']))
if __name__ == "__main__":
main()
|
|
1d028effaded2094d2dbc16b4c5aa04af13b55a7
|
lava_scheduler_app/migrations/0013_auto_20160302_0404.py
|
lava_scheduler_app/migrations/0013_auto_20160302_0404.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-03-02 04:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lava_scheduler_app', '0012_auto_20160208_1600'),
]
operations = [
migrations.AlterField(
model_name='devicetype',
name='health_denominator',
field=models.IntegerField(choices=[(0, b'hours'), (1, b'jobs')], default=0, help_text=b'Choose to submit a health check every N hours or every N jobs. Balance against the duration of a health check job and the average job duration.', verbose_name=b'Initiate health checks by hours or by jobs.'),
),
]
|
Add missing migration w.r.t. health check frequency.
|
Add missing migration w.r.t. health check frequency.
Change-Id: Ic6a40780bfe73245593fcb5f8cfbb8a6eb4d8dd6
|
Python
|
agpl-3.0
|
Linaro/lava-server,Linaro/lava-server,Linaro/lava-server,Linaro/lava-server
|
Add missing migration w.r.t. health check frequency.
Change-Id: Ic6a40780bfe73245593fcb5f8cfbb8a6eb4d8dd6
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-03-02 04:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lava_scheduler_app', '0012_auto_20160208_1600'),
]
operations = [
migrations.AlterField(
model_name='devicetype',
name='health_denominator',
field=models.IntegerField(choices=[(0, b'hours'), (1, b'jobs')], default=0, help_text=b'Choose to submit a health check every N hours or every N jobs. Balance against the duration of a health check job and the average job duration.', verbose_name=b'Initiate health checks by hours or by jobs.'),
),
]
|
<commit_before><commit_msg>Add missing migration w.r.t. health check frequency.
Change-Id: Ic6a40780bfe73245593fcb5f8cfbb8a6eb4d8dd6<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-03-02 04:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lava_scheduler_app', '0012_auto_20160208_1600'),
]
operations = [
migrations.AlterField(
model_name='devicetype',
name='health_denominator',
field=models.IntegerField(choices=[(0, b'hours'), (1, b'jobs')], default=0, help_text=b'Choose to submit a health check every N hours or every N jobs. Balance against the duration of a health check job and the average job duration.', verbose_name=b'Initiate health checks by hours or by jobs.'),
),
]
|
Add missing migration w.r.t. health check frequency.
Change-Id: Ic6a40780bfe73245593fcb5f8cfbb8a6eb4d8dd6# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-03-02 04:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lava_scheduler_app', '0012_auto_20160208_1600'),
]
operations = [
migrations.AlterField(
model_name='devicetype',
name='health_denominator',
field=models.IntegerField(choices=[(0, b'hours'), (1, b'jobs')], default=0, help_text=b'Choose to submit a health check every N hours or every N jobs. Balance against the duration of a health check job and the average job duration.', verbose_name=b'Initiate health checks by hours or by jobs.'),
),
]
|
<commit_before><commit_msg>Add missing migration w.r.t. health check frequency.
Change-Id: Ic6a40780bfe73245593fcb5f8cfbb8a6eb4d8dd6<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-03-02 04:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lava_scheduler_app', '0012_auto_20160208_1600'),
]
operations = [
migrations.AlterField(
model_name='devicetype',
name='health_denominator',
field=models.IntegerField(choices=[(0, b'hours'), (1, b'jobs')], default=0, help_text=b'Choose to submit a health check every N hours or every N jobs. Balance against the duration of a health check job and the average job duration.', verbose_name=b'Initiate health checks by hours or by jobs.'),
),
]
|
|
4d4aea6f1077da609a527b70a9f24e38eda8669c
|
pybug/image/test/image_update_from_vector_test.py
|
pybug/image/test/image_update_from_vector_test.py
|
import numpy as np
from numpy.testing import assert_allclose
from pybug.image import *
from nose.tools import raises
def update_im_from_vector(im):
new_values = np.random.random(im.pixels.shape)
same_im = im.update_from_vector(new_values.flatten())
assert same_im is im
assert same_im.pixels.shape == new_values.shape
return new_values
def test_depthimage_update_from_vector():
im = DepthImage.blank((10, 10))
# Force the lazy construction of the old mesh
im.mesh
new_values = update_im_from_vector(im)
assert_allclose(im.mesh.points[:, 2], new_values.flatten())
def test_shapeimage_update_from_vector():
old_values = np.random.random((10, 10, 3))
im = ShapeImage(old_values)
# Force the lazy construction of the old mesh
im.mesh
new_values = update_im_from_vector(im)
assert_allclose(im.mesh.points.flatten(), new_values.flatten())
def test_intensityimage_update_from_vector():
im = IntensityImage.blank((10, 10))
update_im_from_vector(im)
def test_rgbimage_update_from_vector():
im = RGBImage.blank((10, 10), n_channels=3)
update_im_from_vector(im)
def test_maskedndimage_update_from_vector():
im = MaskedNDImage.blank((10, 10), n_channels=10)
update_im_from_vector(im)
|
Add new tests for update from vector
|
Add new tests for update from vector
|
Python
|
bsd-3-clause
|
jabooth/menpo-archive,jabooth/menpo-archive,mozata/menpo,patricksnape/menpo,mozata/menpo,jabooth/menpo-archive,patricksnape/menpo,menpo/menpo,yuxiang-zhou/menpo,yuxiang-zhou/menpo,menpo/menpo,menpo/menpo,grigorisg9gr/menpo,mozata/menpo,mozata/menpo,patricksnape/menpo,grigorisg9gr/menpo,yuxiang-zhou/menpo,grigorisg9gr/menpo,jabooth/menpo-archive
|
Add new tests for update from vector
|
import numpy as np
from numpy.testing import assert_allclose
from pybug.image import *
from nose.tools import raises
def update_im_from_vector(im):
new_values = np.random.random(im.pixels.shape)
same_im = im.update_from_vector(new_values.flatten())
assert same_im is im
assert same_im.pixels.shape == new_values.shape
return new_values
def test_depthimage_update_from_vector():
im = DepthImage.blank((10, 10))
# Force the lazy construction of the old mesh
im.mesh
new_values = update_im_from_vector(im)
assert_allclose(im.mesh.points[:, 2], new_values.flatten())
def test_shapeimage_update_from_vector():
old_values = np.random.random((10, 10, 3))
im = ShapeImage(old_values)
# Force the lazy construction of the old mesh
im.mesh
new_values = update_im_from_vector(im)
assert_allclose(im.mesh.points.flatten(), new_values.flatten())
def test_intensityimage_update_from_vector():
im = IntensityImage.blank((10, 10))
update_im_from_vector(im)
def test_rgbimage_update_from_vector():
im = RGBImage.blank((10, 10), n_channels=3)
update_im_from_vector(im)
def test_maskedndimage_update_from_vector():
im = MaskedNDImage.blank((10, 10), n_channels=10)
update_im_from_vector(im)
|
<commit_before><commit_msg>Add new tests for update from vector<commit_after>
|
import numpy as np
from numpy.testing import assert_allclose
from pybug.image import *
from nose.tools import raises
def update_im_from_vector(im):
new_values = np.random.random(im.pixels.shape)
same_im = im.update_from_vector(new_values.flatten())
assert same_im is im
assert same_im.pixels.shape == new_values.shape
return new_values
def test_depthimage_update_from_vector():
im = DepthImage.blank((10, 10))
# Force the lazy construction of the old mesh
im.mesh
new_values = update_im_from_vector(im)
assert_allclose(im.mesh.points[:, 2], new_values.flatten())
def test_shapeimage_update_from_vector():
old_values = np.random.random((10, 10, 3))
im = ShapeImage(old_values)
# Force the lazy construction of the old mesh
im.mesh
new_values = update_im_from_vector(im)
assert_allclose(im.mesh.points.flatten(), new_values.flatten())
def test_intensityimage_update_from_vector():
im = IntensityImage.blank((10, 10))
update_im_from_vector(im)
def test_rgbimage_update_from_vector():
im = RGBImage.blank((10, 10), n_channels=3)
update_im_from_vector(im)
def test_maskedndimage_update_from_vector():
im = MaskedNDImage.blank((10, 10), n_channels=10)
update_im_from_vector(im)
|
Add new tests for update from vectorimport numpy as np
from numpy.testing import assert_allclose
from pybug.image import *
from nose.tools import raises
def update_im_from_vector(im):
new_values = np.random.random(im.pixels.shape)
same_im = im.update_from_vector(new_values.flatten())
assert same_im is im
assert same_im.pixels.shape == new_values.shape
return new_values
def test_depthimage_update_from_vector():
im = DepthImage.blank((10, 10))
# Force the lazy construction of the old mesh
im.mesh
new_values = update_im_from_vector(im)
assert_allclose(im.mesh.points[:, 2], new_values.flatten())
def test_shapeimage_update_from_vector():
old_values = np.random.random((10, 10, 3))
im = ShapeImage(old_values)
# Force the lazy construction of the old mesh
im.mesh
new_values = update_im_from_vector(im)
assert_allclose(im.mesh.points.flatten(), new_values.flatten())
def test_intensityimage_update_from_vector():
im = IntensityImage.blank((10, 10))
update_im_from_vector(im)
def test_rgbimage_update_from_vector():
im = RGBImage.blank((10, 10), n_channels=3)
update_im_from_vector(im)
def test_maskedndimage_update_from_vector():
im = MaskedNDImage.blank((10, 10), n_channels=10)
update_im_from_vector(im)
|
<commit_before><commit_msg>Add new tests for update from vector<commit_after>import numpy as np
from numpy.testing import assert_allclose
from pybug.image import *
from nose.tools import raises
def update_im_from_vector(im):
new_values = np.random.random(im.pixels.shape)
same_im = im.update_from_vector(new_values.flatten())
assert same_im is im
assert same_im.pixels.shape == new_values.shape
return new_values
def test_depthimage_update_from_vector():
im = DepthImage.blank((10, 10))
# Force the lazy construction of the old mesh
im.mesh
new_values = update_im_from_vector(im)
assert_allclose(im.mesh.points[:, 2], new_values.flatten())
def test_shapeimage_update_from_vector():
old_values = np.random.random((10, 10, 3))
im = ShapeImage(old_values)
# Force the lazy construction of the old mesh
im.mesh
new_values = update_im_from_vector(im)
assert_allclose(im.mesh.points.flatten(), new_values.flatten())
def test_intensityimage_update_from_vector():
im = IntensityImage.blank((10, 10))
update_im_from_vector(im)
def test_rgbimage_update_from_vector():
im = RGBImage.blank((10, 10), n_channels=3)
update_im_from_vector(im)
def test_maskedndimage_update_from_vector():
im = MaskedNDImage.blank((10, 10), n_channels=10)
update_im_from_vector(im)
|
|
46e0d823e1c8cd903b034630d58a0a7ff13aeb34
|
tests/test_elsewhere_public_json.py
|
tests/test_elsewhere_public_json.py
|
from __future__ import print_function, unicode_literals
import json
#import datetime
#import pytz
from gittip.testing import Harness
class Tests(Harness):
def test_returns_json_if_not_opted_in(self, *classes):
for platform in self.platforms:
self.make_elsewhere(platform.name, 1, 'alice')
response = self.client.GET('/on/%s/alice/public.json' % platform.name)
assert response.code == 200
data = json.loads(response.body)
assert data['on'] == platform.name
def test_redirect_if_opted_in(self, *classes):
self.make_participant('alice')
for platform in self.platforms:
account = self.make_elsewhere(platform.name, 1, 'alice')
account.opt_in('alice')
response = self.client.GxT('/on/%s/alice/public.json' % platform.name)
assert response.code == 302
|
Add unit tests for elsewhere public.json
|
Add unit tests for elsewhere public.json
|
Python
|
mit
|
eXcomm/gratipay.com,gratipay/gratipay.com,studio666/gratipay.com,mccolgst/www.gittip.com,gratipay/gratipay.com,eXcomm/gratipay.com,gratipay/gratipay.com,studio666/gratipay.com,studio666/gratipay.com,gratipay/gratipay.com,mccolgst/www.gittip.com,mccolgst/www.gittip.com,mccolgst/www.gittip.com,eXcomm/gratipay.com,studio666/gratipay.com,eXcomm/gratipay.com
|
Add unit tests for elsewhere public.json
|
from __future__ import print_function, unicode_literals
import json
#import datetime
#import pytz
from gittip.testing import Harness
class Tests(Harness):
def test_returns_json_if_not_opted_in(self, *classes):
for platform in self.platforms:
self.make_elsewhere(platform.name, 1, 'alice')
response = self.client.GET('/on/%s/alice/public.json' % platform.name)
assert response.code == 200
data = json.loads(response.body)
assert data['on'] == platform.name
def test_redirect_if_opted_in(self, *classes):
self.make_participant('alice')
for platform in self.platforms:
account = self.make_elsewhere(platform.name, 1, 'alice')
account.opt_in('alice')
response = self.client.GxT('/on/%s/alice/public.json' % platform.name)
assert response.code == 302
|
<commit_before><commit_msg>Add unit tests for elsewhere public.json<commit_after>
|
from __future__ import print_function, unicode_literals
import json
#import datetime
#import pytz
from gittip.testing import Harness
class Tests(Harness):
def test_returns_json_if_not_opted_in(self, *classes):
for platform in self.platforms:
self.make_elsewhere(platform.name, 1, 'alice')
response = self.client.GET('/on/%s/alice/public.json' % platform.name)
assert response.code == 200
data = json.loads(response.body)
assert data['on'] == platform.name
def test_redirect_if_opted_in(self, *classes):
self.make_participant('alice')
for platform in self.platforms:
account = self.make_elsewhere(platform.name, 1, 'alice')
account.opt_in('alice')
response = self.client.GxT('/on/%s/alice/public.json' % platform.name)
assert response.code == 302
|
Add unit tests for elsewhere public.jsonfrom __future__ import print_function, unicode_literals
import json
#import datetime
#import pytz
from gittip.testing import Harness
class Tests(Harness):
def test_returns_json_if_not_opted_in(self, *classes):
for platform in self.platforms:
self.make_elsewhere(platform.name, 1, 'alice')
response = self.client.GET('/on/%s/alice/public.json' % platform.name)
assert response.code == 200
data = json.loads(response.body)
assert data['on'] == platform.name
def test_redirect_if_opted_in(self, *classes):
self.make_participant('alice')
for platform in self.platforms:
account = self.make_elsewhere(platform.name, 1, 'alice')
account.opt_in('alice')
response = self.client.GxT('/on/%s/alice/public.json' % platform.name)
assert response.code == 302
|
<commit_before><commit_msg>Add unit tests for elsewhere public.json<commit_after>from __future__ import print_function, unicode_literals
import json
#import datetime
#import pytz
from gittip.testing import Harness
class Tests(Harness):
def test_returns_json_if_not_opted_in(self, *classes):
for platform in self.platforms:
self.make_elsewhere(platform.name, 1, 'alice')
response = self.client.GET('/on/%s/alice/public.json' % platform.name)
assert response.code == 200
data = json.loads(response.body)
assert data['on'] == platform.name
def test_redirect_if_opted_in(self, *classes):
self.make_participant('alice')
for platform in self.platforms:
account = self.make_elsewhere(platform.name, 1, 'alice')
account.opt_in('alice')
response = self.client.GxT('/on/%s/alice/public.json' % platform.name)
assert response.code == 302
|
|
d14daf8707701d06e27f0bc8b86fd83adaa52e31
|
tools/model_surgery/save_weights.py
|
tools/model_surgery/save_weights.py
|
#!/usr/bin/env python
import sys
import os.path as osp
this_dir = osp.dirname(__file__)
sys.path.insert(0, osp.join(this_dir, '../../external/caffe-mpi/build/install/python/'))
import caffe
import argparse
import cPickle
import numpy as np  # np.newaxis is used below
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('def_file')
parser.add_argument('param')
parser.add_argument('bbox_mean')
parser.add_argument('bbox_std')
parser.add_argument('save_cls_param_file')
parser.add_argument('save_bbox_param_file')
args = parser.parse_args()
net = caffe.Net(args.def_file, args.param, caffe.TEST)
cls_w = net.params['cls_score_vid'][0].data.T
cls_b = net.params['cls_score_vid'][1].data
with open(args.save_cls_param_file, 'wb') as f:
cPickle.dump((cls_w, cls_b), f)
with open(args.bbox_mean, 'rb') as f:
bbox_means = cPickle.load(f)
with open(args.bbox_std, 'rb') as f:
bbox_stds = cPickle.load(f)
net.params['bbox_pred_vid'][0].data[...] = \
net.params['bbox_pred_vid'][0].data * bbox_stds[:, np.newaxis]
net.params['bbox_pred_vid'][1].data[...] = \
net.params['bbox_pred_vid'][1].data * bbox_stds + bbox_means
bbox_w = net.params['bbox_pred_vid'][0].data.T
bbox_b = net.params['bbox_pred_vid'][1].data
with open(args.save_bbox_param_file, 'wb') as f:
cPickle.dump((bbox_w, bbox_b), f)
|
Add a script to save trained cls and bbox weights.
|
Add a script to save trained cls and bbox weights.
|
Python
|
mit
|
myfavouritekk/TPN
|
Add a script to save trained cls and bbox weights.
|
#!/usr/bin/env python
import sys
import os.path as osp
this_dir = osp.dirname(__file__)
sys.path.insert(0, osp.join(this_dir, '../../external/caffe-mpi/build/install/python/'))
import caffe
import argparse
import cPickle
import numpy as np  # np.newaxis is used below
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('def_file')
parser.add_argument('param')
parser.add_argument('bbox_mean')
parser.add_argument('bbox_std')
parser.add_argument('save_cls_param_file')
parser.add_argument('save_bbox_param_file')
args = parser.parse_args()
net = caffe.Net(args.def_file, args.param, caffe.TEST)
cls_w = net.params['cls_score_vid'][0].data.T
cls_b = net.params['cls_score_vid'][1].data
with open(args.save_cls_param_file, 'wb') as f:
cPickle.dump((cls_w, cls_b), f)
with open(args.bbox_mean, 'rb') as f:
bbox_means = cPickle.load(f)
with open(args.bbox_std, 'rb') as f:
bbox_stds = cPickle.load(f)
net.params['bbox_pred_vid'][0].data[...] = \
net.params['bbox_pred_vid'][0].data * bbox_stds[:, np.newaxis]
net.params['bbox_pred_vid'][1].data[...] = \
net.params['bbox_pred_vid'][1].data * bbox_stds + bbox_means
bbox_w = net.params['bbox_pred_vid'][0].data.T
bbox_b = net.params['bbox_pred_vid'][1].data
with open(args.save_bbox_param_file, 'wb') as f:
cPickle.dump((bbox_w, bbox_b), f)
|
<commit_before><commit_msg>Add a script to save trained cls and bbox weights.<commit_after>
|
#!/usr/bin/env python
import sys
import os.path as osp
this_dir = osp.dirname(__file__)
sys.path.insert(0, osp.join(this_dir, '../../external/caffe-mpi/build/install/python/'))
import caffe
import argparse
import cPickle
import numpy as np  # np.newaxis is used below
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('def_file')
parser.add_argument('param')
parser.add_argument('bbox_mean')
parser.add_argument('bbox_std')
parser.add_argument('save_cls_param_file')
parser.add_argument('save_bbox_param_file')
args = parser.parse_args()
net = caffe.Net(args.def_file, args.param, caffe.TEST)
cls_w = net.params['cls_score_vid'][0].data.T
cls_b = net.params['cls_score_vid'][1].data
with open(args.save_cls_param_file, 'wb') as f:
cPickle.dump((cls_w, cls_b), f)
with open(args.bbox_mean, 'rb') as f:
bbox_means = cPickle.load(f)
with open(args.bbox_std, 'rb') as f:
bbox_stds = cPickle.load(f)
net.params['bbox_pred_vid'][0].data[...] = \
net.params['bbox_pred_vid'][0].data * bbox_stds[:, np.newaxis]
net.params['bbox_pred_vid'][1].data[...] = \
net.params['bbox_pred_vid'][1].data * bbox_stds + bbox_means
bbox_w = net.params['bbox_pred_vid'][0].data.T
bbox_b = net.params['bbox_pred_vid'][1].data
with open(args.save_bbox_param_file, 'wb') as f:
cPickle.dump((bbox_w, bbox_b), f)
|
Add a script to save trained cls and bbox weights.#!/usr/bin/env python
import sys
import os.path as osp
this_dir = osp.dirname(__file__)
sys.path.insert(0, osp.join(this_dir, '../../external/caffe-mpi/build/install/python/'))
import caffe
import argparse
import cPickle
import numpy as np  # np.newaxis is used below
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('def_file')
parser.add_argument('param')
parser.add_argument('bbox_mean')
parser.add_argument('bbox_std')
parser.add_argument('save_cls_param_file')
parser.add_argument('save_bbox_param_file')
args = parser.parse_args()
net = caffe.Net(args.def_file, args.param, caffe.TEST)
cls_w = net.params['cls_score_vid'][0].data.T
cls_b = net.params['cls_score_vid'][1].data
with open(args.save_cls_param_file, 'wb') as f:
cPickle.dump((cls_w, cls_b), f)
with open(args.bbox_mean, 'rb') as f:
bbox_means = cPickle.load(f)
with open(args.bbox_std, 'rb') as f:
bbox_stds = cPickle.load(f)
net.params['bbox_pred_vid'][0].data[...] = \
net.params['bbox_pred_vid'][0].data * bbox_stds[:, np.newaxis]
net.params['bbox_pred_vid'][1].data[...] = \
net.params['bbox_pred_vid'][1].data * bbox_stds + bbox_means
bbox_w = net.params['bbox_pred_vid'][0].data.T
bbox_b = net.params['bbox_pred_vid'][1].data
with open(args.save_bbox_param_file, 'wb') as f:
cPickle.dump((bbox_w, bbox_b), f)
|
<commit_before><commit_msg>Add a script to save trained cls and bbox weights.<commit_after>#!/usr/bin/env python
import sys
import os.path as osp
this_dir = osp.dirname(__file__)
sys.path.insert(0, osp.join(this_dir, '../../external/caffe-mpi/build/install/python/'))
import caffe
import argparse
import cPickle
import numpy as np  # np.newaxis is used below
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('def_file')
parser.add_argument('param')
parser.add_argument('bbox_mean')
parser.add_argument('bbox_std')
parser.add_argument('save_cls_param_file')
parser.add_argument('save_bbox_param_file')
args = parser.parse_args()
net = caffe.Net(args.def_file, args.param, caffe.TEST)
cls_w = net.params['cls_score_vid'][0].data.T
cls_b = net.params['cls_score_vid'][1].data
with open(args.save_cls_param_file, 'wb') as f:
cPickle.dump((cls_w, cls_b), f)
with open(args.bbox_mean, 'rb') as f:
bbox_means = cPickle.load(f)
with open(args.bbox_std, 'rb') as f:
bbox_stds = cPickle.load(f)
net.params['bbox_pred_vid'][0].data[...] = \
net.params['bbox_pred_vid'][0].data * bbox_stds[:, np.newaxis]
net.params['bbox_pred_vid'][1].data[...] = \
net.params['bbox_pred_vid'][1].data * bbox_stds + bbox_means
bbox_w = net.params['bbox_pred_vid'][0].data.T
bbox_b = net.params['bbox_pred_vid'][1].data
with open(args.save_bbox_param_file, 'wb') as f:
cPickle.dump((bbox_w, bbox_b), f)
|
|
307071147378990f1b038c0c29adb6d7f1ec8939
|
tests/genmb_test.py
|
tests/genmb_test.py
|
"""Tests for the general many-body problem."""
import pytest
from sympy import IndexedBase
from drudge import GenMBDrudge, CR, AN
@pytest.fixture(scope='module')
def genmb(spark_ctx):
"""Initialize the environment for a free algebra."""
dr = GenMBDrudge(spark_ctx)
return dr
def test_genmb_has_basic_properties(genmb):
"""Test the general many-body model has basic properties."""
dr = genmb
assert len(dr.orb_ranges) == 1
assert len(dr.spin_vals) == 0
assert dr.one_body == dr.names.t == IndexedBase('t')
assert dr.two_body == dr.names.u == IndexedBase('u')
# The Hamiltonian should already be simplified for this simple model.
assert dr.ham.n_terms == 2
assert dr.ham == dr.orig_ham
# The details of the Hamiltonian will be tested in other ways.
def test_genmb_derives_spin_orbit_hartree_fock(genmb):
"""Test general many-body model can derive HF theory in spin-orbital basis.
"""
dr = genmb
p = genmb.names
c = p.c
r = p.L
a, b = p.L_dumms[:2]
rot = c[CR, a] * c[AN, b]
comm = (dr.ham | rot).simplify()
assert comm.n_terms == 4
rho = IndexedBase('rho')
# Following Ring and Schuck, here all creation comes before the
# annihilation.
res = dr.eval_vev(comm, lambda op1, op2: (
rho[op2.indices[1], op1.indices[1]]
if op1.indices[0] == CR and op2.indices[0] == AN
else 0
)).simplify()
assert res.n_terms == 2
# TODO: Add test of the actual values.
|
Add test for spin-orbital Hartree-Fock theory
|
Add test for spin-orbital Hartree-Fock theory
The actual value is not tested, which will be added when the
substitution facility is ready.
|
Python
|
mit
|
tschijnmo/drudge,tschijnmo/drudge,tschijnmo/drudge
|
Add test for spin-orbital Hartree-Fock theory
The actual value is not tested, which will be added when the
substitution facility is ready.
|
"""Tests for the general many-body problem."""
import pytest
from sympy import IndexedBase
from drudge import GenMBDrudge, CR, AN
@pytest.fixture(scope='module')
def genmb(spark_ctx):
"""Initialize the environment for a free algebra."""
dr = GenMBDrudge(spark_ctx)
return dr
def test_genmb_has_basic_properties(genmb):
"""Test the general many-body model has basic properties."""
dr = genmb
assert len(dr.orb_ranges) == 1
assert len(dr.spin_vals) == 0
assert dr.one_body == dr.names.t == IndexedBase('t')
assert dr.two_body == dr.names.u == IndexedBase('u')
# The Hamiltonian should already be simplified for this simple model.
assert dr.ham.n_terms == 2
assert dr.ham == dr.orig_ham
# The details of the Hamiltonian will be tested in other ways.
def test_genmb_derives_spin_orbit_hartree_fock(genmb):
"""Test general many-body model can derive HF theory in spin-orbital basis.
"""
dr = genmb
p = genmb.names
c = p.c
r = p.L
a, b = p.L_dumms[:2]
rot = c[CR, a] * c[AN, b]
comm = (dr.ham | rot).simplify()
assert comm.n_terms == 4
rho = IndexedBase('rho')
# Following Ring and Schuck, here all creation comes before the
# annihilation.
res = dr.eval_vev(comm, lambda op1, op2: (
rho[op2.indices[1], op1.indices[1]]
if op1.indices[0] == CR and op2.indices[0] == AN
else 0
)).simplify()
assert res.n_terms == 2
# TODO: Add test of the actual values.
|
<commit_before><commit_msg>Add test for spin-orbital Hartree-Fock theory
The actual value is not tested, which will be added when the
substitution facility is ready.<commit_after>
|
"""Tests for the general many-body problem."""
import pytest
from sympy import IndexedBase
from drudge import GenMBDrudge, CR, AN
@pytest.fixture(scope='module')
def genmb(spark_ctx):
"""Initialize the environment for a free algebra."""
dr = GenMBDrudge(spark_ctx)
return dr
def test_genmb_has_basic_properties(genmb):
"""Test the general many-body model has basic properties."""
dr = genmb
assert len(dr.orb_ranges) == 1
assert len(dr.spin_vals) == 0
assert dr.one_body == dr.names.t == IndexedBase('t')
assert dr.two_body == dr.names.u == IndexedBase('u')
# The Hamiltonian should already be simplified for this simple model.
assert dr.ham.n_terms == 2
assert dr.ham == dr.orig_ham
# The details of the Hamiltonian will be tested in other ways.
def test_genmb_derives_spin_orbit_hartree_fock(genmb):
"""Test general many-body model can derive HF theory in spin-orbital basis.
"""
dr = genmb
p = genmb.names
c = p.c
r = p.L
a, b = p.L_dumms[:2]
rot = c[CR, a] * c[AN, b]
comm = (dr.ham | rot).simplify()
assert comm.n_terms == 4
rho = IndexedBase('rho')
# Following Ring and Schuck, here all creation comes before the
# annihilation.
res = dr.eval_vev(comm, lambda op1, op2: (
rho[op2.indices[1], op1.indices[1]]
if op1.indices[0] == CR and op2.indices[0] == AN
else 0
)).simplify()
assert res.n_terms == 2
# TODO: Add test of the actual values.
|
Add test for spin-orbital Hartree-Fock theory
The actual value is not tested, which will be added when the
substitution facility is ready."""Tests for the general many-body problem."""
import pytest
from sympy import IndexedBase
from drudge import GenMBDrudge, CR, AN
@pytest.fixture(scope='module')
def genmb(spark_ctx):
"""Initialize the environment for a free algebra."""
dr = GenMBDrudge(spark_ctx)
return dr
def test_genmb_has_basic_properties(genmb):
"""Test the general many-body model has basic properties."""
dr = genmb
assert len(dr.orb_ranges) == 1
assert len(dr.spin_vals) == 0
assert dr.one_body == dr.names.t == IndexedBase('t')
assert dr.two_body == dr.names.u == IndexedBase('u')
# The Hamiltonian should already be simplified for this simple model.
assert dr.ham.n_terms == 2
assert dr.ham == dr.orig_ham
# The details of the Hamiltonian will be tested in other ways.
def test_genmb_derives_spin_orbit_hartree_fock(genmb):
"""Test general many-body model can derive HF theory in spin-orbital basis.
"""
dr = genmb
p = genmb.names
c = p.c
r = p.L
a, b = p.L_dumms[:2]
rot = c[CR, a] * c[AN, b]
comm = (dr.ham | rot).simplify()
assert comm.n_terms == 4
rho = IndexedBase('rho')
# Following Ring and Schuck, here all creation comes before the
# annihilation.
res = dr.eval_vev(comm, lambda op1, op2: (
rho[op2.indices[1], op1.indices[1]]
if op1.indices[0] == CR and op2.indices[0] == AN
else 0
)).simplify()
assert res.n_terms == 2
# TODO: Add test of the actual values.
|
<commit_before><commit_msg>Add test for spin-orbital Hartree-Fock theory
The actual value is not tested, which will be added when the
substitution facility is ready.<commit_after>"""Tests for the general many-body problem."""
import pytest
from sympy import IndexedBase
from drudge import GenMBDrudge, CR, AN
@pytest.fixture(scope='module')
def genmb(spark_ctx):
"""Initialize the environment for a free algebra."""
dr = GenMBDrudge(spark_ctx)
return dr
def test_genmb_has_basic_properties(genmb):
"""Test the general many-body model has basic properties."""
dr = genmb
assert len(dr.orb_ranges) == 1
assert len(dr.spin_vals) == 0
assert dr.one_body == dr.names.t == IndexedBase('t')
assert dr.two_body == dr.names.u == IndexedBase('u')
# The Hamiltonian should already be simplified for this simple model.
assert dr.ham.n_terms == 2
assert dr.ham == dr.orig_ham
# The details of the Hamiltonian will be tested in other ways.
def test_genmb_derives_spin_orbit_hartree_fock(genmb):
"""Test general many-body model can derive HF theory in spin-orbital basis.
"""
dr = genmb
p = genmb.names
c = p.c
r = p.L
a, b = p.L_dumms[:2]
rot = c[CR, a] * c[AN, b]
comm = (dr.ham | rot).simplify()
assert comm.n_terms == 4
rho = IndexedBase('rho')
# Following Ring and Schuck, here all creation comes before the
# annihilation.
res = dr.eval_vev(comm, lambda op1, op2: (
rho[op2.indices[1], op1.indices[1]]
if op1.indices[0] == CR and op2.indices[0] == AN
else 0
)).simplify()
assert res.n_terms == 2
# TODO: Add test of the actual values.
|
|
ec25fd3099e5c921c9818cbafe48a520e991d87a
|
tests/test_jieba.py
|
tests/test_jieba.py
|
# encoding=utf-8
import unittest
import jieba
class TestJieba(unittest.TestCase):
def test_text_split(self):
sentence = "我爱北京天安门"
seg_list = jieba.cut(sentence)
|
ADD unit test for jieba
|
ADD unit test for jieba
|
Python
|
apache-2.0
|
Kaggle/docker-python,Kaggle/docker-python
|
ADD unit test for jieba
|
# encoding=utf-8
import unittest
import jieba
class TestJieba(unittest.TestCase):
def test_text_split(self):
sentence = "我爱北京天安门"
seg_list = jieba.cut(sentence)
|
<commit_before><commit_msg>ADD unit test for jieba<commit_after>
|
# encoding=utf-8
import unittest
import jieba
class TestJieba(unittest.TestCase):
def test_text_split(self):
sentence = "我爱北京天安门"
seg_list = jieba.cut(sentence)
|
ADD unit test for jieba# encoding=utf-8
import unittest
import jieba
class TestJieba(unittest.TestCase):
def test_text_split(self):
sentence = "我爱北京天安门"
seg_list = jieba.cut(sentence)
|
<commit_before><commit_msg>ADD unit test for jieba<commit_after># encoding=utf-8
import unittest
import jieba
class TestJieba(unittest.TestCase):
def test_text_split(self):
sentence = "我爱北京天安门"
seg_list = jieba.cut(sentence)
|
|
4e23e1f1790e609bd829c476b5948356feb15814
|
tests/test_share.py
|
tests/test_share.py
|
from nose.tools import * # PEP8 asserts
from mock import patch
from tests.base import OsfTestCase
from website.search import share_search
class TestShareSearch(OsfTestCase):
@patch.object(share_search.share_es, 'search')
def test_share_search(self, mock_search):
mock_search.return_value = {
'hits': {
'hits': {},
'total': 0
}
}
self.app.get('/api/v1/share/', params={
'q': '*',
'from': '1',
'size:': '20',
'sort': 'date'
})
assert_is(mock_search.called, True)
@patch.object(share_search.share_es, 'count')
def test_share_count(self, mock_count):
mock_count.return_value = {'count': 0}
self.app.get('/api/v1/share/', params={
'q': '*',
'from': '1',
'size:': '20',
'sort': 'date',
'count': True
})
assert_is(mock_count.called, True)
@patch.object(share_search.share_es, 'search')
def test_share_providers(self, mock_search):
mock_search.return_value = {
'hits': {
'hits': {},
'total': 0
}
}
self.app.get('/api/v1/share/providers/')
assert_is(mock_search.called, True)
@patch.object(share_search.share_es, 'search')
def test_share_stats(self, mock_search):
mock_search.return_value = {
'hits': {
'hits': {},
'total': 0
},
'aggregations': {
'date_chunks': {
'buckets': [{
'articles_over_time': {
'buckets': []
},
'key': 'test',
'doc_count': 0
}]
},
'sources': {
'buckets': [{
'key': 'test',
'doc_count': 0
}]
},
'earlier_documents': {
'sources': {
'buckets': [{
'key': 'test',
'doc_count': 0
}]
}
}
}
}
self.app.get('/api/v1/share/stats/')
assert_is(mock_search.called, True)
|
Add tests for share view functions
|
Add tests for share view functions
|
Python
|
apache-2.0
|
laurenrevere/osf.io,cslzchen/osf.io,abought/osf.io,brianjgeiger/osf.io,samanehsan/osf.io,monikagrabowska/osf.io,monikagrabowska/osf.io,adlius/osf.io,jeffreyliu3230/osf.io,HalcyonChimera/osf.io,amyshi188/osf.io,acshi/osf.io,mluo613/osf.io,GaryKriebel/osf.io,barbour-em/osf.io,caneruguz/osf.io,TomBaxter/osf.io,crcresearch/osf.io,doublebits/osf.io,Johnetordoff/osf.io,cwisecarver/osf.io,ckc6cz/osf.io,jolene-esposito/osf.io,monikagrabowska/osf.io,revanthkolli/osf.io,zamattiac/osf.io,arpitar/osf.io,danielneis/osf.io,jolene-esposito/osf.io,cwisecarver/osf.io,haoyuchen1992/osf.io,aaxelb/osf.io,saradbowman/osf.io,GageGaskins/osf.io,KAsante95/osf.io,haoyuchen1992/osf.io,billyhunt/osf.io,petermalcolm/osf.io,SSJohns/osf.io,leb2dg/osf.io,CenterForOpenScience/osf.io,kushG/osf.io,wearpants/osf.io,CenterForOpenScience/osf.io,lamdnhan/osf.io,MerlinZhang/osf.io,TomHeatwole/osf.io,lamdnhan/osf.io,acshi/osf.io,emetsger/osf.io,jnayak1/osf.io,mattclark/osf.io,lyndsysimon/osf.io,revanthkolli/osf.io,caseyrygt/osf.io,wearpants/osf.io,kwierman/osf.io,HarryRybacki/osf.io,brianjgeiger/osf.io,petermalcolm/osf.io,dplorimer/osf,reinaH/osf.io,fabianvf/osf.io,baylee-d/osf.io,dplorimer/osf,adlius/osf.io,jinluyuan/osf.io,mluo613/osf.io,DanielSBrown/osf.io,RomanZWang/osf.io,kushG/osf.io,SSJohns/osf.io,reinaH/osf.io,KAsante95/osf.io,pattisdr/osf.io,Johnetordoff/osf.io,jnayak1/osf.io,haoyuchen1992/osf.io,zamattiac/osf.io,fabianvf/osf.io,emetsger/osf.io,kch8qx/osf.io,rdhyee/osf.io,kwierman/osf.io,zkraime/osf.io,cslzchen/osf.io,jnayak1/osf.io,jmcarp/osf.io,cwisecarver/osf.io,icereval/osf.io,mfraezz/osf.io,MerlinZhang/osf.io,kch8qx/osf.io,chrisseto/osf.io,felliott/osf.io,monikagrabowska/osf.io,doublebits/osf.io,sloria/osf.io,doublebits/osf.io,abought/osf.io,lyndsysimon/osf.io,kch8qx/osf.io,kushG/osf.io,HalcyonChimera/osf.io,himanshuo/osf.io,caneruguz/osf.io,barbour-em/osf.io,bdyetton/prettychart,doublebits/osf.io,reinaH/osf.io,RomanZWang/osf.io,alexschiller/osf.io,jmcarp/osf.io,hmoco/osf.io,billyhunt/osf.io,ticklemepierce/osf.io,caseyrollins/osf.io,caseyrygt/osf.io,Ghalko/osf.io,HarryRybacki/osf.io,SSJohns/osf.io,brandonPurvis/osf.io,brandonPurvis/osf.io,KAsante95/osf.io,haoyuchen1992/osf.io,KAsante95/osf.io,cldershem/osf.io,jolene-esposito/osf.io,mluke93/osf.io,bdyetton/prettychart,chennan47/osf.io,kch8qx/osf.io,icereval/osf.io,emetsger/osf.io,samchrisinger/osf.io,ckc6cz/osf.io,asanfilippo7/osf.io,crcresearch/osf.io,hmoco/osf.io,zachjanicki/osf.io,Nesiehr/osf.io,revanthkolli/osf.io,cwisecarver/osf.io,sloria/osf.io,billyhunt/osf.io,mluke93/osf.io,hmoco/osf.io,ckc6cz/osf.io,KAsante95/osf.io,binoculars/osf.io,TomHeatwole/osf.io,himanshuo/osf.io,Nesiehr/osf.io,GageGaskins/osf.io,caneruguz/osf.io,chrisseto/osf.io,ZobairAlijan/osf.io,cosenal/osf.io,abought/osf.io,mluke93/osf.io,zkraime/osf.io,Johnetordoff/osf.io,arpitar/osf.io,fabianvf/osf.io,sbt9uc/osf.io,mattclark/osf.io,bdyetton/prettychart,barbour-em/osf.io,wearpants/osf.io,Nesiehr/osf.io,revanthkolli/osf.io,TomBaxter/osf.io,rdhyee/osf.io,baylee-d/osf.io,jeffreyliu3230/osf.io,njantrania/osf.io,adlius/osf.io,cosenal/osf.io,jnayak1/osf.io,erinspace/osf.io,ZobairAlijan/osf.io,fabianvf/osf.io,HalcyonChimera/osf.io,caseyrygt/osf.io,Johnetordoff/osf.io,laurenrevere/osf.io,erinspace/osf.io,mfraezz/osf.io,zachjanicki/osf.io,jeffreyliu3230/osf.io,GaryKriebel/osf.io,alexschiller/osf.io,ZobairAlijan/osf.io,brandonPurvis/osf.io,zamattiac/osf.io,RomanZWang/osf.io,petermalcolm/osf.io,alexschiller/osf.io,samchrisinger/osf.io,rdhyee/osf.io,mattclark/osf.io,jinluyuan/osf.io,njantrania/osf.io,sbt9uc/
osf.io,caseyrollins/osf.io,brianjgeiger/osf.io,HarryRybacki/osf.io,jolene-esposito/osf.io,ticklemepierce/osf.io,Ghalko/osf.io,caseyrygt/osf.io,GaryKriebel/osf.io,danielneis/osf.io,samchrisinger/osf.io,leb2dg/osf.io,felliott/osf.io,leb2dg/osf.io,cldershem/osf.io,acshi/osf.io,arpitar/osf.io,saradbowman/osf.io,binoculars/osf.io,ticklemepierce/osf.io,asanfilippo7/osf.io,RomanZWang/osf.io,acshi/osf.io,MerlinZhang/osf.io,felliott/osf.io,danielneis/osf.io,Ghalko/osf.io,mluke93/osf.io,laurenrevere/osf.io,HarryRybacki/osf.io,wearpants/osf.io,sbt9uc/osf.io,GageGaskins/osf.io,lyndsysimon/osf.io,sloria/osf.io,mluo613/osf.io,erinspace/osf.io,cslzchen/osf.io,ckc6cz/osf.io,himanshuo/osf.io,jinluyuan/osf.io,samchrisinger/osf.io,baylee-d/osf.io,alexschiller/osf.io,jeffreyliu3230/osf.io,cldershem/osf.io,binoculars/osf.io,arpitar/osf.io,bdyetton/prettychart,billyhunt/osf.io,ZobairAlijan/osf.io,monikagrabowska/osf.io,hmoco/osf.io,Ghalko/osf.io,chrisseto/osf.io,doublebits/osf.io,kushG/osf.io,kch8qx/osf.io,zamattiac/osf.io,brandonPurvis/osf.io,aaxelb/osf.io,acshi/osf.io,zachjanicki/osf.io,mluo613/osf.io,caneruguz/osf.io,pattisdr/osf.io,dplorimer/osf,cslzchen/osf.io,pattisdr/osf.io,njantrania/osf.io,njantrania/osf.io,icereval/osf.io,amyshi188/osf.io,chennan47/osf.io,CenterForOpenScience/osf.io,lamdnhan/osf.io,GageGaskins/osf.io,barbour-em/osf.io,abought/osf.io,mfraezz/osf.io,jmcarp/osf.io,GageGaskins/osf.io,GaryKriebel/osf.io,kwierman/osf.io,emetsger/osf.io,amyshi188/osf.io,DanielSBrown/osf.io,samanehsan/osf.io,jmcarp/osf.io,cldershem/osf.io,cosenal/osf.io,cosenal/osf.io,brandonPurvis/osf.io,RomanZWang/osf.io,mluo613/osf.io,kwierman/osf.io,TomBaxter/osf.io,crcresearch/osf.io,Nesiehr/osf.io,CenterForOpenScience/osf.io,dplorimer/osf,samanehsan/osf.io,asanfilippo7/osf.io,leb2dg/osf.io,adlius/osf.io,aaxelb/osf.io,petermalcolm/osf.io,chennan47/osf.io,amyshi188/osf.io,TomHeatwole/osf.io,zkraime/osf.io,reinaH/osf.io,felliott/osf.io,billyhunt/osf.io,asanfilippo7/osf.io,DanielSBrown/osf.io,lyndsysimon/osf.io,rdhyee/osf.io,HalcyonChimera/osf.io,zkraime/osf.io,chrisseto/osf.io,ticklemepierce/osf.io,sbt9uc/osf.io,brianjgeiger/osf.io,zachjanicki/osf.io,DanielSBrown/osf.io,SSJohns/osf.io,aaxelb/osf.io,danielneis/osf.io,lamdnhan/osf.io,jinluyuan/osf.io,caseyrollins/osf.io,samanehsan/osf.io,mfraezz/osf.io,TomHeatwole/osf.io,himanshuo/osf.io,alexschiller/osf.io,MerlinZhang/osf.io
|
Add tests for share view functions
|
from nose.tools import * # PEP8 asserts
from mock import patch
from tests.base import OsfTestCase
from website.search import share_search
class TestShareSearch(OsfTestCase):
@patch.object(share_search.share_es, 'search')
def test_share_search(self, mock_search):
mock_search.return_value = {
'hits': {
'hits': {},
'total': 0
}
}
self.app.get('/api/v1/share/', params={
'q': '*',
'from': '1',
'size:': '20',
'sort': 'date'
})
assert_is(mock_search.called, True)
@patch.object(share_search.share_es, 'count')
def test_share_count(self, mock_count):
mock_count.return_value = {'count': 0}
self.app.get('/api/v1/share/', params={
'q': '*',
'from': '1',
'size:': '20',
'sort': 'date',
'count': True
})
assert_is(mock_count.called, True)
@patch.object(share_search.share_es, 'search')
def test_share_providers(self, mock_search):
mock_search.return_value = {
'hits': {
'hits': {},
'total': 0
}
}
self.app.get('/api/v1/share/providers/')
assert_is(mock_search.called, True)
@patch.object(share_search.share_es, 'search')
def test_share_stats(self, mock_search):
mock_search.return_value = {
'hits': {
'hits': {},
'total': 0
},
'aggregations': {
'date_chunks': {
'buckets': [{
'articles_over_time': {
'buckets': []
},
'key': 'test',
'doc_count': 0
}]
},
'sources': {
'buckets': [{
'key': 'test',
'doc_count': 0
}]
},
'earlier_documents': {
'sources': {
'buckets': [{
'key': 'test',
'doc_count': 0
}]
}
}
}
}
self.app.get('/api/v1/share/stats/')
assert_is(mock_search.called, True)
|
<commit_before><commit_msg>Add tests for share view functions<commit_after>
|
from nose.tools import * # PEP8 asserts
from mock import patch
from tests.base import OsfTestCase
from website.search import share_search
class TestShareSearch(OsfTestCase):
@patch.object(share_search.share_es, 'search')
def test_share_search(self, mock_search):
mock_search.return_value = {
'hits': {
'hits': {},
'total': 0
}
}
self.app.get('/api/v1/share/', params={
'q': '*',
'from': '1',
'size:': '20',
'sort': 'date'
})
assert_is(mock_search.called, True)
@patch.object(share_search.share_es, 'count')
def test_share_count(self, mock_count):
mock_count.return_value = {'count': 0}
self.app.get('/api/v1/share/', params={
'q': '*',
'from': '1',
'size:': '20',
'sort': 'date',
'count': True
})
assert_is(mock_count.called, True)
@patch.object(share_search.share_es, 'search')
def test_share_providers(self, mock_search):
mock_search.return_value = {
'hits': {
'hits': {},
'total': 0
}
}
self.app.get('/api/v1/share/providers/')
assert_is(mock_search.called, True)
@patch.object(share_search.share_es, 'search')
def test_share_stats(self, mock_search):
mock_search.return_value = {
'hits': {
'hits': {},
'total': 0
},
'aggregations': {
'date_chunks': {
'buckets': [{
'articles_over_time': {
'buckets': []
},
'key': 'test',
'doc_count': 0
}]
},
'sources': {
'buckets': [{
'key': 'test',
'doc_count': 0
}]
},
'earlier_documents': {
'sources': {
'buckets': [{
'key': 'test',
'doc_count': 0
}]
}
}
}
}
self.app.get('/api/v1/share/stats/')
assert_is(mock_search.called, True)
|
Add tests for share view functionsfrom nose.tools import * # PEP8 asserts
from mock import patch
from tests.base import OsfTestCase
from website.search import share_search
class TestShareSearch(OsfTestCase):
@patch.object(share_search.share_es, 'search')
def test_share_search(self, mock_search):
mock_search.return_value = {
'hits': {
'hits': {},
'total': 0
}
}
self.app.get('/api/v1/share/', params={
'q': '*',
'from': '1',
'size:': '20',
'sort': 'date'
})
assert_is(mock_search.called, True)
@patch.object(share_search.share_es, 'count')
def test_share_count(self, mock_count):
mock_count.return_value = {'count': 0}
self.app.get('/api/v1/share/', params={
'q': '*',
'from': '1',
'size:': '20',
'sort': 'date',
'count': True
})
assert_is(mock_count.called, True)
@patch.object(share_search.share_es, 'search')
def test_share_providers(self, mock_search):
mock_search.return_value = {
'hits': {
'hits': {},
'total': 0
}
}
self.app.get('/api/v1/share/providers/')
assert_is(mock_search.called, True)
@patch.object(share_search.share_es, 'search')
def test_share_stats(self, mock_search):
mock_search.return_value = {
'hits': {
'hits': {},
'total': 0
},
'aggregations': {
'date_chunks': {
'buckets': [{
'articles_over_time': {
'buckets': []
},
'key': 'test',
'doc_count': 0
}]
},
'sources': {
'buckets': [{
'key': 'test',
'doc_count': 0
}]
},
'earlier_documents': {
'sources': {
'buckets': [{
'key': 'test',
'doc_count': 0
}]
}
}
}
}
self.app.get('/api/v1/share/stats/')
assert_is(mock_search.called, True)
|
<commit_before><commit_msg>Add tests for share view functions<commit_after>from nose.tools import * # PEP8 asserts
from mock import patch
from tests.base import OsfTestCase
from website.search import share_search
class TestShareSearch(OsfTestCase):
@patch.object(share_search.share_es, 'search')
def test_share_search(self, mock_search):
mock_search.return_value = {
'hits': {
'hits': {},
'total': 0
}
}
self.app.get('/api/v1/share/', params={
'q': '*',
'from': '1',
'size:': '20',
'sort': 'date'
})
assert_is(mock_search.called, True)
@patch.object(share_search.share_es, 'count')
def test_share_count(self, mock_count):
mock_count.return_value = {'count': 0}
self.app.get('/api/v1/share/', params={
'q': '*',
'from': '1',
'size:': '20',
'sort': 'date',
'count': True
})
assert_is(mock_count.called, True)
@patch.object(share_search.share_es, 'search')
def test_share_providers(self, mock_search):
mock_search.return_value = {
'hits': {
'hits': {},
'total': 0
}
}
self.app.get('/api/v1/share/providers/')
assert_is(mock_search.called, True)
@patch.object(share_search.share_es, 'search')
def test_share_stats(self, mock_search):
mock_search.return_value = {
'hits': {
'hits': {},
'total': 0
},
'aggregations': {
'date_chunks': {
'buckets': [{
'articles_over_time': {
'buckets': []
},
'key': 'test',
'doc_count': 0
}]
},
'sources': {
'buckets': [{
'key': 'test',
'doc_count': 0
}]
},
'earlier_documents': {
'sources': {
'buckets': [{
'key': 'test',
'doc_count': 0
}]
}
}
}
}
self.app.get('/api/v1/share/stats/')
assert_is(mock_search.called, True)
|
|
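The tests in this record never hit a live SHARE/Elasticsearch backend: patch.object swaps share_es.search (or share_es.count) for a Mock that records the call and hands back the canned payload, so the /api/v1/share/ views can run end to end. A minimal, self-contained sketch of that pattern; the Backend and do_query names below are illustrative stand-ins, not OSF code.

from unittest import mock

class Backend(object):
    def search(self, body):
        raise RuntimeError('would hit a real Elasticsearch cluster')

def do_query(backend):
    return backend.search({'query': {'match_all': {}}})

with mock.patch.object(Backend, 'search') as mock_search:
    mock_search.return_value = {'hits': {'hits': [], 'total': 0}}
    result = do_query(Backend())
    assert mock_search.called                # the patched method recorded the call
    assert result['hits']['total'] == 0      # and returned the canned payload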
388b6236761c5b05e1919f725d6bfe32b8961d5a
|
tests/helper_tests.py
|
tests/helper_tests.py
|
# -*- coding: utf-8 -*-
"""
Tests for helper and utility methods
TODO: move integration tests (e.g. all that test a full request cycle)
into smaller, broken-up unit tests to simplify testing.
~~~~
Flask-CORS is a simple extension to Flask allowing you to support cross
origin resource sharing (CORS) using a simple decorator.
:copyright: (c) 2014 by Cory Dolphin.
:license: MIT, see LICENSE for more details.
"""
from flask import Flask
try:
import unittest2 as unittest
except ImportError:
import unittest
try:
# this is how you would normally import
from flask.ext.cors import _try_match, _flexible_str
except:
# support local usage without installed package
from flask_cors import _try_match, _flexible_str
class InternalsTestCase(unittest.TestCase):
def test_try_match(self):
self.assertTrue(_try_match('www.com/foo+', 'www.com/foo+'))
def test_flexible_str_str(self):
self.assertEquals(_flexible_str('Bar, Foo, Qux'), 'Bar, Foo, Qux')
def test_flexible_str_set(self):
self.assertEquals(_flexible_str(set(['Foo', 'Bar', 'Qux'])),
'Bar, Foo, Qux')
|
Add tests for helper functions
|
Add tests for helper functions
|
Python
|
mit
|
corydolphin/flask-cors,ashleysommer/sanic-cors
|
Add tests for helper functions
|
# -*- coding: utf-8 -*-
"""
Tests for helper and utility methods
TODO: move integration tests (e.g. all that test a full request cycle)
into smaller, broken-up unit tests to simplify testing.
~~~~
Flask-CORS is a simple extension to Flask allowing you to support cross
origin resource sharing (CORS) using a simple decorator.
:copyright: (c) 2014 by Cory Dolphin.
:license: MIT, see LICENSE for more details.
"""
from flask import Flask
try:
import unittest2 as unittest
except ImportError:
import unittest
try:
# this is how you would normally import
from flask.ext.cors import _try_match, _flexible_str
except:
# support local usage without installed package
from flask_cors import _try_match, _flexible_str
class InternalsTestCase(unittest.TestCase):
def test_try_match(self):
self.assertTrue(_try_match('www.com/foo+', 'www.com/foo+'))
def test_flexible_str_str(self):
self.assertEquals(_flexible_str('Bar, Foo, Qux'), 'Bar, Foo, Qux')
def test_flexible_str_set(self):
self.assertEquals(_flexible_str(set(['Foo', 'Bar', 'Qux'])),
'Bar, Foo, Qux')
|
<commit_before><commit_msg>Add tests for helper functions<commit_after>
|
# -*- coding: utf-8 -*-
"""
Tests for helper and utility methods
TODO: move integration tests (e.g. all that test a full request cycle)
into smaller, broken-up unit tests to simplify testing.
~~~~
Flask-CORS is a simple extension to Flask allowing you to support cross
origin resource sharing (CORS) using a simple decorator.
:copyright: (c) 2014 by Cory Dolphin.
:license: MIT, see LICENSE for more details.
"""
from flask import Flask
try:
import unittest2 as unittest
except ImportError:
import unittest
try:
# this is how you would normally import
from flask.ext.cors import _try_match, _flexible_str
except:
# support local usage without installed package
from flask_cors import _try_match, _flexible_str
class InternalsTestCase(unittest.TestCase):
def test_try_match(self):
self.assertTrue(_try_match('www.com/foo+', 'www.com/foo+'))
def test_flexible_str_str(self):
self.assertEquals(_flexible_str('Bar, Foo, Qux'), 'Bar, Foo, Qux')
def test_flexible_str_set(self):
self.assertEquals(_flexible_str(set(['Foo', 'Bar', 'Qux'])),
'Bar, Foo, Qux')
|
Add tests for helper functions# -*- coding: utf-8 -*-
"""
Tests for helper and utility methods
TODO: move integration tests (e.g. all that test a full request cycle)
into smaller, broken-up unit tests to simplify testing.
~~~~
Flask-CORS is a simple extension to Flask allowing you to support cross
origin resource sharing (CORS) using a simple decorator.
:copyright: (c) 2014 by Cory Dolphin.
:license: MIT, see LICENSE for more details.
"""
from flask import Flask
try:
import unittest2 as unittest
except ImportError:
import unittest
try:
# this is how you would normally import
from flask.ext.cors import _try_match, _flexible_str
except:
# support local usage without installed package
from flask_cors import _try_match, _flexible_str
class InternalsTestCase(unittest.TestCase):
def test_try_match(self):
self.assertTrue(_try_match('www.com/foo+', 'www.com/foo+'))
def test_flexible_str_str(self):
self.assertEquals(_flexible_str('Bar, Foo, Qux'), 'Bar, Foo, Qux')
def test_flexible_str_set(self):
self.assertEquals(_flexible_str(set(['Foo', 'Bar', 'Qux'])),
'Bar, Foo, Qux')
|
<commit_before><commit_msg>Add tests for helper functions<commit_after># -*- coding: utf-8 -*-
"""
Tests for helper and utility methods
TODO: move integration tests (e.g. all that test a full request cycle)
into smaller, broken-up unit tests to simplify testing.
~~~~
Flask-CORS is a simple extension to Flask allowing you to support cross
origin resource sharing (CORS) using a simple decorator.
:copyright: (c) 2014 by Cory Dolphin.
:license: MIT, see LICENSE for more details.
"""
from flask import Flask
try:
import unittest2 as unittest
except ImportError:
import unittest
try:
# this is how you would normally import
from flask.ext.cors import _try_match, _flexible_str
except:
# support local usage without installed package
from flask_cors import _try_match, _flexible_str
class InternalsTestCase(unittest.TestCase):
def test_try_match(self):
self.assertTrue(_try_match('www.com/foo+', 'www.com/foo+'))
def test_flexible_str_str(self):
self.assertEquals(_flexible_str('Bar, Foo, Qux'), 'Bar, Foo, Qux')
def test_flexible_str_set(self):
self.assertEquals(_flexible_str(set(['Foo', 'Bar', 'Qux'])),
'Bar, Foo, Qux')
|
|
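The assertions in this record pin down the expected behaviour of _flexible_str without showing its body: a plain string passes through unchanged, while a set is rendered as a sorted, comma-separated header value. A rough sketch consistent with those tests, inferred from the assertions rather than taken from the actual Flask-CORS implementation:

def flexible_str(obj):
    """Render a header value from either a string or an iterable of strings."""
    if isinstance(obj, str):
        return obj                        # 'Bar, Foo, Qux' -> 'Bar, Foo, Qux'
    return ', '.join(sorted(obj))         # {'Foo', 'Bar', 'Qux'} -> 'Bar, Foo, Qux'

assert flexible_str('Bar, Foo, Qux') == 'Bar, Foo, Qux'
assert flexible_str(set(['Foo', 'Bar', 'Qux'])) == 'Bar, Foo, Qux'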
90b98d9b0f85f153ec90f2e8534f001774203aa0
|
tests/nuclear_test.py
|
tests/nuclear_test.py
|
"""Tests for special utilities related to nuclear problems."""
import pytest
from sympy import Symbol, simplify, latex
from drudge import NuclearBogoliubovDrudge
from drudge.nuclear import JOf, TildeOf, MOf, NOf, LOf, PiOf
@pytest.fixture(scope='module')
def nuclear(spark_ctx):
"""Set up the drudge to test."""
return NuclearBogoliubovDrudge(spark_ctx)
def test_qn_accessors():
"""Test the symbolic functions for quantum number access."""
k = Symbol('k')
for acc in [JOf, TildeOf, MOf, NOf, LOf, PiOf]:
# Test that they are considered integers.
e = acc(k)
assert simplify((-1) ** (e * 2)) == 1
latex_form = latex(e)
assert latex_form[-3:] == '{k}'
def test_jm_dummies_are_integers(nuclear: NuclearBogoliubovDrudge):
"""Test that the angular momentum dummies has the right assumptions."""
p = nuclear.names
for i in [p.m1, p.m2, p.M1, p.M2, p.J1, p.J2]:
assert simplify((-1) ** (i * 2)) == 1
|
Add tests for nuclear QN accessors and dummies
|
Add tests for nuclear QN accessors and dummies
Here we just test the most basic printing and assumptions.
|
Python
|
mit
|
tschijnmo/drudge,tschijnmo/drudge,tschijnmo/drudge
|
Add tests for nuclear QN accessors and dummies
Here we just test the most basic printing and assumptions.
|
"""Tests for special utilities related to nuclear problems."""
import pytest
from sympy import Symbol, simplify, latex
from drudge import NuclearBogoliubovDrudge
from drudge.nuclear import JOf, TildeOf, MOf, NOf, LOf, PiOf
@pytest.fixture(scope='module')
def nuclear(spark_ctx):
"""Set up the drudge to test."""
return NuclearBogoliubovDrudge(spark_ctx)
def test_qn_accessors():
"""Test the symbolic functions for quantum number access."""
k = Symbol('k')
for acc in [JOf, TildeOf, MOf, NOf, LOf, PiOf]:
# Test that they are considered integers.
e = acc(k)
assert simplify((-1) ** (e * 2)) == 1
latex_form = latex(e)
assert latex_form[-3:] == '{k}'
def test_jm_dummies_are_integers(nuclear: NuclearBogoliubovDrudge):
"""Test that the angular momentum dummies has the right assumptions."""
p = nuclear.names
for i in [p.m1, p.m2, p.M1, p.M2, p.J1, p.J2]:
assert simplify((-1) ** (i * 2)) == 1
|
<commit_before><commit_msg>Add tests for nuclear QN accessors and dummies
Here we just test the most basic printing and assumptions.<commit_after>
|
"""Tests for special utilities related to nuclear problems."""
import pytest
from sympy import Symbol, simplify, latex
from drudge import NuclearBogoliubovDrudge
from drudge.nuclear import JOf, TildeOf, MOf, NOf, LOf, PiOf
@pytest.fixture(scope='module')
def nuclear(spark_ctx):
"""Set up the drudge to test."""
return NuclearBogoliubovDrudge(spark_ctx)
def test_qn_accessors():
"""Test the symbolic functions for quantum number access."""
k = Symbol('k')
for acc in [JOf, TildeOf, MOf, NOf, LOf, PiOf]:
# Test that they are considered integers.
e = acc(k)
assert simplify((-1) ** (e * 2)) == 1
latex_form = latex(e)
assert latex_form[-3:] == '{k}'
def test_jm_dummies_are_integers(nuclear: NuclearBogoliubovDrudge):
"""Test that the angular momentum dummies has the right assumptions."""
p = nuclear.names
for i in [p.m1, p.m2, p.M1, p.M2, p.J1, p.J2]:
assert simplify((-1) ** (i * 2)) == 1
|
Add tests for nuclear QN accessors and dummies
Here we just test the most basic printing and assumptions."""Tests for special utilities related to nuclear problems."""
import pytest
from sympy import Symbol, simplify, latex
from drudge import NuclearBogoliubovDrudge
from drudge.nuclear import JOf, TildeOf, MOf, NOf, LOf, PiOf
@pytest.fixture(scope='module')
def nuclear(spark_ctx):
"""Set up the drudge to test."""
return NuclearBogoliubovDrudge(spark_ctx)
def test_qn_accessors():
"""Test the symbolic functions for quantum number access."""
k = Symbol('k')
for acc in [JOf, TildeOf, MOf, NOf, LOf, PiOf]:
# Test that they are considered integers.
e = acc(k)
assert simplify((-1) ** (e * 2)) == 1
latex_form = latex(e)
assert latex_form[-3:] == '{k}'
def test_jm_dummies_are_integers(nuclear: NuclearBogoliubovDrudge):
"""Test that the angular momentum dummies has the right assumptions."""
p = nuclear.names
for i in [p.m1, p.m2, p.M1, p.M2, p.J1, p.J2]:
assert simplify((-1) ** (i * 2)) == 1
|
<commit_before><commit_msg>Add tests for nuclear QN accessors and dummies
Here we just test the most basic printing and assumptions.<commit_after>"""Tests for special utilities related to nuclear problems."""
import pytest
from sympy import Symbol, simplify, latex
from drudge import NuclearBogoliubovDrudge
from drudge.nuclear import JOf, TildeOf, MOf, NOf, LOf, PiOf
@pytest.fixture(scope='module')
def nuclear(spark_ctx):
"""Set up the drudge to test."""
return NuclearBogoliubovDrudge(spark_ctx)
def test_qn_accessors():
"""Test the symbolic functions for quantum number access."""
k = Symbol('k')
for acc in [JOf, TildeOf, MOf, NOf, LOf, PiOf]:
# Test that they are considered integers.
e = acc(k)
assert simplify((-1) ** (e * 2)) == 1
latex_form = latex(e)
assert latex_form[-3:] == '{k}'
def test_jm_dummies_are_integers(nuclear: NuclearBogoliubovDrudge):
"""Test that the angular momentum dummies has the right assumptions."""
p = nuclear.names
for i in [p.m1, p.m2, p.M1, p.M2, p.J1, p.J2]:
assert simplify((-1) ** (i * 2)) == 1
|
|
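The (-1) ** (e * 2) check in this record only collapses to 1 when SymPy can prove the exponent is an even integer, so it doubles as a test that the accessors and dummies carry the integer assumption. A small stand-alone illustration of the trick, using plain SymPy symbols rather than the drudge accessors:

from sympy import Symbol, simplify

j = Symbol('j', integer=True)    # behaves like the quantum-number symbols above
x = Symbol('x')                  # no integer assumption

assert simplify((-1) ** (2 * j)) == 1    # provably even exponent collapses to 1
assert simplify((-1) ** (2 * x)) != 1    # stays symbolic without the assumption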
459e2dad7162c5091cd963edace8e2edb20e1e0e
|
migrations/versions/3a5eba38e4e8_set_g6_services_to_be_published_rather_.py
|
migrations/versions/3a5eba38e4e8_set_g6_services_to_be_published_rather_.py
|
"""Set G6 services to be published rather than enabled
Revision ID: 3a5eba38e4e8
Revises: 3e6c454a6fc7
Create Date: 2015-04-02 16:18:30.609595
"""
# revision identifiers, used by Alembic.
revision = '3a5eba38e4e8'
down_revision = '3e6c454a6fc7'
from alembic import op
from sqlalchemy.sql import column, table
from sqlalchemy import String
import sqlalchemy as sa
services = table('services', column('status', String))
def upgrade():
op.execute(
services.update(). \
values({'status': op.inline_literal('published')})
)
def downgrade():
op.execute(
services.update(). \
where(services.c.status == 'published'). \
values({'status': op.inline_literal('enabled')})
)
|
Update 'enabled' services to 'published'
|
Update 'enabled' services to 'published'
The previous migration that added the 'status' field set all G6 services to 'enabled'. This was wrong - they should be 'published', as they are visible to everyone ('enabled' will be visible only to admin and the supplier of the service).
|
Python
|
mit
|
mtekel/digitalmarketplace-api,mtekel/digitalmarketplace-api,alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api,RichardKnop/digitalmarketplace-api,RichardKnop/digitalmarketplace-api,mtekel/digitalmarketplace-api,RichardKnop/digitalmarketplace-api,RichardKnop/digitalmarketplace-api,mtekel/digitalmarketplace-api
|
Update 'enabled' services to 'published'
The previous migration that added the 'status' field set all G6 services to 'enabled'. This was wrong - they should be 'published', as they are visible to everyone ('enabled' will be visible only to admin and the supplier of the service).
|
"""Set G6 services to be published rather than enabled
Revision ID: 3a5eba38e4e8
Revises: 3e6c454a6fc7
Create Date: 2015-04-02 16:18:30.609595
"""
# revision identifiers, used by Alembic.
revision = '3a5eba38e4e8'
down_revision = '3e6c454a6fc7'
from alembic import op
from sqlalchemy.sql import column, table
from sqlalchemy import String
import sqlalchemy as sa
services = table('services', column('status', String))
def upgrade():
op.execute(
services.update(). \
values({'status': op.inline_literal('published')})
)
def downgrade():
op.execute(
services.update(). \
where(services.c.status == 'published'). \
values({'status': op.inline_literal('enabled')})
)
|
<commit_before><commit_msg>Update 'enabled' services to 'published'
The previous migration that added the 'status' field set all G6 services to 'enabled'. This was wrong - they should be 'published', as they are visible to everyone ('enabled' will be visible only to admin and the supplier of the service).<commit_after>
|
"""Set G6 services to be published rather than enabled
Revision ID: 3a5eba38e4e8
Revises: 3e6c454a6fc7
Create Date: 2015-04-02 16:18:30.609595
"""
# revision identifiers, used by Alembic.
revision = '3a5eba38e4e8'
down_revision = '3e6c454a6fc7'
from alembic import op
from sqlalchemy.sql import column, table
from sqlalchemy import String
import sqlalchemy as sa
services = table('services', column('status', String))
def upgrade():
op.execute(
services.update(). \
values({'status': op.inline_literal('published')})
)
def downgrade():
op.execute(
services.update(). \
where(services.c.status == 'published'). \
values({'status': op.inline_literal('enabled')})
)
|
Update 'enabled' services to 'published'
The previous migration that added the 'status' field set all G6 services to 'enabled'. This was wrong - they should be 'published', as they are visible to everyone ('enabled' will be visible only to admin and the supplier of the service)."""Set G6 services to be published rather than enabled
Revision ID: 3a5eba38e4e8
Revises: 3e6c454a6fc7
Create Date: 2015-04-02 16:18:30.609595
"""
# revision identifiers, used by Alembic.
revision = '3a5eba38e4e8'
down_revision = '3e6c454a6fc7'
from alembic import op
from sqlalchemy.sql import column, table
from sqlalchemy import String
import sqlalchemy as sa
services = table('services', column('status', String))
def upgrade():
op.execute(
services.update(). \
values({'status': op.inline_literal('published')})
)
def downgrade():
op.execute(
services.update(). \
where(services.c.status == 'published'). \
values({'status': op.inline_literal('enabled')})
)
|
<commit_before><commit_msg>Update 'enabled' services to 'published'
The previous migration that added the 'status' field set all G6 services to 'enabled'. This was wrong - they should be 'published', as they are visible to everyone ('enabled' will be visible only to admin and the supplier of the service).<commit_after>"""Set G6 services to be published rather than enabled
Revision ID: 3a5eba38e4e8
Revises: 3e6c454a6fc7
Create Date: 2015-04-02 16:18:30.609595
"""
# revision identifiers, used by Alembic.
revision = '3a5eba38e4e8'
down_revision = '3e6c454a6fc7'
from alembic import op
from sqlalchemy.sql import column, table
from sqlalchemy import String
import sqlalchemy as sa
services = table('services', column('status', String))
def upgrade():
op.execute(
services.update(). \
values({'status': op.inline_literal('published')})
)
def downgrade():
op.execute(
services.update(). \
where(services.c.status == 'published'). \
values({'status': op.inline_literal('enabled')})
)
|
|
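The distinction the commit message draws ('published' services are visible to everyone, 'enabled' ones only to admins and the supplying supplier) is exactly what the bulk UPDATE above applies. A tiny Alembic-free illustration of the intended row-level effect; the dict is only a stand-in for a services row:

def upgrade_row(row):
    row['status'] = 'published'          # service becomes publicly visible
    return row

def downgrade_row(row):
    if row['status'] == 'published':
        row['status'] = 'enabled'        # back to admin/supplier-only visibility
    return row

assert upgrade_row({'status': 'enabled'})['status'] == 'published'
assert downgrade_row({'status': 'published'})['status'] == 'enabled'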
146627e5e0b66dfe413b8993b3ba2bb11fa247e6
|
inventory/migrations/0002_auto_20171107_2108.py
|
inventory/migrations/0002_auto_20171107_2108.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2017-11-07 21:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('inventory', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='item',
name='place',
),
migrations.AddField(
model_name='item',
name='column',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='item',
name='row',
field=models.IntegerField(null=True),
),
]
|
Add migrations file from server, after inventory update.
|
Add migrations file from server, after inventory update.
|
Python
|
mit
|
hackerspace-ntnu/website,hackerspace-ntnu/website,hackerspace-ntnu/website
|
Add migrations file from server, after inventory update.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2017-11-07 21:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('inventory', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='item',
name='place',
),
migrations.AddField(
model_name='item',
name='column',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='item',
name='row',
field=models.IntegerField(null=True),
),
]
|
<commit_before><commit_msg>Add migrations file from server, after inventory update.<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2017-11-07 21:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('inventory', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='item',
name='place',
),
migrations.AddField(
model_name='item',
name='column',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='item',
name='row',
field=models.IntegerField(null=True),
),
]
|
Add migrations file from server, after inventory update.# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2017-11-07 21:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('inventory', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='item',
name='place',
),
migrations.AddField(
model_name='item',
name='column',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='item',
name='row',
field=models.IntegerField(null=True),
),
]
|
<commit_before><commit_msg>Add migrations file from server, after inventory update.<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2017-11-07 21:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('inventory', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='item',
name='place',
),
migrations.AddField(
model_name='item',
name='column',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='item',
name='row',
field=models.IntegerField(null=True),
),
]
|
|
267516c0750cf698a02e52cc46ad90180533a985
|
VehicleDetectionTracking/spatial_bin.py
|
VehicleDetectionTracking/spatial_bin.py
|
# Code given by Udacity, complete by Andres Guijarro
# Define a function that takes an image, a color space,
# and a new image size and returns a feature vector
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# Read in an image
# You can also read cutout2, 3, 4 etc. to see other examples
image = mpimg.imread('cutout1.jpg')
# Define a function to compute color histogram features
# Pass the color_space flag as 3-letter all caps string
# like 'HSV' or 'LUV' etc.
def bin_spatial(img, color_space='RGB', size=(32, 32)):
# Convert image to new color space (if specified)
small_img = cv2.resize(img, size)
# Use cv2.resize().ravel() to create the feature vector
features = small_img.ravel() # Remove this line!
# Return the feature vector
return features
def main():
feature_vec = bin_spatial(image, color_space='RGB', size=(32, 32))
# Plot features
plt.plot(feature_vec)
plt.title('Spatially Binned Features')
plt.show()
if __name__ == '__main__':
main()
|
Add scripts which define a function that takes an image, a color space, and a new image size and returns a feature vector
|
feat: Add scripts which define a function that takes an image, a color space, and a new image size and returns a feature vector
|
Python
|
mit
|
aguijarro/SelfDrivingCar
|
feat: Add scripts which define a function that takes an image, a color space, and a new image size and returns a feature vector
|
# Code given by Udacity, complete by Andres Guijarro
# Define a function that takes an image, a color space,
# and a new image size and returns a feature vector
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# Read in an image
# You can also read cutout2, 3, 4 etc. to see other examples
image = mpimg.imread('cutout1.jpg')
# Define a function to compute color histogram features
# Pass the color_space flag as 3-letter all caps string
# like 'HSV' or 'LUV' etc.
def bin_spatial(img, color_space='RGB', size=(32, 32)):
# Convert image to new color space (if specified)
small_img = cv2.resize(img, size)
# Use cv2.resize().ravel() to create the feature vector
features = small_img.ravel() # Remove this line!
# Return the feature vector
return features
def main():
feature_vec = bin_spatial(image, color_space='RGB', size=(32, 32))
# Plot features
plt.plot(feature_vec)
plt.title('Spatially Binned Features')
plt.show()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>feat: Add scripts which define a function that takes an image, a color space, and a new image size and returns a feature vector<commit_after>
|
# Code given by Udacity, complete by Andres Guijarro
# Define a function that takes an image, a color space,
# and a new image size and returns a feature vector
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# Read in an image
# You can also read cutout2, 3, 4 etc. to see other examples
image = mpimg.imread('cutout1.jpg')
# Define a function to compute color histogram features
# Pass the color_space flag as 3-letter all caps string
# like 'HSV' or 'LUV' etc.
def bin_spatial(img, color_space='RGB', size=(32, 32)):
# Convert image to new color space (if specified)
small_img = cv2.resize(img, size)
# Use cv2.resize().ravel() to create the feature vector
features = small_img.ravel() # Remove this line!
# Return the feature vector
return features
def main():
feature_vec = bin_spatial(image, color_space='RGB', size=(32, 32))
# Plot features
plt.plot(feature_vec)
plt.title('Spatially Binned Features')
plt.show()
if __name__ == '__main__':
main()
|
feat: Add scripts which define a function that takes an image, a color space, and a new image size and returns a feature vector# Code given by Udacity, complete by Andres Guijarro
# Define a function that takes an image, a color space,
# and a new image size and returns a feature vector
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# Read in an image
# You can also read cutout2, 3, 4 etc. to see other examples
image = mpimg.imread('cutout1.jpg')
# Define a function to compute color histogram features
# Pass the color_space flag as 3-letter all caps string
# like 'HSV' or 'LUV' etc.
def bin_spatial(img, color_space='RGB', size=(32, 32)):
# Convert image to new color space (if specified)
small_img = cv2.resize(img, size)
# Use cv2.resize().ravel() to create the feature vector
features = small_img.ravel() # Remove this line!
# Return the feature vector
return features
def main():
feature_vec = bin_spatial(image, color_space='RGB', size=(32, 32))
# Plot features
plt.plot(feature_vec)
plt.title('Spatially Binned Features')
plt.show()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>feat: Add scripts which define a function that takes an image, a color space, and a new image size and returns a feature vector<commit_after># Code given by Udacity, complete by Andres Guijarro
# Define a function that takes an image, a color space,
# and a new image size and returns a feature vector
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# Read in an image
# You can also read cutout2, 3, 4 etc. to see other examples
image = mpimg.imread('cutout1.jpg')
# Define a function to compute color histogram features
# Pass the color_space flag as 3-letter all caps string
# like 'HSV' or 'LUV' etc.
def bin_spatial(img, color_space='RGB', size=(32, 32)):
# Convert image to new color space (if specified)
small_img = cv2.resize(img, size)
# Use cv2.resize().ravel() to create the feature vector
features = small_img.ravel() # Remove this line!
# Return the feature vector
return features
def main():
feature_vec = bin_spatial(image, color_space='RGB', size=(32, 32))
# Plot features
plt.plot(feature_vec)
plt.title('Spatially Binned Features')
plt.show()
if __name__ == '__main__':
main()
|
|
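One concrete consequence of the defaults in this record: with size=(32, 32) and a 3-channel image, bin_spatial returns a feature vector of 32 * 32 * 3 = 3072 values. A quick sanity check of that arithmetic that swaps the cutout1.jpg fixture for a synthetic frame:

import numpy as np
import cv2

fake_img = np.random.randint(0, 255, (720, 1280, 3), dtype=np.uint8)   # synthetic RGB frame
features = cv2.resize(fake_img, (32, 32)).ravel()
assert features.shape == (32 * 32 * 3,)    # 3072 spatially binned values per image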
9751902bc25d08577f98a4346c4605a4cfbcec73
|
src/collectors/users/test/testusers.py
|
src/collectors/users/test/testusers.py
|
#!/usr/bin/python
# coding=utf-8
################################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
from diamond.collector import Collector
from users import UsersCollector
import sys
################################################################################
class TestUsersCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('UsersCollector', {
'utmp': self.getFixturePath('utmp.centos6'),
})
self.collector = UsersCollector(config, None)
@patch.object(Collector, 'publish')
def test_should_work_with_real_data(self, publish_mock):
# Because of the compiled nature of pyutmp, we can't actually test
# different operating system versions than the currently running
# one
if sys.platform.startswith('linux'):
self.collector.collect()
metrics = {
'kormoc': 2,
'root': 2,
'total': 4,
}
self.setDocExample(self.collector.__class__.__name__, metrics)
self.assertPublishedMany(publish_mock, metrics)
################################################################################
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/python
# coding=utf-8
################################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
from diamond.collector import Collector
from users import UsersCollector
import sys
################################################################################
class TestUsersCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('UsersCollector', {
'utmp': self.getFixturePath('utmp.centos6'),
})
self.collector = UsersCollector(config, None)
@patch.object(Collector, 'publish')
def test_should_work_with_real_data(self, publish_mock):
# Because of the compiled nature of pyutmp, we can't actually test
# different operating system versions than the currently running
# one
if sys.platform.startswith('linux'):
self.collector.collect()
metrics = {
'kormoc': 2,
'root': 3,
'total': 5,
}
self.setDocExample(self.collector.__class__.__name__, metrics)
self.assertPublishedMany(publish_mock, metrics)
################################################################################
if __name__ == "__main__":
unittest.main()
|
Fix users count for unit test
|
Fix users count for unit test
|
Python
|
mit
|
skbkontur/Diamond,rtoma/Diamond,python-diamond/Diamond,Nihn/Diamond-1,eMerzh/Diamond-1,TinLe/Diamond,skbkontur/Diamond,disqus/Diamond,russss/Diamond,tuenti/Diamond,codepython/Diamond,janisz/Diamond-1,sebbrandt87/Diamond,Nihn/Diamond-1,mfriedenhagen/Diamond,Ensighten/Diamond,bmhatfield/Diamond,actmd/Diamond,gg7/diamond,tellapart/Diamond,zoidbergwill/Diamond,acquia/Diamond,russss/Diamond,thardie/Diamond,hamelg/Diamond,hamelg/Diamond,Netuitive/netuitive-diamond,hamelg/Diamond,szibis/Diamond,timchenxiaoyu/Diamond,socialwareinc/Diamond,joel-airspring/Diamond,janisz/Diamond-1,datafiniti/Diamond,joel-airspring/Diamond,Basis/Diamond,tuenti/Diamond,codepython/Diamond,tellapart/Diamond,krbaker/Diamond,tusharmakkar08/Diamond,disqus/Diamond,EzyInsights/Diamond,ceph/Diamond,python-diamond/Diamond,tellapart/Diamond,datafiniti/Diamond,TinLe/Diamond,h00dy/Diamond,mfriedenhagen/Diamond,Ormod/Diamond,ramjothikumar/Diamond,ceph/Diamond,Nihn/Diamond-1,Precis/Diamond,ramjothikumar/Diamond,MediaMath/Diamond,anandbhoraskar/Diamond,Netuitive/Diamond,Slach/Diamond,codepython/Diamond,datafiniti/Diamond,jumping/Diamond,krbaker/Diamond,saucelabs/Diamond,jaingaurav/Diamond,mfriedenhagen/Diamond,CYBERBUGJR/Diamond,bmhatfield/Diamond,stuartbfox/Diamond,Netuitive/netuitive-diamond,zoidbergwill/Diamond,CYBERBUGJR/Diamond,acquia/Diamond,signalfx/Diamond,jaingaurav/Diamond,metamx/Diamond,skbkontur/Diamond,actmd/Diamond,anandbhoraskar/Diamond,rtoma/Diamond,Slach/Diamond,dcsquared13/Diamond,TinLe/Diamond,signalfx/Diamond,mzupan/Diamond,Clever/Diamond,gg7/diamond,CYBERBUGJR/Diamond,Ssawa/Diamond,actmd/Diamond,EzyInsights/Diamond,TinLe/Diamond,Precis/Diamond,eMerzh/Diamond-1,ceph/Diamond,cannium/Diamond,bmhatfield/Diamond,skbkontur/Diamond,krbaker/Diamond,gg7/diamond,ramjothikumar/Diamond,szibis/Diamond,TAKEALOT/Diamond,works-mobile/Diamond,jumping/Diamond,stuartbfox/Diamond,hvnsweeting/Diamond,socialwareinc/Diamond,hvnsweeting/Diamond,MichaelDoyle/Diamond,tusharmakkar08/Diamond,socialwareinc/Diamond,thardie/Diamond,disqus/Diamond,russss/Diamond,szibis/Diamond,rtoma/Diamond,acquia/Diamond,mzupan/Diamond,saucelabs/Diamond,MichaelDoyle/Diamond,signalfx/Diamond,TAKEALOT/Diamond,works-mobile/Diamond,MichaelDoyle/Diamond,Nihn/Diamond-1,Ensighten/Diamond,mzupan/Diamond,Ssawa/Diamond,Ssawa/Diamond,datafiniti/Diamond,Basis/Diamond,codepython/Diamond,Slach/Diamond,Slach/Diamond,dcsquared13/Diamond,tusharmakkar08/Diamond,h00dy/Diamond,Clever/Diamond,jriguera/Diamond,eMerzh/Diamond-1,jaingaurav/Diamond,MediaMath/Diamond,hvnsweeting/Diamond,works-mobile/Diamond,tusharmakkar08/Diamond,metamx/Diamond,russss/Diamond,CYBERBUGJR/Diamond,rtoma/Diamond,Ormod/Diamond,sebbrandt87/Diamond,joel-airspring/Diamond,h00dy/Diamond,dcsquared13/Diamond,stuartbfox/Diamond,metamx/Diamond,ceph/Diamond,hvnsweeting/Diamond,Ormod/Diamond,tellapart/Diamond,tuenti/Diamond,Ensighten/Diamond,stuartbfox/Diamond,jriguera/Diamond,mfriedenhagen/Diamond,TAKEALOT/Diamond,jriguera/Diamond,anandbhoraskar/Diamond,tuenti/Diamond,szibis/Diamond,Basis/Diamond,zoidbergwill/Diamond,EzyInsights/Diamond,EzyInsights/Diamond,ramjothikumar/Diamond,Clever/Diamond,mzupan/Diamond,Netuitive/Diamond,janisz/Diamond-1,socialwareinc/Diamond,timchenxiaoyu/Diamond,saucelabs/Diamond,MediaMath/Diamond,dcsquared13/Diamond,cannium/Diamond,Netuitive/netuitive-diamond,Netuitive/Diamond,Clever/Diamond,janisz/Diamond-1,anandbhoraskar/Diamond,works-mobile/Diamond,Precis/Diamond,MediaMath/Diamond,h00dy/Diamond,sebbrandt87/Diamond,signalfx/Diamond,Ensighten/Diamond,jumping/Diamond,timchenxiaoyu/Diamond,jrig
uera/Diamond,MichaelDoyle/Diamond,cannium/Diamond,saucelabs/Diamond,thardie/Diamond,jaingaurav/Diamond,acquia/Diamond,TAKEALOT/Diamond,Netuitive/netuitive-diamond,bmhatfield/Diamond,hamelg/Diamond,gg7/diamond,zoidbergwill/Diamond,Basis/Diamond,timchenxiaoyu/Diamond,Netuitive/Diamond,thardie/Diamond,jumping/Diamond,Precis/Diamond,python-diamond/Diamond,cannium/Diamond,sebbrandt87/Diamond,Ssawa/Diamond,krbaker/Diamond,eMerzh/Diamond-1,Ormod/Diamond,joel-airspring/Diamond,actmd/Diamond
|
#!/usr/bin/python
# coding=utf-8
################################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
from diamond.collector import Collector
from users import UsersCollector
import sys
################################################################################
class TestUsersCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('UsersCollector', {
'utmp': self.getFixturePath('utmp.centos6'),
})
self.collector = UsersCollector(config, None)
@patch.object(Collector, 'publish')
def test_should_work_with_real_data(self, publish_mock):
# Because of the compiled nature of pyutmp, we can't actually test
# different operating system versions than the currently running
# one
if sys.platform.startswith('linux'):
self.collector.collect()
metrics = {
'kormoc': 2,
'root': 2,
'total': 4,
}
self.setDocExample(self.collector.__class__.__name__, metrics)
self.assertPublishedMany(publish_mock, metrics)
################################################################################
if __name__ == "__main__":
unittest.main()
Fix users count for unit test
|
#!/usr/bin/python
# coding=utf-8
################################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
from diamond.collector import Collector
from users import UsersCollector
import sys
################################################################################
class TestUsersCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('UsersCollector', {
'utmp': self.getFixturePath('utmp.centos6'),
})
self.collector = UsersCollector(config, None)
@patch.object(Collector, 'publish')
def test_should_work_with_real_data(self, publish_mock):
# Because of the compiled nature of pyutmp, we can't actually test
# different operating system versions than the currently running
# one
if sys.platform.startswith('linux'):
self.collector.collect()
metrics = {
'kormoc': 2,
'root': 3,
'total': 5,
}
self.setDocExample(self.collector.__class__.__name__, metrics)
self.assertPublishedMany(publish_mock, metrics)
################################################################################
if __name__ == "__main__":
unittest.main()
|
<commit_before>#!/usr/bin/python
# coding=utf-8
################################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
from diamond.collector import Collector
from users import UsersCollector
import sys
################################################################################
class TestUsersCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('UsersCollector', {
'utmp': self.getFixturePath('utmp.centos6'),
})
self.collector = UsersCollector(config, None)
@patch.object(Collector, 'publish')
def test_should_work_with_real_data(self, publish_mock):
# Because of the compiled nature of pyutmp, we can't actually test
# different operating system versions than the currently running
# one
if sys.platform.startswith('linux'):
self.collector.collect()
metrics = {
'kormoc': 2,
'root': 2,
'total': 4,
}
self.setDocExample(self.collector.__class__.__name__, metrics)
self.assertPublishedMany(publish_mock, metrics)
################################################################################
if __name__ == "__main__":
unittest.main()
<commit_msg>Fix users count for unit test<commit_after>
|
#!/usr/bin/python
# coding=utf-8
################################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
from diamond.collector import Collector
from users import UsersCollector
import sys
################################################################################
class TestUsersCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('UsersCollector', {
'utmp': self.getFixturePath('utmp.centos6'),
})
self.collector = UsersCollector(config, None)
@patch.object(Collector, 'publish')
def test_should_work_with_real_data(self, publish_mock):
# Because of the compiled nature of pyutmp, we can't actually test
# different operating system versions than the currently running
# one
if sys.platform.startswith('linux'):
self.collector.collect()
metrics = {
'kormoc': 2,
'root': 3,
'total': 5,
}
self.setDocExample(self.collector.__class__.__name__, metrics)
self.assertPublishedMany(publish_mock, metrics)
################################################################################
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/python
# coding=utf-8
################################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
from diamond.collector import Collector
from users import UsersCollector
import sys
################################################################################
class TestUsersCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('UsersCollector', {
'utmp': self.getFixturePath('utmp.centos6'),
})
self.collector = UsersCollector(config, None)
@patch.object(Collector, 'publish')
def test_should_work_with_real_data(self, publish_mock):
# Because of the compiled nature of pyutmp, we can't actually test
# different operating system versions than the currently running
# one
if sys.platform.startswith('linux'):
self.collector.collect()
metrics = {
'kormoc': 2,
'root': 2,
'total': 4,
}
self.setDocExample(self.collector.__class__.__name__, metrics)
self.assertPublishedMany(publish_mock, metrics)
################################################################################
if __name__ == "__main__":
unittest.main()
Fix users count for unit test#!/usr/bin/python
# coding=utf-8
################################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
from diamond.collector import Collector
from users import UsersCollector
import sys
################################################################################
class TestUsersCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('UsersCollector', {
'utmp': self.getFixturePath('utmp.centos6'),
})
self.collector = UsersCollector(config, None)
@patch.object(Collector, 'publish')
def test_should_work_with_real_data(self, publish_mock):
# Because of the compiled nature of pyutmp, we can't actually test
# different operating system versions than the currently running
# one
if sys.platform.startswith('linux'):
self.collector.collect()
metrics = {
'kormoc': 2,
'root': 3,
'total': 5,
}
self.setDocExample(self.collector.__class__.__name__, metrics)
self.assertPublishedMany(publish_mock, metrics)
################################################################################
if __name__ == "__main__":
unittest.main()
|
<commit_before>#!/usr/bin/python
# coding=utf-8
################################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
from diamond.collector import Collector
from users import UsersCollector
import sys
################################################################################
class TestUsersCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('UsersCollector', {
'utmp': self.getFixturePath('utmp.centos6'),
})
self.collector = UsersCollector(config, None)
@patch.object(Collector, 'publish')
def test_should_work_with_real_data(self, publish_mock):
# Because of the compiled nature of pyutmp, we can't actually test
# different operating system versions than the currently running
# one
if sys.platform.startswith('linux'):
self.collector.collect()
metrics = {
'kormoc': 2,
'root': 2,
'total': 4,
}
self.setDocExample(self.collector.__class__.__name__, metrics)
self.assertPublishedMany(publish_mock, metrics)
################################################################################
if __name__ == "__main__":
unittest.main()
<commit_msg>Fix users count for unit test<commit_after>#!/usr/bin/python
# coding=utf-8
################################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
from diamond.collector import Collector
from users import UsersCollector
import sys
################################################################################
class TestUsersCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('UsersCollector', {
'utmp': self.getFixturePath('utmp.centos6'),
})
self.collector = UsersCollector(config, None)
@patch.object(Collector, 'publish')
def test_should_work_with_real_data(self, publish_mock):
# Because of the compiled nature of pyutmp, we can't actually test
# different operating system versions than the currently running
# one
if sys.platform.startswith('linux'):
self.collector.collect()
metrics = {
'kormoc': 2,
'root': 3,
'total': 5,
}
self.setDocExample(self.collector.__class__.__name__, metrics)
self.assertPublishedMany(publish_mock, metrics)
################################################################################
if __name__ == "__main__":
unittest.main()
|
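The fix in this record only touches the expected numbers: the utmp.centos6 fixture evidently holds two kormoc sessions and three root sessions, so the per-user counts and the grand total must read 2 + 3 = 5. The counting being exercised is essentially a per-username tally plus a total, for example:

from collections import Counter

sessions = ['kormoc', 'kormoc', 'root', 'root', 'root']   # stand-in for parsed utmp entries
metrics = dict(Counter(sessions))
metrics['total'] = len(sessions)
assert metrics == {'kormoc': 2, 'root': 3, 'total': 5}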
ab42c5e8c3ac51c65ed7229dafb751c7baa667aa
|
examples/mnist-rica.py
|
examples/mnist-rica.py
|
#!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
import theanets
from utils import load_mnist, plot_layers, plot_images
class RICA(theanets.Autoencoder):
def J(self, weight_inverse=0, **kwargs):
cost = super(RICA, self).J(**kwargs)
if weight_inverse > 0:
cost += sum((weight_inverse / (w * w).sum(axis=0)).sum() for w in self.weights)
return cost
train, valid, _ = load_mnist()
# mean-center the digits and compute a pca whitening transform.
train -= 0.5
valid -= 0.5
vals, vecs = np.linalg.eigh(np.dot(train.T, train) / len(train))
vals = vals[::-1]
vecs = vecs[:, ::-1]
K = 197 # this retains 99% of the variance in the digit data.
vals = np.sqrt(vals[:K])
vecs = vecs[:, :K]
def whiten(x):
return np.dot(x, np.dot(vecs, np.diag(1. / vals)))
def color(z):
return np.dot(z, np.dot(np.diag(vals), vecs.T))
# now train our model on the whitened dataset.
N = 16
e = theanets.Experiment(
RICA,
layers=(K, N * N, K),
activation='linear',
hidden_l1=0.2,
no_learn_biases=True,
tied_weights=True,
train_batches=100,
weight_inverse=0.01,
)
e.run(whiten(train), whiten(valid))
# color the network weights so they are viewable as digits.
plot_layers(
[color(e.network.weights[0].get_value().T).T],
tied_weights=True)
plt.tight_layout()
plt.show()
plot_images(valid[:N*N], 121, 'Sample data')
plot_images(
color(e.network.predict(whiten(valid[:N*N]))),
122, 'Reconstructed data')
plt.tight_layout()
plt.show()
|
Add an example for computing sparse codes using RICA.
|
Add an example for computing sparse codes using RICA.
|
Python
|
mit
|
lmjohns3/theanets,chrinide/theanets,devdoer/theanets
|
Add an example for computing sparse codes using RICA.
|
#!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
import theanets
from utils import load_mnist, plot_layers, plot_images
class RICA(theanets.Autoencoder):
def J(self, weight_inverse=0, **kwargs):
cost = super(RICA, self).J(**kwargs)
if weight_inverse > 0:
cost += sum((weight_inverse / (w * w).sum(axis=0)).sum() for w in self.weights)
return cost
train, valid, _ = load_mnist()
# mean-center the digits and compute a pca whitening transform.
train -= 0.5
valid -= 0.5
vals, vecs = np.linalg.eigh(np.dot(train.T, train) / len(train))
vals = vals[::-1]
vecs = vecs[:, ::-1]
K = 197 # this retains 99% of the variance in the digit data.
vals = np.sqrt(vals[:K])
vecs = vecs[:, :K]
def whiten(x):
return np.dot(x, np.dot(vecs, np.diag(1. / vals)))
def color(z):
return np.dot(z, np.dot(np.diag(vals), vecs.T))
# now train our model on the whitened dataset.
N = 16
e = theanets.Experiment(
RICA,
layers=(K, N * N, K),
activation='linear',
hidden_l1=0.2,
no_learn_biases=True,
tied_weights=True,
train_batches=100,
weight_inverse=0.01,
)
e.run(whiten(train), whiten(valid))
# color the network weights so they are viewable as digits.
plot_layers(
[color(e.network.weights[0].get_value().T).T],
tied_weights=True)
plt.tight_layout()
plt.show()
plot_images(valid[:N*N], 121, 'Sample data')
plot_images(
color(e.network.predict(whiten(valid[:N*N]))),
122, 'Reconstructed data')
plt.tight_layout()
plt.show()
|
<commit_before><commit_msg>Add an example for computing sparse codes using RICA.<commit_after>
|
#!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
import theanets
from utils import load_mnist, plot_layers, plot_images
class RICA(theanets.Autoencoder):
def J(self, weight_inverse=0, **kwargs):
cost = super(RICA, self).J(**kwargs)
if weight_inverse > 0:
cost += sum((weight_inverse / (w * w).sum(axis=0)).sum() for w in self.weights)
return cost
train, valid, _ = load_mnist()
# mean-center the digits and compute a pca whitening transform.
train -= 0.5
valid -= 0.5
vals, vecs = np.linalg.eigh(np.dot(train.T, train) / len(train))
vals = vals[::-1]
vecs = vecs[:, ::-1]
K = 197 # this retains 99% of the variance in the digit data.
vals = np.sqrt(vals[:K])
vecs = vecs[:, :K]
def whiten(x):
return np.dot(x, np.dot(vecs, np.diag(1. / vals)))
def color(z):
return np.dot(z, np.dot(np.diag(vals), vecs.T))
# now train our model on the whitened dataset.
N = 16
e = theanets.Experiment(
RICA,
layers=(K, N * N, K),
activation='linear',
hidden_l1=0.2,
no_learn_biases=True,
tied_weights=True,
train_batches=100,
weight_inverse=0.01,
)
e.run(whiten(train), whiten(valid))
# color the network weights so they are viewable as digits.
plot_layers(
[color(e.network.weights[0].get_value().T).T],
tied_weights=True)
plt.tight_layout()
plt.show()
plot_images(valid[:N*N], 121, 'Sample data')
plot_images(
color(e.network.predict(whiten(valid[:N*N]))),
122, 'Reconstructed data')
plt.tight_layout()
plt.show()
|
Add an example for computing sparse codes using RICA.#!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
import theanets
from utils import load_mnist, plot_layers, plot_images
class RICA(theanets.Autoencoder):
def J(self, weight_inverse=0, **kwargs):
cost = super(RICA, self).J(**kwargs)
if weight_inverse > 0:
cost += sum((weight_inverse / (w * w).sum(axis=0)).sum() for w in self.weights)
return cost
train, valid, _ = load_mnist()
# mean-center the digits and compute a pca whitening transform.
train -= 0.5
valid -= 0.5
vals, vecs = np.linalg.eigh(np.dot(train.T, train) / len(train))
vals = vals[::-1]
vecs = vecs[:, ::-1]
K = 197 # this retains 99% of the variance in the digit data.
vals = np.sqrt(vals[:K])
vecs = vecs[:, :K]
def whiten(x):
return np.dot(x, np.dot(vecs, np.diag(1. / vals)))
def color(z):
return np.dot(z, np.dot(np.diag(vals), vecs.T))
# now train our model on the whitened dataset.
N = 16
e = theanets.Experiment(
RICA,
layers=(K, N * N, K),
activation='linear',
hidden_l1=0.2,
no_learn_biases=True,
tied_weights=True,
train_batches=100,
weight_inverse=0.01,
)
e.run(whiten(train), whiten(valid))
# color the network weights so they are viewable as digits.
plot_layers(
[color(e.network.weights[0].get_value().T).T],
tied_weights=True)
plt.tight_layout()
plt.show()
plot_images(valid[:N*N], 121, 'Sample data')
plot_images(
color(e.network.predict(whiten(valid[:N*N]))),
122, 'Reconstructed data')
plt.tight_layout()
plt.show()
|
<commit_before><commit_msg>Add an example for computing sparse codes using RICA.<commit_after>#!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
import theanets
from utils import load_mnist, plot_layers, plot_images
class RICA(theanets.Autoencoder):
def J(self, weight_inverse=0, **kwargs):
cost = super(RICA, self).J(**kwargs)
if weight_inverse > 0:
cost += sum((weight_inverse / (w * w).sum(axis=0)).sum() for w in self.weights)
return cost
train, valid, _ = load_mnist()
# mean-center the digits and compute a pca whitening transform.
train -= 0.5
valid -= 0.5
vals, vecs = np.linalg.eigh(np.dot(train.T, train) / len(train))
vals = vals[::-1]
vecs = vecs[:, ::-1]
K = 197 # this retains 99% of the variance in the digit data.
vals = np.sqrt(vals[:K])
vecs = vecs[:, :K]
def whiten(x):
return np.dot(x, np.dot(vecs, np.diag(1. / vals)))
def color(z):
return np.dot(z, np.dot(np.diag(vals), vecs.T))
# now train our model on the whitened dataset.
N = 16
e = theanets.Experiment(
RICA,
layers=(K, N * N, K),
activation='linear',
hidden_l1=0.2,
no_learn_biases=True,
tied_weights=True,
train_batches=100,
weight_inverse=0.01,
)
e.run(whiten(train), whiten(valid))
# color the network weights so they are viewable as digits.
plot_layers(
[color(e.network.weights[0].get_value().T).T],
tied_weights=True)
plt.tight_layout()
plt.show()
plot_images(valid[:N*N], 121, 'Sample data')
plot_images(
color(e.network.predict(whiten(valid[:N*N]))),
122, 'Reconstructed data')
plt.tight_layout()
plt.show()
|
|
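The weight_inverse term added in RICA.J penalises hidden units whose incoming weight norm shrinks toward zero (the extra cost grows roughly as 1 / ||w_j||^2), which keeps features alive under the strong hidden_l1 sparsity pressure; that is a reading of the code above, not a claim from the theanets documentation. A toy numerical view of the term:

import numpy as np

w = np.array([[0.1, 2.0],
              [0.3, 1.0]])               # toy weight matrix, columns = hidden units
weight_inverse = 0.01
penalty = (weight_inverse / (w * w).sum(axis=0)).sum()
# squared column norms are 0.1 and 5.0, so the near-zero column dominates the penalty
assert np.isclose(penalty, 0.01 / 0.1 + 0.01 / 5.0)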
79fc87489595eebbc6c9f40d9d79a74af4e7494d
|
scripts/testdynamic.py
|
scripts/testdynamic.py
|
#!/usr/bin/env python
# -*- coding: utf-8
import Ice, IcePy, sys, tempfile
ice = Ice.initialize(sys.argv)
proxy = ice.stringToProxy('Meta:tcp -h 127.0.0.1 -p 6502')
slice = IcePy.Operation('getSlice', Ice.OperationMode.Idempotent, Ice.OperationMode.Idempotent, True, (), (), (), IcePy._t_string, ()).invoke(proxy, ((), None))
slicefile = tempfile.NamedTemporaryFile(suffix = '.ice')
slicefile.write(slice)
slicefile.flush()
Ice.loadSlice(slicefile.name)
slicefile.close()
import Murmur
meta = Murmur.MetaPrx.checkedCast(proxy)
print meta.getVersion()
|
#!/usr/bin/env python
# -*- coding: utf-8
import Ice, IcePy, sys, tempfile
ice = Ice.initialize(sys.argv)
proxy = ice.stringToProxy('Meta:tcp -h 127.0.0.1 -p 6502')
try:
slice = IcePy.Operation('getSlice', Ice.OperationMode.Idempotent, Ice.OperationMode.Idempotent, True, (), (), (), IcePy._t_string, ()).invoke(proxy, ((), None))
slicefile = tempfile.NamedTemporaryFile(suffix = '.ice')
slicefile.write(slice)
slicefile.flush()
Ice.loadSlice(slicefile.name)
slicefile.close()
print 'Using dynamic slice'
except:
Ice.loadSlice('Murmur.ice')
print 'Using bundled slice'
import Murmur
meta = Murmur.MetaPrx.checkedCast(proxy)
print meta.getVersion()
|
Expand dynamic slice-fetch example to show fallback
|
Expand dynamic slice-fetch example to show fallback
|
Python
|
bsd-3-clause
|
mkrautz/mumble-sbcelt,austinliou/mumble,unascribed/mumble,LuAPi/mumble,Githlar/mumble,chiefdome/mumble-code,panaschieren/mumble-test,ccpgames/mumble,unascribed/mumble,Zopieux/mumble,SuperNascher/mumble,Zopieux/mumble,Lartza/mumble,feld/mumble,feld/mumble,arrai/mumble-record,Lartza/mumble,unascribed/mumble,ccpgames/mumble,chancegarcia/mumble,Zopieux/mumble,feld/mumble,bheart/mumble,Keridos/mumble,chancegarcia/mumble,mbax/mumble,unascribed/mumble,Githlar/mumble,mkrautz/mumble-sbcelt,LuAPi/mumble,arrai/mumble-record,austinliou/mumble,Keridos/mumble,niko20010/mumble,richard227/mumble,mbax/mumble,panaschieren/mumble-test,austinliou/mumble,LuAPi/mumble,arrai/mumble-record,austinliou/mumble,Natenom/mumble,Zopieux/mumble,niko20010/mumble,SuperNascher/mumble,bheart/mumble,Githlar/mumble,Natenom/mumble,chancegarcia/mumble,bheart/mumble,mkrautz/mumble-sbcelt,SuperNascher/mumble,bheart/mumble,LuAPi/mumble,Githlar/mumble,richard227/mumble,feld/mumble,SuperNascher/mumble,Lartza/mumble,LuAPi/mumble,Lartza/mumble,mbax/mumble,Natenom/mumble,Zopieux/mumble,niko20010/mumble,Lartza/mumble,mkrautz/mumble-sbcelt,SuperNascher/mumble,mbax/mumble,LuAPi/mumble,arrai/mumble-record,Zopieux/mumble,chancegarcia/mumble,arrai/mumble-record,panaschieren/mumble-test,mkrautz/mumble-sbcelt,Keridos/mumble,Githlar/mumble,richard227/mumble,unascribed/mumble,niko20010/mumble,chiefdome/mumble-code,mkrautz/mumble-sbcelt,ccpgames/mumble,niko20010/mumble,unascribed/mumble,Keridos/mumble,Lartza/mumble,niko20010/mumble,mkrautz/mumble-sbcelt,niko20010/mumble,chiefdome/mumble-code,Githlar/mumble,LuAPi/mumble,mbax/mumble,feld/mumble,Keridos/mumble,LuAPi/mumble,chiefdome/mumble-code,arrai/mumble-record,unascribed/mumble,austinliou/mumble,ccpgames/mumble,Zopieux/mumble,Natenom/mumble,SuperNascher/mumble,Natenom/mumble,chiefdome/mumble-code,Keridos/mumble,austinliou/mumble,Natenom/mumble,feld/mumble,mbax/mumble,SuperNascher/mumble,chiefdome/mumble-code,arrai/mumble-record,panaschieren/mumble-test,Keridos/mumble,SuperNascher/mumble,SuperNascher/mumble,ccpgames/mumble,chancegarcia/mumble,richard227/mumble,panaschieren/mumble-test,ccpgames/mumble,arrai/mumble-record,bheart/mumble,mkrautz/mumble-sbcelt,Lartza/mumble,feld/mumble,unascribed/mumble,ccpgames/mumble,Githlar/mumble,panaschieren/mumble-test,panaschieren/mumble-test,richard227/mumble,feld/mumble,chancegarcia/mumble,chancegarcia/mumble,chiefdome/mumble-code,bheart/mumble,chancegarcia/mumble,ccpgames/mumble,mbax/mumble,bheart/mumble,richard227/mumble,Keridos/mumble,mbax/mumble,Zopieux/mumble,niko20010/mumble,bheart/mumble,Lartza/mumble,chiefdome/mumble-code,LuAPi/mumble,panaschieren/mumble-test,chancegarcia/mumble,richard227/mumble,austinliou/mumble,Natenom/mumble,Githlar/mumble,Natenom/mumble,austinliou/mumble,richard227/mumble
|
#!/usr/bin/env python
# -*- coding: utf-8
import Ice, IcePy, sys, tempfile
ice = Ice.initialize(sys.argv)
proxy = ice.stringToProxy('Meta:tcp -h 127.0.0.1 -p 6502')
slice = IcePy.Operation('getSlice', Ice.OperationMode.Idempotent, Ice.OperationMode.Idempotent, True, (), (), (), IcePy._t_string, ()).invoke(proxy, ((), None))
slicefile = tempfile.NamedTemporaryFile(suffix = '.ice')
slicefile.write(slice)
slicefile.flush()
Ice.loadSlice(slicefile.name)
slicefile.close()
import Murmur
meta = Murmur.MetaPrx.checkedCast(proxy)
print meta.getVersion()
Expand dynamic slice-fetch example to show fallback
|
#!/usr/bin/env python
# -*- coding: utf-8
import Ice, IcePy, sys, tempfile
ice = Ice.initialize(sys.argv)
proxy = ice.stringToProxy('Meta:tcp -h 127.0.0.1 -p 6502')
try:
slice = IcePy.Operation('getSlice', Ice.OperationMode.Idempotent, Ice.OperationMode.Idempotent, True, (), (), (), IcePy._t_string, ()).invoke(proxy, ((), None))
slicefile = tempfile.NamedTemporaryFile(suffix = '.ice')
slicefile.write(slice)
slicefile.flush()
Ice.loadSlice(slicefile.name)
slicefile.close()
print 'Using dynamic slice'
except:
Ice.loadSlice('Murmur.ice')
print 'Using bundled slice'
import Murmur
meta = Murmur.MetaPrx.checkedCast(proxy)
print meta.getVersion()
|
<commit_before>#!/usr/bin/env python
# -*- coding: utf-8
import Ice, IcePy, sys, tempfile
ice = Ice.initialize(sys.argv)
proxy = ice.stringToProxy('Meta:tcp -h 127.0.0.1 -p 6502')
slice = IcePy.Operation('getSlice', Ice.OperationMode.Idempotent, Ice.OperationMode.Idempotent, True, (), (), (), IcePy._t_string, ()).invoke(proxy, ((), None))
slicefile = tempfile.NamedTemporaryFile(suffix = '.ice')
slicefile.write(slice)
slicefile.flush()
Ice.loadSlice(slicefile.name)
slicefile.close()
import Murmur
meta = Murmur.MetaPrx.checkedCast(proxy)
print meta.getVersion()
<commit_msg>Expand dynamic slice-fetch example to show fallback<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8
import Ice, IcePy, sys, tempfile
ice = Ice.initialize(sys.argv)
proxy = ice.stringToProxy('Meta:tcp -h 127.0.0.1 -p 6502')
try:
slice = IcePy.Operation('getSlice', Ice.OperationMode.Idempotent, Ice.OperationMode.Idempotent, True, (), (), (), IcePy._t_string, ()).invoke(proxy, ((), None))
slicefile = tempfile.NamedTemporaryFile(suffix = '.ice')
slicefile.write(slice)
slicefile.flush()
Ice.loadSlice(slicefile.name)
slicefile.close()
print 'Using dynamic slice'
except:
Ice.loadSlice('Murmur.ice')
print 'Using bundled slice'
import Murmur
meta = Murmur.MetaPrx.checkedCast(proxy)
print meta.getVersion()
|
#!/usr/bin/env python
# -*- coding: utf-8
import Ice, IcePy, sys, tempfile
ice = Ice.initialize(sys.argv)
proxy = ice.stringToProxy('Meta:tcp -h 127.0.0.1 -p 6502')
slice = IcePy.Operation('getSlice', Ice.OperationMode.Idempotent, Ice.OperationMode.Idempotent, True, (), (), (), IcePy._t_string, ()).invoke(proxy, ((), None))
slicefile = tempfile.NamedTemporaryFile(suffix = '.ice')
slicefile.write(slice)
slicefile.flush()
Ice.loadSlice(slicefile.name)
slicefile.close()
import Murmur
meta = Murmur.MetaPrx.checkedCast(proxy)
print meta.getVersion()
Expand dynamic slice-fetch example to show fallback#!/usr/bin/env python
# -*- coding: utf-8
import Ice, IcePy, sys, tempfile
ice = Ice.initialize(sys.argv)
proxy = ice.stringToProxy('Meta:tcp -h 127.0.0.1 -p 6502')
try:
slice = IcePy.Operation('getSlice', Ice.OperationMode.Idempotent, Ice.OperationMode.Idempotent, True, (), (), (), IcePy._t_string, ()).invoke(proxy, ((), None))
slicefile = tempfile.NamedTemporaryFile(suffix = '.ice')
slicefile.write(slice)
slicefile.flush()
Ice.loadSlice(slicefile.name)
slicefile.close()
print 'Using dynamic slice'
except:
Ice.loadSlice('Murmur.ice')
print 'Using bundled slice'
import Murmur
meta = Murmur.MetaPrx.checkedCast(proxy)
print meta.getVersion()
|
<commit_before>#!/usr/bin/env python
# -*- coding: utf-8
import Ice, IcePy, sys, tempfile
ice = Ice.initialize(sys.argv)
proxy = ice.stringToProxy('Meta:tcp -h 127.0.0.1 -p 6502')
slice = IcePy.Operation('getSlice', Ice.OperationMode.Idempotent, Ice.OperationMode.Idempotent, True, (), (), (), IcePy._t_string, ()).invoke(proxy, ((), None))
slicefile = tempfile.NamedTemporaryFile(suffix = '.ice')
slicefile.write(slice)
slicefile.flush()
Ice.loadSlice(slicefile.name)
slicefile.close()
import Murmur
meta = Murmur.MetaPrx.checkedCast(proxy)
print meta.getVersion()
<commit_msg>Expand dynamic slice-fetch example to show fallback<commit_after>#!/usr/bin/env python
# -*- coding: utf-8
import Ice, IcePy, sys, tempfile
ice = Ice.initialize(sys.argv)
proxy = ice.stringToProxy('Meta:tcp -h 127.0.0.1 -p 6502')
try:
slice = IcePy.Operation('getSlice', Ice.OperationMode.Idempotent, Ice.OperationMode.Idempotent, True, (), (), (), IcePy._t_string, ()).invoke(proxy, ((), None))
slicefile = tempfile.NamedTemporaryFile(suffix = '.ice')
slicefile.write(slice)
slicefile.flush()
Ice.loadSlice(slicefile.name)
slicefile.close()
print 'Using dynamic slice'
except:
Ice.loadSlice('Murmur.ice')
print 'Using bundled slice'
import Murmur
meta = Murmur.MetaPrx.checkedCast(proxy)
print meta.getVersion()
|
7cfcdc25492ffb1da7a03edcb885d2db1ae20062
|
scratchpad/logger.py
|
scratchpad/logger.py
|
#!/usr/bin/env python3
from sense_hat import SenseHat
from pymongo import MongoClient
import time
import datetime
sense = SenseHat()
sense.clear()
client = MongoClient('localhost:27017')
db = client.g2x
def log(device, property, value):
now = datetime.datetime.utcnow()
print("[{0}] {1}:{2} = {3}".format(now, device, property, value))
db.readings.insert({
"timestamp": now,
"device": device,
"property": property,
"value": value
})
while True:
temperature = sense.get_temperature_from_humidity()
log("SenseHat", "temperature from humidity", temperature)
time.sleep(0.5)
|
Test logging of Sense data to mongodb
|
Test logging of Sense data to mongodb
|
Python
|
mit
|
thelonious/g2x,gizmo-cda/g2x,thelonious/g2x,gizmo-cda/g2x,gizmo-cda/g2x,gizmo-cda/g2x
|
Test logging of Sense data to mongodb
|
#!/usr/bin/env python3
from sense_hat import SenseHat
from pymongo import MongoClient
import time
import datetime
sense = SenseHat()
sense.clear()
client = MongoClient('localhost:27017')
db = client.g2x
def log(device, property, value):
now = datetime.datetime.utcnow()
print("[{0}] {1}:{2} = {3}".format(now, device, property, value))
db.readings.insert({
"timestamp": now,
"device": device,
"property": property,
"value": value
})
while True:
temperature = sense.get_temperature_from_humidity()
log("SenseHat", "temperature from humidity", temperature)
time.sleep(0.5)
|
<commit_before><commit_msg>Test logging of Sense data to mongodb<commit_after>
|
#!/usr/bin/env python3
from sense_hat import SenseHat
from pymongo import MongoClient
import time
import datetime
sense = SenseHat()
sense.clear()
client = MongoClient('localhost:27017')
db = client.g2x
def log(device, property, value):
now = datetime.datetime.utcnow()
print("[{0}] {1}:{2} = {3}".format(now, device, property, value))
db.readings.insert({
"timestamp": now,
"device": device,
"property": property,
"value": value
})
while True:
temperature = sense.get_temperature_from_humidity()
log("SenseHat", "temperature from humidity", temperature)
time.sleep(0.5)
|
Test logging of Sense data to mongodb#!/usr/bin/env python3
from sense_hat import SenseHat
from pymongo import MongoClient
import time
import datetime
sense = SenseHat()
sense.clear()
client = MongoClient('localhost:27017')
db = client.g2x
def log(device, property, value):
now = datetime.datetime.utcnow()
print("[{0}] {1}:{2} = {3}".format(now, device, property, value))
db.readings.insert({
"timestamp": now,
"device": device,
"property": property,
"value": value
})
while True:
temperature = sense.get_temperature_from_humidity()
log("SenseHat", "temperature from humidity", temperature)
time.sleep(0.5)
|
<commit_before><commit_msg>Test logging of Sense data to mongodb<commit_after>#!/usr/bin/env python3
from sense_hat import SenseHat
from pymongo import MongoClient
import time
import datetime
sense = SenseHat()
sense.clear()
client = MongoClient('localhost:27017')
db = client.g2x
def log(device, property, value):
now = datetime.datetime.utcnow()
print("[{0}] {1}:{2} = {3}".format(now, device, property, value))
db.readings.insert({
"timestamp": now,
"device": device,
"property": property,
"value": value
})
while True:
temperature = sense.get_temperature_from_humidity()
log("SenseHat", "temperature from humidity", temperature)
time.sleep(0.5)
|
|
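A quick way to sanity-check what a logger like the one in this record writes is to read the newest documents back out of the same collection. The snippet below is only an illustrative sketch: it assumes the same local MongoDB instance, database (g2x) and collection (readings) used above, and sticks to standard pymongo calls.

from pymongo import MongoClient

client = MongoClient('localhost:27017')
db = client.g2x

# Print the five most recent readings, newest first.
for reading in db.readings.find().sort("timestamp", -1).limit(5):
    print("[{0}] {1}:{2} = {3}".format(
        reading["timestamp"], reading["device"],
        reading["property"], reading["value"]))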
c171359ca9e50328a09f98af2cb5b0f6cd6f7e50
|
pombola/south_africa/management/commands/south_africa_import_new_constituency_office_locations.py
|
pombola/south_africa/management/commands/south_africa_import_new_constituency_office_locations.py
|
"""Command to import PMG's constituency office data
PMG are undertaking an exercise to visit all the constituency
offices and collect accurate locations and photographs.
The data from this is available at
https://app.m4jam.com/app/campaigns/2298/export/
This script parses the resulting CSV file and updates our data
on constituency offices to have the correct location.
"""
import csv
import re
from django.core.management.base import BaseCommand, CommandError
from django.contrib.gis.geos import Point
from pombola.core.models import (
Organisation,
)
# Matches the format of the locations in the CSV we get from PMG.
location_re = re.compile(
r'SRID=4326;POINT\((-?[0-9]+\.[0-9]+) (-?[0-9]+\.[0-9]+)\)')
class Command(BaseCommand):
args = '<path to csv file>'
help = 'Updates constituency offices based on the supplied CSV file.'
def handle(self, *args, **options):
if len(args) != 1:
raise CommandError(
"You must provide the path to the CSV file as an argument."
)
constituency_offices = Organisation.objects.filter(
kind__slug='constituency-office')
with open(args[0]) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
poi_ref = row['POI_REFERENCE']
party, name = (
re.match(
'(.*) Constituency Office (.*)', poi_ref
).groups()
)
name = re.sub('\s+', ' ', name)
qs = constituency_offices.filter(
name__regex=r'^{}.+: {}$'.format(party, name))
try:
org = qs.get()
except Organisation.MultipleObjectsReturned:
print "Skipping {} as multiple orgs returned: {}".format(
poi_ref,
repr(qs),
)
continue
except Organisation.DoesNotExist:
# Fall back to searching for the name and the party in the
# constituency office name
qs = (
Organisation.objects
.filter(kind__slug='constituency-office')
.filter(name__contains=name)
.filter(name__contains=party)
)
org = qs.get()
place = (
org.place_set
.filter(name__contains='Approximate position of')
.filter(kind__slug='constituency-office')
.get() # There should be only one.
)
lon, lat = location_re.match(row['Constituency_Pin']).groups()
place.location = Point(float(lon), float(lat))
place.save()
|
Add a script to import new constituency office locations.
|
Add a script to import new constituency office locations.
PMG are conducting an exercise to go to all the constituency offices
and get accurate locations for them and photographs. The data they
collect is available as a CSV, and this new management command
processes that CSV and updates our data with the improved locations.
|
Python
|
agpl-3.0
|
mysociety/pombola,mysociety/pombola,mysociety/pombola,mysociety/pombola,mysociety/pombola,mysociety/pombola
|
Add a script to import new constituency office locations.
PMG are conducting an exercise to go to all the constituency offices
and get accurate locations for them and photographs. The data they
collect is available as a CSV, and this new management command
processes that CSV and updates our data with the improved locations.
|
"""Command to import PMG's constituency office data
PMG are undertaking an exercise to visit all the constituency
offices and collect accurate locations and photographs.
The data from this is available at
https://app.m4jam.com/app/campaigns/2298/export/
This script parses the resulting CSV file and updates our data
on constituency offices to have the correct location.
"""
import csv
import re
from django.core.management.base import BaseCommand, CommandError
from django.contrib.gis.geos import Point
from pombola.core.models import (
Organisation,
)
# Matches the format of the locations in the CSV we get from PMG.
location_re = re.compile(
r'SRID=4326;POINT\((-?[0-9]+\.[0-9]+) (-?[0-9]+\.[0-9]+)\)')
class Command(BaseCommand):
args = '<path to csv file>'
help = 'Updates constituency offices based on the supplied CSV file.'
def handle(self, *args, **options):
if len(args) != 1:
raise CommandError(
"You must provide the path to the CSV file as an argument."
)
constituency_offices = Organisation.objects.filter(
kind__slug='constituency-office')
with open(args[0]) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
poi_ref = row['POI_REFERENCE']
party, name = (
re.match(
'(.*) Constituency Office (.*)', poi_ref
).groups()
)
name = re.sub('\s+', ' ', name)
qs = constituency_offices.filter(
name__regex=r'^{}.+: {}$'.format(party, name))
try:
org = qs.get()
except Organisation.MultipleObjectsReturned:
print "Skipping {} as multiple orgs returned: {}".format(
poi_ref,
repr(qs),
)
continue
except Organisation.DoesNotExist:
# Fall back to searching for the name and the party in the
# constituency office name
qs = (
Organisation.objects
.filter(kind__slug='constituency-office')
.filter(name__contains=name)
.filter(name__contains=party)
)
org = qs.get()
place = (
org.place_set
.filter(name__contains='Approximate position of')
.filter(kind__slug='constituency-office')
.get() # There should be only one.
)
lon, lat = location_re.match(row['Constituency_Pin']).groups()
place.location = Point(float(lon), float(lat))
place.save()
|
<commit_before><commit_msg>Add a script to import new constituency office locations.
PMG are conducting an exercise to go to all the constituency offices
and get accurate locations for them and photographs. The data they
collect is available as a CSV, and this new management command
processes that CSV and updates our data with the improved locations.<commit_after>
|
"""Command to import PMG's constituency office data
PMG are undertaking an exercise to visit all the constituency
offices and collect accurate locations and photographs.
The data from this is available at
https://app.m4jam.com/app/campaigns/2298/export/
This script parses the resulting CSV file and updates our data
on constituency offices to have the correct location.
"""
import csv
import re
from django.core.management.base import BaseCommand, CommandError
from django.contrib.gis.geos import Point
from pombola.core.models import (
Organisation,
)
# Matches the format of the locations in the CSV we get from PMG.
location_re = re.compile(
r'SRID=4326;POINT\((-?[0-9]+\.[0-9]+) (-?[0-9]+\.[0-9]+)\)')
class Command(BaseCommand):
args = '<path to csv file>'
help = 'Updates constituency offices based on the supplied CSV file.'
def handle(self, *args, **options):
if len(args) != 1:
raise CommandError(
"You must provide the path to the CSV file as an argument."
)
constituency_offices = Organisation.objects.filter(
kind__slug='constituency-office')
with open(args[0]) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
poi_ref = row['POI_REFERENCE']
party, name = (
re.match(
'(.*) Constituency Office (.*)', poi_ref
).groups()
)
name = re.sub('\s+', ' ', name)
qs = constituency_offices.filter(
name__regex=r'^{}.+: {}$'.format(party, name))
try:
org = qs.get()
except Organisation.MultipleObjectsReturned:
print "Skipping {} as multiple orgs returned: {}".format(
poi_ref,
repr(qs),
)
continue
except Organisation.DoesNotExist:
# Fall back to searching for the name and the party in the
# constituency office name
qs = (
Organisation.objects
.filter(kind__slug='constituency-office')
.filter(name__contains=name)
.filter(name__contains=party)
)
org = qs.get()
place = (
org.place_set
.filter(name__contains='Approximate position of')
.filter(kind__slug='constituency-office')
.get() # There should be only one.
)
lon, lat = location_re.match(row['Constituency_Pin']).groups()
place.location = Point(float(lon), float(lat))
place.save()
|
Add a script to import new constituency office locations.
PMG are conducting an exercise to go to all the constituency offices
and get accurate locations for them and photographs. The data they
collect is available as a CSV, and this new management command
processes that CSV and updates our data with the improved locations."""Command to import PMG's constituency office data
PMG are undertaking an exercise to visit all the constituency
offices and collect accurate locations and photographs.
The data from this is available at
https://app.m4jam.com/app/campaigns/2298/export/
This script parses the resulting CSV file and updates our data
on constituency offices to have the correct location.
"""
import csv
import re
from django.core.management.base import BaseCommand, CommandError
from django.contrib.gis.geos import Point
from pombola.core.models import (
Organisation,
)
# Matches the format of the locations in the CSV we get from PMG.
location_re = re.compile(
r'SRID=4326;POINT\((-?[0-9]+\.[0-9]+) (-?[0-9]+\.[0-9]+)\)')
class Command(BaseCommand):
args = '<path to csv file>'
help = 'Updates constituency offices based on the supplied CSV file.'
def handle(self, *args, **options):
if len(args) != 1:
raise CommandError(
"You must provide the path to the CSV file as an argument."
)
constituency_offices = Organisation.objects.filter(
kind__slug='constituency-office')
with open(args[0]) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
poi_ref = row['POI_REFERENCE']
party, name = (
re.match(
'(.*) Constituency Office (.*)', poi_ref
).groups()
)
name = re.sub('\s+', ' ', name)
qs = constituency_offices.filter(
name__regex=r'^{}.+: {}$'.format(party, name))
try:
org = qs.get()
except Organisation.MultipleObjectsReturned:
print "Skipping {} as multiple orgs returned: {}".format(
poi_ref,
repr(qs),
)
continue
except Organisation.DoesNotExist:
# Fall back to searching for the name and the party in the
# constituency office name
qs = (
Organisation.objects
.filter(kind__slug='constituency-office')
.filter(name__contains=name)
.filter(name__contains=party)
)
org = qs.get()
place = (
org.place_set
.filter(name__contains='Approximate position of')
.filter(kind__slug='constituency-office')
.get() # There should be only one.
)
lon, lat = location_re.match(row['Constituency_Pin']).groups()
place.location = Point(float(lon), float(lat))
place.save()
|
<commit_before><commit_msg>Add a script to import new constituency office locations.
PMG are conducting an exercise to go to all the constituency offices
and get accurate locations for them and photographs. The data they
collect is available as a CSV, and this new management command
processes that CSV and updates our data with the improved locations.<commit_after>"""Command to import PMG's constituency office data
PMG are undertaking an exercise to visit all the constituency
offices and collect accurate locations and photographs.
The data from this is available at
https://app.m4jam.com/app/campaigns/2298/export/
This script parses the resulting CSV file and updates our data
on constituency offices to have the correct location.
"""
import csv
import re
from django.core.management.base import BaseCommand, CommandError
from django.contrib.gis.geos import Point
from pombola.core.models import (
Organisation,
)
# Matches the format of the locations in the CSV we get from PMG.
location_re = re.compile(
r'SRID=4326;POINT\((-?[0-9]+\.[0-9]+) (-?[0-9]+\.[0-9]+)\)')
class Command(BaseCommand):
args = '<path to csv file>'
help = 'Updates constituency offices based on the supplied CSV file.'
def handle(self, *args, **options):
if len(args) != 1:
raise CommandError(
"You must provide the path to the CSV file as an argument."
)
constituency_offices = Organisation.objects.filter(
kind__slug='constituency-office')
with open(args[0]) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
poi_ref = row['POI_REFERENCE']
party, name = (
re.match(
'(.*) Constituency Office (.*)', poi_ref
).groups()
)
name = re.sub('\s+', ' ', name)
qs = constituency_offices.filter(
name__regex=r'^{}.+: {}$'.format(party, name))
try:
org = qs.get()
except Organisation.MultipleObjectsReturned:
print "Skipping {} as multiple orgs returned: {}".format(
poi_ref,
repr(qs),
)
continue
except Organisation.DoesNotExist:
# Fall back to searching for the name and the party in the
# constituency office name
qs = (
Organisation.objects
.filter(kind__slug='constituency-office')
.filter(name__contains=name)
.filter(name__contains=party)
)
org = qs.get()
place = (
org.place_set
.filter(name__contains='Approximate position of')
.filter(kind__slug='constituency-office')
.get() # There should be only one.
)
lon, lat = location_re.match(row['Constituency_Pin']).groups()
place.location = Point(float(lon), float(lat))
place.save()
|
|
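The Constituency_Pin values handled in this record are EWKT point strings with longitude first and latitude second, which is why the command builds Point(float(lon), float(lat)). A small standalone illustration of the same location_re pattern follows; the sample coordinate string is invented for demonstration.

import re

# Same pattern as in the management command above.
location_re = re.compile(
    r'SRID=4326;POINT\((-?[0-9]+\.[0-9]+) (-?[0-9]+\.[0-9]+)\)')

sample = 'SRID=4326;POINT(28.047305 -26.204103)'  # invented example value
lon, lat = location_re.match(sample).groups()
print(float(lon), float(lat))  # longitude 28.047305, latitude -26.204103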
f0d12bb2c3834365081fddcee200f6a5b779c8a4
|
scripts/syncrules.py
|
scripts/syncrules.py
|
#!/usr/bin/env python3
import os
import shutil
import argparse
import logging
from snakemake_rules import SNAKEMAKE_RULES_PATH
from snakemake.workflow import Workflow
from snakemake.exceptions import print_exception
FORMAT = '%(levelname)s: %(asctime)-15s: %(message)s'
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__file__)
def sync_file(source, dest, dryrun=False):
"""Sync file source to dest"""
    if not os.path.exists(dest):
if dryrun:
logger.info("DRY_RUN: Copying rule '{}' to '{}'".format(source, dest))
else:
if not os.path.exists(os.path.dirname(dest)):
os.makedirs(os.path.dirname(dest))
logger.info("Copying rule '{}' to '{}'".format(source, dest))
shutil.copy2(source, dest)
else:
srctime = os.path.getmtime(source)
desttime = os.path.getmtime(dest)
if (desttime > srctime):
if dryrun:
logger.info("DRY_RUN: Updating rule '{}' to '{}'".format(source, dest))
else:
logger.info("Updating rule '{}' to '{}'".format(source, dest))
shutil.copy2(source, dest)
else:
if dryrun:
logger.info("DRY_RUN: rule '{}' up to date".format(dest))
else:
logger.info("rule '{}' up to date".format(dest))
parser = argparse.ArgumentParser("Copy/sync rules to a given directory")
parser.add_argument('Snakefile', help="Snakefile to import")
parser.add_argument('-n', '--dry-run', action="store_true", help="Dry run")
parser.add_argument('-d', '--outdir', action="store", default=os.curdir,
                    help="Directory to copy/sync rules to")
args = parser.parse_args()
snakefile = os.path.abspath(args.Snakefile)
workflow = Workflow(snakefile=snakefile)
try:
workflow.include(snakefile,
overwrite_first_rule=True,
print_compilation=False)
workflow.check()
except (Exception, BaseException) as ex:
print_exception(ex, workflow.linemaps)
success = False
# Map the rules included from snakemake_rules
DEST=args.outdir
rules = {x:os.path.join(DEST, os.path.relpath(x, SNAKEMAKE_RULES_PATH)) for x in workflow.included if x.startswith(SNAKEMAKE_RULES_PATH)}
# Copy rules to outdir
for k, v in rules.items():
sync_file(k, v, args.dry_run)
|
Add script to sync rules
|
Add script to sync rules
|
Python
|
mit
|
percyfal/snakemake-rules,percyfal/snakemakelib-rules,percyfal/snakemakelib-rules,percyfal/snakemakelib-rules,percyfal/snakemake-rules
|
Add script to sync rules
|
#!/usr/bin/env python3
import os
import shutil
import argparse
import logging
from snakemake_rules import SNAKEMAKE_RULES_PATH
from snakemake.workflow import Workflow
from snakemake.exceptions import print_exception
FORMAT = '%(levelname)s: %(asctime)-15s: %(message)s'
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__file__)
def sync_file(source, dest, dryrun=False):
"""Sync file source to dest"""
    if not os.path.exists(dest):
if dryrun:
logger.info("DRY_RUN: Copying rule '{}' to '{}'".format(source, dest))
else:
if not os.path.exists(os.path.dirname(dest)):
os.makedirs(os.path.dirname(dest))
logger.info("Copying rule '{}' to '{}'".format(source, dest))
shutil.copy2(source, dest)
else:
srctime = os.path.getmtime(source)
desttime = os.path.getmtime(dest)
if (desttime > srctime):
if dryrun:
logger.info("DRY_RUN: Updating rule '{}' to '{}'".format(source, dest))
else:
logger.info("Updating rule '{}' to '{}'".format(source, dest))
shutil.copy2(source, dest)
else:
if dryrun:
logger.info("DRY_RUN: rule '{}' up to date".format(dest))
else:
logger.info("rule '{}' up to date".format(dest))
parser = argparse.ArgumentParser("Copy/sync rules to a given directory")
parser.add_argument('Snakefile', help="Snakefile to import")
parser.add_argument('-n', '--dry-run', action="store_true", help="Dry run")
parser.add_argument('-d', '--outdir', action="store", default=os.curdir,
                    help="Directory to copy/sync rules to")
args = parser.parse_args()
snakefile = os.path.abspath(args.Snakefile)
workflow = Workflow(snakefile=snakefile)
try:
workflow.include(snakefile,
overwrite_first_rule=True,
print_compilation=False)
workflow.check()
except (Exception, BaseException) as ex:
print_exception(ex, workflow.linemaps)
success = False
# Map the rules included from snakemake_rules
DEST=args.outdir
rules = {x:os.path.join(DEST, os.path.relpath(x, SNAKEMAKE_RULES_PATH)) for x in workflow.included if x.startswith(SNAKEMAKE_RULES_PATH)}
# Copy rules to outdir
for k, v in rules.items():
sync_file(k, v, args.dry_run)
|
<commit_before><commit_msg>Add script to sync rules<commit_after>
|
#!/usr/bin/env python3
import os
import shutil
import argparse
import logging
from snakemake_rules import SNAKEMAKE_RULES_PATH
from snakemake.workflow import Workflow
from snakemake.exceptions import print_exception
FORMAT = '%(levelname)s: %(asctime)-15s: %(message)s'
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__file__)
def sync_file(source, dest, dryrun=False):
"""Sync file source to dest"""
    if not os.path.exists(dest):
if dryrun:
logger.info("DRY_RUN: Copying rule '{}' to '{}'".format(source, dest))
else:
if not os.path.exists(os.path.dirname(dest)):
os.makedirs(os.path.dirname(dest))
logger.info("Copying rule '{}' to '{}'".format(source, dest))
shutil.copy2(source, dest)
else:
srctime = os.path.getmtime(source)
desttime = os.path.getmtime(dest)
if (desttime > srctime):
if dryrun:
logger.info("DRY_RUN: Updating rule '{}' to '{}'".format(source, dest))
else:
logger.info("Updating rule '{}' to '{}'".format(source, dest))
shutil.copy2(source, dest)
else:
if dryrun:
logger.info("DRY_RUN: rule '{}' up to date".format(dest))
else:
logger.info("rule '{}' up to date".format(dest))
parser = argparse.ArgumentParser("Copy/sync rules to a given directory")
parser.add_argument('Snakefile', help="Snakefile to import")
parser.add_argument('-n', '--dry-run', action="store_true", help="Dry run")
parser.add_argument('-d', '--outdir', action="store", default=os.curdir,
                    help="Directory to copy/sync rules to")
args = parser.parse_args()
snakefile = os.path.abspath(args.Snakefile)
workflow = Workflow(snakefile=snakefile)
try:
workflow.include(snakefile,
overwrite_first_rule=True,
print_compilation=False)
workflow.check()
except (Exception, BaseException) as ex:
print_exception(ex, workflow.linemaps)
success = False
# Map the rules included from snakemake_rules
DEST=args.outdir
rules = {x:os.path.join(DEST, os.path.relpath(x, SNAKEMAKE_RULES_PATH)) for x in workflow.included if x.startswith(SNAKEMAKE_RULES_PATH)}
# Copy rules to outdir
for k, v in rules.items():
sync_file(k, v, args.dry_run)
|
Add script to sync rules#!/usr/bin/env python3
import os
import shutil
import argparse
import logging
from snakemake_rules import SNAKEMAKE_RULES_PATH
from snakemake.workflow import Workflow
from snakemake.exceptions import print_exception
FORMAT = '%(levelname)s: %(asctime)-15s: %(message)s'
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__file__)
def sync_file(source, dest, dryrun=False):
"""Sync file source to dest"""
    if not os.path.exists(dest):
if dryrun:
logger.info("DRY_RUN: Copying rule '{}' to '{}'".format(source, dest))
else:
if not os.path.exists(os.path.dirname(dest)):
os.makedirs(os.path.dirname(dest))
logger.info("Copying rule '{}' to '{}'".format(source, dest))
shutil.copy2(source, dest)
else:
srctime = os.path.getmtime(source)
desttime = os.path.getmtime(dest)
if (desttime > srctime):
if dryrun:
logger.info("DRY_RUN: Updating rule '{}' to '{}'".format(source, dest))
else:
logger.info("Updating rule '{}' to '{}'".format(source, dest))
shutil.copy2(source, dest)
else:
if dryrun:
logger.info("DRY_RUN: rule '{}' up to date".format(dest))
else:
logger.info("rule '{}' up to date".format(dest))
parser = argparse.ArgumentParser("Copy/sync rules to a given directory")
parser.add_argument('Snakefile', help="Snakefile to import")
parser.add_argument('-n', '--dry-run', action="store_true", help="Dry run")
parser.add_argument('-d', '--outdir', action="store", default=os.curdir,
                    help="Directory to copy/sync rules to")
args = parser.parse_args()
snakefile = os.path.abspath(args.Snakefile)
workflow = Workflow(snakefile=snakefile)
try:
workflow.include(snakefile,
overwrite_first_rule=True,
print_compilation=False)
workflow.check()
except (Exception, BaseException) as ex:
print_exception(ex, workflow.linemaps)
success = False
# Map the rules included from snakemake_rules
DEST=args.outdir
rules = {x:os.path.join(DEST, os.path.relpath(x, SNAKEMAKE_RULES_PATH)) for x in workflow.included if x.startswith(SNAKEMAKE_RULES_PATH)}
# Copy rules to outdir
for k, v in rules.items():
sync_file(k, v, args.dry_run)
|
<commit_before><commit_msg>Add script to sync rules<commit_after>#!/usr/bin/env python3
import os
import shutil
import argparse
import logging
from snakemake_rules import SNAKEMAKE_RULES_PATH
from snakemake.workflow import Workflow
from snakemake.exceptions import print_exception
FORMAT = '%(levelname)s: %(asctime)-15s: %(message)s'
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__file__)
def sync_file(source, dest, dryrun=False):
"""Sync file source to dest"""
    if not os.path.exists(dest):
if dryrun:
logger.info("DRY_RUN: Copying rule '{}' to '{}'".format(source, dest))
else:
if not os.path.exists(os.path.dirname(dest)):
os.makedirs(os.path.dirname(dest))
logger.info("Copying rule '{}' to '{}'".format(source, dest))
shutil.copy2(source, dest)
else:
srctime = os.path.getmtime(source)
desttime = os.path.getmtime(dest)
if (desttime > srctime):
if dryrun:
logger.info("DRY_RUN: Updating rule '{}' to '{}'".format(source, dest))
else:
logger.info("Updating rule '{}' to '{}'".format(source, dest))
shutil.copy2(source, dest)
else:
if dryrun:
logger.info("DRY_RUN: rule '{}' up to date".format(dest))
else:
logger.info("rule '{}' up to date".format(dest))
parser = argparse.ArgumentParser("Copy/sync rules to a given directory")
parser.add_argument('Snakefile', help="Snakefile to import")
parser.add_argument('-n', '--dry-run', action="store_true", help="Dry run")
parser.add_argument('-d', '--outdir', action="store", default=os.curdir,
                    help="Directory to copy/sync rules to")
args = parser.parse_args()
snakefile = os.path.abspath(args.Snakefile)
workflow = Workflow(snakefile=snakefile)
try:
workflow.include(snakefile,
overwrite_first_rule=True,
print_compilation=False)
workflow.check()
except (Exception, BaseException) as ex:
print_exception(ex, workflow.linemaps)
success = False
# Map the rules included from snakemake_rules
DEST=args.outdir
rules = {x:os.path.join(DEST, os.path.relpath(x, SNAKEMAKE_RULES_PATH)) for x in workflow.included if x.startswith(SNAKEMAKE_RULES_PATH)}
# Copy rules to outdir
for k, v in rules.items():
sync_file(k, v, args.dry_run)
|
|
ab03d7e0ffb4c3a82f5b9bee88d6b06a375f9276
|
Utilities/Maintenance/GeneratePythonDownloadsPage.py
|
Utilities/Maintenance/GeneratePythonDownloadsPage.py
|
#!/usr/bin/env python
#=========================================================================
#
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#=========================================================================
import hashlib
import argparse
import re
import os
parser = argparse.ArgumentParser( description="Given a list of python wheels, generate a list of hyperlinks to GitHub with sha512 fragment identifier" )
parser.add_argument( '--hash', choices=['md5','sha256', 'sha512'], default='sha512')
parser.add_argument( '-f', '--format', choices=['html','md'], default='html')
parser.add_argument( 'files', metavar="python.whl", type=argparse.FileType(mode='rb'), nargs='+' )
args = parser.parse_args()
for f in args.files:
name = os.path.basename(f.name)
#version="1.1.0"
version = re.match(r'SimpleITK-([0-9]+\.[0-9]+(\.[0-9]+)?(rc[0-9]+)?)', name ).group(1)
print("version:{0}".format(version))
if args.hash == "md5":
hash_value = hashlib.md5(f.read()).hexdigest()
elif args.hash == "sha256":
hash_value = hashlib.sha256(f.read()).hexdigest()
elif args.hash == "sha512":
hash_value = hashlib.sha512(f.read()).hexdigest()
tag = "v{0}".format(version)
#host="SourceForge"
#url = "https://sourceforge.net/projects/simpleitk/files/SimpleITK/{0}/Python/{1}#{2}={3}".format(version,name,args.hash,hash_value)
host = "GitHub"
url = "https://github.com/SimpleITK/SimpleITK/releases/download/{0}/{1}#{2}={3}".format(tag,name,args.hash,hash_value)
if args.format == 'html':
print "<li><a href=\"{0}\" title=\"Click to download {1}\">{1} (hosted at {2})</a></li>".format(url,name,host)
elif args.format == 'md':
print "[{1}]({0})".format(url,name)
f.close()
|
Add script to generate download links used on simpleitk.org
|
Add script to generate download links used on simpleitk.org
|
Python
|
apache-2.0
|
blowekamp/SimpleITK,InsightSoftwareConsortium/SimpleITK,SimpleITK/SimpleITK,richardbeare/SimpleITK,InsightSoftwareConsortium/SimpleITK,InsightSoftwareConsortium/SimpleITK,richardbeare/SimpleITK,SimpleITK/SimpleITK,SimpleITK/SimpleITK,SimpleITK/SimpleITK,InsightSoftwareConsortium/SimpleITK,blowekamp/SimpleITK,richardbeare/SimpleITK,SimpleITK/SimpleITK,InsightSoftwareConsortium/SimpleITK,blowekamp/SimpleITK,SimpleITK/SimpleITK,InsightSoftwareConsortium/SimpleITK,richardbeare/SimpleITK,richardbeare/SimpleITK,SimpleITK/SimpleITK,blowekamp/SimpleITK,richardbeare/SimpleITK,blowekamp/SimpleITK,richardbeare/SimpleITK,richardbeare/SimpleITK,InsightSoftwareConsortium/SimpleITK,InsightSoftwareConsortium/SimpleITK,SimpleITK/SimpleITK,blowekamp/SimpleITK,blowekamp/SimpleITK,blowekamp/SimpleITK
|
Add script to generate download links used on simpleitk.org
|
#!/usr/bin/env python
#=========================================================================
#
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#=========================================================================
import hashlib
import argparse
import re
import os
parser = argparse.ArgumentParser( description="Given a list of python wheels, generate a list of hyperlinks to GitHub with sha512 fragment identifier" )
parser.add_argument( '--hash', choices=['md5','sha256', 'sha512'], default='sha512')
parser.add_argument( '-f', '--format', choices=['html','md'], default='html')
parser.add_argument( 'files', metavar="python.whl", type=argparse.FileType(mode='rb'), nargs='+' )
args = parser.parse_args()
for f in args.files:
name = os.path.basename(f.name)
#version="1.1.0"
version = re.match(r'SimpleITK-([0-9]+\.[0-9]+(\.[0-9]+)?(rc[0-9]+)?)', name ).group(1)
print("version:{0}".format(version))
if args.hash == "md5":
hash_value = hashlib.md5(f.read()).hexdigest()
elif args.hash == "sha256":
hash_value = hashlib.sha256(f.read()).hexdigest()
elif args.hash == "sha512":
hash_value = hashlib.sha512(f.read()).hexdigest()
tag = "v{0}".format(version)
#host="SourceForge"
#url = "https://sourceforge.net/projects/simpleitk/files/SimpleITK/{0}/Python/{1}#{2}={3}".format(version,name,args.hash,hash_value)
host = "GitHub"
url = "https://github.com/SimpleITK/SimpleITK/releases/download/{0}/{1}#{2}={3}".format(tag,name,args.hash,hash_value)
if args.format == 'html':
print "<li><a href=\"{0}\" title=\"Click to download {1}\">{1} (hosted at {2})</a></li>".format(url,name,host)
elif args.format == 'md':
print "[{1}]({0})".format(url,name)
f.close()
|
<commit_before><commit_msg>Add script to generate download links used on simpleitk.org<commit_after>
|
#!/usr/bin/env python
#=========================================================================
#
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#=========================================================================
import hashlib
import argparse
import re
import os
parser = argparse.ArgumentParser( description="Given a list of python wheels, generate a list of hyperlinks to GitHub with sha512 fragment identifier" )
parser.add_argument( '--hash', choices=['md5','sha256', 'sha512'], default='sha512')
parser.add_argument( '-f', '--format', choices=['html','md'], default='html')
parser.add_argument( 'files', metavar="python.whl", type=argparse.FileType(mode='rb'), nargs='+' )
args = parser.parse_args()
for f in args.files:
name = os.path.basename(f.name)
#version="1.1.0"
version = re.match(r'SimpleITK-([0-9]+\.[0-9]+(\.[0-9]+)?(rc[0-9]+)?)', name ).group(1)
print("version:{0}".format(version))
if args.hash == "md5":
hash_value = hashlib.md5(f.read()).hexdigest()
elif args.hash == "sha256":
hash_value = hashlib.sha256(f.read()).hexdigest()
elif args.hash == "sha512":
hash_value = hashlib.sha512(f.read()).hexdigest()
tag = "v{0}".format(version)
#host="SourceForge"
#url = "https://sourceforge.net/projects/simpleitk/files/SimpleITK/{0}/Python/{1}#{2}={3}".format(version,name,args.hash,hash_value)
host = "GitHub"
url = "https://github.com/SimpleITK/SimpleITK/releases/download/{0}/{1}#{2}={3}".format(tag,name,args.hash,hash_value)
if args.format == 'html':
print "<li><a href=\"{0}\" title=\"Click to download {1}\">{1} (hosted at {2})</a></li>".format(url,name,host)
elif args.format == 'md':
print "[{1}]({0})".format(url,name)
f.close()
|
Add script to generate download links used on simpleitk.org#!/usr/bin/env python
#=========================================================================
#
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#=========================================================================
import hashlib
import argparse
import re
import os
parser = argparse.ArgumentParser( description="Given a list of python wheels, generate a list of hyperlinks to GitHub with sha512 fragment identifier" )
parser.add_argument( '--hash', choices=['md5','sha256', 'sha512'], default='sha512')
parser.add_argument( '-f', '--format', choices=['html','md'], default='html')
parser.add_argument( 'files', metavar="python.whl", type=argparse.FileType(mode='rb'), nargs='+' )
args = parser.parse_args()
for f in args.files:
name = os.path.basename(f.name)
#version="1.1.0"
version = re.match(r'SimpleITK-([0-9]+\.[0-9]+(\.[0-9]+)?(rc[0-9]+)?)', name ).group(1)
print("version:{0}".format(version))
if args.hash == "md5":
hash_value = hashlib.md5(f.read()).hexdigest()
elif args.hash == "sha256":
hash_value = hashlib.sha256(f.read()).hexdigest()
elif args.hash == "sha512":
hash_value = hashlib.sha512(f.read()).hexdigest()
tag = "v{0}".format(version)
#host="SourceForge"
#url = "https://sourceforge.net/projects/simpleitk/files/SimpleITK/{0}/Python/{1}#{2}={3}".format(version,name,args.hash,hash_value)
host = "GitHub"
url = "https://github.com/SimpleITK/SimpleITK/releases/download/{0}/{1}#{2}={3}".format(tag,name,args.hash,hash_value)
if args.format == 'html':
print "<li><a href=\"{0}\" title=\"Click to download {1}\">{1} (hosted at {2})</a></li>".format(url,name,host)
elif args.format == 'md':
print "[{1}]({0})".format(url,name)
f.close()
|
<commit_before><commit_msg>Add script to generate download links used on simpleitk.org<commit_after>#!/usr/bin/env python
#=========================================================================
#
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#=========================================================================
import hashlib
import argparse
import re
import os
parser = argparse.ArgumentParser( description="Given a list of python wheels, generate a list of hyperlinks to GitHub with sha512 fragment identifier" )
parser.add_argument( '--hash', choices=['md5','sha256', 'sha512'], default='sha512')
parser.add_argument( '-f', '--format', choices=['html','md'], default='html')
parser.add_argument( 'files', metavar="python.whl", type=argparse.FileType(mode='rb'), nargs='+' )
args = parser.parse_args()
for f in args.files:
name = os.path.basename(f.name)
#version="1.1.0"
version = re.match(r'SimpleITK-([0-9]+\.[0-9]+(\.[0-9]+)?(rc[0-9]+)?)', name ).group(1)
print("version:{0}".format(version))
if args.hash == "md5":
hash_value = hashlib.md5(f.read()).hexdigest()
elif args.hash == "sha256":
hash_value = hashlib.sha256(f.read()).hexdigest()
elif args.hash == "sha512":
hash_value = hashlib.sha512(f.read()).hexdigest()
tag = "v{0}".format(version)
#host="SourceForge"
#url = "https://sourceforge.net/projects/simpleitk/files/SimpleITK/{0}/Python/{1}#{2}={3}".format(version,name,args.hash,hash_value)
host = "GitHub"
url = "https://github.com/SimpleITK/SimpleITK/releases/download/{0}/{1}#{2}={3}".format(tag,name,args.hash,hash_value)
if args.format == 'html':
print "<li><a href=\"{0}\" title=\"Click to download {1}\">{1} (hosted at {2})</a></li>".format(url,name,host)
elif args.format == 'md':
print "[{1}]({0})".format(url,name)
f.close()
|
|
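The links generated in this record carry the wheel's hash as a URL fragment (for example #sha512=<hex>), which is the convention pip understands for direct download links. Below is a rough sketch of how a downloaded file could be checked against such a link; the function name and the commented example values are placeholders rather than part of the SimpleITK tooling.

import hashlib

def verify_fragment(wheel_path, url):
    # Split off the "#<algorithm>=<hex>" fragment appended by the generator above.
    algorithm, _, expected = url.rsplit('#', 1)[1].partition('=')
    with open(wheel_path, 'rb') as f:
        actual = hashlib.new(algorithm, f.read()).hexdigest()
    return actual == expected

# Hypothetical usage (placeholder file name and URL):
# verify_fragment('SimpleITK-1.1.0-cp27-cp27m-win32.whl',
#                 'https://github.com/SimpleITK/SimpleITK/releases/download/v1.1.0/'
#                 'SimpleITK-1.1.0-cp27-cp27m-win32.whl#sha512=...')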
c552bfd620682d76afe17f6a70f901500e090b10
|
Day1/PrimeTestCases.py
|
Day1/PrimeTestCases.py
|
#prime numbers function test cases
import unittest
from prime import prime_numbers
class PrimeTest(unittest.TestCase):
def test_returns_prime_numbers(self):
self.assertListEqual(prime_numbers(6), [2,3,5], msg="Range of 0-6 should return [2,3,5] as the prime numbers")
def test_input_is_a_number(self):
with self.assertRaises(TypeError, msg="Should raise type error if a string is passed as argument"):
prime_numbers("String")
def test_return_value_is_list(self):
self.assertTrue(isinstance(prime_numbers(10), list), msg="The function should return a list")
def test_returns_None_for_negative_input(self):
self.assertEqual(prime_numbers(-30),"No prime numbers within that range! All prime numbers are positive",msg="There are no negative primes")
def test_does_not_return_negative_primes(self):
self.assertGreater(min(prime_numbers(50)),0)
def test_does_not_include_non_primes(self):
self.assertNotIn([0,1,4,6],prime_numbers(6))
if __name__=='__main__':
unittest.main()
|
Add test cases for prime numbers generator
|
Add test cases for prime numbers generator
|
Python
|
mit
|
JoshuaOndieki/joshua-ondieki-bootcamp-19
|
Add test cases for prime numbers generator
|
#prime numbers function test cases
import unittest
from prime import prime_numbers
class PrimeTest(unittest.TestCase):
def test_returns_prime_numbers(self):
self.assertListEqual(prime_numbers(6), [2,3,5], msg="Range of 0-6 should return [2,3,5] as the prime numbers")
def test_input_is_a_number(self):
with self.assertRaises(TypeError, msg="Should raise type error if a string is passed as argument"):
prime_numbers("String")
def test_return_value_is_list(self):
self.assertTrue(isinstance(prime_numbers(10), list), msg="The function should return a list")
def test_returns_None_for_negative_input(self):
self.assertEqual(prime_numbers(-30),"No prime numbers within that range! All prime numbers are positive",msg="There are no negative primes")
def test_does_not_return_negative_primes(self):
self.assertGreater(min(prime_numbers(50)),0)
def test_does_not_include_non_primes(self):
self.assertNotIn([0,1,4,6],prime_numbers(6))
if __name__=='__main__':
unittest.main()
|
<commit_before><commit_msg>Add test cases for prime numbers generator<commit_after>
|
#prime numbers function test cases
import unittest
from prime import prime_numbers
class PrimeTest(unittest.TestCase):
def test_returns_prime_numbers(self):
self.assertListEqual(prime_numbers(6), [2,3,5], msg="Range of 0-6 should return [2,3,5] as the prime numbers")
def test_input_is_a_number(self):
with self.assertRaises(TypeError, msg="Should raise type error if a string is passed as argument"):
prime_numbers("String")
def test_return_value_is_list(self):
self.assertTrue(isinstance(prime_numbers(10), list), msg="The function should return a list")
def test_returns_None_for_negative_input(self):
self.assertEqual(prime_numbers(-30),"No prime numbers within that range! All prime numbers are positive",msg="There are no negative primes")
def test_does_not_return_negative_primes(self):
self.assertGreater(min(prime_numbers(50)),0)
def test_does_not_include_non_primes(self):
self.assertNotIn([0,1,4,6],prime_numbers(6))
if __name__=='__main__':
unittest.main()
|
Add test cases for prime numbers generator#prime numbers function test cases
import unittest
from prime import prime_numbers
class PrimeTest(unittest.TestCase):
def test_returns_prime_numbers(self):
self.assertListEqual(prime_numbers(6), [2,3,5], msg="Range of 0-6 should return [2,3,5] as the prime numbers")
def test_input_is_a_number(self):
with self.assertRaises(TypeError, msg="Should raise type error if a string is passed as argument"):
prime_numbers("String")
def test_return_value_is_list(self):
self.assertTrue(isinstance(prime_numbers(10), list), msg="The function should return a list")
def test_returns_None_for_negative_input(self):
self.assertEqual(prime_numbers(-30),"No prime numbers within that range! All prime numbers are positive",msg="There are no negative primes")
def test_does_not_return_negative_primes(self):
self.assertGreater(min(prime_numbers(50)),0)
def test_does_not_include_non_primes(self):
self.assertNotIn([0,1,4,6],prime_numbers(6))
if __name__=='__main__':
unittest.main()
|
<commit_before><commit_msg>Add test cases for prime numbers generator<commit_after>#prime numbers function test cases
import unittest
from prime import prime_numbers
class PrimeTest(unittest.TestCase):
def test_returns_prime_numbers(self):
self.assertListEqual(prime_numbers(6), [2,3,5], msg="Range of 0-6 should return [2,3,5] as the prime numbers")
def test_input_is_a_number(self):
with self.assertRaises(TypeError, msg="Should raise type error if a string is passed as argument"):
prime_numbers("String")
def test_return_value_is_list(self):
self.assertTrue(isinstance(prime_numbers(10), list), msg="The function should return a list")
def test_returns_None_for_negative_input(self):
self.assertEqual(prime_numbers(-30),"No prime numbers within that range! All prime numbers are positive",msg="There are no negative primes")
def test_does_not_return_negative_primes(self):
self.assertGreater(min(prime_numbers(50)),0)
def test_does_not_include_non_primes(self):
self.assertNotIn([0,1,4,6],prime_numbers(6))
if __name__=='__main__':
unittest.main()
|
|
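The prime.py module exercised by these tests is not part of this record, so the following is only a guessed minimal implementation that would satisfy the assertions above; the exact string returned for negative input is taken from the test itself, and everything else is an assumption.

def prime_numbers(limit):
    """Return the primes up to limit (illustrative sketch, not the real prime.py)."""
    # The type-check test passes a string, so reject non-integer input explicitly.
    if not isinstance(limit, int):
        raise TypeError("limit must be an integer")
    # The negative-input test expects this exact message rather than a list.
    if limit < 0:
        return ("No prime numbers within that range! "
                "All prime numbers are positive")
    primes = []
    for candidate in range(2, limit + 1):
        # Trial division by the primes already found, up to sqrt(candidate).
        if all(candidate % p for p in primes if p * p <= candidate):
            primes.append(candidate)
    return primes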
d243017affa1397a8a08f34b3c1a01177f6f315e
|
tests/test_helpers.py
|
tests/test_helpers.py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2019 Tobias Gruetzmacher
from __future__ import absolute_import, division, print_function
import pytest
import re
from dosagelib.helpers import joinPathPartsNamer
class TestNamer(object):
"""
Tests for comic namer.
"""
def test_joinPathPartsNamer(self):
imgurl = 'https://HOST/wp-content/uploads/2019/02/tennis5wp-1.png'
pageurl = 'https://HOST/2019/03/11/12450/'
assert joinPathPartsNamer((0, 1, 2))(self, imgurl, pageurl) == '2019_03_11_tennis5wp-1.png'
assert joinPathPartsNamer((0, 1, 2), (-1,), '-')(self, imgurl, pageurl) == '2019-03-11-tennis5wp-1.png'
assert joinPathPartsNamer((0, -2), ())(self, imgurl, pageurl) == '2019_12450'
|
Add test for recently added helper
|
Add test for recently added helper
|
Python
|
mit
|
webcomics/dosage,peterjanes/dosage,peterjanes/dosage,webcomics/dosage
|
Add test for recently added helper
|
# -*- coding: utf-8 -*-
# Copyright (C) 2019 Tobias Gruetzmacher
from __future__ import absolute_import, division, print_function
import pytest
import re
from dosagelib.helpers import joinPathPartsNamer
class TestNamer(object):
"""
Tests for comic namer.
"""
def test_joinPathPartsNamer(self):
imgurl = 'https://HOST/wp-content/uploads/2019/02/tennis5wp-1.png'
pageurl = 'https://HOST/2019/03/11/12450/'
assert joinPathPartsNamer((0, 1, 2))(self, imgurl, pageurl) == '2019_03_11_tennis5wp-1.png'
assert joinPathPartsNamer((0, 1, 2), (-1,), '-')(self, imgurl, pageurl) == '2019-03-11-tennis5wp-1.png'
assert joinPathPartsNamer((0, -2), ())(self, imgurl, pageurl) == '2019_12450'
|
<commit_before><commit_msg>Add test for recently added helper<commit_after>
|
# -*- coding: utf-8 -*-
# Copyright (C) 2019 Tobias Gruetzmacher
from __future__ import absolute_import, division, print_function
import pytest
import re
from dosagelib.helpers import joinPathPartsNamer
class TestNamer(object):
"""
Tests for comic namer.
"""
def test_joinPathPartsNamer(self):
imgurl = 'https://HOST/wp-content/uploads/2019/02/tennis5wp-1.png'
pageurl = 'https://HOST/2019/03/11/12450/'
assert joinPathPartsNamer((0, 1, 2))(self, imgurl, pageurl) == '2019_03_11_tennis5wp-1.png'
assert joinPathPartsNamer((0, 1, 2), (-1,), '-')(self, imgurl, pageurl) == '2019-03-11-tennis5wp-1.png'
assert joinPathPartsNamer((0, -2), ())(self, imgurl, pageurl) == '2019_12450'
|
Add test for recently added helper# -*- coding: utf-8 -*-
# Copyright (C) 2019 Tobias Gruetzmacher
from __future__ import absolute_import, division, print_function
import pytest
import re
from dosagelib.helpers import joinPathPartsNamer
class TestNamer(object):
"""
Tests for comic namer.
"""
def test_joinPathPartsNamer(self):
imgurl = 'https://HOST/wp-content/uploads/2019/02/tennis5wp-1.png'
pageurl = 'https://HOST/2019/03/11/12450/'
assert joinPathPartsNamer((0, 1, 2))(self, imgurl, pageurl) == '2019_03_11_tennis5wp-1.png'
assert joinPathPartsNamer((0, 1, 2), (-1,), '-')(self, imgurl, pageurl) == '2019-03-11-tennis5wp-1.png'
assert joinPathPartsNamer((0, -2), ())(self, imgurl, pageurl) == '2019_12450'
|
<commit_before><commit_msg>Add test for recently added helper<commit_after># -*- coding: utf-8 -*-
# Copyright (C) 2019 Tobias Gruetzmacher
from __future__ import absolute_import, division, print_function
import pytest
import re
from dosagelib.helpers import joinPathPartsNamer
class TestNamer(object):
"""
Tests for comic namer.
"""
def test_joinPathPartsNamer(self):
imgurl = 'https://HOST/wp-content/uploads/2019/02/tennis5wp-1.png'
pageurl = 'https://HOST/2019/03/11/12450/'
assert joinPathPartsNamer((0, 1, 2))(self, imgurl, pageurl) == '2019_03_11_tennis5wp-1.png'
assert joinPathPartsNamer((0, 1, 2), (-1,), '-')(self, imgurl, pageurl) == '2019-03-11-tennis5wp-1.png'
assert joinPathPartsNamer((0, -2), ())(self, imgurl, pageurl) == '2019_12450'
|
|
4c356d698b7abc59195989bcddcbcb210cc6c096
|
tools/include_wash.py
|
tools/include_wash.py
|
#!/usr/bin/env python
from __future__ import print_function
import os
def check(fn):
l = []
for i in open(fn):
i = i.strip()
if len(i) == 0:
continue
if i[0] != "#":
continue
if i.find("include") == -1:
continue
if i.find('"') == -1:
l.append(i.split('<')[1].split('>')[0])
else:
l.append(i.split('"')[1])
if "vrt.h" in l:
vrt = l.index("vrt.h")
if not "vdef.h" in l:
print(fn, "vdef.h not included with vrt.h")
vdef = l.index("vdef.h")
if vdef > vrt:
print(fn, "vdef.h included after vrt.h")
for i in ("stddef.h", "stdint.h", "cache/cache.h", "cache.h"):
if i in l:
print(fn, i + " included with vrt.h")
for i in ("cache/cache.h", "cache.h"):
if i in l:
for i in (
"stddef.h", "stdint.h", "vrt.h",
"math.h", "pthread.h", "stdarg.h", "sys/types.h",
"vdef.h", "miniobj.h", "vas.h", "vqueue.h",
):
if i in l:
print(fn, i + " included with cache.h")
for (dir, dns, fns) in os.walk("."):
for f in fns:
if f[-2:] == ".c":
check(dir + "/" + f)
|
Make vrt.h include <stddef.h> and <stdint.h>
|
Make vrt.h include <stddef.h> and <stdint.h>
Originally we decided that the output of VCC should be 100%
stand-alone and therefore contain no #includes at all. This
was hoped to avoid unspecified trouble with C-compilers at runtime.
But C99 is old enough to drink now, so we move forward.
The script in tools/include_wash.py checks *.c files and
complains about violations of our intended #include orders.
|
Python
|
bsd-2-clause
|
gquintard/Varnish-Cache,gquintard/Varnish-Cache,gquintard/Varnish-Cache,gquintard/Varnish-Cache
|
Make vrt.h include <stddef.h> and <stdint.h>
Originally we decided that the output of VCC should be 100%
stand-alone and therefore contain no #includes at all. This
was hoped to avoid unspecified trouble with C-compilers at runtime.
But C99 is old enough to drink now, so we move forward.
The script in tools/include_wash.py checks *.c files and
complains about violations of our intended #include orders.
|
#!/usr/bin/env python
from __future__ import print_function
import os
def check(fn):
l = []
for i in open(fn):
i = i.strip()
if len(i) == 0:
continue
if i[0] != "#":
continue
if i.find("include") == -1:
continue
if i.find('"') == -1:
l.append(i.split('<')[1].split('>')[0])
else:
l.append(i.split('"')[1])
if "vrt.h" in l:
vrt = l.index("vrt.h")
if not "vdef.h" in l:
print(fn, "vdef.h not included with vrt.h")
vdef = l.index("vdef.h")
if vdef > vrt:
print(fn, "vdef.h included after vrt.h")
for i in ("stddef.h", "stdint.h", "cache/cache.h", "cache.h"):
if i in l:
print(fn, i + " included with vrt.h")
for i in ("cache/cache.h", "cache.h"):
if i in l:
for i in (
"stddef.h", "stdint.h", "vrt.h",
"math.h", "pthread.h", "stdarg.h", "sys/types.h",
"vdef.h", "miniobj.h", "vas.h", "vqueue.h",
):
if i in l:
print(fn, i + " included with cache.h")
for (dir, dns, fns) in os.walk("."):
for f in fns:
if f[-2:] == ".c":
check(dir + "/" + f)
|
<commit_before><commit_msg>Make vrt.h include <stddef.h> and <stdint.h>
Originally we decided that the output of VCC should be 100%
stand-alone and therefore contain no #includes at all. This
was hoped to avoid unspecified trouble with C-compilers at runtime.
But C99 is old enough to drink now, so we move forward.
The script in tools/include_wash.py checks *.c files and
complains about violations of our intended #include orders.<commit_after>
|
#!/usr/bin/env python
from __future__ import print_function
import os
def check(fn):
l = []
for i in open(fn):
i = i.strip()
if len(i) == 0:
continue
if i[0] != "#":
continue
if i.find("include") == -1:
continue
if i.find('"') == -1:
l.append(i.split('<')[1].split('>')[0])
else:
l.append(i.split('"')[1])
if "vrt.h" in l:
vrt = l.index("vrt.h")
if not "vdef.h" in l:
print(fn, "vdef.h not included with vrt.h")
vdef = l.index("vdef.h")
if vdef > vrt:
print(fn, "vdef.h included after vrt.h")
for i in ("stddef.h", "stdint.h", "cache/cache.h", "cache.h"):
if i in l:
print(fn, i + " included with vrt.h")
for i in ("cache/cache.h", "cache.h"):
if i in l:
for i in (
"stddef.h", "stdint.h", "vrt.h",
"math.h", "pthread.h", "stdarg.h", "sys/types.h",
"vdef.h", "miniobj.h", "vas.h", "vqueue.h",
):
if i in l:
print(fn, i + " included with cache.h")
for (dir, dns, fns) in os.walk("."):
for f in fns:
if f[-2:] == ".c":
check(dir + "/" + f)
|
Make vrt.h include <stddef.h> and <stdint.h>
Originally we decided that the output of VCC should be 100%
stand-alone and therefore contain no #includes at all. This
was hoped to avoid unspecified trouble with C-compilers at runtime.
But C99 is old enough to drink now, so we move forward.
The script in tools/include_wash.py checks *.c files and
complains about violations of our intended #include orders.#!/usr/bin/env python
from __future__ import print_function
import os
def check(fn):
l = []
for i in open(fn):
i = i.strip()
if len(i) == 0:
continue
if i[0] != "#":
continue
if i.find("include") == -1:
continue
if i.find('"') == -1:
l.append(i.split('<')[1].split('>')[0])
else:
l.append(i.split('"')[1])
if "vrt.h" in l:
vrt = l.index("vrt.h")
if not "vdef.h" in l:
print(fn, "vdef.h not included with vrt.h")
vdef = l.index("vdef.h")
if vdef > vrt:
print(fn, "vdef.h included after vrt.h")
for i in ("stddef.h", "stdint.h", "cache/cache.h", "cache.h"):
if i in l:
print(fn, i + " included with vrt.h")
for i in ("cache/cache.h", "cache.h"):
if i in l:
for i in (
"stddef.h", "stdint.h", "vrt.h",
"math.h", "pthread.h", "stdarg.h", "sys/types.h",
"vdef.h", "miniobj.h", "vas.h", "vqueue.h",
):
if i in l:
print(fn, i + " included with cache.h")
for (dir, dns, fns) in os.walk("."):
for f in fns:
if f[-2:] == ".c":
check(dir + "/" + f)
|
<commit_before><commit_msg>Make vrt.h include <stddef.h> and <stdint.h>
Originally we decided that the output of VCC should be 100%
stand-alone and therefore contain no #includes at all. This
was hoped to avoid unspecified trouble with C-compilers at runtime.
But C99 is old enough to drink now, so we move forward.
The script in tools/include_wash.py checks *.c files and
complains about violations of our intended #include orders.<commit_after>#!/usr/bin/env python
from __future__ import print_function
import os
def check(fn):
l = []
for i in open(fn):
i = i.strip()
if len(i) == 0:
continue
if i[0] != "#":
continue
if i.find("include") == -1:
continue
if i.find('"') == -1:
l.append(i.split('<')[1].split('>')[0])
else:
l.append(i.split('"')[1])
if "vrt.h" in l:
vrt = l.index("vrt.h")
if not "vdef.h" in l:
print(fn, "vdef.h not included with vrt.h")
vdef = l.index("vdef.h")
if vdef > vrt:
print(fn, "vdef.h included after vrt.h")
for i in ("stddef.h", "stdint.h", "cache/cache.h", "cache.h"):
if i in l:
print(fn, i + " included with vrt.h")
for i in ("cache/cache.h", "cache.h"):
if i in l:
for i in (
"stddef.h", "stdint.h", "vrt.h",
"math.h", "pthread.h", "stdarg.h", "sys/types.h",
"vdef.h", "miniobj.h", "vas.h", "vqueue.h",
):
if i in l:
print(fn, i + " included with cache.h")
for (dir, dns, fns) in os.walk("."):
for f in fns:
if f[-2:] == ".c":
check(dir + "/" + f)
|
|
96d7f942ca87c37485eaf95f2df67b376cf2ee40
|
reporter/reporter/reports/scad/data_quality/duplicate_scad_id_in_civi.py
|
reporter/reporter/reports/scad/data_quality/duplicate_scad_id_in_civi.py
|
#!/usr/bin/env python3
from reporter.reports import Report
from reporter import (
get_contact_id_search_link, RECIPIENT_SCAD_ADMIN
)
class DuplicateScadIdInCivi(Report):
def __init__(self):
super().__init__(
introduction=("The following SCAD IDs are "
"duplicated in CiviCRM: "),
recipients=[RECIPIENT_SCAD_ADMIN],
sql='''
SELECT scad_id_58 AS scad_id
FROM STG_CiviCRM.dbo.civicrm_value_scad_15 scad
JOIN STG_CiviCRM.dbo.civicrm_case cas
ON cas.id = scad.entity_id
AND cas.is_deleted = 0
AND cas.status_id IN (5, 8)
AND cas.case_type_id = 9
JOIN STG_CiviCRM.dbo.civicrm_case_contact cas_con
ON cas_con.case_id = cas.id
JOIN STG_CiviCRM.dbo.civicrm_contact con
ON con.id = cas_con.contact_id
AND con.is_deleted = 0
WHERE i2b2ClinDataIntegration.dbo.isNullOrEmpty(scad_id_58) = 0
GROUP BY scad_id_58
HAVING COUNT(*) > 1
'''
)
def get_report_line(self, row):
return "- **{}**\r\n".format(get_contact_id_search_link(
row['scad_id'], row['scad_id']))
|
Duplicate SCAD ID in civi
|
Duplicate SCAD ID in civi
|
Python
|
mit
|
LCBRU/reporter,LCBRU/reporter
|
Duplicate SCAD ID in civi
|
#!/usr/bin/env python3
from reporter.reports import Report
from reporter import (
get_contact_id_search_link, RECIPIENT_SCAD_ADMIN
)
class DuplicateScadIdInCivi(Report):
def __init__(self):
super().__init__(
introduction=("The following SCAD IDs are "
"duplicated in CiviCRM: "),
recipients=[RECIPIENT_SCAD_ADMIN],
sql='''
SELECT scad_id_58 AS scad_id
FROM STG_CiviCRM.dbo.civicrm_value_scad_15 scad
JOIN STG_CiviCRM.dbo.civicrm_case cas
ON cas.id = scad.entity_id
AND cas.is_deleted = 0
AND cas.status_id IN (5, 8)
AND cas.case_type_id = 9
JOIN STG_CiviCRM.dbo.civicrm_case_contact cas_con
ON cas_con.case_id = cas.id
JOIN STG_CiviCRM.dbo.civicrm_contact con
ON con.id = cas_con.contact_id
AND con.is_deleted = 0
WHERE i2b2ClinDataIntegration.dbo.isNullOrEmpty(scad_id_58) = 0
GROUP BY scad_id_58
HAVING COUNT(*) > 1
'''
)
def get_report_line(self, row):
return "- **{}**\r\n".format(get_contact_id_search_link(
row['scad_id'], row['scad_id']))
|
<commit_before><commit_msg>Duplicate SCAD ID in civi<commit_after>
|
#!/usr/bin/env python3
from reporter.reports import Report
from reporter import (
get_contact_id_search_link, RECIPIENT_SCAD_ADMIN
)
class DuplicateScadIdInCivi(Report):
def __init__(self):
super().__init__(
introduction=("The following SCAD IDs are "
"duplicated in CiviCRM: "),
recipients=[RECIPIENT_SCAD_ADMIN],
sql='''
SELECT scad_id_58 AS scad_id
FROM STG_CiviCRM.dbo.civicrm_value_scad_15 scad
JOIN STG_CiviCRM.dbo.civicrm_case cas
ON cas.id = scad.entity_id
AND cas.is_deleted = 0
AND cas.status_id IN (5, 8)
AND cas.case_type_id = 9
JOIN STG_CiviCRM.dbo.civicrm_case_contact cas_con
ON cas_con.case_id = cas.id
JOIN STG_CiviCRM.dbo.civicrm_contact con
ON con.id = cas_con.contact_id
AND con.is_deleted = 0
WHERE i2b2ClinDataIntegration.dbo.isNullOrEmpty(scad_id_58) = 0
GROUP BY scad_id_58
HAVING COUNT(*) > 1
'''
)
def get_report_line(self, row):
return "- **{}**\r\n".format(get_contact_id_search_link(
row['scad_id'], row['scad_id']))
|
Duplicate SCAD ID in civi#!/usr/bin/env python3
from reporter.reports import Report
from reporter import (
get_contact_id_search_link, RECIPIENT_SCAD_ADMIN
)
class DuplicateScadIdInCivi(Report):
def __init__(self):
super().__init__(
introduction=("The following SCAD IDs are "
"duplicated in CiviCRM: "),
recipients=[RECIPIENT_SCAD_ADMIN],
sql='''
SELECT scad_id_58 AS scad_id
FROM STG_CiviCRM.dbo.civicrm_value_scad_15 scad
JOIN STG_CiviCRM.dbo.civicrm_case cas
ON cas.id = scad.entity_id
AND cas.is_deleted = 0
AND cas.status_id IN (5, 8)
AND cas.case_type_id = 9
JOIN STG_CiviCRM.dbo.civicrm_case_contact cas_con
ON cas_con.case_id = cas.id
JOIN STG_CiviCRM.dbo.civicrm_contact con
ON con.id = cas_con.contact_id
AND con.is_deleted = 0
WHERE i2b2ClinDataIntegration.dbo.isNullOrEmpty(scad_id_58) = 0
GROUP BY scad_id_58
HAVING COUNT(*) > 1
'''
)
def get_report_line(self, row):
return "- **{}**\r\n".format(get_contact_id_search_link(
row['scad_id'], row['scad_id']))
|
<commit_before><commit_msg>Duplicate SCAD ID in civi<commit_after>#!/usr/bin/env python3
from reporter.reports import Report
from reporter import (
get_contact_id_search_link, RECIPIENT_SCAD_ADMIN
)
class DuplicateScadIdInCivi(Report):
def __init__(self):
super().__init__(
introduction=("The following SCAD IDs are "
"duplicated in CiviCRM: "),
recipients=[RECIPIENT_SCAD_ADMIN],
sql='''
SELECT scad_id_58 AS scad_id
FROM STG_CiviCRM.dbo.civicrm_value_scad_15 scad
JOIN STG_CiviCRM.dbo.civicrm_case cas
ON cas.id = scad.entity_id
AND cas.is_deleted = 0
AND cas.status_id IN (5, 8)
AND cas.case_type_id = 9
JOIN STG_CiviCRM.dbo.civicrm_case_contact cas_con
ON cas_con.case_id = cas.id
JOIN STG_CiviCRM.dbo.civicrm_contact con
ON con.id = cas_con.contact_id
AND con.is_deleted = 0
WHERE i2b2ClinDataIntegration.dbo.isNullOrEmpty(scad_id_58) = 0
GROUP BY scad_id_58
HAVING COUNT(*) > 1
'''
)
def get_report_line(self, row):
return "- **{}**\r\n".format(get_contact_id_search_link(
row['scad_id'], row['scad_id']))
|