Dataset schema (one row per commit): column name, type, and value-length or class-count statistics.

- commit: string, length 40
- old_file: string, length 4–118
- new_file: string, length 4–118
- old_contents: string, length 0–2.94k
- new_contents: string, length 1–4.43k
- subject: string, length 15–444
- message: string, length 16–3.45k
- lang: string, 1 class
- license: string, 13 classes
- repos: string, length 5–43.2k
- prompt: string, length 17–4.58k
- response: string, length 1–4.43k
- prompt_tagged: string, length 58–4.62k
- response_tagged: string, length 1–4.43k
- text: string, length 132–7.29k
- text_tagged: string, length 173–7.33k

77340cfe9580ec8510f4af9333cf1aba2d09e70b
|
wagtail/wagtailadmin/tests/test_password_reset.py
|
wagtail/wagtailadmin/tests/test_password_reset.py
|
from django.test import TestCase, override_settings
from django.core import mail

from wagtail.tests.utils import WagtailTestUtils
from wagtail.wagtailcore.models import Site


class TestUserPasswordReset(TestCase, WagtailTestUtils):
    fixtures = ['test.json']

    # need to clear urlresolver caches before/after tests, because we override ROOT_URLCONF
    # in some tests here
    def setUp(self):
        from django.core.urlresolvers import clear_url_caches
        clear_url_caches()

    def tearDown(self):
        from django.core.urlresolvers import clear_url_caches
        clear_url_caches()

    @override_settings(ROOT_URLCONF="wagtail.wagtailadmin.urls")
    def test_email_found_default_url(self):
        response = self.client.post('/password_reset/', {'email': 'siteeditor@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        self.assertIn("testserver", mail.outbox[0].body)

    @override_settings(ROOT_URLCONF="wagtail.wagtailadmin.urls", BASE_URL='http://mysite.com')
    def test_email_found_base_url(self):
        response = self.client.post('/password_reset/', {'email': 'siteeditor@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        self.assertIn("mysite.com", mail.outbox[0].body)
|
Add tests for the password reset emails
|
Add tests for the password reset emails
|
Python
|
bsd-3-clause
|
quru/wagtail,mixxorz/wagtail,timorieber/wagtail,nilnvoid/wagtail,gasman/wagtail,timorieber/wagtail,Klaudit/wagtail,nutztherookie/wagtail,wagtail/wagtail,marctc/wagtail,zerolab/wagtail,jorge-marques/wagtail,nrsimha/wagtail,mayapurmedia/wagtail,rjsproxy/wagtail,takeshineshiro/wagtail,bjesus/wagtail,kurtw/wagtail,iansprice/wagtail,kaedroho/wagtail,davecranwell/wagtail,Toshakins/wagtail,thenewguy/wagtail,iansprice/wagtail,Pennebaker/wagtail,rsalmaso/wagtail,chrxr/wagtail,mjec/wagtail,bjesus/wagtail,WQuanfeng/wagtail,KimGlazebrook/wagtail-experiment,jordij/wagtail,jorge-marques/wagtail,inonit/wagtail,thenewguy/wagtail,davecranwell/wagtail,taedori81/wagtail,Tivix/wagtail,Toshakins/wagtail,stevenewey/wagtail,inonit/wagtail,Klaudit/wagtail,FlipperPA/wagtail,janusnic/wagtail,takeshineshiro/wagtail,zerolab/wagtail,iho/wagtail,iansprice/wagtail,darith27/wagtail,tangentlabs/wagtail,mjec/wagtail,takeflight/wagtail,jnns/wagtail,nrsimha/wagtail,WQuanfeng/wagtail,WQuanfeng/wagtail,JoshBarr/wagtail,taedori81/wagtail,FlipperPA/wagtail,gogobook/wagtail,hanpama/wagtail,quru/wagtail,rjsproxy/wagtail,Tivix/wagtail,rjsproxy/wagtail,nimasmi/wagtail,thenewguy/wagtail,wagtail/wagtail,rsalmaso/wagtail,nutztherookie/wagtail,nutztherookie/wagtail,iho/wagtail,tangentlabs/wagtail,jordij/wagtail,mjec/wagtail,tangentlabs/wagtail,dresiu/wagtail,rv816/wagtail,wagtail/wagtail,takeflight/wagtail,nilnvoid/wagtail,zerolab/wagtail,iansprice/wagtail,rjsproxy/wagtail,timorieber/wagtail,thenewguy/wagtail,nealtodd/wagtail,kurtrwall/wagtail,Pennebaker/wagtail,inonit/wagtail,takeshineshiro/wagtail,mixxorz/wagtail,tangentlabs/wagtail,FlipperPA/wagtail,kurtrwall/wagtail,hanpama/wagtail,hanpama/wagtail,taedori81/wagtail,serzans/wagtail,inonit/wagtail,nutztherookie/wagtail,Toshakins/wagtail,hamsterbacke23/wagtail,torchbox/wagtail,rsalmaso/wagtail,mayapurmedia/wagtail,dresiu/wagtail,serzans/wagtail,mixxorz/wagtail,Tivix/wagtail,mikedingjan/wagtail,mixxorz/wagtail,KimGlazebrook/wagtail-experiment,bjesus/wagtail,jorge-marques/wagtail,chimeno/wagtail,KimGlazebrook/wagtail-experiment,rv816/wagtail,rv816/wagtail,stevenewey/wagtail,dresiu/wagtail,kaedroho/wagtail,kaedroho/wagtail,kaedroho/wagtail,WQuanfeng/wagtail,chimeno/wagtail,gasman/wagtail,jorge-marques/wagtail,JoshBarr/wagtail,mjec/wagtail,jnns/wagtail,m-sanders/wagtail,davecranwell/wagtail,mikedingjan/wagtail,bjesus/wagtail,m-sanders/wagtail,darith27/wagtail,gasman/wagtail,mayapurmedia/wagtail,mikedingjan/wagtail,hamsterbacke23/wagtail,torchbox/wagtail,nilnvoid/wagtail,marctc/wagtail,zerolab/wagtail,serzans/wagtail,m-sanders/wagtail,FlipperPA/wagtail,janusnic/wagtail,darith27/wagtail,nrsimha/wagtail,kurtrwall/wagtail,jordij/wagtail,iho/wagtail,jnns/wagtail,nimasmi/wagtail,takeshineshiro/wagtail,janusnic/wagtail,mephizzle/wagtail,stevenewey/wagtail,janusnic/wagtail,kurtrwall/wagtail,wagtail/wagtail,hamsterbacke23/wagtail,rsalmaso/wagtail,chimeno/wagtail,kurtw/wagtail,torchbox/wagtail,mephizzle/wagtail,serzans/wagtail,taedori81/wagtail,marctc/wagtail,takeflight/wagtail,dresiu/wagtail,kurtw/wagtail,gasman/wagtail,Pennebaker/wagtail,thenewguy/wagtail,nealtodd/wagtail,torchbox/wagtail,KimGlazebrook/wagtail-experiment,nimasmi/wagtail,quru/wagtail,dresiu/wagtail,hanpama/wagtail,chrxr/wagtail,Pennebaker/wagtail,mayapurmedia/wagtail,mixxorz/wagtail,timorieber/wagtail,rsalmaso/wagtail,gasman/wagtail,marctc/wagtail,gogobook/wagtail,nrsimha/wagtail,wagtail/wagtail,hamsterbacke23/wagtail,rv816/wagtail,taedori81/wagtail,jnns/wagtail,mephizzle/wagtail,stevenewey/wagtail,davecranwell/wagtail,jorge-marques
/wagtail,zerolab/wagtail,nilnvoid/wagtail,mephizzle/wagtail,JoshBarr/wagtail,takeflight/wagtail,darith27/wagtail,kaedroho/wagtail,quru/wagtail,chimeno/wagtail,JoshBarr/wagtail,Klaudit/wagtail,chrxr/wagtail,gogobook/wagtail,chrxr/wagtail,Toshakins/wagtail,Klaudit/wagtail,mikedingjan/wagtail,kurtw/wagtail,nimasmi/wagtail,m-sanders/wagtail,jordij/wagtail,nealtodd/wagtail,chimeno/wagtail,gogobook/wagtail,Tivix/wagtail,iho/wagtail,nealtodd/wagtail
|
5cfb870134d40ecc2330ac2ff0b98cea62232c8d
|
neurodsp/sim/decorators.py
|
neurodsp/sim/decorators.py
|
"""Decorators for neurodsp.sim."""
from functools import wraps
from neurodsp.sim.utils import demean, normalize_variance
###################################################################################################
###################################################################################################
def normalize(func, **kwargs):
@wraps(func)
def decorated(*args, **kwargs):
# Grab variance & mean as possible kwargs, with default values if not
variance = kwargs.pop('variance', 1.)
mean = kwargs.pop('mean', 0.)
# Call sim function, and unpack to get sig variable, if there are multiple returns
out = func(*args, **kwargs)
sig = out[0] if isinstance(out, tuple) else out
# Apply variance & mean transformations
if variance is not None:
sig = normalize_variance(sig, variance=variance)
if mean is not None:
sig = demean(sig, mean=mean)
# Return sig & other outputs, if there were any, or just sig otherwise
return (sig, out[1:]) if isinstance(out, tuple) else sig
return decorated
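A minimal usage sketch: the decorator consumes the variance and mean kwargs itself and post-processes whatever the wrapped sim function returns. The sim_noise function below is invented purely to show the call convention, with the normalize decorator above in scope:

import numpy as np

@normalize
def sim_noise(n_seconds, fs):
    """Hypothetical sim function: white noise of length n_seconds * fs."""
    return np.random.randn(int(n_seconds * fs))

# 'variance' and 'mean' never reach sim_noise; the wrapper pops them first
sig = sim_noise(n_seconds=2, fs=500, variance=2., mean=0.)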
|
Add decorator to apply normalization
|
Add decorator to apply normalization
|
Python
|
apache-2.0
|
voytekresearch/neurodsp,srcole/neurodsp,srcole/neurodsp
|
a4e4a67b631083535a413d56aaa1afff3b88a67f
|
pymt/framework/bmi_plot.py
|
pymt/framework/bmi_plot.py
|
#! /usr/bin/env python
import matplotlib.pyplot as plt


def quick_plot(bmi, name, **kwds):
    gid = bmi.get_var_grid(name)
    gtype = bmi.get_grid_type(gid)
    grid = bmi.grid[gid]
    x, y = grid.node_x.values, grid.node_y.values
    z = bmi.get_value(name)

    x_label = '{name} ({units})'.format(name=grid.node_x.standard_name,
                                        units=grid.node_x.units)
    y_label = '{name} ({units})'.format(name=grid.node_y.standard_name,
                                        units=grid.node_y.units)

    if gtype in ('unstructured_triangular', ):
        tris = bmi.get_grid_face_node_connectivity(gid).reshape((-1, 3))
        plt.tripcolor(x, y, tris, z, **kwds)
    elif gtype in ('uniform_rectilinear', 'structured_quad'):
        shape = bmi.get_grid_shape(gid)
        plt.pcolormesh(x, y, z.reshape(shape), **kwds)
    else:
        raise ValueError('no plotter for {gtype}'.format(gtype=gtype))

    plt.gca().set_aspect('equal')
    plt.xlabel(x_label)
    plt.ylabel(y_label)

    cbar = plt.colorbar()
    cbar.ax.set_ylabel(
        '{name} ({units})'.format(name=name, units=bmi.get_var_units(name)))
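To see quick_plot run without a real model, here is a self-contained smoke test with a minimal stand-in object; every name below is invented for illustration, a real pymt/BMI component exposes far more than this, and shading='auto' assumes matplotlib 3.3 or newer:

import numpy as np
import matplotlib.pyplot as plt

class Coord:
    """Stand-in for a pymt grid coordinate: values plus label metadata."""
    def __init__(self, values, standard_name, units):
        self.values, self.standard_name, self.units = values, standard_name, units

class Grid:
    def __init__(self):
        y, x = np.meshgrid(np.linspace(0., 3., 4), np.linspace(0., 4., 5), indexing='ij')
        self.node_x = Coord(x, 'x', 'm')
        self.node_y = Coord(y, 'y', 'm')

class FakeBmi:
    """Implements only the slice of the BMI surface that quick_plot touches."""
    grid = {0: Grid()}
    def get_var_grid(self, name): return 0
    def get_grid_type(self, gid): return 'uniform_rectilinear'
    def get_grid_shape(self, gid): return (4, 5)
    def get_value(self, name): return np.random.rand(20)
    def get_var_units(self, name): return 'm'

quick_plot(FakeBmi(), 'elevation', shading='auto')
plt.show()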
|
Add module for quick plotting of bmi values.
|
Add module for quick plotting of bmi values.
|
Python
|
mit
|
csdms/coupling,csdms/coupling,csdms/pymt
|
5e8d869c63154820f67308b4293a156b185fab1f
|
pyface/tests/test_python_editor.py
|
pyface/tests/test_python_editor.py
|
from __future__ import absolute_import

import os
import sys

from traits.testing.unittest_tools import unittest, UnittestTools

from ..gui import GUI
from ..python_editor import PythonEditor
from ..window import Window

PYTHON_SCRIPT = os.path.join(os.path.dirname(__file__), 'python_shell_script.py')


class TestPythonEditor(unittest.TestCase, UnittestTools):

    def setUp(self):
        self.gui = GUI()
        self.window = Window()
        self.window._create()

    def tearDown(self):
        self.widget.destroy()
        self.window.destroy()

    def test_lifecycle(self):
        # test that destroy works
        self.widget = PythonEditor(self.window.control)
        self.gui.process_events()
        self.assertFalse(self.widget.dirty)
        self.widget.destroy()
        self.gui.process_events()

    def test_show_line_numbers(self):
        # test that show_line_numbers can be toggled after creation
        self.widget = PythonEditor(self.window.control, show_line_numbers=False)
        self.gui.process_events()
        self.widget.show_line_numbers = True
        self.gui.process_events()
        self.widget.show_line_numbers = False
        self.gui.process_events()
        self.widget.destroy()
        self.gui.process_events()

    def test_load(self):
        # test that loading a file fires 'changed' and leaves the editor clean
        self.widget = PythonEditor(self.window.control)
        self.gui.process_events()
        with self.assertTraitChanges(self.widget, 'changed', count=1):
            self.widget.path = PYTHON_SCRIPT
        self.assertFalse(self.widget.dirty)
        self.widget.destroy()
        self.gui.process_events()

    def test_select_line(self):
        # test that a line of a loaded file can be selected
        self.widget = PythonEditor(self.window.control, path=PYTHON_SCRIPT)
        self.gui.process_events()
        self.widget.select_line(3)
        self.widget.destroy()
        self.gui.process_events()
|
Add test for python editor.
|
Add test for python editor.
|
Python
|
bsd-3-clause
|
geggo/pyface,brett-patterson/pyface,geggo/pyface
|
50aacf395fef416781b86bb0c09035f2d7cf129e
|
tests/test_crypto.py
|
tests/test_crypto.py
|
#!/usr/bin/env python
"""Tests for `tiingo` package."""

from unittest import TestCase

from tiingo import TiingoClient


# CRYPTO ENDPOINTS
class TestCryptoEndpoints(TestCase):

    def setUp(self):
        self._client = TiingoClient()

    def test_crypto_metadata(self):
        metadata = self._client.get_crypto_meta_data(ticker=['btcusd', 'fldcbtc'])
        assert len(metadata) == 2
        assert metadata[0].ticker == 'btcusd'
        assert metadata[1].ticker == 'fldcbtc'

    def test_crypto_top_of_book(self):
        top_of_book = self._client.get_crypto_top_of_book(tickers=['btcusd', 'fldcbtc'],
                                                          endDate='2018-01-02',
                                                          includeRawExchangeData=True)
        assert 'topOfBookData' in top_of_book
        assert len(top_of_book['topOfBookData']) == 2
        assert 'exchangeData' in top_of_book

    def test_crypto_price_history(self):
        price_history = self._client.get_crypto_price_history(tickers=['btcusd', 'fldcbtc'],
                                                              startDate='2019-05-01',
                                                              endDate='2019-05-02',
                                                              includeRawExchangeData=True,
                                                              resampleFreq='daily')
        assert len(price_history) == 2
        assert len(price_history[0]['priceData']) in [6, 7]
        assert 'exchangeData' in price_history[0]
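These tests talk to the live API through TiingoClient(), which, as an assumption about the library rather than something stated in this row, picks up credentials from a TIINGO_API_KEY environment variable or an explicit config dict, so something along these lines is needed before running them:

import os

# Placeholder value; supply a real key and keep it out of version control.
os.environ.setdefault('TIINGO_API_KEY', '<your-api-key>')

from tiingo import TiingoClient

client = TiingoClient()                                 # reads TIINGO_API_KEY
# client = TiingoClient({'api_key': '<your-api-key>'})  # or pass it explicitly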
|
Add tests for crypto endpoints
|
Add tests for crypto endpoints
|
Python
|
mit
|
hydrosquall/tiingo-python,hydrosquall/tiingo-python
|
d2d3b27822d5773302f2e1cd28de8b231becb5ea
|
ports/raspberrypi/boards/solderparty_rp2040_stamp/stamp_round_carrier_board.py
|
ports/raspberrypi/boards/solderparty_rp2040_stamp/stamp_round_carrier_board.py
|
from board import *
import busio
_SPI = None
_UART = None
_I2C = None
D0 = GP0
SDA = D0
D1 = GP1
SCL = D1
D8 = GP8
CIPO = D8
MISO = D8
D9 = GP9
CS = D9
D10 = GP10
SCK = D10
D11 = GP11
COPI = D11
MOSI = D11
D14 = GP14
D15 = GP15
D16 = GP16
TX = D16
D17 = GP17
RX = D17
D26 = GP26
A0 = D26
D27 = GP27
A1 = D27
D28 = GP28
A2 = D28
D29 = GP29
A3 = D29
D24 = GP24
NEOPIXEL = D24
D25 = GP25
LED = D25

def SPI():
    global _SPI
    if not _SPI:
        _SPI = busio.SPI(SCK, COPI, CIPO)
    return _SPI


def UART():
    global _UART
    if not _UART:
        _UART = busio.UART(TX, RX)
    return _UART


def I2C():
    global _I2C
    if not _I2C:
        _I2C = busio.I2C(SCL, SDA)
    return _I2C
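A brief usage sketch: user code imports the carrier module by its file name and grabs the lazily constructed singleton buses (the module name is inferred from the path above; calling board-level SPI()/UART()/I2C() helpers is the standard CircuitPython pattern):

import stamp_round_carrier_board as carrier

spi = carrier.SPI()    # first call builds busio.SPI(SCK, COPI, CIPO); later calls reuse it
uart = carrier.UART()  # busio.UART(TX, RX), created once
i2c = carrier.I2C()    # busio.I2C(SCL, SDA), created once
led_pin = carrier.LED  # alias for GP25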
|
Add a Round Carrier board file to the RP2040 Stamp build
|
Add a Round Carrier board file to the RP2040 Stamp build
|
Python
|
mit
|
adafruit/circuitpython,adafruit/circuitpython,adafruit/circuitpython,adafruit/circuitpython,adafruit/circuitpython,adafruit/circuitpython
|
f94d5b3c4eb3fff94c98e9efcdffdafa89f4841b
|
test/vis_data.py
|
test/vis_data.py
|
''' Generate image and ground truth from the scene, for development purpose '''
from unrealcv import client
import numpy as np
import matplotlib.pyplot as plt


def read_png(res):
    import StringIO, PIL.Image
    img = PIL.Image.open(StringIO.StringIO(res))
    return np.asarray(img)


def read_npy(res):
    import StringIO
    return np.load(StringIO.StringIO(res))


def request(cmd):
    res = client.request(cmd)
    if res.startswith('error'):
        print(res)
    return res


if __name__ == '__main__':
    client.connect()
    print('Client is connected')

    # Save lit png
    res = request('vget /camera/0/lit png')
    lit_png = read_png(res)
    print(lit_png.shape)
    plt.imsave('lit.png', lit_png)

    res = request('vget /camera/0/normal png')
    normal_png = read_png(res)
    print(normal_png.shape)
    plt.imsave('normal.png', normal_png)

    res = request('vget /camera/0/depth npy')
    depth_npy = read_npy(res)
    print(depth_npy.shape)
    np.save('depth.npy', depth_npy)

    res = request('vget /camera/0/normal npy')
    normal_npy = read_npy(res)
    print(normal_npy.shape)
    np.save('normal.npy', normal_npy)
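The read helpers above are Python 2 idioms (StringIO.StringIO over a bytes response). A Python 3 equivalent, kept here as a sketch since the script itself targets Python 2, swaps in io.BytesIO:

import io
import numpy as np
import PIL.Image

def read_png_py3(res):
    return np.asarray(PIL.Image.open(io.BytesIO(res)))

def read_npy_py3(res):
    return np.load(io.BytesIO(res))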
|
Add a script to generate data for helping development.
|
Add a script to generate data for helping development.
|
Python
|
mit
|
unrealcv/unrealcv,unrealcv/unrealcv,unrealcv/unrealcv,unrealcv/unrealcv,unrealcv/unrealcv
|
af712fedf4963d66f56f5ab9054318c493572ab1
|
src/oscar/apps/dashboard/shipping/forms.py
|
src/oscar/apps/dashboard/shipping/forms.py
|
from django import forms

from oscar.core.loading import get_model


class WeightBasedForm(forms.ModelForm):
    class Meta:
        model = get_model('shipping', 'WeightBased')
        fields = ['name', 'description', 'default_weight', 'countries']


class WeightBandForm(forms.ModelForm):
    def __init__(self, method, *args, **kwargs):
        super(WeightBandForm, self).__init__(*args, **kwargs)
        self.instance.method = method

    class Meta:
        model = get_model('shipping', 'WeightBand')
        fields = ('upper_limit', 'charge')
|
from django import forms

from oscar.core.loading import get_model


class WeightBasedForm(forms.ModelForm):
    class Meta:
        model = get_model('shipping', 'WeightBased')
        fields = ['description', 'default_weight', 'countries']


class WeightBandForm(forms.ModelForm):
    def __init__(self, method, *args, **kwargs):
        super(WeightBandForm, self).__init__(*args, **kwargs)
        self.instance.method = method

    class Meta:
        model = get_model('shipping', 'WeightBand')
        fields = ('upper_limit', 'charge')
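Context for the change, inferred from the commit message rather than taken from the Django source: under Django 1.10 and later, a ModelForm whose Meta.fields names a field the model does not define fails at class-definition time, roughly:

# django.core.exceptions.FieldError:
#     Unknown field(s) (name) specified for WeightBased

so dropping 'name' from fields is the fix once the model no longer carries that field.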
|
Remove non-existing field from WeightBasedForm
|
Remove non-existing field from WeightBasedForm
Django 1.10 errors (correctly) on this
|
Python
|
bsd-3-clause
|
okfish/django-oscar,sonofatailor/django-oscar,solarissmoke/django-oscar,sasha0/django-oscar,anentropic/django-oscar,anentropic/django-oscar,solarissmoke/django-oscar,john-parton/django-oscar,sonofatailor/django-oscar,okfish/django-oscar,django-oscar/django-oscar,john-parton/django-oscar,solarissmoke/django-oscar,solarissmoke/django-oscar,sonofatailor/django-oscar,john-parton/django-oscar,john-parton/django-oscar,sasha0/django-oscar,anentropic/django-oscar,okfish/django-oscar,django-oscar/django-oscar,sasha0/django-oscar,django-oscar/django-oscar,anentropic/django-oscar,sonofatailor/django-oscar,sasha0/django-oscar,django-oscar/django-oscar,okfish/django-oscar
|
dd14093e23c651e266752267f98170b73c25dc0a
|
python/logging/multiple_files.py
|
python/logging/multiple_files.py
|
import logging


class MyFilter(object):

    def __init__(self, level):
        self.__level = level

    def filter(self, logRecord):
        # https://docs.python.org/3/library/logging.html#logrecord-attributes
        return logRecord.levelno == self.__level


basic_config = {
    "level": logging.DEBUG,
    "format": '%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
    "filename": 'log/debug.log',
    "filemode": 'w'
}
logging.basicConfig(**basic_config)
logging.info('Jackdaws love my big sphinx of quartz.')

console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
console.setFormatter(formatter)

logger1 = logging.getLogger('myapp.area1')
logger2 = logging.getLogger('myapp.area2')
logger1.addHandler(console)

my_filter = MyFilter(logging.ERROR)
logger2.addFilter(my_filter)

logger1.debug('Quick zephyrs blow, vexing daft Jim.')
logger2.info('How quickly daft jumping zebras vex.')
logger1.warning('Jail zesty vixen who grabbed pay from quack.')
logger2.error('The five boxing wizards jump quickly.')
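A quick standalone check of the filter's behaviour, assuming the MyFilter class above is in scope: it passes only records whose level matches exactly, which is why logger2's ERROR line survives while its INFO line is dropped:

import logging

f = MyFilter(logging.ERROR)
# LogRecord(name, level, pathname, lineno, msg, args, exc_info)
info_rec = logging.LogRecord('myapp.area2', logging.INFO, __file__, 0, 'msg', None, None)
error_rec = logging.LogRecord('myapp.area2', logging.ERROR, __file__, 0, 'msg', None, None)
assert not f.filter(info_rec)   # INFO != ERROR, filtered out
assert f.filter(error_rec)      # exact level match passes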
|
Add a more complex example
|
feat(logging): Add a more complex example
This example shows how to use multiple loggers, multiple output
destinations and filters.
|
Python
|
mit
|
sblancov/hello_world,sblancov/hello_world,sblancov/hello_world,sblancov/hello_world
|
feat(logging): Add a more complex example
This example shows how to use multiple loggers, multiple output
destinations and filters.
|
import logging
class MyFilter(object):
def __init__(self, level):
self.__level = level
def filter(self, logRecord):
# https://docs.python.org/3/library/logging.html#logrecord-attributes
return logRecord.levelno == self.__level
basic_config = {
"level": logging.DEBUG,
"format": '%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
"filename": 'log/debug.log',
"filemode": 'w'
}
logging.basicConfig(**basic_config)
logging.info('Jackdaws love my big sphinx of quartz.')
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
console.setFormatter(formatter)
logger1 = logging.getLogger('myapp.area1')
logger2 = logging.getLogger('myapp.area2')
logger1.addHandler(console)
my_filter = MyFilter(logging.ERROR)
logger2.addFilter(my_filter)
logger1.debug('Quick zephyrs blow, vexing daft Jim.')
logger2.info('How quickly daft jumping zebras vex.')
logger1.warning('Jail zesty vixen who grabbed pay from quack.')
logger2.error('The five boxing wizards jump quickly.')
|
<commit_before><commit_msg>feat(logging): Add a more complex example
This example shows how to use multiple loggers, multiple output
destinations and filters.<commit_after>
|
import logging
class MyFilter(object):
def __init__(self, level):
self.__level = level
def filter(self, logRecord):
# https://docs.python.org/3/library/logging.html#logrecord-attributes
return logRecord.levelno == self.__level
basic_config = {
"level": logging.DEBUG,
"format": '%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
"filename": 'log/debug.log',
"filemode": 'w'
}
logging.basicConfig(**basic_config)
logging.info('Jackdaws love my big sphinx of quartz.')
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
console.setFormatter(formatter)
logger1 = logging.getLogger('myapp.area1')
logger2 = logging.getLogger('myapp.area2')
logger1.addHandler(console)
my_filter = MyFilter(logging.ERROR)
logger2.addFilter(my_filter)
logger1.debug('Quick zephyrs blow, vexing daft Jim.')
logger2.info('How quickly daft jumping zebras vex.')
logger1.warning('Jail zesty vixen who grabbed pay from quack.')
logger2.error('The five boxing wizards jump quickly.')
|
feat(logging): Add a more complex example
This example shows how to use multiple loggers, multiple output
destinations and filters.import logging
class MyFilter(object):
def __init__(self, level):
self.__level = level
def filter(self, logRecord):
# https://docs.python.org/3/library/logging.html#logrecord-attributes
return logRecord.levelno == self.__level
basic_config = {
"level": logging.DEBUG,
"format": '%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
"filename": 'log/debug.log',
"filemode": 'w'
}
logging.basicConfig(**basic_config)
logging.info('Jackdaws love my big sphinx of quartz.')
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
console.setFormatter(formatter)
logger1 = logging.getLogger('myapp.area1')
logger2 = logging.getLogger('myapp.area2')
logger1.addHandler(console)
my_filter = MyFilter(logging.ERROR)
logger2.addFilter(my_filter)
logger1.debug('Quick zephyrs blow, vexing daft Jim.')
logger2.info('How quickly daft jumping zebras vex.')
logger1.warning('Jail zesty vixen who grabbed pay from quack.')
logger2.error('The five boxing wizards jump quickly.')
|
<commit_before><commit_msg>feat(logging): Add a more complex example
This example shows how to use multiple loggers, multiple output
destinations and filters.<commit_after>import logging
class MyFilter(object):
def __init__(self, level):
self.__level = level
def filter(self, logRecord):
# https://docs.python.org/3/library/logging.html#logrecord-attributes
return logRecord.levelno == self.__level
basic_config = {
"level": logging.DEBUG,
"format": '%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
"filename": 'log/debug.log',
"filemode": 'w'
}
logging.basicConfig(**basic_config)
logging.info('Jackdaws love my big sphinx of quartz.')
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
console.setFormatter(formatter)
logger1 = logging.getLogger('myapp.area1')
logger2 = logging.getLogger('myapp.area2')
logger1.addHandler(console)
my_filter = MyFilter(logging.ERROR)
logger2.addFilter(my_filter)
logger1.debug('Quick zephyrs blow, vexing daft Jim.')
logger2.info('How quickly daft jumping zebras vex.')
logger1.warning('Jail zesty vixen who grabbed pay from quack.')
logger2.error('The five boxing wizards jump quickly.')
|
|
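The record above demonstrates exact-level filtering. A minimal, self-contained sketch of that filter behavior in isolation — the logger name, the messages, and the in-memory handler here are illustrative, not taken from the commit:
import io
import logging

class LevelOnlyFilter(object):
    """Pass only records whose level exactly matches, as in MyFilter above."""
    def __init__(self, level):
        self.__level = level
    def filter(self, logRecord):
        return logRecord.levelno == self.__level

stream = io.StringIO()
handler = logging.StreamHandler(stream)
handler.addFilter(LevelOnlyFilter(logging.ERROR))
logger = logging.getLogger('filter_demo')
logger.setLevel(logging.DEBUG)
logger.addHandler(handler)
logger.warning('suppressed: WARNING != ERROR')
logger.error('kept: exact level match')
assert stream.getvalue().strip() == 'kept: exact level match'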
661cbfec0f78e37bf6b322b9a59e1b2b1a10c665
|
tests/test_list_indexes.py
|
tests/test_list_indexes.py
|
# Copyright 2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test list_indexes with more than one batch."""
from bson import SON
from mockupdb import going, MockupDB, OpGetMore, OpQuery
from pymongo import MongoClient
from tests import unittest
class TestListIndexes(unittest.TestCase):
def check_indexes(self, indexes):
self.assertEqual([{'name': 'index_0'}, {'name': 'index_1'}], indexes)
for index_info in indexes:
self.assertIsInstance(index_info, SON)
def test_indexes_query(self):
server = MockupDB(auto_ismaster=True)
server.run()
self.addCleanup(server.stop)
client = MongoClient(server.uri)
with going(client.test.collection.list_indexes) as cursor:
request = server.receives(
OpQuery, namespace='test.system.indexes')
request.reply([{'name': 'index_0'}], cursor_id=123)
with going(list, cursor()) as indexes:
request = server.receives(OpGetMore,
namespace='test.system.indexes',
cursor_id=123)
request.reply([{'name': 'index_1'}], starting_from=1, cursor_id=0)
self.check_indexes(indexes())
def test_list_indexes_command(self):
server = MockupDB(auto_ismaster={'maxWireVersion': 3})
server.run()
self.addCleanup(server.stop)
client = MongoClient(server.uri)
with going(client.test.collection.list_indexes) as cursor:
request = server.receives(
listIndexes='collection', namespace='test')
request.reply({'cursor': {
'firstBatch': [{'name': 'index_0'}],
'id': 123}})
with going(list, cursor()) as indexes:
request = server.receives(OpGetMore,
namespace='test.collection',
cursor_id=123)
request.reply([{'name': 'index_1'}], starting_from=1, cursor_id=0)
self.assertEqual([{'name': 'index_0'}, {'name': 'index_1'}], indexes())
self.check_indexes(indexes())
if __name__ == '__main__':
unittest.main()
|
Test Collection.list_indexes with multiple batches.
|
Test Collection.list_indexes with multiple batches.
|
Python
|
apache-2.0
|
ajdavis/pymongo-mockup-tests
|
Test Collection.list_indexes with multiple batches.
|
# Copyright 2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test list_indexes with more than one batch."""
from bson import SON
from mockupdb import going, MockupDB, OpGetMore, OpQuery
from pymongo import MongoClient
from tests import unittest
class TestListIndexes(unittest.TestCase):
def check_indexes(self, indexes):
self.assertEqual([{'name': 'index_0'}, {'name': 'index_1'}], indexes)
for index_info in indexes:
self.assertIsInstance(index_info, SON)
def test_indexes_query(self):
server = MockupDB(auto_ismaster=True)
server.run()
self.addCleanup(server.stop)
client = MongoClient(server.uri)
with going(client.test.collection.list_indexes) as cursor:
request = server.receives(
OpQuery, namespace='test.system.indexes')
request.reply([{'name': 'index_0'}], cursor_id=123)
with going(list, cursor()) as indexes:
request = server.receives(OpGetMore,
namespace='test.system.indexes',
cursor_id=123)
request.reply([{'name': 'index_1'}], starting_from=1, cursor_id=0)
self.check_indexes(indexes())
def test_list_indexes_command(self):
server = MockupDB(auto_ismaster={'maxWireVersion': 3})
server.run()
self.addCleanup(server.stop)
client = MongoClient(server.uri)
with going(client.test.collection.list_indexes) as cursor:
request = server.receives(
listIndexes='collection', namespace='test')
request.reply({'cursor': {
'firstBatch': [{'name': 'index_0'}],
'id': 123}})
with going(list, cursor()) as indexes:
request = server.receives(OpGetMore,
namespace='test.collection',
cursor_id=123)
request.reply([{'name': 'index_1'}], starting_from=1, cursor_id=0)
self.assertEqual([{'name': 'index_0'}, {'name': 'index_1'}], indexes())
self.check_indexes(indexes())
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Test Collection.list_indexes with multiple batches.<commit_after>
|
# Copyright 2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test list_indexes with more than one batch."""
from bson import SON
from mockupdb import going, MockupDB, OpGetMore, OpQuery
from pymongo import MongoClient
from tests import unittest
class TestListIndexes(unittest.TestCase):
def check_indexes(self, indexes):
self.assertEqual([{'name': 'index_0'}, {'name': 'index_1'}], indexes)
for index_info in indexes:
self.assertIsInstance(index_info, SON)
def test_indexes_query(self):
server = MockupDB(auto_ismaster=True)
server.run()
self.addCleanup(server.stop)
client = MongoClient(server.uri)
with going(client.test.collection.list_indexes) as cursor:
request = server.receives(
OpQuery, namespace='test.system.indexes')
request.reply([{'name': 'index_0'}], cursor_id=123)
with going(list, cursor()) as indexes:
request = server.receives(OpGetMore,
namespace='test.system.indexes',
cursor_id=123)
request.reply([{'name': 'index_1'}], starting_from=1, cursor_id=0)
self.check_indexes(indexes())
def test_list_indexes_command(self):
server = MockupDB(auto_ismaster={'maxWireVersion': 3})
server.run()
self.addCleanup(server.stop)
client = MongoClient(server.uri)
with going(client.test.collection.list_indexes) as cursor:
request = server.receives(
listIndexes='collection', namespace='test')
request.reply({'cursor': {
'firstBatch': [{'name': 'index_0'}],
'id': 123}})
with going(list, cursor()) as indexes:
request = server.receives(OpGetMore,
namespace='test.collection',
cursor_id=123)
request.reply([{'name': 'index_1'}], starting_from=1, cursor_id=0)
self.assertEqual([{'name': 'index_0'}, {'name': 'index_1'}], indexes())
self.check_indexes(indexes())
if __name__ == '__main__':
unittest.main()
|
Test Collection.list_indexes with multiple batches.# Copyright 2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test list_indexes with more than one batch."""
from bson import SON
from mockupdb import going, MockupDB, OpGetMore, OpQuery
from pymongo import MongoClient
from tests import unittest
class TestListIndexes(unittest.TestCase):
def check_indexes(self, indexes):
self.assertEqual([{'name': 'index_0'}, {'name': 'index_1'}], indexes)
for index_info in indexes:
self.assertIsInstance(index_info, SON)
def test_indexes_query(self):
server = MockupDB(auto_ismaster=True)
server.run()
self.addCleanup(server.stop)
client = MongoClient(server.uri)
with going(client.test.collection.list_indexes) as cursor:
request = server.receives(
OpQuery, namespace='test.system.indexes')
request.reply([{'name': 'index_0'}], cursor_id=123)
with going(list, cursor()) as indexes:
request = server.receives(OpGetMore,
namespace='test.system.indexes',
cursor_id=123)
request.reply([{'name': 'index_1'}], starting_from=1, cursor_id=0)
self.check_indexes(indexes())
def test_list_indexes_command(self):
server = MockupDB(auto_ismaster={'maxWireVersion': 3})
server.run()
self.addCleanup(server.stop)
client = MongoClient(server.uri)
with going(client.test.collection.list_indexes) as cursor:
request = server.receives(
listIndexes='collection', namespace='test')
request.reply({'cursor': {
'firstBatch': [{'name': 'index_0'}],
'id': 123}})
with going(list, cursor()) as indexes:
request = server.receives(OpGetMore,
namespace='test.collection',
cursor_id=123)
request.reply([{'name': 'index_1'}], starting_from=1, cursor_id=0)
self.assertEqual([{'name': 'index_0'}, {'name': 'index_1'}], indexes())
self.check_indexes(indexes())
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Test Collection.list_indexes with multiple batches.<commit_after># Copyright 2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test list_indexes with more than one batch."""
from bson import SON
from mockupdb import going, MockupDB, OpGetMore, OpQuery
from pymongo import MongoClient
from tests import unittest
class TestListIndexes(unittest.TestCase):
def check_indexes(self, indexes):
self.assertEqual([{'name': 'index_0'}, {'name': 'index_1'}], indexes)
for index_info in indexes:
self.assertIsInstance(index_info, SON)
def test_indexes_query(self):
server = MockupDB(auto_ismaster=True)
server.run()
self.addCleanup(server.stop)
client = MongoClient(server.uri)
with going(client.test.collection.list_indexes) as cursor:
request = server.receives(
OpQuery, namespace='test.system.indexes')
request.reply([{'name': 'index_0'}], cursor_id=123)
with going(list, cursor()) as indexes:
request = server.receives(OpGetMore,
namespace='test.system.indexes',
cursor_id=123)
request.reply([{'name': 'index_1'}], starting_from=1, cursor_id=0)
self.check_indexes(indexes())
def test_list_indexes_command(self):
server = MockupDB(auto_ismaster={'maxWireVersion': 3})
server.run()
self.addCleanup(server.stop)
client = MongoClient(server.uri)
with going(client.test.collection.list_indexes) as cursor:
request = server.receives(
listIndexes='collection', namespace='test')
request.reply({'cursor': {
'firstBatch': [{'name': 'index_0'}],
'id': 123}})
with going(list, cursor()) as indexes:
request = server.receives(OpGetMore,
namespace='test.collection',
cursor_id=123)
request.reply([{'name': 'index_1'}], starting_from=1, cursor_id=0)
self.assertEqual([{'name': 'index_0'}, {'name': 'index_1'}], indexes())
self.check_indexes(indexes())
if __name__ == '__main__':
unittest.main()
|
|
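The tests above lean on mockupdb's going() helper, which runs the blocking PyMongo call on a background thread and hands back a future so the test can play the server side in the foreground. A condensed sketch of that pattern — the database name, the legacy count() call, and the reply values are illustrative:
from mockupdb import going, MockupDB
from pymongo import MongoClient

server = MockupDB(auto_ismaster=True)
server.run()
client = MongoClient(server.uri)

# going() starts the blocking call in a thread and returns a future.
with going(client.db.collection.count) as future:
    request = server.receives()        # block until the client's command arrives
    request.reply({'ok': 1, 'n': 42})  # play the server's part

assert future() == 42                  # resolve the future once replied
server.stop()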
f4082455ebfed1ca878534f13bd2019609131f20
|
tests/test_modelbackend.py
|
tests/test_modelbackend.py
|
from django.test import TestCase
from django_backend.backend.base.backends import ModelBackend
from django_backend.sitebackend import SiteBackend
from .models import OneFieldModel
class ModelBackendTest(TestCase):
def setUp(self):
self.site = SiteBackend(id='test')
def get_basic_backend(self, **kwargs):
defaults = {
'id': 'onefieldmodel',
'model': OneFieldModel,
}
defaults.update(**kwargs)
return self.site.register(ModelBackend, **defaults)
def test_registration(self):
self.site.register(
ModelBackend,
id='onefieldmodel',
model=OneFieldModel)
def test_get_form_class(self):
backend = self.get_basic_backend()
form_class = backend.get_form_class()
self.assertEqual(form_class.Meta.model, OneFieldModel)
self.assertEqual(form_class.base_fields.keys(), ['name'])
|
Add basic tests for ModelBackend
|
Add basic tests for ModelBackend
|
Python
|
bsd-3-clause
|
team23/django_backend,team23/django_backend,team23/django_backend,team23/django_backend,team23/django_backend
|
Add basic tests for ModelBackend
|
from django.test import TestCase
from django_backend.backend.base.backends import ModelBackend
from django_backend.sitebackend import SiteBackend
from .models import OneFieldModel
class ModelBackendTest(TestCase):
def setUp(self):
self.site = SiteBackend(id='test')
def get_basic_backend(self, **kwargs):
defaults = {
'id': 'onefieldmodel',
'model': OneFieldModel,
}
defaults.update(**kwargs)
return self.site.register(ModelBackend, **defaults)
def test_registration(self):
self.site.register(
ModelBackend,
id='onefieldmodel',
model=OneFieldModel)
def test_get_form_class(self):
backend = self.get_basic_backend()
form_class = backend.get_form_class()
self.assertEqual(form_class.Meta.model, OneFieldModel)
self.assertEqual(form_class.base_fields.keys(), ['name'])
|
<commit_before><commit_msg>Add basic tests for ModelBackend<commit_after>
|
from django.test import TestCase
from django_backend.backend.base.backends import ModelBackend
from django_backend.sitebackend import SiteBackend
from .models import OneFieldModel
class ModelBackendTest(TestCase):
def setUp(self):
self.site = SiteBackend(id='test')
def get_basic_backend(self, **kwargs):
defaults = {
'id': 'onefieldmodel',
'model': OneFieldModel,
}
defaults.update(**kwargs)
return self.site.register(ModelBackend, **defaults)
def test_registration(self):
self.site.register(
ModelBackend,
id='onefieldmodel',
model=OneFieldModel)
def test_get_form_class(self):
backend = self.get_basic_backend()
form_class = backend.get_form_class()
self.assertEqual(form_class.Meta.model, OneFieldModel)
self.assertEqual(form_class.base_fields.keys(), ['name'])
|
Add basic tests for ModelBackendfrom django.test import TestCase
from django_backend.backend.base.backends import ModelBackend
from django_backend.sitebackend import SiteBackend
from .models import OneFieldModel
class ModelBackendTest(TestCase):
def setUp(self):
self.site = SiteBackend(id='test')
def get_basic_backend(self, **kwargs):
defaults = {
'id': 'onefieldmodel',
'model': OneFieldModel,
}
defaults.update(**kwargs)
return self.site.register(ModelBackend, **defaults)
def test_registration(self):
self.site.register(
ModelBackend,
id='onefieldmodel',
model=OneFieldModel)
def test_get_form_class(self):
backend = self.get_basic_backend()
form_class = backend.get_form_class()
self.assertEqual(form_class.Meta.model, OneFieldModel)
self.assertEqual(form_class.base_fields.keys(), ['name'])
|
<commit_before><commit_msg>Add basic tests for ModelBackend<commit_after>from django.test import TestCase
from django_backend.backend.base.backends import ModelBackend
from django_backend.sitebackend import SiteBackend
from .models import OneFieldModel
class ModelBackendTest(TestCase):
def setUp(self):
self.site = SiteBackend(id='test')
def get_basic_backend(self, **kwargs):
defaults = {
'id': 'onefieldmodel',
'model': OneFieldModel,
}
defaults.update(**kwargs)
return self.site.register(ModelBackend, **defaults)
def test_registration(self):
self.site.register(
ModelBackend,
id='onefieldmodel',
model=OneFieldModel)
def test_get_form_class(self):
backend = self.get_basic_backend()
form_class = backend.get_form_class()
self.assertEqual(form_class.Meta.model, OneFieldModel)
self.assertEqual(form_class.base_fields.keys(), ['name'])
|
|
0cdb99856a929dc6c832f55dbdc013b5eac95a78
|
labonneboite/alembic/versions/99473cb51564_add_index_on_etablissements_email.py
|
labonneboite/alembic/versions/99473cb51564_add_index_on_etablissements_email.py
|
"""
add index on etablissements email
Revision ID: 99473cb51564
Revises: 200d176f96b6
Create Date: 2018-11-30 15:38:57.294679
"""
from alembic import op
# Revision identifiers, used by Alembic.
revision = '99473cb51564'
down_revision = '200d176f96b6'
branch_labels = None
depends_on = None
def upgrade():
op.create_index('_email', 'etablissements', ['email'], unique=False)
op.create_index('_email', 'etablissements_exportable', ['email'], unique=False)
def downgrade():
    op.drop_index('_email', table_name='etablissements')
    op.drop_index('_email', table_name='etablissements_exportable')
|
Add index on etablissements email for quick scam cleanup performance
|
Add index on etablissements email for quick scam cleanup performance
|
Python
|
agpl-3.0
|
StartupsPoleEmploi/labonneboite,StartupsPoleEmploi/labonneboite,StartupsPoleEmploi/labonneboite,StartupsPoleEmploi/labonneboite
|
Add index on etablissements email for quick scam cleanup performance
|
"""
add index on etablissements email
Revision ID: 99473cb51564
Revises: 200d176f96b6
Create Date: 2018-11-30 15:38:57.294679
"""
from alembic import op
# Revision identifiers, used by Alembic.
revision = '99473cb51564'
down_revision = '200d176f96b6'
branch_labels = None
depends_on = None
def upgrade():
op.create_index('_email', 'etablissements', ['email'], unique=False)
op.create_index('_email', 'etablissements_exportable', ['email'], unique=False)
def downgrade():
    op.drop_index('_email', table_name='etablissements')
    op.drop_index('_email', table_name='etablissements_exportable')
|
<commit_before><commit_msg>Add index on etablissements email for quick scam cleanup performance<commit_after>
|
"""
add index on etablissements email
Revision ID: 99473cb51564
Revises: 200d176f96b6
Create Date: 2018-11-30 15:38:57.294679
"""
from alembic import op
# Revision identifiers, used by Alembic.
revision = '99473cb51564'
down_revision = '200d176f96b6'
branch_labels = None
depends_on = None
def upgrade():
op.create_index('_email', 'etablissements', ['email'], unique=False)
op.create_index('_email', 'etablissements_exportable', ['email'], unique=False)
def downgrade():
    op.drop_index('_email', table_name='etablissements')
    op.drop_index('_email', table_name='etablissements_exportable')
|
Add index on etablissements email for quick scam cleanup performance"""
add index on etablissements email
Revision ID: 99473cb51564
Revises: 200d176f96b6
Create Date: 2018-11-30 15:38:57.294679
"""
from alembic import op
# Revision identifiers, used by Alembic.
revision = '99473cb51564'
down_revision = '200d176f96b6'
branch_labels = None
depends_on = None
def upgrade():
op.create_index('_email', 'etablissements', ['email'], unique=False)
op.create_index('_email', 'etablissements_exportable', ['email'], unique=False)
def downgrade():
    op.drop_index('_email', table_name='etablissements')
    op.drop_index('_email', table_name='etablissements_exportable')
|
<commit_before><commit_msg>Add index on etablissements email for quick scam cleanup performance<commit_after>"""
add index on etablissements email
Revision ID: 99473cb51564
Revises: 200d176f96b6
Create Date: 2018-11-30 15:38:57.294679
"""
from alembic import op
# Revision identifiers, used by Alembic.
revision = '99473cb51564'
down_revision = '200d176f96b6'
branch_labels = None
depends_on = None
def upgrade():
op.create_index('_email', 'etablissements', ['email'], unique=False)
op.create_index('_email', 'etablissements_exportable', ['email'], unique=False)
def downgrade():
    op.drop_index('_email', table_name='etablissements')
    op.drop_index('_email', table_name='etablissements_exportable')
|
|
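For reference, op.drop_index is the exact inverse of op.create_index, which keeps upgrade and downgrade symmetric; a minimal skeleton of the pair for one of the two tables above:
from alembic import op

def upgrade():
    op.create_index('_email', 'etablissements', ['email'], unique=False)

def downgrade():
    # drop_index mirrors create_index; drop_constraint would look for a
    # constraint that create_index never made.
    op.drop_index('_email', table_name='etablissements')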
97b1e034b8028aff4566d0dcf24d3e1d41c803e9
|
migrations/versions/0259_remove_service_postage.py
|
migrations/versions/0259_remove_service_postage.py
|
"""
Revision ID: 0259_remove_service_postage
Revises: 0258_service_postage_nullable
Create Date: 2019-02-11 17:12:22.341599
"""
from alembic import op
import sqlalchemy as sa
revision = '0259_remove_service_postage'
down_revision = '0258_service_postage_nullable'
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('services', 'postage')
op.drop_column('services_history', 'postage')
op.execute("DELETE FROM service_permissions WHERE permission = 'choose_postage'")
op.execute("DELETE FROM service_permission_types WHERE name = 'choose_postage'")
op.execute(
"""UPDATE templates_history SET postage = templates.postage
FROM templates WHERE templates_history.id = templates.id AND templates_history.template_type = 'letter'
AND templates_history.postage is null"""
)
op.execute("""
ALTER TABLE templates ADD CONSTRAINT "chk_templates_postage"
CHECK (
CASE WHEN template_type = 'letter' THEN
postage is not null and postage in ('first', 'second')
ELSE
postage is null
END
)
""")
op.execute("""
ALTER TABLE templates_history ADD CONSTRAINT "chk_templates_history_postage"
CHECK (
CASE WHEN template_type = 'letter' THEN
postage is not null and postage in ('first', 'second')
ELSE
postage is null
END
)
""")
op.execute("""
ALTER TABLE templates DROP CONSTRAINT "chk_templates_postage_null"
""")
op.execute("""
ALTER TABLE templates_history DROP CONSTRAINT "chk_templates_history_postage_null"
""")
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('services_history', sa.Column('postage', sa.VARCHAR(length=255), autoincrement=False, nullable=True))
op.add_column('services', sa.Column('postage', sa.VARCHAR(length=255), autoincrement=False, nullable=False))
op.execute("INSERT INTO service_permission_types VALUES ('choose_postage')")
op.execute("""
ALTER TABLE templates ADD CONSTRAINT "chk_templates_postage_null"
CHECK (
CASE WHEN template_type = 'letter' THEN
postage in ('first', 'second') OR
postage is null
ELSE
postage is null
END
)
""")
op.execute("""
ALTER TABLE templates_history ADD CONSTRAINT "chk_templates_history_postage_null"
CHECK (
CASE WHEN template_type = 'letter' THEN
postage in ('first', 'second') OR
postage is null
ELSE
postage is null
END
)
""")
op.execute("""
ALTER TABLE templates DROP CONSTRAINT "chk_templates_postage"
""")
op.execute("""
ALTER TABLE templates_history DROP CONSTRAINT "chk_templates_history_postage"
""")
# ### end Alembic commands ###
|
Remove service.postage and choose_postage permission from database
|
Remove service.postage and choose_postage permission from database
Also change constraint on template postage so it cannot be null for
letters
Also add postage to all letters in template_history to respect new constraint
|
Python
|
mit
|
alphagov/notifications-api,alphagov/notifications-api
|
Remove service.postage and choose_postage permission from database
Also change constraint on template postage so it cannot be null for
letters
Also add postage to all letters in template_history to respect new constraint
|
"""
Revision ID: 0259_remove_service_postage
Revises: 0258_service_postage_nullable
Create Date: 2019-02-11 17:12:22.341599
"""
from alembic import op
import sqlalchemy as sa
revision = '0259_remove_service_postage'
down_revision = '0258_service_postage_nullable'
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('services', 'postage')
op.drop_column('services_history', 'postage')
op.execute("DELETE FROM service_permissions WHERE permission = 'choose_postage'")
op.execute("DELETE FROM service_permission_types WHERE name = 'choose_postage'")
op.execute(
"""UPDATE templates_history SET postage = templates.postage
FROM templates WHERE templates_history.id = templates.id AND templates_history.template_type = 'letter'
AND templates_history.postage is null"""
)
op.execute("""
ALTER TABLE templates ADD CONSTRAINT "chk_templates_postage"
CHECK (
CASE WHEN template_type = 'letter' THEN
postage is not null and postage in ('first', 'second')
ELSE
postage is null
END
)
""")
op.execute("""
ALTER TABLE templates_history ADD CONSTRAINT "chk_templates_history_postage"
CHECK (
CASE WHEN template_type = 'letter' THEN
postage is not null and postage in ('first', 'second')
ELSE
postage is null
END
)
""")
op.execute("""
ALTER TABLE templates DROP CONSTRAINT "chk_templates_postage_null"
""")
op.execute("""
ALTER TABLE templates_history DROP CONSTRAINT "chk_templates_history_postage_null"
""")
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('services_history', sa.Column('postage', sa.VARCHAR(length=255), autoincrement=False, nullable=True))
op.add_column('services', sa.Column('postage', sa.VARCHAR(length=255), autoincrement=False, nullable=False))
op.execute("INSERT INTO service_permission_types VALUES ('choose_postage')")
op.execute("""
ALTER TABLE templates ADD CONSTRAINT "chk_templates_postage_null"
CHECK (
CASE WHEN template_type = 'letter' THEN
postage in ('first', 'second') OR
postage is null
ELSE
postage is null
END
)
""")
op.execute("""
ALTER TABLE templates_history ADD CONSTRAINT "chk_templates_history_postage_null"
CHECK (
CASE WHEN template_type = 'letter' THEN
postage in ('first', 'second') OR
postage is null
ELSE
postage is null
END
)
""")
op.execute("""
ALTER TABLE templates DROP CONSTRAINT "chk_templates_postage"
""")
op.execute("""
ALTER TABLE templates_history DROP CONSTRAINT "chk_templates_history_postage"
""")
# ### end Alembic commands ###
|
<commit_before><commit_msg>Remove service.postage and choose_postage permission from database
Also change constraint on template postage so it cannot be null for
letters
Also add postage to all letters in template_history to respect new constraint<commit_after>
|
"""
Revision ID: 0259_remove_service_postage
Revises: 0258_service_postage_nullable
Create Date: 2019-02-11 17:12:22.341599
"""
from alembic import op
import sqlalchemy as sa
revision = '0259_remove_service_postage'
down_revision = '0258_service_postage_nullable'
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('services', 'postage')
op.drop_column('services_history', 'postage')
op.execute("DELETE FROM service_permissions WHERE permission = 'choose_postage'")
op.execute("DELETE FROM service_permission_types WHERE name = 'choose_postage'")
op.execute(
"""UPDATE templates_history SET postage = templates.postage
FROM templates WHERE templates_history.id = templates.id AND templates_history.template_type = 'letter'
AND templates_history.postage is null"""
)
op.execute("""
ALTER TABLE templates ADD CONSTRAINT "chk_templates_postage"
CHECK (
CASE WHEN template_type = 'letter' THEN
postage is not null and postage in ('first', 'second')
ELSE
postage is null
END
)
""")
op.execute("""
ALTER TABLE templates_history ADD CONSTRAINT "chk_templates_history_postage"
CHECK (
CASE WHEN template_type = 'letter' THEN
postage is not null and postage in ('first', 'second')
ELSE
postage is null
END
)
""")
op.execute("""
ALTER TABLE templates DROP CONSTRAINT "chk_templates_postage_null"
""")
op.execute("""
ALTER TABLE templates_history DROP CONSTRAINT "chk_templates_history_postage_null"
""")
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('services_history', sa.Column('postage', sa.VARCHAR(length=255), autoincrement=False, nullable=True))
op.add_column('services', sa.Column('postage', sa.VARCHAR(length=255), autoincrement=False, nullable=False))
op.execute("INSERT INTO service_permission_types VALUES ('choose_postage')")
op.execute("""
ALTER TABLE templates ADD CONSTRAINT "chk_templates_postage_null"
CHECK (
CASE WHEN template_type = 'letter' THEN
postage in ('first', 'second') OR
postage is null
ELSE
postage is null
END
)
""")
op.execute("""
ALTER TABLE templates_history ADD CONSTRAINT "chk_templates_history_postage_null"
CHECK (
CASE WHEN template_type = 'letter' THEN
postage in ('first', 'second') OR
postage is null
ELSE
postage is null
END
)
""")
op.execute("""
ALTER TABLE templates DROP CONSTRAINT "chk_templates_postage"
""")
op.execute("""
ALTER TABLE templates_history DROP CONSTRAINT "chk_templates_history_postage"
""")
# ### end Alembic commands ###
|
Remove service.postage and choose_postage permission from database
Also change constraint on template postage so it cannot be null for
letters
Also add postage to all letters in template_history to respect new constraint"""
Revision ID: 0259_remove_service_postage
Revises: 0258_service_postage_nullable
Create Date: 2019-02-11 17:12:22.341599
"""
from alembic import op
import sqlalchemy as sa
revision = '0259_remove_service_postage'
down_revision = '0258_service_postage_nullable'
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('services', 'postage')
op.drop_column('services_history', 'postage')
op.execute("DELETE FROM service_permissions WHERE permission = 'choose_postage'")
op.execute("DELETE FROM service_permission_types WHERE name = 'choose_postage'")
op.execute(
"""UPDATE templates_history SET postage = templates.postage
FROM templates WHERE templates_history.id = templates.id AND templates_history.template_type = 'letter'
AND templates_history.postage is null"""
)
op.execute("""
ALTER TABLE templates ADD CONSTRAINT "chk_templates_postage"
CHECK (
CASE WHEN template_type = 'letter' THEN
postage is not null and postage in ('first', 'second')
ELSE
postage is null
END
)
""")
op.execute("""
ALTER TABLE templates_history ADD CONSTRAINT "chk_templates_history_postage"
CHECK (
CASE WHEN template_type = 'letter' THEN
postage is not null and postage in ('first', 'second')
ELSE
postage is null
END
)
""")
op.execute("""
ALTER TABLE templates DROP CONSTRAINT "chk_templates_postage_null"
""")
op.execute("""
ALTER TABLE templates_history DROP CONSTRAINT "chk_templates_history_postage_null"
""")
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('services_history', sa.Column('postage', sa.VARCHAR(length=255), autoincrement=False, nullable=True))
op.add_column('services', sa.Column('postage', sa.VARCHAR(length=255), autoincrement=False, nullable=False))
op.execute("INSERT INTO service_permission_types VALUES ('choose_postage')")
op.execute("""
ALTER TABLE templates ADD CONSTRAINT "chk_templates_postage_null"
CHECK (
CASE WHEN template_type = 'letter' THEN
postage in ('first', 'second') OR
postage is null
ELSE
postage is null
END
)
""")
op.execute("""
ALTER TABLE templates_history ADD CONSTRAINT "chk_templates_history_postage_null"
CHECK (
CASE WHEN template_type = 'letter' THEN
postage in ('first', 'second') OR
postage is null
ELSE
postage is null
END
)
""")
op.execute("""
ALTER TABLE templates DROP CONSTRAINT "chk_templates_postage"
""")
op.execute("""
ALTER TABLE templates_history DROP CONSTRAINT "chk_templates_history_postage"
""")
# ### end Alembic commands ###
|
<commit_before><commit_msg>Remove service.postage and choose_postage permission from database
Also change constraint on template postage so it cannot be null for
letters
Also add postage to all letters in template_history to respect new constraint<commit_after>"""
Revision ID: 0259_remove_service_postage
Revises: 0258_service_postage_nullable
Create Date: 2019-02-11 17:12:22.341599
"""
from alembic import op
import sqlalchemy as sa
revision = '0259_remove_service_postage'
down_revision = '0258_service_postage_nullable'
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('services', 'postage')
op.drop_column('services_history', 'postage')
op.execute("DELETE FROM service_permissions WHERE permission = 'choose_postage'")
op.execute("DELETE FROM service_permission_types WHERE name = 'choose_postage'")
op.execute(
"""UPDATE templates_history SET postage = templates.postage
FROM templates WHERE templates_history.id = templates.id AND templates_history.template_type = 'letter'
AND templates_history.postage is null"""
)
op.execute("""
ALTER TABLE templates ADD CONSTRAINT "chk_templates_postage"
CHECK (
CASE WHEN template_type = 'letter' THEN
postage is not null and postage in ('first', 'second')
ELSE
postage is null
END
)
""")
op.execute("""
ALTER TABLE templates_history ADD CONSTRAINT "chk_templates_history_postage"
CHECK (
CASE WHEN template_type = 'letter' THEN
postage is not null and postage in ('first', 'second')
ELSE
postage is null
END
)
""")
op.execute("""
ALTER TABLE templates DROP CONSTRAINT "chk_templates_postage_null"
""")
op.execute("""
ALTER TABLE templates_history DROP CONSTRAINT "chk_templates_history_postage_null"
""")
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('services_history', sa.Column('postage', sa.VARCHAR(length=255), autoincrement=False, nullable=True))
op.add_column('services', sa.Column('postage', sa.VARCHAR(length=255), autoincrement=False, nullable=False))
op.execute("INSERT INTO service_permission_types VALUES ('choose_postage')")
op.execute("""
ALTER TABLE templates ADD CONSTRAINT "chk_templates_postage_null"
CHECK (
CASE WHEN template_type = 'letter' THEN
postage in ('first', 'second') OR
postage is null
ELSE
postage is null
END
)
""")
op.execute("""
ALTER TABLE templates_history ADD CONSTRAINT "chk_templates_history_postage_null"
CHECK (
CASE WHEN template_type = 'letter' THEN
postage in ('first', 'second') OR
postage is null
ELSE
postage is null
END
)
""")
op.execute("""
ALTER TABLE templates DROP CONSTRAINT "chk_templates_postage"
""")
op.execute("""
ALTER TABLE templates_history DROP CONSTRAINT "chk_templates_history_postage"
""")
# ### end Alembic commands ###
|
|
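The raw ALTER TABLE statements above can also be written with Alembic's built-in check-constraint helpers; a sketch of the same letter-postage rule for the templates table, with the condition text condensed from the migration:
from alembic import op

def upgrade():
    op.create_check_constraint(
        'chk_templates_postage',
        'templates',
        "CASE WHEN template_type = 'letter' "
        "THEN postage IS NOT NULL AND postage IN ('first', 'second') "
        "ELSE postage IS NULL END",
    )

def downgrade():
    op.drop_constraint('chk_templates_postage', 'templates', type_='check')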
94d4d79614a86ddee5f03d60c272558c5acf2397
|
Source/Scm/make_wb_git_images.py
|
Source/Scm/make_wb_git_images.py
|
#!/usr/bin/env python3
'''
====================================================================
Copyright (c) 2003-2016 Barry A Scott. All rights reserved.
This software is licensed as described in the file LICENSE.txt,
which you should have received as part of this distribution.
====================================================================
make_wb_git_images.py
'''
import sys
data_slice = 32
argv = [
sys.argv[0],
'wb_scm_images.py',
'../wb.png',
'../toolbar_images/editcopy.png',
'../toolbar_images/editcut.png',
'../toolbar_images/editpaste.png',
'../toolbar_images/terminal.png',
'../toolbar_images/file_browser.png',
'../toolbar_images/edit.png',
'../toolbar_images/open.png',
'../toolbar_images/include.png',
'../toolbar_images/exclude.png',
'../toolbar_images/revert.png',
'../toolbar_images/diff.png',
'../toolbar_images/history.png',
'../toolbar_images/commit.png',
'../toolbar_images/push.png',
'../toolbar_images/pull.png',
]
def main( argv ):
with open( argv[1], 'w' ) as f:
f.write( header )
for filename in argv[2:]:
if filename.startswith( '../' ):
image_name = filename[len('../'):]
else:
image_name = filename
f.write( 'images_by_filename["%s"] = (\n' % (image_name,) )
with open( filename, 'rb' ) as image:
data = image.read()
for offset in range( 0, len(data), data_slice ):
f.write( ' %r\n' % data[offset:offset+data_slice] )
f.write( ' )\n' )
f.write( footer )
header = '''
from PyQt5 import QtGui
def getQImage( name ):
image = QtGui.QImage()
image.loadFromData( images_by_filename[ name ] )
return image
def getQPixmap( name ):
return QtGui.QPixmap( getQImage( name ) )
def getQIcon( name ):
return QtGui.QIcon( getQPixmap( name ) )
images_by_filename = {}
'''
footer = '''
'''
if __name__ == '__main__':
sys.exit( main( argv ) )
|
Add script to make images file
|
Add script to make images file
|
Python
|
apache-2.0
|
barry-scott/git-workbench,barry-scott/scm-workbench,barry-scott/git-workbench,barry-scott/scm-workbench,barry-scott/scm-workbench
|
Add script to make images file
|
#!/usr/bin/env python3
'''
====================================================================
Copyright (c) 2003-2016 Barry A Scott. All rights reserved.
This software is licensed as described in the file LICENSE.txt,
which you should have received as part of this distribution.
====================================================================
make_wb_git_images.py
'''
import sys
data_slice = 32
argv = [
sys.argv[0],
'wb_scm_images.py',
'../wb.png',
'../toolbar_images/editcopy.png',
'../toolbar_images/editcut.png',
'../toolbar_images/editpaste.png',
'../toolbar_images/terminal.png',
'../toolbar_images/file_browser.png',
'../toolbar_images/edit.png',
'../toolbar_images/open.png',
'../toolbar_images/include.png',
'../toolbar_images/exclude.png',
'../toolbar_images/revert.png',
'../toolbar_images/diff.png',
'../toolbar_images/history.png',
'../toolbar_images/commit.png',
'../toolbar_images/push.png',
'../toolbar_images/pull.png',
]
def main( argv ):
with open( argv[1], 'w' ) as f:
f.write( header )
for filename in argv[2:]:
if filename.startswith( '../' ):
image_name = filename[len('../'):]
else:
image_name = filename
f.write( 'images_by_filename["%s"] = (\n' % (image_name,) )
with open( filename, 'rb' ) as image:
data = image.read()
for offset in range( 0, len(data), data_slice ):
f.write( ' %r\n' % data[offset:offset+data_slice] )
f.write( ' )\n' )
f.write( footer )
header = '''
from PyQt5 import QtGui
def getQImage( name ):
image = QtGui.QImage()
image.loadFromData( images_by_filename[ name ] )
return image
def getQPixmap( name ):
return QtGui.QPixmap( getQImage( name ) )
def getQIcon( name ):
return QtGui.QIcon( getQPixmap( name ) )
images_by_filename = {}
'''
footer = '''
'''
if __name__ == '__main__':
sys.exit( main( argv ) )
|
<commit_before><commit_msg>Add script to make images file<commit_after>
|
#!/usr/bin/env python3
'''
====================================================================
Copyright (c) 2003-2016 Barry A Scott. All rights reserved.
This software is licensed as described in the file LICENSE.txt,
which you should have received as part of this distribution.
====================================================================
make_wb_git_images.py
'''
import sys
data_slice = 32
argv = [
sys.argv[0],
'wb_scm_images.py',
'../wb.png',
'../toolbar_images/editcopy.png',
'../toolbar_images/editcut.png',
'../toolbar_images/editpaste.png',
'../toolbar_images/terminal.png',
'../toolbar_images/file_browser.png',
'../toolbar_images/edit.png',
'../toolbar_images/open.png',
'../toolbar_images/include.png',
'../toolbar_images/exclude.png',
'../toolbar_images/revert.png',
'../toolbar_images/diff.png',
'../toolbar_images/history.png',
'../toolbar_images/commit.png',
'../toolbar_images/push.png',
'../toolbar_images/pull.png',
]
def main( argv ):
with open( argv[1], 'w' ) as f:
f.write( header )
for filename in argv[2:]:
if filename.startswith( '../' ):
image_name = filename[len('../'):]
else:
image_name = filename
f.write( 'images_by_filename["%s"] = (\n' % (image_name,) )
with open( filename, 'rb' ) as image:
data = image.read()
for offset in range( 0, len(data), data_slice ):
f.write( ' %r\n' % data[offset:offset+data_slice] )
f.write( ' )\n' )
f.write( footer )
header = '''
from PyQt5 import QtGui
def getQImage( name ):
image = QtGui.QImage()
image.loadFromData( images_by_filename[ name ] )
return image
def getQPixmap( name ):
return QtGui.QPixmap( getQImage( name ) )
def getQIcon( name ):
return QtGui.QIcon( getQPixmap( name ) )
images_by_filename = {}
'''
footer = '''
'''
if __name__ == '__main__':
sys.exit( main( argv ) )
|
Add script to make images file#!/usr/bin/env python3
'''
====================================================================
Copyright (c) 2003-2016 Barry A Scott. All rights reserved.
This software is licensed as described in the file LICENSE.txt,
which you should have received as part of this distribution.
====================================================================
make_wb_git_images.py
'''
import sys
data_slice = 32
argv = [
sys.argv[0],
'wb_scm_images.py',
'../wb.png',
'../toolbar_images/editcopy.png',
'../toolbar_images/editcut.png',
'../toolbar_images/editpaste.png',
'../toolbar_images/terminal.png',
'../toolbar_images/file_browser.png',
'../toolbar_images/edit.png',
'../toolbar_images/open.png',
'../toolbar_images/include.png',
'../toolbar_images/exclude.png',
'../toolbar_images/revert.png',
'../toolbar_images/diff.png',
'../toolbar_images/history.png',
'../toolbar_images/commit.png',
'../toolbar_images/push.png',
'../toolbar_images/pull.png',
]
def main( argv ):
with open( argv[1], 'w' ) as f:
f.write( header )
for filename in argv[2:]:
if filename.startswith( '../' ):
image_name = filename[len('../'):]
else:
image_name = filename
f.write( 'images_by_filename["%s"] = (\n' % (image_name,) )
with open( filename, 'rb' ) as image:
data = image.read()
for offset in range( 0, len(data), data_slice ):
f.write( ' %r\n' % data[offset:offset+data_slice] )
f.write( ' )\n' )
f.write( footer )
header = '''
from PyQt5 import QtGui
def getQImage( name ):
image = QtGui.QImage()
image.loadFromData( images_by_filename[ name ] )
return image
def getQPixmap( name ):
return QtGui.QPixmap( getQImage( name ) )
def getQIcon( name ):
return QtGui.QIcon( getQPixmap( name ) )
images_by_filename = {}
'''
footer = '''
'''
if __name__ == '__main__':
sys.exit( main( argv ) )
|
<commit_before><commit_msg>Add script to make images file<commit_after>#!/usr/bin/env python3
'''
====================================================================
Copyright (c) 2003-2016 Barry A Scott. All rights reserved.
This software is licensed as described in the file LICENSE.txt,
which you should have received as part of this distribution.
====================================================================
make_wb_git_images.py
'''
import sys
data_slice = 32
argv = [
sys.argv[0],
'wb_scm_images.py',
'../wb.png',
'../toolbar_images/editcopy.png',
'../toolbar_images/editcut.png',
'../toolbar_images/editpaste.png',
'../toolbar_images/terminal.png',
'../toolbar_images/file_browser.png',
'../toolbar_images/edit.png',
'../toolbar_images/open.png',
'../toolbar_images/include.png',
'../toolbar_images/exclude.png',
'../toolbar_images/revert.png',
'../toolbar_images/diff.png',
'../toolbar_images/history.png',
'../toolbar_images/commit.png',
'../toolbar_images/push.png',
'../toolbar_images/pull.png',
]
def main( argv ):
with open( argv[1], 'w' ) as f:
f.write( header )
for filename in argv[2:]:
if filename.startswith( '../' ):
image_name = filename[len('../'):]
else:
image_name = filename
f.write( 'images_by_filename["%s"] = (\n' % (image_name,) )
with open( filename, 'rb' ) as image:
data = image.read()
for offset in range( 0, len(data), data_slice ):
f.write( ' %r\n' % data[offset:offset+data_slice] )
f.write( ' )\n' )
f.write( footer )
header = '''
from PyQt5 import QtGui
def getQImage( name ):
image = QtGui.QImage()
image.loadFromData( images_by_filename[ name ] )
return image
def getQPixmap( name ):
return QtGui.QPixmap( getQImage( name ) )
def getQIcon( name ):
return QtGui.QIcon( getQPixmap( name ) )
images_by_filename = {}
'''
footer = '''
'''
if __name__ == '__main__':
sys.exit( main( argv ) )
|
|
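A sketch of how the generated wb_scm_images module is meant to be consumed from PyQt5 code — the widget here is illustrative; the lookup key is the source path with its leading '../' stripped, per the script above:
from PyQt5 import QtWidgets
import wb_scm_images  # the module emitted by the script above

app = QtWidgets.QApplication([])
button = QtWidgets.QPushButton('Commit')
button.setIcon(wb_scm_images.getQIcon('toolbar_images/commit.png'))
button.show()
app.exec_()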
4ee7576f2c1999258cb33d07f67951aea3de917c
|
migrations/versions/cf6ed76ef146_.py
|
migrations/versions/cf6ed76ef146_.py
|
"""Add `AuthTokens` table.
Revision ID: cf6ed76ef146
Revises: 8da7405903f6
Create Date: 2020-09-10 17:13:40.407017
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'cf6ed76ef146'
down_revision = '8da7405903f6'
branch_labels = None
depends_on = None
def upgrade():
try:
op.create_table(
'auth_tokens',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=100), nullable=True),
sa.Column('description', sa.String(length=300), nullable=True),
sa.Column('token', sa.String(length=32), nullable=True),
sa.Column('role_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['role_id'], ['roles.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('token'))
except Exception:
pass
def downgrade():
op.drop_table('auth_tokens')
|
Add AuthTokens table migration file.
|
Add AuthTokens table migration file.
|
Python
|
mpl-2.0
|
mrf345/FQM,mrf345/FQM,mrf345/FQM,mrf345/FQM
|
Add AuthTokens table migration file.
|
"""Add `AuthTokens` table.
Revision ID: cf6ed76ef146
Revises: 8da7405903f6
Create Date: 2020-09-10 17:13:40.407017
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'cf6ed76ef146'
down_revision = '8da7405903f6'
branch_labels = None
depends_on = None
def upgrade():
try:
op.create_table(
'auth_tokens',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=100), nullable=True),
sa.Column('description', sa.String(length=300), nullable=True),
sa.Column('token', sa.String(length=32), nullable=True),
sa.Column('role_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['role_id'], ['roles.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('token'))
except Exception:
pass
def downgrade():
op.drop_table('auth_tokens')
|
<commit_before><commit_msg>Add AuthTokens table migration file.<commit_after>
|
"""Add `AuthTokens` table.
Revision ID: cf6ed76ef146
Revises: 8da7405903f6
Create Date: 2020-09-10 17:13:40.407017
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'cf6ed76ef146'
down_revision = '8da7405903f6'
branch_labels = None
depends_on = None
def upgrade():
try:
op.create_table(
'auth_tokens',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=100), nullable=True),
sa.Column('description', sa.String(length=300), nullable=True),
sa.Column('token', sa.String(length=32), nullable=True),
sa.Column('role_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['role_id'], ['roles.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('token'))
except Exception:
pass
def downgrade():
op.drop_table('auth_tokens')
|
Add AuthTokens table migration file."""Add `AuthTokens` table.
Revision ID: cf6ed76ef146
Revises: 8da7405903f6
Create Date: 2020-09-10 17:13:40.407017
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'cf6ed76ef146'
down_revision = '8da7405903f6'
branch_labels = None
depends_on = None
def upgrade():
try:
op.create_table(
'auth_tokens',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=100), nullable=True),
sa.Column('description', sa.String(length=300), nullable=True),
sa.Column('token', sa.String(length=32), nullable=True),
sa.Column('role_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['role_id'], ['roles.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('token'))
except Exception:
pass
def downgrade():
op.drop_table('auth_tokens')
|
<commit_before><commit_msg>Add AuthTokens table migration file.<commit_after>"""Add `AuthTokens` table.
Revision ID: cf6ed76ef146
Revises: 8da7405903f6
Create Date: 2020-09-10 17:13:40.407017
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'cf6ed76ef146'
down_revision = '8da7405903f6'
branch_labels = None
depends_on = None
def upgrade():
try:
op.create_table(
'auth_tokens',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=100), nullable=True),
sa.Column('description', sa.String(length=300), nullable=True),
sa.Column('token', sa.String(length=32), nullable=True),
sa.Column('role_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['role_id'], ['roles.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('token'))
except Exception:
pass
def downgrade():
op.drop_table('auth_tokens')
|
|
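The token column above is 32 characters wide; one plausible way for the application layer (not shown in this migration) to mint matching tokens is secrets.token_hex:
import secrets

def generate_auth_token():
    # token_hex(16) returns 32 hex characters, matching String(length=32)
    return secrets.token_hex(16)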
f27c01b188b1ecbb42bb5b672ef3c95bf2a271a6
|
run_mc_asimov_analysis.py
|
run_mc_asimov_analysis.py
|
import os
import sys
import copy
from fermipy.gtanalysis import GTAnalysis
from haloanalysis.fit_funcs import fit_halo
import numpy as np
import itertools
import argparse
import yaml
if __name__ == '__main__':
usage = "usage: %(prog)s [config file]"
description = "Run fermipy analysis chain."
parser = argparse.ArgumentParser(usage=usage,description=description)
parser.add_argument('--config', default = 'sample_config.yaml')
parser.add_argument('--source', default = None)
args = parser.parse_args()
config = yaml.load(open(args.config,'r'))
halo_width = np.logspace(-1.5,0,4)
halo_index = np.array([1.5,2.0,2.5])
# halo_source_dict = {
# 'SpectrumType' : 'PowerLaw',
# 'Index' : 2.0,
# 'Scale' : 1000,
# 'Prefactor' : { 'value' : 1E-5, 'scale' : 1e-13 },
# 'SpatialModel' : 'GaussianSource',
# 'SpatialWidth' : 1.0
# }
gta = GTAnalysis(args.config,logging={'verbosity' : 3})
gta.setup()
gta.simulate_roi(restore=True)
ext_fit_data = []
halo_fit_data = []
gta.write_roi('base_model',save_model_map=False,make_plots=False)
for i in range(1):
gta.load_xml('base_model')
gta.simulate_roi(randomize=False)
gta.free_source('testsource')
gta.update_source('testsource')
# gta.fit()
gta.free_sources(free=False)
gta.extension('testsource',width=np.logspace(-2.5,-0.5,9))
ext_fit_data += [copy.deepcopy(gta.roi['testsource'])]
gta.write_roi('fit%04i'%i,save_model_map=False,make_plots=False,
format='npy')
fit_halo(gta,'fit%04i'%i,'testsource',halo_width,halo_index)
np.save(os.path.join(gta._savedir,'ext_fit_data.npy'),ext_fit_data)
|
Add script for asimov MC analysis.
|
Add script for asimov MC analysis.
|
Python
|
bsd-3-clause
|
woodmd/haloanalysis,woodmd/haloanalysis
|
Add script for asimov MC analysis.
|
import os
import sys
import copy
from fermipy.gtanalysis import GTAnalysis
from haloanalysis.fit_funcs import fit_halo
import numpy as np
import itertools
import argparse
import yaml
if __name__ == '__main__':
usage = "usage: %(prog)s [config file]"
description = "Run fermipy analysis chain."
parser = argparse.ArgumentParser(usage=usage,description=description)
parser.add_argument('--config', default = 'sample_config.yaml')
parser.add_argument('--source', default = None)
args = parser.parse_args()
config = yaml.load(open(args.config,'r'))
halo_width = np.logspace(-1.5,0,4)
halo_index = np.array([1.5,2.0,2.5])
# halo_source_dict = {
# 'SpectrumType' : 'PowerLaw',
# 'Index' : 2.0,
# 'Scale' : 1000,
# 'Prefactor' : { 'value' : 1E-5, 'scale' : 1e-13 },
# 'SpatialModel' : 'GaussianSource',
# 'SpatialWidth' : 1.0
# }
gta = GTAnalysis(args.config,logging={'verbosity' : 3})
gta.setup()
gta.simulate_roi(restore=True)
ext_fit_data = []
halo_fit_data = []
gta.write_roi('base_model',save_model_map=False,make_plots=False)
for i in range(1):
gta.load_xml('base_model')
gta.simulate_roi(randomize=False)
gta.free_source('testsource')
gta.update_source('testsource')
# gta.fit()
gta.free_sources(free=False)
gta.extension('testsource',width=np.logspace(-2.5,-0.5,9))
ext_fit_data += [copy.deepcopy(gta.roi['testsource'])]
gta.write_roi('fit%04i'%i,save_model_map=False,make_plots=False,
format='npy')
fit_halo(gta,'fit%04i'%i,'testsource',halo_width,halo_index)
np.save(os.path.join(gta._savedir,'ext_fit_data.npy'),ext_fit_data)
|
<commit_before><commit_msg>Add script for asimov MC analysis.<commit_after>
|
import os
import sys
import copy
from fermipy.gtanalysis import GTAnalysis
from haloanalysis.fit_funcs import fit_halo
import numpy as np
import itertools
import argparse
import yaml
if __name__ == '__main__':
usage = "usage: %(prog)s [config file]"
description = "Run fermipy analysis chain."
parser = argparse.ArgumentParser(usage=usage,description=description)
parser.add_argument('--config', default = 'sample_config.yaml')
parser.add_argument('--source', default = None)
args = parser.parse_args()
config = yaml.load(open(args.config,'r'))
halo_width = np.logspace(-1.5,0,4)
halo_index = np.array([1.5,2.0,2.5])
# halo_source_dict = {
# 'SpectrumType' : 'PowerLaw',
# 'Index' : 2.0,
# 'Scale' : 1000,
# 'Prefactor' : { 'value' : 1E-5, 'scale' : 1e-13 },
# 'SpatialModel' : 'GaussianSource',
# 'SpatialWidth' : 1.0
# }
gta = GTAnalysis(args.config,logging={'verbosity' : 3})
gta.setup()
gta.simulate_roi(restore=True)
ext_fit_data = []
halo_fit_data = []
gta.write_roi('base_model',save_model_map=False,make_plots=False)
for i in range(1):
gta.load_xml('base_model')
gta.simulate_roi(randomize=False)
gta.free_source('testsource')
gta.update_source('testsource')
# gta.fit()
gta.free_sources(free=False)
gta.extension('testsource',width=np.logspace(-2.5,-0.5,9))
ext_fit_data += [copy.deepcopy(gta.roi['testsource'])]
gta.write_roi('fit%04i'%i,save_model_map=False,make_plots=False,
format='npy')
fit_halo(gta,'fit%04i'%i,'testsource',halo_width,halo_index)
np.save(os.path.join(gta._savedir,'ext_fit_data.npy'),ext_fit_data)
|
Add script for asimov MC analysis.import os
import sys
import copy
from fermipy.gtanalysis import GTAnalysis
from haloanalysis.fit_funcs import fit_halo
import numpy as np
import itertools
import argparse
import yaml
if __name__ == '__main__':
usage = "usage: %(prog)s [config file]"
description = "Run fermipy analysis chain."
parser = argparse.ArgumentParser(usage=usage,description=description)
parser.add_argument('--config', default = 'sample_config.yaml')
parser.add_argument('--source', default = None)
args = parser.parse_args()
config = yaml.load(open(args.config,'r'))
halo_width = np.logspace(-1.5,0,4)
halo_index = np.array([1.5,2.0,2.5])
# halo_source_dict = {
# 'SpectrumType' : 'PowerLaw',
# 'Index' : 2.0,
# 'Scale' : 1000,
# 'Prefactor' : { 'value' : 1E-5, 'scale' : 1e-13 },
# 'SpatialModel' : 'GaussianSource',
# 'SpatialWidth' : 1.0
# }
gta = GTAnalysis(args.config,logging={'verbosity' : 3})
gta.setup()
gta.simulate_roi(restore=True)
ext_fit_data = []
halo_fit_data = []
gta.write_roi('base_model',save_model_map=False,make_plots=False)
for i in range(1):
gta.load_xml('base_model')
gta.simulate_roi(randomize=False)
gta.free_source('testsource')
gta.update_source('testsource')
# gta.fit()
gta.free_sources(free=False)
gta.extension('testsource',width=np.logspace(-2.5,-0.5,9))
ext_fit_data += [copy.deepcopy(gta.roi['testsource'])]
gta.write_roi('fit%04i'%i,save_model_map=False,make_plots=False,
format='npy')
fit_halo(gta,'fit%04i'%i,'testsource',halo_width,halo_index)
np.save(os.path.join(gta._savedir,'ext_fit_data.npy'),ext_fit_data)
|
<commit_before><commit_msg>Add script for asimov MC analysis.<commit_after>import os
import sys
import copy
from fermipy.gtanalysis import GTAnalysis
from haloanalysis.fit_funcs import fit_halo
import numpy as np
import itertools
import argparse
import yaml
if __name__ == '__main__':
usage = "usage: %(prog)s [config file]"
description = "Run fermipy analysis chain."
parser = argparse.ArgumentParser(usage=usage,description=description)
parser.add_argument('--config', default = 'sample_config.yaml')
parser.add_argument('--source', default = None)
args = parser.parse_args()
config = yaml.load(open(args.config,'r'))
halo_width = np.logspace(-1.5,0,4)
halo_index = np.array([1.5,2.0,2.5])
# halo_source_dict = {
# 'SpectrumType' : 'PowerLaw',
# 'Index' : 2.0,
# 'Scale' : 1000,
# 'Prefactor' : { 'value' : 1E-5, 'scale' : 1e-13 },
# 'SpatialModel' : 'GaussianSource',
# 'SpatialWidth' : 1.0
# }
gta = GTAnalysis(args.config,logging={'verbosity' : 3})
gta.setup()
gta.simulate_roi(restore=True)
ext_fit_data = []
halo_fit_data = []
gta.write_roi('base_model',save_model_map=False,make_plots=False)
for i in range(1):
gta.load_xml('base_model')
gta.simulate_roi(randomize=False)
gta.free_source('testsource')
gta.update_source('testsource')
# gta.fit()
gta.free_sources(free=False)
gta.extension('testsource',width=np.logspace(-2.5,-0.5,9))
ext_fit_data += [copy.deepcopy(gta.roi['testsource'])]
gta.write_roi('fit%04i'%i,save_model_map=False,make_plots=False,
format='npy')
fit_halo(gta,'fit%04i'%i,'testsource',halo_width,halo_index)
np.save(os.path.join(gta._savedir,'ext_fit_data.npy'),ext_fit_data)
|
|
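The ext_fit_data.npy file written above holds deep-copied ROI source objects rather than a plain numeric array. A minimal sketch for reading it back (assuming a modern NumPy, which requires allow_pickle for object arrays; the file name comes from the script, everything else is illustrative):
import numpy as np
# Load the pickled source copies saved by the analysis loop above.
ext_fit_data = np.load('ext_fit_data.npy', allow_pickle=True)
for src in ext_fit_data:
    print(src)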
cbe5617222047bc174e470051b5724f84f70609e
|
benchmarks/parse.py
|
benchmarks/parse.py
|
#!/usr/bin/env python
import json, sys, time
count = int(sys.argv[1])
for n in sys.argv[2:]:
print '%s:' % n
start = time.time()
fp = open(n)
for i in xrange(count):
fp.seek(0)
val = json.load(fp)
end = time.time()
print ' ', end - start
|
Add a Python json benchmark.
|
Add a Python json benchmark.
Alas, it's about 3x faster than us, due to being written in C.
|
Python
|
bsd-3-clause
|
jkarni/aeson,beni55/aeson,nurpax/aeson,lykahb/aeson,roelvandijk/aeson,plaprade/aeson,abbradar/aeson,sol/aeson,dmjio/aeson,sol/aeson,sol/aeson,aelve/json-x,neobrain/aeson,bwo/aeson,timmytofu/aeson,23Skidoo/aeson,JPMoresmau/aeson,neobrain/aeson,roelvandijk/aeson,SeanRBurton/aeson
|
Add a Python json benchmark.
Alas, it's about 3x faster than us, due to being written in C.
|
#!/usr/bin/env python
import json, sys, time
count = int(sys.argv[1])
for n in sys.argv[2:]:
print '%s:' % n
start = time.time()
fp = open(n)
for i in xrange(count):
fp.seek(0)
val = json.load(fp)
end = time.time()
print ' ', end - start
|
<commit_before><commit_msg>Add a Python json benchmark.
Alas, it's about 3x faster than us, due to being written in C.<commit_after>
|
#!/usr/bin/env python
import json, sys, time
count = int(sys.argv[1])
for n in sys.argv[2:]:
print '%s:' % n
start = time.time()
fp = open(n)
for i in xrange(count):
fp.seek(0)
val = json.load(fp)
end = time.time()
print ' ', end - start
|
Add a Python json benchmark.
Alas, it's about 3x faster than us, due to being written in C.
#!/usr/bin/env python
import json, sys, time
count = int(sys.argv[1])
for n in sys.argv[2:]:
print '%s:' % n
start = time.time()
fp = open(n)
for i in xrange(count):
fp.seek(0)
val = json.load(fp)
end = time.time()
print ' ', end - start
|
<commit_before><commit_msg>Add a Python json benchmark.
Alas, it's about 3x faster than us, due to being written in C.<commit_after>#!/usr/bin/env python
import json, sys, time
count = int(sys.argv[1])
for n in sys.argv[2:]:
print '%s:' % n
start = time.time()
fp = open(n)
for i in xrange(count):
fp.seek(0)
val = json.load(fp)
end = time.time()
print ' ', end - start
|
|
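For comparison, a rough Python 3 port of the benchmark above; time.perf_counter and range replace time.time and xrange (a sketch, not part of the original commit):
#!/usr/bin/env python3
import json, sys, time
count = int(sys.argv[1])
for name in sys.argv[2:]:
    print('%s:' % name)
    start = time.perf_counter()  # monotonic timer, better suited to benchmarks
    with open(name) as fp:
        for _ in range(count):
            fp.seek(0)
            val = json.load(fp)
    print('  ', time.perf_counter() - start)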
6b2cb0bfc41a26eb5cb259a66feb937a8564c705
|
ws-tests/test_invalid_merge.py
|
ws-tests/test_invalid_merge.py
|
#!/usr/bin/env python
from opentreetesting import test_http_json_method, config
import sys
DOMAIN = config('host', 'apihost')
SUBMIT_URI = DOMAIN + '/merge/v1/master/master'
data = {
'auth_token': 'bogus'
}
if test_http_json_method(SUBMIT_URI,
'PUT',
data=data,
expected_status=400):
sys.exit(0)
sys.exit(1)
|
Add a test for an invalid merge
|
Add a test for an invalid merge
|
Python
|
bsd-2-clause
|
OpenTreeOfLife/phylesystem-api,OpenTreeOfLife/phylesystem-api,OpenTreeOfLife/phylesystem-api
|
Add a test for an invalid merge
|
#!/usr/bin/env python
from opentreetesting import test_http_json_method, config
import sys
DOMAIN = config('host', 'apihost')
SUBMIT_URI = DOMAIN + '/merge/v1/master/master'
data = {
'auth_token': 'bogus'
}
if test_http_json_method(SUBMIT_URI,
'PUT',
data=data,
expected_status=400):
sys.exit(0)
sys.exit(1)
|
<commit_before><commit_msg>Add a test for an invalid merge<commit_after>
|
#!/usr/bin/env python
from opentreetesting import test_http_json_method, config
import sys
DOMAIN = config('host', 'apihost')
SUBMIT_URI = DOMAIN + '/merge/v1/master/master'
data = {
'auth_token': 'bogus'
}
if test_http_json_method(SUBMIT_URI,
'PUT',
data=data,
expected_status=400):
sys.exit(0)
sys.exit(1)
|
Add a test for an invalid merge
#!/usr/bin/env python
from opentreetesting import test_http_json_method, config
import sys
DOMAIN = config('host', 'apihost')
SUBMIT_URI = DOMAIN + '/merge/v1/master/master'
data = {
'auth_token': 'bogus'
}
if test_http_json_method(SUBMIT_URI,
'PUT',
data=data,
expected_status=400):
sys.exit(0)
sys.exit(1)
|
<commit_before><commit_msg>Add a test for an invalid merge<commit_after>#!/usr/bin/env python
from opentreetesting import test_http_json_method, config
import sys
DOMAIN = config('host', 'apihost')
SUBMIT_URI = DOMAIN + '/merge/v1/master/master'
data = {
'auth_token': 'bogus'
}
if test_http_json_method(SUBMIT_URI,
'PUT',
data=data,
expected_status=400):
sys.exit(0)
sys.exit(1)
|
|
be7226c8537809dd551ec68c690cbb6b2cca5a7c
|
yaml_test/ex7_yam_json_read.py
|
yaml_test/ex7_yam_json_read.py
|
import yaml
import json
from pprint import pprint
def output_format(my_list, my_str):
'''
Make the output easier to read
'''
print '\n\n'
print '#'*3
print '#'*3+my_str
print '#'*3
pprint(my_list)
def main():
'''
Read YAML and JSON files. Pretty print to standard out
'''
yaml_file = 'my_test.yml'
    json_file = 'my_test.json'
    with open(yaml_file) as f:
        yaml_list = yaml.load(f)
    with open(json_file) as f:
        json_list = json.load(f)
    output_format(yaml_list, 'YAML')
    output_format(json_list, 'JSON')
    print '\n'
if __name__ == "__main__":
    main()
|
Add YAML & JSON Read script
|
Add YAML & JSON Read script
|
Python
|
apache-2.0
|
terblac/mypynetcourse
|
Add YAML & JSON Read script
|
import yaml
import json
from pprint import pprint
def output_format(my_list, my_str):
'''
Make the output easier to read
'''
print '\n\n'
print '#'*3
print '#'*3+my_str
print '#'*3
pprint(my_list)
def main():
'''
Read YAML and JSON files. Pretty print to standard out
'''
yaml_file = 'my_test.yml'
    json_file = 'my_test.json'
    with open(yaml_file) as f:
        yaml_list = yaml.load(f)
    with open(json_file) as f:
        json_list = json.load(f)
    output_format(yaml_list, 'YAML')
    output_format(json_list, 'JSON')
    print '\n'
if __name__ == "__main__":
    main()
|
<commit_before><commit_msg>Add YAML & JSON Read script<commit_after>
|
import yaml
import json
from pprint import pprint
def output_format(my_list, my_str):
'''
Make the output easier to read
'''
print '\n\n'
print '#'*3
print '#'*3+my_str
print '#'*3
pprint(my_list)
def main():
'''
Read YAML and JSON files. Pretty print to standard out
'''
yaml_file = 'my_test.yml'
    json_file = 'my_test.json'
    with open(yaml_file) as f:
        yaml_list = yaml.load(f)
    with open(json_file) as f:
        json_list = json.load(f)
    output_format(yaml_list, 'YAML')
    output_format(json_list, 'JSON')
    print '\n'
if __name__ == "__main__":
    main()
|
Add YAML & JSON Read script
import yaml
import json
from pprint import pprint
def output_format(my_list, my_str):
'''
Make the output easier to read
'''
print '\n\n'
print '#'*3
print '#'*3+my_str
print '#'*3
pprint(my_list)
def main():
'''
Read YAML and JSON files. Pretty print to standard out
'''
yaml_file = 'my_test.yml'
json_file= 'my_test.json'
with open(yaml_file) as f:
yaml_list = yaml.load(f)
with open(json_file) as f:
json_list = json.load(f)
output_format(yaml_list,'YAML')
output_format(json_list,'JSON')
print'\n'
if __name__=="__main__":
main()
|
<commit_before><commit_msg>Add YAML & JSON Read script<commit_after>
import yaml
import json
from pprint import pprint
def output_format(my_list, my_str):
'''
Make the output easier to read
'''
print '\n\n'
print '#'*3
print '#'*3+my_str
print '#'*3
pprint(my_list)
def main():
'''
Read YAML and JSON files. Pretty print to standard out
'''
yaml_file = 'my_test.yml'
    json_file = 'my_test.json'
    with open(yaml_file) as f:
        yaml_list = yaml.load(f)
    with open(json_file) as f:
        json_list = json.load(f)
    output_format(yaml_list, 'YAML')
    output_format(json_list, 'JSON')
    print '\n'
if __name__ == "__main__":
    main()
|
|
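One caveat about the script above: yaml.load without an explicit Loader is deprecated in later PyYAML releases and can construct arbitrary Python objects from tagged input. A safer variant of the loading step, assuming the files contain plain data only:
import yaml
with open('my_test.yml') as f:
    yaml_list = yaml.safe_load(f)  # safe_load rejects arbitrary object tags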
15de4bbd6ef7e64a7b824eebf9cefb14c474baaf
|
src/api/imgur/imgur_api.py
|
src/api/imgur/imgur_api.py
|
from models.image import Image
from utils.utils import tidy_up_url
class ImgurAPI:
_IMGUR_PARAMS = {
"client_id": "",
"api_calls_limits": {
"user_limit": -1,
"user_remaining": -1,
"user_reset_timestamp": -1,
"client_limit": -1,
"client_remaining": -1
}
}
def __init__(self):
pass
@staticmethod
def get_image_urls(post: dict) -> list:
url = tidy_up_url(post.get("url"))
imgur_id = url[url.rfind("/") + 1:]
if "/gallery/" in url:
image_urls = ImgurAPI._get_gallery_urls(imgur_id)
elif "/a/" in url:
image_urls = ImgurAPI._get_album_urls(imgur_id)
else:
# This is a URL with no gallery, album or extension
image_urls = ImgurAPI._get_simple_imgur_url(imgur_id)
return image_urls
@staticmethod
def _get_simple_imgur_url(imgur_id: str) -> list:
raise NotImplementedError
@staticmethod
def _get_album_urls(imgur_id: str) -> list:
raise NotImplementedError
@staticmethod
def _get_gallery_urls(imgur_id: str) -> list:
raise NotImplementedError
|
Add an ImgurAPI simple interface
|
Add an ImgurAPI simple interface
This is just an interface that performs RESTful operations against the Imgur
API.
|
Python
|
apache-2.0
|
CharlieCorner/pymage_downloader
|
Add an ImgurAPI simple interface
This is just an interface that performs RESTful operations against the Imgur
API.
|
from models.image import Image
from utils.utils import tidy_up_url
class ImgurAPI:
_IMGUR_PARAMS = {
"client_id": "",
"api_calls_limits": {
"user_limit": -1,
"user_remaining": -1,
"user_reset_timestamp": -1,
"client_limit": -1,
"client_remaining": -1
}
}
def __init__(self):
pass
@staticmethod
def get_image_urls(post: dict) -> list:
url = tidy_up_url(post.get("url"))
imgur_id = url[url.rfind("/") + 1:]
if "/gallery/" in url:
image_urls = ImgurAPI._get_gallery_urls(imgur_id)
elif "/a/" in url:
image_urls = ImgurAPI._get_album_urls(imgur_id)
else:
# This is a URL with no gallery, album or extension
image_urls = ImgurAPI._get_simple_imgur_url(imgur_id)
return image_urls
@staticmethod
def _get_simple_imgur_url(imgur_id: str) -> list:
raise NotImplementedError
@staticmethod
def _get_album_urls(imgur_id: str) -> list:
raise NotImplementedError
@staticmethod
def _get_gallery_urls(imgur_id: str) -> list:
raise NotImplementedError
|
<commit_before><commit_msg>Add an ImgurAPI simple interface
This is just an interface that performs RESTful operations against the Imgur
API.<commit_after>
|
from models.image import Image
from utils.utils import tidy_up_url
class ImgurAPI:
_IMGUR_PARAMS = {
"client_id": "",
"api_calls_limits": {
"user_limit": -1,
"user_remaining": -1,
"user_reset_timestamp": -1,
"client_limit": -1,
"client_remaining": -1
}
}
def __init__(self):
pass
@staticmethod
def get_image_urls(post: dict) -> list:
url = tidy_up_url(post.get("url"))
imgur_id = url[url.rfind("/") + 1:]
if "/gallery/" in url:
image_urls = ImgurAPI._get_gallery_urls(imgur_id)
elif "/a/" in url:
image_urls = ImgurAPI._get_album_urls(imgur_id)
else:
# This is a URL with no gallery, album or extension
image_urls = ImgurAPI._get_simple_imgur_url(imgur_id)
return image_urls
@staticmethod
def _get_simple_imgur_url(imgur_id: str) -> list:
raise NotImplementedError
@staticmethod
def _get_album_urls(imgur_id: str) -> list:
raise NotImplementedError
@staticmethod
def _get_gallery_urls(imgur_id: str) -> list:
raise NotImplementedError
|
Add an ImgurAPI simple interface
This is just an interface that performs RESTful operations against the Imgur
API.
from models.image import Image
from utils.utils import tidy_up_url
class ImgurAPI:
_IMGUR_PARAMS = {
"client_id": "",
"api_calls_limits": {
"user_limit": -1,
"user_remaining": -1,
"user_reset_timestamp": -1,
"client_limit": -1,
"client_remaining": -1
}
}
def __init__(self):
pass
@staticmethod
def get_image_urls(post: dict) -> list:
url = tidy_up_url(post.get("url"))
imgur_id = url[url.rfind("/") + 1:]
if "/gallery/" in url:
image_urls = ImgurAPI._get_gallery_urls(imgur_id)
elif "/a/" in url:
image_urls = ImgurAPI._get_album_urls(imgur_id)
else:
# This is a URL with no gallery, album or extension
image_urls = ImgurAPI._get_simple_imgur_url(imgur_id)
return image_urls
@staticmethod
def _get_simple_imgur_url(imgur_id: str) -> list:
raise NotImplementedError
@staticmethod
def _get_album_urls(imgur_id: str) -> list:
raise NotImplementedError
@staticmethod
def _get_gallery_urls(imgur_id: str) -> list:
raise NotImplementedError
|
<commit_before><commit_msg>Add an ImgurAPI simple interface
This is just an interface that performs RESTful operations against the Imgur
API.<commit_after>from models.image import Image
from utils.utils import tidy_up_url
class ImgurAPI:
_IMGUR_PARAMS = {
"client_id": "",
"api_calls_limits": {
"user_limit": -1,
"user_remaining": -1,
"user_reset_timestamp": -1,
"client_limit": -1,
"client_remaining": -1
}
}
def __init__(self):
pass
@staticmethod
def get_image_urls(post: dict) -> list:
url = tidy_up_url(post.get("url"))
imgur_id = url[url.rfind("/") + 1:]
if "/gallery/" in url:
image_urls = ImgurAPI._get_gallery_urls(imgur_id)
elif "/a/" in url:
image_urls = ImgurAPI._get_album_urls(imgur_id)
else:
# This is a URL with no gallery, album or extension
image_urls = ImgurAPI._get_simple_imgur_url(imgur_id)
return image_urls
@staticmethod
def _get_simple_imgur_url(imgur_id: str) -> list:
raise NotImplementedError
@staticmethod
def _get_album_urls(imgur_id: str) -> list:
raise NotImplementedError
@staticmethod
def _get_gallery_urls(imgur_id: str) -> list:
raise NotImplementedError
|
|
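As a sketch of where the NotImplementedError stubs above are headed: album images can be fetched from Imgur's v3 REST API with a registered client ID. The endpoint and field names below follow that API, but treat the exact response shape as an assumption rather than a confirmed part of this project:
import requests  # assumption: requests is available to this project

@staticmethod
def _get_album_urls(imgur_id: str) -> list:
    # GET /3/album/{id}/images, authenticated with a Client-ID header.
    resp = requests.get(
        'https://api.imgur.com/3/album/%s/images' % imgur_id,
        headers={'Authorization': 'Client-ID %s' % ImgurAPI._IMGUR_PARAMS['client_id']})
    resp.raise_for_status()
    return [img['link'] for img in resp.json()['data']]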
a72ba142d7765ee551f74629507211ed48f2c7a6
|
osf/migrations/0074_auto_20171207_1331.py
|
osf/migrations/0074_auto_20171207_1331.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-12-07 19:31
from __future__ import unicode_literals
from django.db import migrations, models
import osf.models.validators
class Migration(migrations.Migration):
dependencies = [
('osf', '0073_citationstyle_has_bibliography'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='content',
field=models.TextField(validators=[osf.models.validators.CommentMaxLength(1000), osf.models.validators.string_required]),
),
]
|
Update comment maxlength to 1000 characters
|
Update comment maxlength to 1000 characters
|
Python
|
apache-2.0
|
CenterForOpenScience/osf.io,TomBaxter/osf.io,pattisdr/osf.io,CenterForOpenScience/osf.io,Johnetordoff/osf.io,brianjgeiger/osf.io,binoculars/osf.io,cslzchen/osf.io,Johnetordoff/osf.io,cslzchen/osf.io,chennan47/osf.io,mattclark/osf.io,sloria/osf.io,aaxelb/osf.io,HalcyonChimera/osf.io,icereval/osf.io,leb2dg/osf.io,chennan47/osf.io,cslzchen/osf.io,aaxelb/osf.io,baylee-d/osf.io,leb2dg/osf.io,aaxelb/osf.io,erinspace/osf.io,brianjgeiger/osf.io,laurenrevere/osf.io,brianjgeiger/osf.io,laurenrevere/osf.io,TomBaxter/osf.io,chennan47/osf.io,mattclark/osf.io,felliott/osf.io,felliott/osf.io,crcresearch/osf.io,crcresearch/osf.io,brianjgeiger/osf.io,sloria/osf.io,HalcyonChimera/osf.io,icereval/osf.io,binoculars/osf.io,crcresearch/osf.io,Johnetordoff/osf.io,pattisdr/osf.io,sloria/osf.io,caseyrollins/osf.io,caseyrollins/osf.io,adlius/osf.io,aaxelb/osf.io,felliott/osf.io,laurenrevere/osf.io,adlius/osf.io,TomBaxter/osf.io,HalcyonChimera/osf.io,felliott/osf.io,leb2dg/osf.io,adlius/osf.io,CenterForOpenScience/osf.io,baylee-d/osf.io,adlius/osf.io,baylee-d/osf.io,mfraezz/osf.io,CenterForOpenScience/osf.io,cslzchen/osf.io,Johnetordoff/osf.io,HalcyonChimera/osf.io,mfraezz/osf.io,saradbowman/osf.io,erinspace/osf.io,binoculars/osf.io,leb2dg/osf.io,mfraezz/osf.io,pattisdr/osf.io,icereval/osf.io,caseyrollins/osf.io,mattclark/osf.io,saradbowman/osf.io,erinspace/osf.io,mfraezz/osf.io
|
Update comment maxlength to 1000 characters
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-12-07 19:31
from __future__ import unicode_literals
from django.db import migrations, models
import osf.models.validators
class Migration(migrations.Migration):
dependencies = [
('osf', '0073_citationstyle_has_bibliography'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='content',
field=models.TextField(validators=[osf.models.validators.CommentMaxLength(1000), osf.models.validators.string_required]),
),
]
|
<commit_before><commit_msg>Update comment maxlength to 1000 characters<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-12-07 19:31
from __future__ import unicode_literals
from django.db import migrations, models
import osf.models.validators
class Migration(migrations.Migration):
dependencies = [
('osf', '0073_citationstyle_has_bibliography'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='content',
field=models.TextField(validators=[osf.models.validators.CommentMaxLength(1000), osf.models.validators.string_required]),
),
]
|
Update comment maxlength to 1000 characters
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-12-07 19:31
from __future__ import unicode_literals
from django.db import migrations, models
import osf.models.validators
class Migration(migrations.Migration):
dependencies = [
('osf', '0073_citationstyle_has_bibliography'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='content',
field=models.TextField(validators=[osf.models.validators.CommentMaxLength(1000), osf.models.validators.string_required]),
),
]
|
<commit_before><commit_msg>Update comment maxlength to 1000 characters<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-12-07 19:31
from __future__ import unicode_literals
from django.db import migrations, models
import osf.models.validators
class Migration(migrations.Migration):
dependencies = [
('osf', '0073_citationstyle_has_bibliography'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='content',
field=models.TextField(validators=[osf.models.validators.CommentMaxLength(1000), osf.models.validators.string_required]),
),
]
|
|
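The migration only re-points the content field at CommentMaxLength(1000); the validator itself lives in osf.models.validators and is not shown here. A plausible shape for such a callable-class validator (hypothetical, for illustration only):
from django.core.exceptions import ValidationError
from django.utils.deconstruct import deconstructible

@deconstructible  # migrations must be able to serialize the validator
class CommentMaxLength(object):
    def __init__(self, max_length):
        self.max_length = max_length

    def __call__(self, value):
        if len(value) > self.max_length:
            raise ValidationError('Comments cannot be longer than %d characters.' % self.max_length)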
b5cfc9400e13caa3bb73b3e5d8e8eb8c68eff147
|
build/third_party_license_fmt.py
|
build/third_party_license_fmt.py
|
#
# This script generates the license information of the Maven dependencies
# for the about.html page. You would update this information every time you
# update some dependency in the pom.xml configuration for a release. These
# are the steps:
#
# 1. Copy this script and the pom.xml in some folder (you do not have to
# do this but this will keep generated things out of version control)
#
# 2. Collect the third party licenses with the Maven license plugin
#    (you may have to change the build target from pom to jar in order
#    for this to work):
#
# mvn license:add-third-party
#
# This will generate a `THIRD-PARTY.txt` file somewhere deep in the
# target folder.
#
# 3. Copy the generated file next to this script and execute the script.
#
# 4. Copy the script output into the about.html.
#
def main():
with open('THIRD-PARTY.txt', 'r', encoding='utf-8') as f:
for line in f:
s = line.strip()
parts = []
for p in s.split('('):
for q in p.split(')'):
if q.strip() != '':
parts.append(q.strip())
if len(parts) != 3:
continue
license: str = parts[0].strip()
license_prefix = ''
license_suffix = ''
if not license.lower().startswith('the'):
license_prefix = 'the '
if 'license' not in license.lower():
license_suffix = ' license'
license = f'{license_prefix}<strong>{license}</strong>{license_suffix}'
title = parts[1].strip()
info = parts[2].split(' - ')
if len(info) != 2:
continue
url = info[1].strip()
print(f'\n<h2>{title}</h2>')
print(f'<p>The <a href="{url}">{title}</a> library')
print(f'is licensed under {license}.')
print('See the project website for further information.</p>')
if __name__ == '__main__':
main()
|
Add the script for generating
|
Add the script for generating
the legal info for the Maven dependencies
|
Python
|
mpl-2.0
|
GreenDelta/epd-editor,GreenDelta/epd-editor,GreenDelta/epd-editor
|
Add the script for generating
the legal info for the Maven dependencies
|
#
# This script generates the license information of the Maven dependencies
# for the about.html page. You would update this information every time you
# update some dependency in the pom.xml configuration for a release. These
# are the steps:
#
# 1. Copy this script and the pom.xml in some folder (you do not have to
# do this but this will keep generated things out of version control)
#
# 2. Collect the third party licenses with the Maven license plugin
#    (you may have to change the build target from pom to jar in order
#    for this to work):
#
# mvn license:add-third-party
#
# This will generate a `THIRD-PARTY.txt` file somewhere deep in the
# target folder.
#
# 3. Copy the generated file next to this script and execute the script.
#
# 4. Copy the script output into the about.html.
#
def main():
with open('THIRD-PARTY.txt', 'r', encoding='utf-8') as f:
for line in f:
s = line.strip()
parts = []
for p in s.split('('):
for q in p.split(')'):
if q.strip() != '':
parts.append(q.strip())
if len(parts) != 3:
continue
license: str = parts[0].strip()
license_prefix = ''
license_suffix = ''
if not license.lower().startswith('the'):
license_prefix = 'the '
if 'license' not in license.lower():
license_suffix = ' license'
license = f'{license_prefix}<strong>{license}</strong>{license_suffix}'
title = parts[1].strip()
info = parts[2].split(' - ')
if len(info) != 2:
continue
url = info[1].strip()
print(f'\n<h2>{title}</h2>')
print(f'<p>The <a href="{url}">{title}</a> library')
print(f'is licensed under {license}.')
print('See the project website for further information.</p>')
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add the script for generating
the legal info for the Maven dependencies<commit_after>
|
#
# This script generates the license information of the Maven dependencies
# for the about.html page. You would update this information every time you
# update some dependency in the pom.xml configuration for a release. These
# are the steps:
#
# 1. Copy this script and the pom.xml in some folder (you do not have to
# do this but this will keep generated things out of version control)
#
# 2. Collect the third party licenses with the Maven license plugin
#    (you may have to change the build target from pom to jar in order
#    for this to work):
#
# mvn license:add-third-party
#
# This will generate a `THIRD-PARTY.txt` file somewhere deep in the
# target folder.
#
# 3. Copy the generated file next to this script and execute the script.
#
# 4. Copy the script output into the about.html.
#
def main():
with open('THIRD-PARTY.txt', 'r', encoding='utf-8') as f:
for line in f:
s = line.strip()
parts = []
for p in s.split('('):
for q in p.split(')'):
if q.strip() != '':
parts.append(q.strip())
if len(parts) != 3:
continue
license: str = parts[0].strip()
license_prefix = ''
license_suffix = ''
if not license.lower().startswith('the'):
license_prefix = 'the '
if 'license' not in license.lower():
license_suffix = ' license'
license = f'{license_prefix}<strong>{license}</strong>{license_suffix}'
title = parts[1].strip()
info = parts[2].split(' - ')
if len(info) != 2:
continue
url = info[1].strip()
print(f'\n<h2>{title}</h2>')
print(f'<p>The <a href="{url}">{title}</a> library')
print(f'is licensed under {license}.')
print('See the project website for further information.</p>')
if __name__ == '__main__':
main()
|
Add the script for generating
the legal info for the Maven dependencies
#
# This script generates the license information of the Maven dependencies
# for the about.html page. You would update this information every time you
# update some dependency in the pom.xml configuration for a release. These
# are the steps:
#
# 1. Copy this script and the pom.xml in some folder (you do not have to
# do this but this will keep generated things out of version control)
#
# 2. Collect the third party licenses with the Maven license plugin
#    (you may have to change the build target from pom to jar in order
#    for this to work):
#
# mvn license:add-third-party
#
# This will generate a `THIRD-PARTY.txt` file somewhere deep in the
# target folder.
#
# 3. Copy the generated file next to this script and execute the script.
#
# 4. Copy the script output into the about.html.
#
def main():
with open('THIRD-PARTY.txt', 'r', encoding='utf-8') as f:
for line in f:
s = line.strip()
parts = []
for p in s.split('('):
for q in p.split(')'):
if q.strip() != '':
parts.append(q.strip())
if len(parts) != 3:
continue
license: str = parts[0].strip()
license_prefix = ''
license_suffix = ''
if not license.lower().startswith('the'):
license_prefix = 'the '
if 'license' not in license.lower():
license_suffix = ' license'
license = f'{license_prefix}<strong>{license}</strong>{license_suffix}'
title = parts[1].strip()
info = parts[2].split(' - ')
if len(info) != 2:
continue
url = info[1].strip()
print(f'\n<h2>{title}</h2>')
print(f'<p>The <a href="{url}">{title}</a> library')
print(f'is licensed under {license}.')
print('See the project website for further information.</p>')
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add the script for generating
the legal info for the Maven dependencies<commit_after>#
# This script generates the license information of the Maven dependencies
# for the about.html page. You would update this information every time you
# update some dependency in the pom.xml configuration for a release. These
# are the steps:
#
# 1. Copy this script and the pom.xml in some folder (you do not have to
# do this but this will keep generated things out of version control)
#
# 2. Collect the third party licenses with the Maven license plugin
#    (you may have to change the build target from pom to jar in order
#    for this to work):
#
# mvn license:add-third-party
#
# This will generate a `THIRD-PARTY.txt` file somewhere deep in the
# target folder.
#
# 3. Copy the generated file next to this script and execute the script.
#
# 4. Copy the script output into the about.html.
#
def main():
with open('THIRD-PARTY.txt', 'r', encoding='utf-8') as f:
for line in f:
s = line.strip()
parts = []
for p in s.split('('):
for q in p.split(')'):
if q.strip() != '':
parts.append(q.strip())
if len(parts) != 3:
continue
license: str = parts[0].strip()
license_prefix = ''
license_suffix = ''
if not license.lower().startswith('the'):
license_prefix = 'the '
if 'license' not in license.lower():
license_suffix = ' license'
license = f'{license_prefix}<strong>{license}</strong>{license_suffix}'
title = parts[1].strip()
info = parts[2].split(' - ')
if len(info) != 2:
continue
url = info[1].strip()
print(f'\n<h2>{title}</h2>')
print(f'<p>The <a href="{url}">{title}</a> library')
print(f'is licensed under {license}.')
print('See the project website for further information.</p>')
if __name__ == '__main__':
main()
|
|
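For a sense of the expected input, a typical line emitted by mvn license:add-third-party looks like the sample below (the coordinates and URL are illustrative, and the plugin's exact output format may vary). The split('(')/split(')') pass above reduces it to three parts — license, title, and 'coordinates - url':
# Illustrative THIRD-PARTY.txt line:
sample = ('(The Apache Software License, Version 2.0) Gson '
          '(com.google.code.gson:gson:2.8.6 - https://github.com/google/gson)')
# parts would become:
#   ['The Apache Software License, Version 2.0',
#    'Gson',
#    'com.google.code.gson:gson:2.8.6 - https://github.com/google/gson']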
e37b6363749cc4194f63d37ce6d5b3c9499a460c
|
Examples/Infovis/Python/simple_selection.py
|
Examples/Infovis/Python/simple_selection.py
|
from vtk import *
source = vtkRandomGraphSource()
source.SetNumberOfVertices(25)
source.SetStartWithTree(True)
source.SetIncludeEdgeWeights(True)
view1 = vtkGraphLayoutView()
view1.AddRepresentationFromInputConnection(source.GetOutputPort())
view1.SetColorVertices(True)
view1.SetEdgeColorArrayName("edge weight")
view1.SetColorEdges(True)
view1.SetLayoutStrategyToSimple2D()
view2 = vtkGraphLayoutView()
view2.AddRepresentationFromInputConnection(source.GetOutputPort())
view2.SetColorVertices(True)
view2.SetEdgeColorArrayName("edge weight")
view2.SetColorEdges(True)
view2.SetLayoutStrategyToTree()
# Create an annotation link and set both views to use it
annotationLink = vtkAnnotationLink()
view1.GetRepresentation(0).SetAnnotationLink(annotationLink)
view2.GetRepresentation(0).SetAnnotationLink(annotationLink)
updater = vtkViewUpdater()
updater.AddView(view1)
updater.AddView(view2)
theme = vtkViewTheme.CreateNeonTheme()
view1.ApplyViewTheme(theme)
view2.ApplyViewTheme(theme)
theme.FastDelete()
view1.GetRenderWindow().SetSize(600, 600)
view1.ResetCamera()
view1.Render()
view2.GetRenderWindow().SetSize(600, 600)
view2.ResetCamera()
view2.Render()
view1.GetInteractor().Start()
|
Add a simple selection example.
|
ENH: Add a simple selection example.
|
Python
|
bsd-3-clause
|
demarle/VTK,biddisco/VTK,hendradarwin/VTK,jmerkow/VTK,ashray/VTK-EVM,aashish24/VTK-old,cjh1/VTK,msmolens/VTK,sankhesh/VTK,daviddoria/PointGraphsPhase1,berendkleinhaneveld/VTK,SimVascular/VTK,demarle/VTK,Wuteyan/VTK,ashray/VTK-EVM,mspark93/VTK,spthaolt/VTK,johnkit/vtk-dev,demarle/VTK,arnaudgelas/VTK,biddisco/VTK,hendradarwin/VTK,gram526/VTK,biddisco/VTK,berendkleinhaneveld/VTK,arnaudgelas/VTK,collects/VTK,candy7393/VTK,sankhesh/VTK,johnkit/vtk-dev,jmerkow/VTK,sankhesh/VTK,Wuteyan/VTK,mspark93/VTK,mspark93/VTK,keithroe/vtkoptix,arnaudgelas/VTK,mspark93/VTK,sumedhasingla/VTK,jmerkow/VTK,jmerkow/VTK,johnkit/vtk-dev,sankhesh/VTK,naucoin/VTKSlicerWidgets,collects/VTK,jeffbaumes/jeffbaumes-vtk,berendkleinhaneveld/VTK,johnkit/vtk-dev,keithroe/vtkoptix,candy7393/VTK,gram526/VTK,naucoin/VTKSlicerWidgets,hendradarwin/VTK,arnaudgelas/VTK,naucoin/VTKSlicerWidgets,candy7393/VTK,berendkleinhaneveld/VTK,demarle/VTK,collects/VTK,keithroe/vtkoptix,arnaudgelas/VTK,sumedhasingla/VTK,jmerkow/VTK,candy7393/VTK,cjh1/VTK,biddisco/VTK,spthaolt/VTK,SimVascular/VTK,daviddoria/PointGraphsPhase1,jeffbaumes/jeffbaumes-vtk,SimVascular/VTK,demarle/VTK,cjh1/VTK,mspark93/VTK,candy7393/VTK,spthaolt/VTK,gram526/VTK,sankhesh/VTK,keithroe/vtkoptix,biddisco/VTK,aashish24/VTK-old,jeffbaumes/jeffbaumes-vtk,demarle/VTK,berendkleinhaneveld/VTK,sumedhasingla/VTK,mspark93/VTK,ashray/VTK-EVM,aashish24/VTK-old,gram526/VTK,jeffbaumes/jeffbaumes-vtk,naucoin/VTKSlicerWidgets,hendradarwin/VTK,gram526/VTK,mspark93/VTK,keithroe/vtkoptix,candy7393/VTK,daviddoria/PointGraphsPhase1,candy7393/VTK,demarle/VTK,keithroe/vtkoptix,Wuteyan/VTK,daviddoria/PointGraphsPhase1,spthaolt/VTK,msmolens/VTK,Wuteyan/VTK,ashray/VTK-EVM,hendradarwin/VTK,collects/VTK,biddisco/VTK,cjh1/VTK,jeffbaumes/jeffbaumes-vtk,sankhesh/VTK,arnaudgelas/VTK,johnkit/vtk-dev,SimVascular/VTK,spthaolt/VTK,msmolens/VTK,sumedhasingla/VTK,collects/VTK,aashish24/VTK-old,candy7393/VTK,msmolens/VTK,hendradarwin/VTK,ashray/VTK-EVM,jmerkow/VTK,biddisco/VTK,gram526/VTK,sankhesh/VTK,msmolens/VTK,SimVascular/VTK,collects/VTK,mspark93/VTK,berendkleinhaneveld/VTK,ashray/VTK-EVM,ashray/VTK-EVM,naucoin/VTKSlicerWidgets,sumedhasingla/VTK,spthaolt/VTK,SimVascular/VTK,sumedhasingla/VTK,ashray/VTK-EVM,keithroe/vtkoptix,hendradarwin/VTK,daviddoria/PointGraphsPhase1,jmerkow/VTK,sumedhasingla/VTK,daviddoria/PointGraphsPhase1,Wuteyan/VTK,naucoin/VTKSlicerWidgets,aashish24/VTK-old,demarle/VTK,johnkit/vtk-dev,spthaolt/VTK,SimVascular/VTK,aashish24/VTK-old,gram526/VTK,berendkleinhaneveld/VTK,msmolens/VTK,Wuteyan/VTK,johnkit/vtk-dev,cjh1/VTK,sankhesh/VTK,jeffbaumes/jeffbaumes-vtk,msmolens/VTK,Wuteyan/VTK,keithroe/vtkoptix,sumedhasingla/VTK,gram526/VTK,cjh1/VTK,jmerkow/VTK,SimVascular/VTK,msmolens/VTK
|
ENH: Add a simple selection example.
|
from vtk import *
source = vtkRandomGraphSource()
source.SetNumberOfVertices(25)
source.SetStartWithTree(True)
source.SetIncludeEdgeWeights(True)
view1 = vtkGraphLayoutView()
view1.AddRepresentationFromInputConnection(source.GetOutputPort())
view1.SetColorVertices(True)
view1.SetEdgeColorArrayName("edge weight")
view1.SetColorEdges(True)
view1.SetLayoutStrategyToSimple2D()
view2 = vtkGraphLayoutView()
view2.AddRepresentationFromInputConnection(source.GetOutputPort())
view2.SetColorVertices(True)
view2.SetEdgeColorArrayName("edge weight")
view2.SetColorEdges(True)
view2.SetLayoutStrategyToTree()
# Create an annotation link and set both views to use it
annotationLink = vtkAnnotationLink()
view1.GetRepresentation(0).SetAnnotationLink(annotationLink)
view2.GetRepresentation(0).SetAnnotationLink(annotationLink)
updater = vtkViewUpdater()
updater.AddView(view1)
updater.AddView(view2)
theme = vtkViewTheme.CreateNeonTheme()
view1.ApplyViewTheme(theme)
view2.ApplyViewTheme(theme)
theme.FastDelete()
view1.GetRenderWindow().SetSize(600, 600)
view1.ResetCamera()
view1.Render()
view2.GetRenderWindow().SetSize(600, 600)
view2.ResetCamera()
view2.Render()
view1.GetInteractor().Start()
|
<commit_before><commit_msg>ENH: Add a simple selection example.<commit_after>
|
from vtk import *
source = vtkRandomGraphSource()
source.SetNumberOfVertices(25)
source.SetStartWithTree(True)
source.SetIncludeEdgeWeights(True)
view1 = vtkGraphLayoutView()
view1.AddRepresentationFromInputConnection(source.GetOutputPort())
view1.SetColorVertices(True)
view1.SetEdgeColorArrayName("edge weight")
view1.SetColorEdges(True)
view1.SetLayoutStrategyToSimple2D()
view2 = vtkGraphLayoutView()
view2.AddRepresentationFromInputConnection(source.GetOutputPort())
view2.SetColorVertices(True)
view2.SetEdgeColorArrayName("edge weight")
view2.SetColorEdges(True)
view2.SetLayoutStrategyToTree()
# Create an annotation link and set both views to use it
annotationLink = vtkAnnotationLink()
view1.GetRepresentation(0).SetAnnotationLink(annotationLink)
view2.GetRepresentation(0).SetAnnotationLink(annotationLink)
updater = vtkViewUpdater()
updater.AddView(view1)
updater.AddView(view2)
theme = vtkViewTheme.CreateNeonTheme()
view1.ApplyViewTheme(theme)
view2.ApplyViewTheme(theme)
theme.FastDelete()
view1.GetRenderWindow().SetSize(600, 600)
view1.ResetCamera()
view1.Render()
view2.GetRenderWindow().SetSize(600, 600)
view2.ResetCamera()
view2.Render()
view1.GetInteractor().Start()
|
ENH: Add a simple selection example.
from vtk import *
source = vtkRandomGraphSource()
source.SetNumberOfVertices(25)
source.SetStartWithTree(True)
source.SetIncludeEdgeWeights(True)
view1 = vtkGraphLayoutView()
view1.AddRepresentationFromInputConnection(source.GetOutputPort())
view1.SetColorVertices(True)
view1.SetEdgeColorArrayName("edge weight")
view1.SetColorEdges(True)
view1.SetLayoutStrategyToSimple2D()
view2 = vtkGraphLayoutView()
view2.AddRepresentationFromInputConnection(source.GetOutputPort())
view2.SetColorVertices(True)
view2.SetEdgeColorArrayName("edge weight")
view2.SetColorEdges(True)
view2.SetLayoutStrategyToTree()
# Create an annotation link and set both views to use it
annotationLink = vtkAnnotationLink()
view1.GetRepresentation(0).SetAnnotationLink(annotationLink)
view2.GetRepresentation(0).SetAnnotationLink(annotationLink)
updater = vtkViewUpdater()
updater.AddView(view1)
updater.AddView(view2)
theme = vtkViewTheme.CreateNeonTheme()
view1.ApplyViewTheme(theme)
view2.ApplyViewTheme(theme)
theme.FastDelete()
view1.GetRenderWindow().SetSize(600, 600)
view1.ResetCamera()
view1.Render()
view2.GetRenderWindow().SetSize(600, 600)
view2.ResetCamera()
view2.Render()
view1.GetInteractor().Start()
|
<commit_before><commit_msg>ENH: Add a simple selection example.<commit_after>from vtk import *
source = vtkRandomGraphSource()
source.SetNumberOfVertices(25)
source.SetStartWithTree(True)
source.SetIncludeEdgeWeights(True)
view1 = vtkGraphLayoutView()
view1.AddRepresentationFromInputConnection(source.GetOutputPort())
view1.SetColorVertices(True)
view1.SetEdgeColorArrayName("edge weight")
view1.SetColorEdges(True)
view1.SetLayoutStrategyToSimple2D()
view2 = vtkGraphLayoutView()
view2.AddRepresentationFromInputConnection(source.GetOutputPort())
view2.SetColorVertices(True)
view2.SetEdgeColorArrayName("edge weight")
view2.SetColorEdges(True)
view2.SetLayoutStrategyToTree()
# Create an annotation link and set both views to use it
annotationLink = vtkAnnotationLink()
view1.GetRepresentation(0).SetAnnotationLink(annotationLink)
view2.GetRepresentation(0).SetAnnotationLink(annotationLink)
updater = vtkViewUpdater()
updater.AddView(view1)
updater.AddView(view2)
theme = vtkViewTheme.CreateNeonTheme()
view1.ApplyViewTheme(theme)
view2.ApplyViewTheme(theme)
theme.FastDelete()
view1.GetRenderWindow().SetSize(600, 600)
view1.ResetCamera()
view1.Render()
view2.GetRenderWindow().SetSize(600, 600)
view2.ResetCamera()
view2.Render()
view1.GetInteractor().Start()
|
|
3b8939567bc77407db0179d8e8e72551d1da3798
|
tests/test_cmds.py
|
tests/test_cmds.py
|
from . import TEST_DIR
from . import ccmtest
from ccmlib.cluster import Cluster
import subprocess
from six import print_
CLUSTER_PATH = TEST_DIR
class TestCCMCmd(ccmtest.Tester):
def __init__(self, *args, **kwargs):
ccmtest.Tester.__init__(self, *args, **kwargs)
class TestCCMCreate(TestCCMCmd):
def tearDown(self):
p = subprocess.Popen(['ccm', 'remove'])
p.wait()
def create_cmd(self, args=None, version='2.0.10'):
if args is None:
args = []
if version:
args = ['ccm', 'create', 'test', '-v', version] + args
else:
args = ['ccm', 'create', 'test'] + args
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return p
def validate_output(self, process):
stdout, stderr = process.communicate()
try:
print_("[OUT] %s" % stdout)
self.assertEqual(len(stderr), 0)
except AssertionError:
print_("[ERROR] %s" % stderr.strip())
raise
def cluster_create_version_test(self):
self.validate_output(self.create_cmd())
def cluster_create_populate_test(self):
        args = ['-n', '3']
self.validate_output(self.create_cmd(args))
|
Add initial tests for ccm cli
|
Add initial tests for ccm cli
|
Python
|
apache-2.0
|
pcmanus/ccm,AtwooTM/ccm,jeffjirsa/ccm,thobbs/ccm,tolbertam/ccm,mike-tr-adamson/ccm,spodkowinski/ccm,slivne/ccm,mike-tr-adamson/ccm,thobbs/ccm,mambocab/ccm,mikefero/ccm,mike-tr-adamson/ccm,mikefero/ccm,bcantoni/ccm,jorgebay/ccm,pombredanne/ccm,kishkaru/ccm,pcmanus/ccm,umitunal/ccm,aboudreault/ccm,tolbertam/ccm,josh-mckenzie/ccm,bcantoni/ccm,pcmanus/ccm,unusedPhD/ccm
|
Add initial tests for ccm cli
|
from . import TEST_DIR
from . import ccmtest
from ccmlib.cluster import Cluster
import subprocess
from six import print_
CLUSTER_PATH = TEST_DIR
class TestCCMCmd(ccmtest.Tester):
def __init__(self, *args, **kwargs):
ccmtest.Tester.__init__(self, *args, **kwargs)
class TestCCMCreate(TestCCMCmd):
def tearDown(self):
p = subprocess.Popen(['ccm', 'remove'])
p.wait()
def create_cmd(self, args=None, version='2.0.10'):
if args is None:
args = []
if version:
args = ['ccm', 'create', 'test', '-v', version] + args
else:
args = ['ccm', 'create', 'test'] + args
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return p
def validate_output(self, process):
stdout, stderr = process.communicate()
try:
print_("[OUT] %s" % stdout)
self.assertEqual(len(stderr), 0)
except AssertionError:
print_("[ERROR] %s" % stderr.strip())
raise
def cluster_create_version_test(self):
self.validate_output(self.create_cmd())
def cluster_create_populate_test(self):
        args = ['-n', '3']
self.validate_output(self.create_cmd(args))
|
<commit_before><commit_msg>Add initial tests for ccm cli<commit_after>
|
from . import TEST_DIR
from . import ccmtest
from ccmlib.cluster import Cluster
import subprocess
from six import print_
CLUSTER_PATH = TEST_DIR
class TestCCMCmd(ccmtest.Tester):
def __init__(self, *args, **kwargs):
ccmtest.Tester.__init__(self, *args, **kwargs)
class TestCCMCreate(TestCCMCmd):
def tearDown(self):
p = subprocess.Popen(['ccm', 'remove'])
p.wait()
def create_cmd(self, args=None, version='2.0.10'):
if args is None:
args = []
if version:
args = ['ccm', 'create', 'test', '-v', version] + args
else:
args = ['ccm', 'create', 'test'] + args
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return p
def validate_output(self, process):
stdout, stderr = process.communicate()
try:
print_("[OUT] %s" % stdout)
self.assertEqual(len(stderr), 0)
except AssertionError:
print_("[ERROR] %s" % stderr.strip())
raise
def cluster_create_version_test(self):
self.validate_output(self.create_cmd())
def cluster_create_populate_test(self):
        args = ['-n', '3']
self.validate_output(self.create_cmd(args))
|
Add initial tests for ccm cli
from . import TEST_DIR
from . import ccmtest
from ccmlib.cluster import Cluster
import subprocess
from six import print_
CLUSTER_PATH = TEST_DIR
class TestCCMCmd(ccmtest.Tester):
def __init__(self, *args, **kwargs):
ccmtest.Tester.__init__(self, *args, **kwargs)
class TestCCMCreate(TestCCMCmd):
def tearDown(self):
p = subprocess.Popen(['ccm', 'remove'])
p.wait()
def create_cmd(self, args=None, version='2.0.10'):
if args is None:
args = []
if version:
args = ['ccm', 'create', 'test', '-v', version] + args
else:
args = ['ccm', 'create', 'test'] + args
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return p
def validate_output(self, process):
stdout, stderr = process.communicate()
try:
print_("[OUT] %s" % stdout)
self.assertEqual(len(stderr), 0)
except AssertionError:
print_("[ERROR] %s" % stderr.strip())
raise
def cluster_create_version_test(self):
self.validate_output(self.create_cmd())
def cluster_create_populate_test(self):
        args = ['-n', '3']
self.validate_output(self.create_cmd(args))
|
<commit_before><commit_msg>Add initial tests for ccm cli<commit_after>from . import TEST_DIR
from . import ccmtest
from ccmlib.cluster import Cluster
import subprocess
from six import print_
CLUSTER_PATH = TEST_DIR
class TestCCMCmd(ccmtest.Tester):
def __init__(self, *args, **kwargs):
ccmtest.Tester.__init__(self, *args, **kwargs)
class TestCCMCreate(TestCCMCmd):
def tearDown(self):
p = subprocess.Popen(['ccm', 'remove'])
p.wait()
def create_cmd(self, args=None, version='2.0.10'):
if args is None:
args = []
if version:
args = ['ccm', 'create', 'test', '-v', version] + args
else:
args = ['ccm', 'create', 'test'] + args
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return p
def validate_output(self, process):
stdout, stderr = process.communicate()
try:
print_("[OUT] %s" % stdout)
self.assertEqual(len(stderr), 0)
except AssertionError:
print_("[ERROR] %s" % stderr.strip())
raise
def cluster_create_version_test(self):
self.validate_output(self.create_cmd())
def cluster_create_populate_test(self):
        args = ['-n', '3']
self.validate_output(self.create_cmd(args))
|
|
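validate_output above only inspects stderr; a stricter helper would also assert on the exit status. A sketch of such a variant using the same subprocess API:
    def validate_returncode(self, process):
        # Fail the test if ccm exits non-zero, regardless of what it printed.
        stdout, stderr = process.communicate()
        self.assertEqual(process.returncode, 0, "[ERROR] %s" % stderr.strip())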
6e778406266f6b44c5fe79d7e3f64aeeda29fe15
|
src/commons/transaction.py
|
src/commons/transaction.py
|
from django.db.transaction import is_dirty, leave_transaction_management, rollback, commit, enter_transaction_management, managed
from django.db import DEFAULT_DB_ALIAS
from django.http import HttpResponse
def commit_on_http_success(func, using=None):
"""
    This decorator activates db commit on HTTP success responses. This way, if the
    view function returns a success response, a commit is made; if the view function
    produces an exception or returns an error response, a rollback is made.
"""
if using is None:
using = DEFAULT_DB_ALIAS
def wrapped_func(*args, **kwargs):
enter_transaction_management(using=using)
managed(True, using=using)
try:
res = func(*args, **kwargs)
except:
if is_dirty(using=using):
rollback(using=using)
raise
else:
if is_dirty(using=using):
if not isinstance(res, HttpResponse) or res.status_code > 200 or res.status_code < 200:
rollback(using=using)
else:
try:
commit(using=using)
except:
rollback(using=using)
raise
leave_transaction_management(using=using)
return res
return wrapped_func
|
Add a new decorator: commit_on_http_success.
|
Add a new decorator: commit_on_http_success.
|
Python
|
agpl-3.0
|
rockneurotiko/wirecloud,rockneurotiko/wirecloud,jpajuelo/wirecloud,jpajuelo/wirecloud,rockneurotiko/wirecloud,jpajuelo/wirecloud,rockneurotiko/wirecloud,jpajuelo/wirecloud
|
Add a new decorator: commit_on_http_success.
|
from django.db.transaction import is_dirty, leave_transaction_management, rollback, commit, enter_transaction_management, managed
from django.db import DEFAULT_DB_ALIAS
from django.http import HttpResponse
def commit_on_http_success(func, using=None):
"""
    This decorator activates db commit on HTTP success responses. This way, if the
    view function returns a success response, a commit is made; if the view function
    produces an exception or returns an error response, a rollback is made.
"""
if using is None:
using = DEFAULT_DB_ALIAS
def wrapped_func(*args, **kwargs):
enter_transaction_management(using=using)
managed(True, using=using)
try:
res = func(*args, **kwargs)
except:
if is_dirty(using=using):
rollback(using=using)
raise
else:
if is_dirty(using=using):
if not isinstance(res, HttpResponse) or res.status_code > 200 or res.status_code < 200:
rollback(using=using)
else:
try:
commit(using=using)
except:
rollback(using=using)
raise
leave_transaction_management(using=using)
return res
return wrapped_func
|
<commit_before><commit_msg>Add a new decorator: commit_on_http_success.<commit_after>
|
from django.db.transaction import is_dirty, leave_transaction_management, rollback, commit, enter_transaction_management, managed
from django.db import DEFAULT_DB_ALIAS
from django.http import HttpResponse
def commit_on_http_success(func, using=None):
"""
    This decorator activates db commit on HTTP success responses. This way, if the
    view function returns a success response, a commit is made; if the view function
    produces an exception or returns an error response, a rollback is made.
"""
if using is None:
using = DEFAULT_DB_ALIAS
def wrapped_func(*args, **kwargs):
enter_transaction_management(using=using)
managed(True, using=using)
try:
res = func(*args, **kwargs)
except:
if is_dirty(using=using):
rollback(using=using)
raise
else:
if is_dirty(using=using):
if not isinstance(res, HttpResponse) or res.status_code > 200 or res.status_code < 200:
rollback(using=using)
else:
try:
commit(using=using)
except:
rollback(using=using)
raise
leave_transaction_management(using=using)
return res
return wrapped_func
|
Add a new decorator: commit_on_http_success.
from django.db.transaction import is_dirty, leave_transaction_management, rollback, commit, enter_transaction_management, managed
from django.db import DEFAULT_DB_ALIAS
from django.http import HttpResponse
def commit_on_http_success(func, using=None):
"""
    This decorator activates db commit on HTTP success responses. This way, if the
    view function returns a success response, a commit is made; if the view function
    produces an exception or returns an error response, a rollback is made.
"""
if using is None:
using = DEFAULT_DB_ALIAS
def wrapped_func(*args, **kwargs):
enter_transaction_management(using=using)
managed(True, using=using)
try:
res = func(*args, **kwargs)
except:
if is_dirty(using=using):
rollback(using=using)
raise
else:
if is_dirty(using=using):
if not isinstance(res, HttpResponse) or res.status_code > 200 or res.status_code < 200:
rollback(using=using)
else:
try:
commit(using=using)
except:
rollback(using=using)
raise
leave_transaction_management(using=using)
return res
return wrapped_func
|
<commit_before><commit_msg>Add a new decorator: commit_on_http_success.<commit_after>from django.db.transaction import is_dirty, leave_transaction_management, rollback, commit, enter_transaction_management, managed
from django.db import DEFAULT_DB_ALIAS
from django.http import HttpResponse
def commit_on_http_success(func, using=None):
"""
    This decorator activates db commit on HTTP success responses. This way, if the
    view function returns a success response, a commit is made; if the view function
    produces an exception or returns an error response, a rollback is made.
"""
if using is None:
using = DEFAULT_DB_ALIAS
def wrapped_func(*args, **kwargs):
enter_transaction_management(using=using)
managed(True, using=using)
try:
res = func(*args, **kwargs)
except:
if is_dirty(using=using):
rollback(using=using)
raise
else:
if is_dirty(using=using):
if not isinstance(res, HttpResponse) or res.status_code > 200 or res.status_code < 200:
rollback(using=using)
else:
try:
commit(using=using)
except:
rollback(using=using)
raise
leave_transaction_management(using=using)
return res
return wrapped_func
|
|
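Usage is analogous to Django's commit_on_success decorator. Note that as written, only a response with status code exactly 200 commits; anything else rolls back. A sketch with a hypothetical view (Widget is illustrative, not part of the project):
@commit_on_http_success
def update_widget(request, widget_id):
    widget = Widget.objects.get(pk=widget_id)  # hypothetical model
    widget.save()
    return HttpResponse(status=200)  # 200 -> commit; other codes or exceptions -> rollback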
5e92d9b7e339e1de1f807d728e9bd4c9b7d92010
|
temba/contacts/migrations/0024_unblock_contacts_imported_again_after_being_blocked.py
|
temba/contacts/migrations/0024_unblock_contacts_imported_again_after_being_blocked.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.db.models import Count, Q
def unblock_contacts_imported_again(apps, schema_editor):
Contact = apps.get_model('contacts', 'Contact')
blocked_contacts = Contact.objects.filter(is_blocked=True, is_test=False).annotate(group_count=Count('all_groups'))
reimported_contacts = blocked_contacts.filter(Q(group_count__gt=1) | Q(group_count__lt=1))
updated = reimported_contacts.update(is_blocked=False)
if updated:
print "Fixed %d contacts that are blocked and has another group" % updated
class Migration(migrations.Migration):
dependencies = [
('contacts', '0023_remove_test_contacts_from_sys_groups'),
]
operations = [
migrations.RunPython(unblock_contacts_imported_again)
]
|
Add migrations to fix reimported blocked contacts
|
Add migrations to fix reimported blocked contacts
|
Python
|
agpl-3.0
|
reyrodrigues/EU-SMS,pulilab/rapidpro,praekelt/rapidpro,tsotetsi/textily-web,reyrodrigues/EU-SMS,praekelt/rapidpro,ewheeler/rapidpro,ewheeler/rapidpro,tsotetsi/textily-web,pulilab/rapidpro,ewheeler/rapidpro,tsotetsi/textily-web,praekelt/rapidpro,tsotetsi/textily-web,pulilab/rapidpro,ewheeler/rapidpro,pulilab/rapidpro,pulilab/rapidpro,praekelt/rapidpro,tsotetsi/textily-web,reyrodrigues/EU-SMS
|
Add migrations to fix reimported blocked contacts
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.db.models import Count, Q
def unblock_contacts_imported_again(apps, schema_editor):
Contact = apps.get_model('contacts', 'Contact')
blocked_contacts = Contact.objects.filter(is_blocked=True, is_test=False).annotate(group_count=Count('all_groups'))
reimported_contacts = blocked_contacts.filter(Q(group_count__gt=1) | Q(group_count__lt=1))
updated = reimported_contacts.update(is_blocked=False)
if updated:
print "Fixed %d contacts that are blocked and has another group" % updated
class Migration(migrations.Migration):
dependencies = [
('contacts', '0023_remove_test_contacts_from_sys_groups'),
]
operations = [
migrations.RunPython(unblock_contacts_imported_again)
]
|
<commit_before><commit_msg>Add migrations to fix reimported blocked contacts<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.db.models import Count, Q
def unblock_contacts_imported_again(apps, schema_editor):
Contact = apps.get_model('contacts', 'Contact')
blocked_contacts = Contact.objects.filter(is_blocked=True, is_test=False).annotate(group_count=Count('all_groups'))
reimported_contacts = blocked_contacts.filter(Q(group_count__gt=1) | Q(group_count__lt=1))
updated = reimported_contacts.update(is_blocked=False)
if updated:
print "Fixed %d contacts that are blocked and has another group" % updated
class Migration(migrations.Migration):
dependencies = [
('contacts', '0023_remove_test_contacts_from_sys_groups'),
]
operations = [
migrations.RunPython(unblock_contacts_imported_again)
]
|
Add migrations to fix reimported blocked contacts
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.db.models import Count, Q
def unblock_contacts_imported_again(apps, schema_editor):
Contact = apps.get_model('contacts', 'Contact')
blocked_contacts = Contact.objects.filter(is_blocked=True, is_test=False).annotate(group_count=Count('all_groups'))
reimported_contacts = blocked_contacts.filter(Q(group_count__gt=1) | Q(group_count__lt=1))
updated = reimported_contacts.update(is_blocked=False)
if updated:
print "Fixed %d contacts that are blocked and has another group" % updated
class Migration(migrations.Migration):
dependencies = [
('contacts', '0023_remove_test_contacts_from_sys_groups'),
]
operations = [
migrations.RunPython(unblock_contacts_imported_again)
]
|
<commit_before><commit_msg>Add migrations to fix reimported blocked contacts<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.db.models import Count, Q
def unblock_contacts_imported_again(apps, schema_editor):
Contact = apps.get_model('contacts', 'Contact')
blocked_contacts = Contact.objects.filter(is_blocked=True, is_test=False).annotate(group_count=Count('all_groups'))
reimported_contacts = blocked_contacts.filter(Q(group_count__gt=1) | Q(group_count__lt=1))
updated = reimported_contacts.update(is_blocked=False)
if updated:
print "Fixed %d contacts that are blocked and has another group" % updated
class Migration(migrations.Migration):
dependencies = [
('contacts', '0023_remove_test_contacts_from_sys_groups'),
]
operations = [
migrations.RunPython(unblock_contacts_imported_again)
]
|
|
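The Q(group_count__gt=1) | Q(group_count__lt=1) filter is simply "group_count != 1". An equivalent, arguably clearer spelling with the same ORM:
# Equivalent formulation of the reimported-contacts query:
reimported_contacts = blocked_contacts.exclude(group_count=1)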
280f6fa36a204aa170eff3339e2c2fd7d4943491
|
stepik/Programming_on_Python/part1/1.4.shape_area.py
|
stepik/Programming_on_Python/part1/1.4.shape_area.py
|
#!/usr/bin/env python3
def main():
shape = input()
    if shape == 'прямоугольник':  # Russian for "rectangle"
        a, b = int(input()), int(input())
        print('{}'.format(a * b))
    if shape == 'треугольник':  # Russian for "triangle"
        a, b, c = int(input()), int(input()), int(input())
        p = (a + b + c) / 2
        print('{}'.format((p * (p - a) * (p - b) * (p - c)) ** (1/2)))
    if shape == 'круг':  # Russian for "circle"
        r = int(input())
        pi = 3.14
        print('{}'.format(pi * r ** 2))
if __name__ == '__main__':
main()
|
Add calc area of shapes
|
Add calc area of shapes
|
Python
|
apache-2.0
|
fedusia/python
|
Add calc area of shapes
|
#!/usr/bin/env python3
def main():
shape = input()
    if shape == 'прямоугольник':  # Russian for "rectangle"
        a, b = int(input()), int(input())
        print('{}'.format(a * b))
    if shape == 'треугольник':  # Russian for "triangle"
        a, b, c = int(input()), int(input()), int(input())
        p = (a + b + c) / 2
        print('{}'.format((p * (p - a) * (p - b) * (p - c)) ** (1/2)))
    if shape == 'круг':  # Russian for "circle"
        r = int(input())
        pi = 3.14
        print('{}'.format(pi * r ** 2))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add calc area of shapes<commit_after>
|
#!/usr/bin/env python3
def main():
shape = input()
    if shape == 'прямоугольник':  # Russian for "rectangle"
        a, b = int(input()), int(input())
        print('{}'.format(a * b))
    if shape == 'треугольник':  # Russian for "triangle"
        a, b, c = int(input()), int(input()), int(input())
        p = (a + b + c) / 2
        print('{}'.format((p * (p - a) * (p - b) * (p - c)) ** (1/2)))
    if shape == 'круг':  # Russian for "circle"
        r = int(input())
        pi = 3.14
        print('{}'.format(pi * r ** 2))
if __name__ == '__main__':
main()
|
Add calc area of shapes
#!/usr/bin/env python3
def main():
shape = input()
    if shape == 'прямоугольник':  # Russian for "rectangle"
        a, b = int(input()), int(input())
        print('{}'.format(a * b))
    if shape == 'треугольник':  # Russian for "triangle"
        a, b, c = int(input()), int(input()), int(input())
        p = (a + b + c) / 2
        print('{}'.format((p * (p - a) * (p - b) * (p - c)) ** (1/2)))
    if shape == 'круг':  # Russian for "circle"
        r = int(input())
        pi = 3.14
        print('{}'.format(pi * r ** 2))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add calc area of shapes<commit_after>#!/usr/bin/env python3
def main():
shape = input()
    if shape == 'прямоугольник':  # Russian for "rectangle"
        a, b = int(input()), int(input())
        print('{}'.format(a * b))
    if shape == 'треугольник':  # Russian for "triangle"
        a, b, c = int(input()), int(input()), int(input())
        p = (a + b + c) / 2
        print('{}'.format((p * (p - a) * (p - b) * (p - c)) ** (1/2)))
    if shape == 'круг':  # Russian for "circle"
        r = int(input())
        pi = 3.14
        print('{}'.format(pi * r ** 2))
if __name__ == '__main__':
main()
|
|
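A slightly tidier variant of the same exercise, using elif dispatch and math.pi instead of the hard-coded 3.14 (a sketch; the original constant may be what the course expects):
import math

def area(shape):
    if shape == 'прямоугольник':  # rectangle
        a, b = int(input()), int(input())
        return a * b
    elif shape == 'треугольник':  # triangle
        a, b, c = int(input()), int(input()), int(input())
        p = (a + b + c) / 2
        return (p * (p - a) * (p - b) * (p - c)) ** 0.5
    elif shape == 'круг':  # circle
        return math.pi * int(input()) ** 2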
8d0e213213c1f1cb295373f8f50f2215564cd8d4
|
build/run_tests.py
|
build/run_tests.py
|
#!/usr/bin/env python
# Copyright 2014 The Crashpad Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import sys
script_dir = os.path.dirname(__file__)
crashpad_dir = os.path.dirname(script_dir) if script_dir != '' else '..'
# This script is primarily used from the waterfall so that the list of tests
# that are run is maintained in-tree, rather than in a separate infrastructure
# location in the recipe.
def main(args):
if len(args) != 1:
print >>sys.stderr, 'usage: run_tests.py {Debug|Release}'
    return 1
binary_dir = os.path.join(crashpad_dir, 'out', args[0])
tests = [
'client_test',
'minidump_test',
'snapshot_test',
'util_test',
]
for test in tests:
print '-' * 80
print test
print '-' * 80
subprocess.check_call(os.path.join(binary_dir, test))
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
Add test runner script for integration with the waterfall
|
Add test runner script for integration with the waterfall
R=mark@chromium.org
Review URL: https://codereview.chromium.org/800983002
|
Python
|
apache-2.0
|
Chilledheart/crashpad,hankbao/Crashpad,hokein/crashpad,Chilledheart/crashpad,chromium/crashpad,hokein/crashpad,hankbao/Crashpad,chromium/crashpad,chromium/crashpad,hankbao/Crashpad,atom/crashpad,atom/crashpad,hokein/crashpad,Chilledheart/crashpad,atom/crashpad
|
Add test runner script for integration with the waterfall
R=mark@chromium.org
Review URL: https://codereview.chromium.org/800983002
|
#!/usr/bin/env python
# Copyright 2014 The Crashpad Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import sys
script_dir = os.path.dirname(__file__)
crashpad_dir = os.path.dirname(script_dir) if script_dir != '' else '..'
# This script is primarily used from the waterfall so that the list of tests
# that are run is maintained in-tree, rather than in a separate infrastructure
# location in the recipe.
def main(args):
if len(args) != 1:
print >>sys.stderr, 'usage: run_tests.py {Debug|Release}'
return 1
binary_dir = os.path.join(crashpad_dir, 'out', args[0])
tests = [
'client_test',
'minidump_test',
'snapshot_test',
'util_test',
]
for test in tests:
print '-' * 80
print test
print '-' * 80
subprocess.check_call(os.path.join(binary_dir, test))
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
<commit_before><commit_msg>Add test runner script for integration with the waterfall
R=mark@chromium.org
Review URL: https://codereview.chromium.org/800983002<commit_after>
|
#!/usr/bin/env python
# Copyright 2014 The Crashpad Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import sys
script_dir = os.path.dirname(__file__)
crashpad_dir = os.path.dirname(script_dir) if script_dir != '' else '..'
# This script is primarily used from the waterfall so that the list of tests
# that are run is maintained in-tree, rather than in a separate infrastructure
# location in the recipe.
def main(args):
if len(args) != 1:
print >>sys.stderr, 'usage: run_tests.py {Debug|Release}'
return 1
binary_dir = os.path.join(crashpad_dir, 'out', args[0])
tests = [
'client_test',
'minidump_test',
'snapshot_test',
'util_test',
]
for test in tests:
print '-' * 80
print test
print '-' * 80
subprocess.check_call(os.path.join(binary_dir, test))
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
Add test runner script for integration with the waterfall
R=mark@chromium.org
Review URL: https://codereview.chromium.org/800983002#!/usr/bin/env python
# Copyright 2014 The Crashpad Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import sys
script_dir = os.path.dirname(__file__)
crashpad_dir = os.path.dirname(script_dir) if script_dir != '' else '..'
# This script is primarily used from the waterfall so that the list of tests
# that are run is maintained in-tree, rather than in a separate infrastructure
# location in the recipe.
def main(args):
if len(args) != 1:
print >>sys.stderr, 'usage: run_tests.py {Debug|Release}'
return 1
binary_dir = os.path.join(crashpad_dir, 'out', args[0])
tests = [
'client_test',
'minidump_test',
'snapshot_test',
'util_test',
]
for test in tests:
print '-' * 80
print test
print '-' * 80
subprocess.check_call(os.path.join(binary_dir, test))
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
<commit_before><commit_msg>Add test runner script for integration with the waterfall
R=mark@chromium.org
Review URL: https://codereview.chromium.org/800983002<commit_after>#!/usr/bin/env python
# Copyright 2014 The Crashpad Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import sys
script_dir = os.path.dirname(__file__)
crashpad_dir = os.path.dirname(script_dir) if script_dir != '' else '..'
# This script is primarily used from the waterfall so that the list of tests
# that are run is maintained in-tree, rather than in a separate infrastructure
# location in the recipe.
def main(args):
if len(args) != 1:
print >>sys.stderr, 'usage: run_tests.py {Debug|Release}'
return 1
binary_dir = os.path.join(crashpad_dir, 'out', args[0])
tests = [
'client_test',
'minidump_test',
'snapshot_test',
'util_test',
]
for test in tests:
print '-' * 80
print test
print '-' * 80
subprocess.check_call(os.path.join(binary_dir, test))
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
|
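The runner in the record above is Python 2 (bare print statements). A minimal Python 3 sketch of the same pattern, assuming the same out/<configuration> layout and the same test binary names:
#!/usr/bin/env python3
import os
import subprocess
import sys

def main(args):
    if len(args) != 1:
        print('usage: run_tests.py {Debug|Release}', file=sys.stderr)
        return 1
    binary_dir = os.path.join('out', args[0])
    for test in ('client_test', 'minidump_test', 'snapshot_test', 'util_test'):
        print('-' * 80)
        print(test)
        print('-' * 80)
        subprocess.check_call(os.path.join(binary_dir, test))
    return 0

if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))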
172c47ae803691a36d52496f6de74b82707a77f4
|
tests/test_conversions.py
|
tests/test_conversions.py
|
from tomso.adipls import load_amdl
from tomso.fgong import load_fgong
from tomso.gyre import load_gyre
from math import frexp
import unittest
# We should be able to make various round trips between different
# formats and preserve the data relevant for adiabatic oscillations.
# Conversion to AMDL might modify the data because we mimic ADIPLS's
# own FGONG-to-AMDL script.
scalars = ['R', 'M']
vectors = ['r', 'q', 'm', 'rho', 'P', 'Gamma_1', 'N2']
class TestConversionFunctions(unittest.TestCase):
def compare_floats(self, x, y, attr='', index=0, places=12):
self.assertAlmostEqual(frexp(x)[0], frexp(y)[0], places=places,
msg='%s at k=%i' % (attr, index))
def compare_models(self, m0, m1):
self.assertEqual(len(m0), len(m1))
for attr in scalars:
self.compare_floats(getattr(m0, attr), getattr(m1, attr),
attr=attr)
for attr in vectors:
x0 = getattr(m0, attr)
x1 = getattr(m1, attr)
for i in range(len(m0)):
self.compare_floats(x0[i], x1[i], attr=attr, index=i)
def test_fgong_to_fgong(self):
f = load_fgong('data/modelS.fgong', return_object=True)
self.compare_models(f, f.to_gyre().to_fgong())
def test_gyre_to_gyre(self):
g = load_gyre('data/mesa.gyre', return_object=True)
self.compare_models(g, g.to_fgong().to_gyre())
def test_amdl_to_amdl(self):
a = load_amdl('data/modelS.amdl', return_object=True)
self.compare_models(a, a.to_fgong().to_amdl())
self.compare_models(a, a.to_gyre().to_amdl())
self.compare_models(a, a.to_fgong().to_gyre().to_amdl())
self.compare_models(a, a.to_gyre().to_fgong().to_amdl())
if __name__ == '__main__':
unittest.main()
|
Add test for object-based conversions between various stellar model formats, including via other formats
|
Add test for object-based conversions between various stellar model formats, including via other formats
|
Python
|
mit
|
warrickball/tomso
|
Add test for object-based conversions between various stellar model formats, including via other formats
|
from tomso.adipls import load_amdl
from tomso.fgong import load_fgong
from tomso.gyre import load_gyre
from math import frexp
import unittest
# We should be able to make various round trips between different
# formats and preserve the data relevant for adiabatic oscillations.
# Conversion to AMDL might modify the data because we mimic ADIPLS's
# own FGONG-to-AMDL script.
scalars = ['R', 'M']
vectors = ['r', 'q', 'm', 'rho', 'P', 'Gamma_1', 'N2']
class TestConversionFunctions(unittest.TestCase):
def compare_floats(self, x, y, attr='', index=0, places=12):
self.assertAlmostEqual(frexp(x)[0], frexp(y)[0], places=places,
msg='%s at k=%i' % (attr, index))
def compare_models(self, m0, m1):
self.assertEqual(len(m0), len(m1))
for attr in scalars:
self.compare_floats(getattr(m0, attr), getattr(m1, attr),
attr=attr)
for attr in vectors:
x0 = getattr(m0, attr)
x1 = getattr(m1, attr)
for i in range(len(m0)):
self.compare_floats(x0[i], x1[i], attr=attr, index=i)
def test_fgong_to_fgong(self):
f = load_fgong('data/modelS.fgong', return_object=True)
self.compare_models(f, f.to_gyre().to_fgong())
def test_gyre_to_gyre(self):
g = load_gyre('data/mesa.gyre', return_object=True)
self.compare_models(g, g.to_fgong().to_gyre())
def test_amdl_to_amdl(self):
a = load_amdl('data/modelS.amdl', return_object=True)
self.compare_models(a, a.to_fgong().to_amdl())
self.compare_models(a, a.to_gyre().to_amdl())
self.compare_models(a, a.to_fgong().to_gyre().to_amdl())
self.compare_models(a, a.to_gyre().to_fgong().to_amdl())
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test for object-based conversions between various stellar model formats, including via other formats<commit_after>
|
from tomso.adipls import load_amdl
from tomso.fgong import load_fgong
from tomso.gyre import load_gyre
from math import frexp
import unittest
# We should be able to make various round trips between different
# formats and preserve the data relevant for adiabatic oscillations.
# Conversion to AMDL might modify the data because we mimic ADIPLS's
# own FGONG-to-AMDL script.
scalars = ['R', 'M']
vectors = ['r', 'q', 'm', 'rho', 'P', 'Gamma_1', 'N2']
class TestConversionFunctions(unittest.TestCase):
def compare_floats(self, x, y, attr='', index=0, places=12):
self.assertAlmostEqual(frexp(x)[0], frexp(y)[0], places=places,
msg='%s at k=%i' % (attr, index))
def compare_models(self, m0, m1):
self.assertEqual(len(m0), len(m1))
for attr in scalars:
self.compare_floats(getattr(m0, attr), getattr(m1, attr),
attr=attr)
for attr in vectors:
x0 = getattr(m0, attr)
x1 = getattr(m1, attr)
for i in range(len(m0)):
self.compare_floats(x0[i], x1[i], attr=attr, index=i)
def test_fgong_to_fgong(self):
f = load_fgong('data/modelS.fgong', return_object=True)
self.compare_models(f, f.to_gyre().to_fgong())
def test_gyre_to_gyre(self):
g = load_gyre('data/mesa.gyre', return_object=True)
self.compare_models(g, g.to_fgong().to_gyre())
def test_amdl_to_amdl(self):
a = load_amdl('data/modelS.amdl', return_object=True)
self.compare_models(a, a.to_fgong().to_amdl())
self.compare_models(a, a.to_gyre().to_amdl())
self.compare_models(a, a.to_fgong().to_gyre().to_amdl())
self.compare_models(a, a.to_gyre().to_fgong().to_amdl())
if __name__ == '__main__':
unittest.main()
|
Add test for object-based conversions between various stellar model formats, including via other formatsfrom tomso.adipls import load_amdl
from tomso.fgong import load_fgong
from tomso.gyre import load_gyre
from math import frexp
import unittest
# We should be able to make various round trips between different
# formats and preserve the data relevant for adiabatic oscillations.
# Conversion to AMDL might modify the data because we mimic ADIPLS's
# own FGONG-to-AMDL script.
scalars = ['R', 'M']
vectors = ['r', 'q', 'm', 'rho', 'P', 'Gamma_1', 'N2']
class TestConversionFunctions(unittest.TestCase):
def compare_floats(self, x, y, attr='', index=0, places=12):
self.assertAlmostEqual(frexp(x)[0], frexp(y)[0], places=places,
msg='%s at k=%i' % (attr, index))
def compare_models(self, m0, m1):
self.assertEqual(len(m0), len(m1))
for attr in scalars:
self.compare_floats(getattr(m0, attr), getattr(m1, attr),
attr=attr)
for attr in vectors:
x0 = getattr(m0, attr)
x1 = getattr(m1, attr)
for i in range(len(m0)):
self.compare_floats(x0[i], x1[i], attr=attr, index=i)
def test_fgong_to_fgong(self):
f = load_fgong('data/modelS.fgong', return_object=True)
self.compare_models(f, f.to_gyre().to_fgong())
def test_gyre_to_gyre(self):
g = load_gyre('data/mesa.gyre', return_object=True)
self.compare_models(g, g.to_fgong().to_gyre())
def test_amdl_to_amdl(self):
a = load_amdl('data/modelS.amdl', return_object=True)
self.compare_models(a, a.to_fgong().to_amdl())
self.compare_models(a, a.to_gyre().to_amdl())
self.compare_models(a, a.to_fgong().to_gyre().to_amdl())
self.compare_models(a, a.to_gyre().to_fgong().to_amdl())
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test for object-based conversions between various stellar model formats, including via other formats<commit_after>from tomso.adipls import load_amdl
from tomso.fgong import load_fgong
from tomso.gyre import load_gyre
from math import frexp
import unittest
# We should be able to make various round trips between different
# formats and preserve the data relevant for adiabatic oscillations.
# Conversion to AMDL might modify the data because we mimic ADIPLS's
# own FGONG-to-AMDL script.
scalars = ['R', 'M']
vectors = ['r', 'q', 'm', 'rho', 'P', 'Gamma_1', 'N2']
class TestConversionFunctions(unittest.TestCase):
def compare_floats(self, x, y, attr='', index=0, places=12):
self.assertAlmostEqual(frexp(x)[0], frexp(y)[0], places=places,
msg='%s at k=%i' % (attr, index))
def compare_models(self, m0, m1):
self.assertEqual(len(m0), len(m1))
for attr in scalars:
self.compare_floats(getattr(m0, attr), getattr(m1, attr),
attr=attr)
for attr in vectors:
x0 = getattr(m0, attr)
x1 = getattr(m1, attr)
for i in range(len(m0)):
self.compare_floats(x0[i], x1[i], attr=attr, index=i)
def test_fgong_to_fgong(self):
f = load_fgong('data/modelS.fgong', return_object=True)
self.compare_models(f, f.to_gyre().to_fgong())
def test_gyre_to_gyre(self):
g = load_gyre('data/mesa.gyre', return_object=True)
self.compare_models(g, g.to_fgong().to_gyre())
def test_amdl_to_amdl(self):
a = load_amdl('data/modelS.amdl', return_object=True)
self.compare_models(a, a.to_fgong().to_amdl())
self.compare_models(a, a.to_gyre().to_amdl())
self.compare_models(a, a.to_fgong().to_gyre().to_amdl())
self.compare_models(a, a.to_gyre().to_fgong().to_amdl())
if __name__ == '__main__':
unittest.main()
|
|
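compare_floats in the record above compares frexp mantissas, which deliberately factors out overall magnitude; note that two values differing by an exact power of two have identical mantissas. A relative tolerance via math.isclose is a common alternative; a minimal sketch, assuming plain Python floats:
import math

def assert_close(x, y, rel_tol=1e-12):
    # Relative check: |x - y| <= rel_tol * max(|x|, |y|).
    if not math.isclose(x, y, rel_tol=rel_tol):
        raise AssertionError('%r != %r (rel_tol=%g)' % (x, y, rel_tol))

assert_close(1.0, 1.0 + 1e-15)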
6fa314f29d1036a89e43e8a1c7152911f0ffe430
|
tests/test_geolocation.py
|
tests/test_geolocation.py
|
from . import TheInternetTestCase
from helium.api import click, get_driver, S
class GeolocationTest(TheInternetTestCase):
def get_page(self):
return "http://the-internet.herokuapp.com/geolocation"
def test_fake_geolocation(self):
get_driver().execute_script(
'window.navigator.geolocation.getCurrentPosition = '
'function(success){ '
'var position = {"coords" : { '
'"latitude": "1", '
'"longitude": "2"'
'}'
'}; '
'success(position);}'
)
click('Where am I?')
latitude = S("#lat-value").web_element.text
self.assertEqual(latitude, u"1")
longitude = S("#long-value").web_element.text
self.assertEqual(longitude, u"2")
|
Add test case for geolocation.
|
Add test case for geolocation.
|
Python
|
mit
|
bugfree-software/the-internet-solution-python
|
Add test case for geolocation.
|
from . import TheInternetTestCase
from helium.api import click, get_driver, S
class GeolocationTest(TheInternetTestCase):
def get_page(self):
return "http://the-internet.herokuapp.com/geolocation"
def test_fake_geolocation(self):
get_driver().execute_script(
'window.navigator.geolocation.getCurrentPosition = '
'function(success){ '
'var position = {"coords" : { '
'"latitude": "1", '
'"longitude": "2"'
'}'
'}; '
'success(position);}'
)
click('Where am I?')
latitude = S("#lat-value").web_element.text
self.assertEqual(latitude, u"1")
longitude = S("#long-value").web_element.text
self.assertEqual(longitude, u"2")
|
<commit_before><commit_msg>Add test case for geolocation.<commit_after>
|
from . import TheInternetTestCase
from helium.api import click, get_driver, S
class GeolocationTest(TheInternetTestCase):
def get_page(self):
return "http://the-internet.herokuapp.com/geolocation"
def test_fake_geolocation(self):
get_driver().execute_script(
'window.navigator.geolocation.getCurrentPosition = '
'function(success){ '
'var position = {"coords" : { '
'"latitude": "1", '
'"longitude": "2"'
'}'
'}; '
'success(position);}'
)
click('Where am I?')
latitude = S("#lat-value").web_element.text
self.assertEqual(latitude, u"1")
longitude = S("#long-value").web_element.text
self.assertEqual(longitude, u"2")
|
Add test case for geolocation.from . import TheInternetTestCase
from helium.api import click, get_driver, S
class GeolocationTest(TheInternetTestCase):
def get_page(self):
return "http://the-internet.herokuapp.com/geolocation"
def test_fake_geolocation(self):
get_driver().execute_script(
'window.navigator.geolocation.getCurrentPosition = '
'function(success){ '
'var position = {"coords" : { '
'"latitude": "1", '
'"longitude": "2"'
'}'
'}; '
'success(position);}'
)
click('Where am I?')
latitude = S("#lat-value").web_element.text
self.assertEqual(latitude, u"1")
longitude = S("#long-value").web_element.text
self.assertEqual(longitude, u"2")
|
<commit_before><commit_msg>Add test case for geolocation.<commit_after>from . import TheInternetTestCase
from helium.api import click, get_driver, S
class GeolocationTest(TheInternetTestCase):
def get_page(self):
return "http://the-internet.herokuapp.com/geolocation"
def test_fake_geolocation(self):
get_driver().execute_script(
'window.navigator.geolocation.getCurrentPosition = '
'function(success){ '
'var position = {"coords" : { '
'"latitude": "1", '
'"longitude": "2"'
'}'
'}; '
'success(position);}'
)
click('Where am I?')
latitude = S("#lat-value").web_element.text
self.assertEqual(latitude, u"1")
longitude = S("#long-value").web_element.text
self.assertEqual(longitude, u"2")
|
|
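The injected JavaScript in the record above replaces navigator.geolocation.getCurrentPosition before the page calls it. A plain-Selenium sketch of the same stub outside the Helium wrapper; the driver choice and the text-based locator are assumptions about the page:
from selenium import webdriver
from selenium.webdriver.common.by import By

STUB = (
    'window.navigator.geolocation.getCurrentPosition = '
    'function(success){ success({coords: {latitude: "1", longitude: "2"}}); }'
)

driver = webdriver.Firefox()
try:
    driver.get('http://the-internet.herokuapp.com/geolocation')
    driver.execute_script(STUB)  # must run before the button is clicked
    driver.find_element(By.XPATH, "//*[text()='Where am I?']").click()
    print(driver.find_element(By.ID, 'lat-value').text)   # expected: 1
    print(driver.find_element(By.ID, 'long-value').text)  # expected: 2
finally:
    driver.quit()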
85aaa8d8ec3bc179729d4a3d731153406560dd2c
|
test/automl/test_models.py
|
test/automl/test_models.py
|
# -*- encoding: utf-8 -*-
from __future__ import print_function
import unittest
import mock
from autosklearn.automl import AutoML
from autosklearn.util.backend import Backend
class AutoMLStub(object):
def __init__(self):
self.__class__ = AutoML
class AutoMlModelsTest(unittest.TestCase):
def setUp(self):
self.automl = AutoMLStub()
self.automl._shared_mode = False
self.automl._seed = 42
self.automl._backend = mock.Mock(spec=Backend)
self.automl._delete_output_directories = lambda: 0
def test_only_loads_ensemble_models(self):
identifiers = [(1, 2), (3, 4)]
models = [ 42 ]
self.automl._backend.load_ensemble.return_value.identifiers_ \
= identifiers
self.automl._backend.load_models_by_identifiers.side_effect \
= lambda ids: models if ids is identifiers else None
self.automl._load_models()
self.assertEqual(models, self.automl.models_)
def test_loads_all_models_if_no_ensemble(self):
models = [ 42 ]
self.automl._backend.load_ensemble.return_value = None
self.automl._backend.load_all_models.return_value = models
self.automl._load_models()
self.assertEqual(models, self.automl.models_)
def test_raises_if_no_models(self):
self.automl._backend.load_ensemble.return_value = None
self.automl._backend.load_all_models.return_value = []
self.assertRaises(ValueError, self.automl._load_models)
|
Test AutoML usage of Backend to load models
|
Test AutoML usage of Backend to load models
|
Python
|
bsd-3-clause
|
automl/auto-sklearn,automl/auto-sklearn,hmendozap/auto-sklearn,hmendozap/auto-sklearn
|
Test AutoML usage of Backend to load models
|
# -*- encoding: utf-8 -*-
from __future__ import print_function
import unittest
import mock
from autosklearn.automl import AutoML
from autosklearn.util.backend import Backend
class AutoMLStub(object):
def __init__(self):
self.__class__ = AutoML
class AutoMlModelsTest(unittest.TestCase):
def setUp(self):
self.automl = AutoMLStub()
self.automl._shared_mode = False
self.automl._seed = 42
self.automl._backend = mock.Mock(spec=Backend)
self.automl._delete_output_directories = lambda: 0
def test_only_loads_ensemble_models(self):
identifiers = [(1, 2), (3, 4)]
models = [ 42 ]
self.automl._backend.load_ensemble.return_value.identifiers_ \
= identifiers
self.automl._backend.load_models_by_identifiers.side_effect \
= lambda ids: models if ids is identifiers else None
self.automl._load_models()
self.assertEqual(models, self.automl.models_)
def test_loads_all_models_if_no_ensemble(self):
models = [ 42 ]
self.automl._backend.load_ensemble.return_value = None
self.automl._backend.load_all_models.return_value = models
self.automl._load_models()
self.assertEqual(models, self.automl.models_)
def test_raises_if_no_models(self):
self.automl._backend.load_ensemble.return_value = None
self.automl._backend.load_all_models.return_value = []
self.assertRaises(ValueError, self.automl._load_models)
|
<commit_before><commit_msg>Test AutoML usage of Backend to load models<commit_after>
|
# -*- encoding: utf-8 -*-
from __future__ import print_function
import unittest
import mock
from autosklearn.automl import AutoML
from autosklearn.util.backend import Backend
class AutoMLStub(object):
def __init__(self):
self.__class__ = AutoML
class AutoMlModelsTest(unittest.TestCase):
def setUp(self):
self.automl = AutoMLStub()
self.automl._shared_mode = False
self.automl._seed = 42
self.automl._backend = mock.Mock(spec=Backend)
self.automl._delete_output_directories = lambda: 0
def test_only_loads_ensemble_models(self):
identifiers = [(1, 2), (3, 4)]
models = [ 42 ]
self.automl._backend.load_ensemble.return_value.identifiers_ \
= identifiers
self.automl._backend.load_models_by_identifiers.side_effect \
= lambda ids: models if ids is identifiers else None
self.automl._load_models()
self.assertEqual(models, self.automl.models_)
def test_loads_all_models_if_no_ensemble(self):
models = [ 42 ]
self.automl._backend.load_ensemble.return_value = None
self.automl._backend.load_all_models.return_value = models
self.automl._load_models()
self.assertEqual(models, self.automl.models_)
def test_raises_if_no_models(self):
self.automl._backend.load_ensemble.return_value = None
self.automl._backend.load_all_models.return_value = []
self.assertRaises(ValueError, self.automl._load_models)
|
Test AutoML usage of Backend to load models# -*- encoding: utf-8 -*-
from __future__ import print_function
import unittest
import mock
from autosklearn.automl import AutoML
from autosklearn.util.backend import Backend
class AutoMLStub(object):
def __init__(self):
self.__class__ = AutoML
class AutoMlModelsTest(unittest.TestCase):
def setUp(self):
self.automl = AutoMLStub()
self.automl._shared_mode = False
self.automl._seed = 42
self.automl._backend = mock.Mock(spec=Backend)
self.automl._delete_output_directories = lambda: 0
def test_only_loads_ensemble_models(self):
identifiers = [(1, 2), (3, 4)]
models = [ 42 ]
self.automl._backend.load_ensemble.return_value.identifiers_ \
= identifiers
self.automl._backend.load_models_by_identifiers.side_effect \
= lambda ids: models if ids is identifiers else None
self.automl._load_models()
self.assertEqual(models, self.automl.models_)
def test_loads_all_models_if_no_ensemble(self):
models = [ 42 ]
self.automl._backend.load_ensemble.return_value = None
self.automl._backend.load_all_models.return_value = models
self.automl._load_models()
self.assertEqual(models, self.automl.models_)
def test_raises_if_no_models(self):
self.automl._backend.load_ensemble.return_value = None
self.automl._backend.load_all_models.return_value = []
self.assertRaises(ValueError, self.automl._load_models)
|
<commit_before><commit_msg>Test AutoML usage of Backend to load models<commit_after># -*- encoding: utf-8 -*-
from __future__ import print_function
import unittest
import mock
from autosklearn.automl import AutoML
from autosklearn.util.backend import Backend
class AutoMLStub(object):
def __init__(self):
self.__class__ = AutoML
class AutoMlModelsTest(unittest.TestCase):
def setUp(self):
self.automl = AutoMLStub()
self.automl._shared_mode = False
self.automl._seed = 42
self.automl._backend = mock.Mock(spec=Backend)
self.automl._delete_output_directories = lambda: 0
def test_only_loads_ensemble_models(self):
identifiers = [(1, 2), (3, 4)]
models = [ 42 ]
self.automl._backend.load_ensemble.return_value.identifiers_ \
= identifiers
self.automl._backend.load_models_by_identifiers.side_effect \
= lambda ids: models if ids is identifiers else None
self.automl._load_models()
self.assertEqual(models, self.automl.models_)
def test_loads_all_models_if_no_ensemble(self):
models = [ 42 ]
self.automl._backend.load_ensemble.return_value = None
self.automl._backend.load_all_models.return_value = models
self.automl._load_models()
self.assertEqual(models, self.automl.models_)
def test_raises_if_no_models(self):
self.automl._backend.load_ensemble.return_value = None
self.automl._backend.load_all_models.return_value = []
self.assertRaises(ValueError, self.automl._load_models)
|
|
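mock.Mock(spec=Backend) in the record above constrains the stub to Backend's real attribute set, so a typo in a test fails loudly instead of silently returning a child mock. A self-contained sketch (Backend here is a stand-in class, not the autosklearn one):
from unittest import mock

class Backend:
    def load_ensemble(self):
        raise NotImplementedError

stub = mock.Mock(spec=Backend)
stub.load_ensemble.return_value = None  # attribute exists on Backend: allowed
try:
    stub.load_ensmeble  # misspelled attribute: rejected by the spec
except AttributeError as exc:
    print(exc)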
47afc368ab508a1b13f936e1cc8c0530dd8175ac
|
corehq/motech/migrations/0010_auto_20211124_1931.py
|
corehq/motech/migrations/0010_auto_20211124_1931.py
|
# Generated by Django 2.2.24 on 2021-11-24 19:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('motech', '0009_auto_20211122_2011'),
]
operations = [
migrations.AlterField(
model_name='connectionsettings',
name='auth_type',
field=models.CharField(
blank=True,
choices=[(None, 'None'), ('basic', 'HTTP Basic'), ('digest', 'HTTP Digest'),
('bearer', 'Bearer Token'), ('oauth1', 'OAuth1'),
('oauth2_pwd', 'OAuth 2.0 Password Grant'), ('oauth2_client', 'OAuth 2.0 Client Grant')],
max_length=16,
null=True
),
),
]
|
Add client grant to db choices
|
Add client grant to db choices
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
|
Add client grant to db choices
|
# Generated by Django 2.2.24 on 2021-11-24 19:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('motech', '0009_auto_20211122_2011'),
]
operations = [
migrations.AlterField(
model_name='connectionsettings',
name='auth_type',
field=models.CharField(
blank=True,
choices=[(None, 'None'), ('basic', 'HTTP Basic'), ('digest', 'HTTP Digest'),
('bearer', 'Bearer Token'), ('oauth1', 'OAuth1'),
('oauth2_pwd', 'OAuth 2.0 Password Grant'), ('oauth2_client', 'OAuth 2.0 Client Grant')],
max_length=16,
null=True
),
),
]
|
<commit_before><commit_msg>Add client grant to db choices<commit_after>
|
# Generated by Django 2.2.24 on 2021-11-24 19:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('motech', '0009_auto_20211122_2011'),
]
operations = [
migrations.AlterField(
model_name='connectionsettings',
name='auth_type',
field=models.CharField(
blank=True,
choices=[(None, 'None'), ('basic', 'HTTP Basic'), ('digest', 'HTTP Digest'),
('bearer', 'Bearer Token'), ('oauth1', 'OAuth1'),
('oauth2_pwd', 'OAuth 2.0 Password Grant'), ('oauth2_client', 'OAuth 2.0 Client Grant')],
max_length=16,
null=True
),
),
]
|
Add client grant to db choices# Generated by Django 2.2.24 on 2021-11-24 19:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('motech', '0009_auto_20211122_2011'),
]
operations = [
migrations.AlterField(
model_name='connectionsettings',
name='auth_type',
field=models.CharField(
blank=True,
choices=[(None, 'None'), ('basic', 'HTTP Basic'), ('digest', 'HTTP Digest'),
('bearer', 'Bearer Token'), ('oauth1', 'OAuth1'),
('oauth2_pwd', 'OAuth 2.0 Password Grant'), ('oauth2_client', 'OAuth 2.0 Client Grant')],
max_length=16,
null=True
),
),
]
|
<commit_before><commit_msg>Add client grant to db choices<commit_after># Generated by Django 2.2.24 on 2021-11-24 19:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('motech', '0009_auto_20211122_2011'),
]
operations = [
migrations.AlterField(
model_name='connectionsettings',
name='auth_type',
field=models.CharField(
blank=True,
choices=[(None, 'None'), ('basic', 'HTTP Basic'), ('digest', 'HTTP Digest'),
('bearer', 'Bearer Token'), ('oauth1', 'OAuth1'),
('oauth2_pwd', 'OAuth 2.0 Password Grant'), ('oauth2_client', 'OAuth 2.0 Client Grant')],
max_length=16,
null=True
),
),
]
|
|
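Altering choices only changes validation metadata; on typical backends Django emits no schema-changing SQL for it. A sketch of keeping the choice list in one module-level constant so the model field and future migrations stay in sync (the names here are hypothetical):
OAUTH2_CLIENT = 'oauth2_client'

AUTH_TYPE_CHOICES = [
    (None, 'None'),
    ('basic', 'HTTP Basic'),
    ('digest', 'HTTP Digest'),
    ('bearer', 'Bearer Token'),
    ('oauth1', 'OAuth1'),
    ('oauth2_pwd', 'OAuth 2.0 Password Grant'),
    (OAUTH2_CLIENT, 'OAuth 2.0 Client Grant'),
]
# e.g. models.CharField(choices=AUTH_TYPE_CHOICES, max_length=16, blank=True, null=True)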
75393b7ff34009aa5cb2993011107882941c949d
|
drudge/utils.py
|
drudge/utils.py
|
"""Small utilities."""
from collections.abc import Sequence
from sympy import sympify, Symbol, Expr, SympifyError
#
# SymPy utilities
# ---------------
#
def ensure_sympify(obj, role='', expected_type=None):
"""Sympify the given object with checking and error reporting.
This is a shallow wrapper over SymPy sympify function to have error
reporting in consistent style and an optional type checking.
"""
header = 'Invalid {}: '.format(role)
try:
sympified = sympify(obj, strict=True)
except SympifyError as exc:
raise TypeError(header, obj, 'failed to be sympified', exc.args)
if expected_type is not None and not isinstance(sympified, expected_type):
raise TypeError(header, sympified, 'expecting', expected_type)
else:
return sympified
def ensure_symb(obj, role=''):
"""Sympify the given object into a symbol."""
return ensure_sympify(obj, role, Symbol)
def ensure_expr(obj, role=''):
"""Sympify the given object into an expression."""
return ensure_sympify(obj, role, Expr)
#
# Misc utilities
# --------------
#
def ensure_pair(obj, role):
"""Ensures that the given object is a pair."""
if not (isinstance(obj, Sequence) and len(obj) == 2):
raise TypeError('Invalid {}: '.format(role), obj, 'expecting pair')
return obj
|
Add utilities for checking inputs
|
Add utilities for checking inputs
These functions all use a consistent style of error reporting, which
will be used throughout drudge.
|
Python
|
mit
|
tschijnmo/drudge,tschijnmo/drudge,tschijnmo/drudge
|
Add utilities for checking inputs
These functions all use a consistent style of error reporting, which
will be used throughout drudge.
|
"""Small utilities."""
from collections.abc import Sequence
from sympy import sympify, Symbol, Expr, SympifyError
#
# SymPy utilities
# ---------------
#
def ensure_sympify(obj, role='', expected_type=None):
"""Sympify the given object with checking and error reporting.
This is a shallow wrapper over SymPy sympify function to have error
reporting in consistent style and an optional type checking.
"""
header = 'Invalid {}: '.format(role)
try:
sympified = sympify(obj, strict=True)
except SympifyError as exc:
raise TypeError(header, obj, 'failed to be sympified', exc.args)
if expected_type is not None and not isinstance(sympified, expected_type):
raise TypeError(header, sympified, 'expecting', expected_type)
else:
return sympified
def ensure_symb(obj, role=''):
"""Sympify the given object into a symbol."""
return ensure_sympify(obj, role, Symbol)
def ensure_expr(obj, role=''):
"""Sympify the given object into an expression."""
return ensure_sympify(obj, role, Expr)
#
# Misc utilities
# --------------
#
def ensure_pair(obj, role):
"""Ensures that the given object is a pair."""
if not (isinstance(obj, Sequence) and len(obj) == 2):
raise TypeError('Invalid {}: '.format(role), obj, 'expecting pair')
return obj
|
<commit_before><commit_msg>Add utilities for checking inputs
These functions all use a consistent style of error reporting, which
will be used throughout drudge.<commit_after>
|
"""Small utilities."""
from collections.abc import Sequence
from sympy import sympify, Symbol, Expr, SympifyError
#
# SymPy utilities
# ---------------
#
def ensure_sympify(obj, role='', expected_type=None):
"""Sympify the given object with checking and error reporting.
This is a shallow wrapper over SymPy sympify function to have error
reporting in consistent style and an optional type checking.
"""
header = 'Invalid {}: '.format(role)
try:
sympified = sympify(obj, strict=True)
except SympifyError as exc:
raise TypeError(header, obj, 'failed to be sympified', exc.args)
if expected_type is not None and not isinstance(sympified, expected_type):
raise TypeError(header, sympified, 'expecting', expected_type)
else:
return sympified
def ensure_symb(obj, role=''):
"""Sympify the given object into a symbol."""
return ensure_sympify(obj, role, Symbol)
def ensure_expr(obj, role=''):
"""Sympify the given object into an expression."""
return ensure_sympify(obj, role, Expr)
#
# Misc utilities
# --------------
#
def ensure_pair(obj, role):
"""Ensures that the given object is a pair."""
if not (isinstance(obj, Sequence) and len(obj) == 2):
raise TypeError('Invalid {}: '.format(role), obj, 'expecting pair')
return obj
|
Add utilities for checking inputs
These functions all use a consistent style of error reporting, which
will be used throughout drudge."""Small utilities."""
from collections.abc import Sequence
from sympy import sympify, Symbol, Expr, SympifyError
#
# SymPy utilities
# ---------------
#
def ensure_sympify(obj, role='', expected_type=None):
"""Sympify the given object with checking and error reporting.
This is a shallow wrapper over SymPy sympify function to have error
reporting in consistent style and an optional type checking.
"""
header = 'Invalid {}: '.format(role)
try:
sympified = sympify(obj, strict=True)
except SympifyError as exc:
raise TypeError(header, obj, 'failed to be sympified', exc.args)
if expected_type is not None and not isinstance(sympified, expected_type):
raise TypeError(header, sympified, 'expecting', expected_type)
else:
return sympified
def ensure_symb(obj, role=''):
"""Sympify the given object into a symbol."""
return ensure_sympify(obj, role, Symbol)
def ensure_expr(obj, role=''):
"""Sympify the given object into an expression."""
return ensure_sympify(obj, role, Expr)
#
# Misc utilities
# --------------
#
def ensure_pair(obj, role):
"""Ensures that the given object is a pair."""
if not (isinstance(obj, Sequence) and len(obj) == 2):
raise TypeError('Invalid {}: '.format(role), obj, 'expecting pair')
return obj
|
<commit_before><commit_msg>Add utilities for checking inputs
These functions all use a consistent style of error reporting, which
will be used throughout drudge.<commit_after>"""Small utilities."""
from collections.abc import Sequence
from sympy import sympify, Symbol, Expr, SympifyError
#
# SymPy utilities
# ---------------
#
def ensure_sympify(obj, role='', expected_type=None):
"""Sympify the given object with checking and error reporting.
This is a shallow wrapper over SymPy sympify function to have error
reporting in consistent style and an optional type checking.
"""
header = 'Invalid {}: '.format(role)
try:
sympified = sympify(obj, strict=True)
except SympifyError as exc:
raise TypeError(header, obj, 'failed to be sympified', exc.args)
if expected_type is not None and not isinstance(sympified, expected_type):
raise TypeError(header, sympified, 'expecting', expected_type)
else:
return sympified
def ensure_symb(obj, role=''):
"""Sympify the given object into a symbol."""
return ensure_sympify(obj, role, Symbol)
def ensure_expr(obj, role=''):
"""Sympify the given object into an expression."""
return ensure_sympify(obj, role, Expr)
#
# Misc utilities
# --------------
#
def ensure_pair(obj, role):
"""Ensures that the given object is a pair."""
if not (isinstance(obj, Sequence) and len(obj) == 2):
raise TypeError('Invalid {}: '.format(role), obj, 'expecting pair')
return obj
|
|
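A short usage sketch of the helpers in the record above, assuming the module is importable as drudge.utils; the role string ends up in the TypeError args, which is the consistent error style the commit message describes:
from sympy import Symbol
from drudge.utils import ensure_pair, ensure_symb

i = ensure_symb(Symbol('i'), role='dummy index')       # returns the symbol unchanged
bounds = ensure_pair((0, 10), role='summation range')  # returns the pair unchanged

try:
    ensure_pair((0, 1, 2), role='summation range')
except TypeError as exc:
    print(exc.args)  # ('Invalid summation range: ', (0, 1, 2), 'expecting pair')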
444168c4e53283e99c5fb8e0c63585cda97138dd
|
analysis/plot-trial-durations.py
|
analysis/plot-trial-durations.py
|
import climate
import collections
import lmj.plot
import numpy as np
import database
import plots
@climate.annotate(
root='load experiment data from this directory',
pattern=('plot data from files matching this pattern', 'option'),
)
def main(root, pattern='*.csv.gz'):
data = collections.defaultdict(lambda: collections.defaultdict(list))
for s in database.Experiment(root).subjects:
for i, b in enumerate(s.blocks):
for j, t in enumerate(b.trials):
t.load()
data[i][j].append(t.df.index[-1])
counts = []
means = []
stds = []
labels = []
for i in sorted(data):
for j in sorted(data[i]):
counts.append(len(data[i][j]))
means.append(np.mean(data[i][j]))
stds.append(np.std(data[i][j]))
labels.append('{}/{}'.format(i + 1, j + 1))
xs = np.arange(len(means))
means = np.array(means)
stds = np.array(stds)
with plots.plot() as ax:
ax.plot(xs, means, color='#111111')
ax.fill_between(xs, means - stds, means + stds, color='#111111', alpha=0.7, lw=0)
ax.set_xticks(xs)
ax.set_xticklabels(labels)
if __name__ == '__main__':
climate.call(main)
|
Add plot for trial durations, needs more work!
|
Add plot for trial durations, needs more work!
|
Python
|
mit
|
lmjohns3/cube-experiment,lmjohns3/cube-experiment,lmjohns3/cube-experiment
|
Add plot for trial durations, needs more work!
|
import climate
import collections
import lmj.plot
import numpy as np
import database
import plots
@climate.annotate(
root='load experiment data from this directory',
pattern=('plot data from files matching this pattern', 'option'),
)
def main(root, pattern='*.csv.gz'):
data = collections.defaultdict(lambda: collections.defaultdict(list))
for s in database.Experiment(root).subjects:
for i, b in enumerate(s.blocks):
for j, t in enumerate(b.trials):
t.load()
data[i][j].append(t.df.index[-1])
counts = []
means = []
stds = []
labels = []
for i in sorted(data):
for j in sorted(data[i]):
counts.append(len(data[i][j]))
means.append(np.mean(data[i][j]))
stds.append(np.std(data[i][j]))
labels.append('{}/{}'.format(i + 1, j + 1))
xs = np.arange(len(means))
means = np.array(means)
stds = np.array(stds)
with plots.plot() as ax:
ax.plot(xs, means, color='#111111')
ax.fill_between(xs, means - stds, means + stds, color='#111111', alpha=0.7, lw=0)
ax.set_xticks(xs)
ax.set_xticklabels(labels)
if __name__ == '__main__':
climate.call(main)
|
<commit_before><commit_msg>Add plot for trial durations, needs more work!<commit_after>
|
import climate
import collections
import lmj.plot
import numpy as np
import database
import plots
@climate.annotate(
root='load experiment data from this directory',
pattern=('plot data from files matching this pattern', 'option'),
)
def main(root, pattern='*.csv.gz'):
data = collections.defaultdict(lambda: collections.defaultdict(list))
for s in database.Experiment(root).subjects:
for i, b in enumerate(s.blocks):
for j, t in enumerate(b.trials):
t.load()
data[i][j].append(t.df.index[-1])
counts = []
means = []
stds = []
labels = []
for i in sorted(data):
for j in sorted(data[i]):
counts.append(len(data[i][j]))
means.append(np.mean(data[i][j]))
stds.append(np.std(data[i][j]))
labels.append('{}/{}'.format(i + 1, j + 1))
xs = np.arange(len(means))
means = np.array(means)
stds = np.array(stds)
with plots.plot() as ax:
ax.plot(xs, means, color='#111111')
ax.fill_between(xs, means - stds, means + stds, color='#111111', alpha=0.7, lw=0)
ax.set_xticks(xs)
ax.set_xticklabels(labels)
if __name__ == '__main__':
climate.call(main)
|
Add plot for trial durations, needs more work!import climate
import collections
import lmj.plot
import numpy as np
import database
import plots
@climate.annotate(
root='load experiment data from this directory',
pattern=('plot data from files matching this pattern', 'option'),
)
def main(root, pattern='*.csv.gz'):
data = collections.defaultdict(lambda: collections.defaultdict(list))
for s in database.Experiment(root).subjects:
for i, b in enumerate(s.blocks):
for j, t in enumerate(b.trials):
t.load()
data[i][j].append(t.df.index[-1])
counts = []
means = []
stds = []
labels = []
for i in sorted(data):
for j in sorted(data[i]):
counts.append(len(data[i][j]))
means.append(np.mean(data[i][j]))
stds.append(np.std(data[i][j]))
labels.append('{}/{}'.format(i + 1, j + 1))
xs = np.arange(len(means))
means = np.array(means)
stds = np.array(stds)
with plots.plot() as ax:
ax.plot(xs, means, color='#111111')
ax.fill_between(xs, means - stds, means + stds, color='#111111', alpha=0.7, lw=0)
ax.set_xticks(xs)
ax.set_xticklabels(labels)
if __name__ == '__main__':
climate.call(main)
|
<commit_before><commit_msg>Add plot for trial durations, needs more work!<commit_after>import climate
import collections
import lmj.plot
import numpy as np
import database
import plots
@climate.annotate(
root='load experiment data from this directory',
pattern=('plot data from files matching this pattern', 'option'),
)
def main(root, pattern='*.csv.gz'):
data = collections.defaultdict(lambda: collections.defaultdict(list))
for s in database.Experiment(root).subjects:
for i, b in enumerate(s.blocks):
for j, t in enumerate(b.trials):
t.load()
data[i][j].append(t.df.index[-1])
counts = []
means = []
stds = []
labels = []
for i in sorted(data):
for j in sorted(data[i]):
counts.append(len(data[i][j]))
means.append(np.mean(data[i][j]))
stds.append(np.std(data[i][j]))
labels.append('{}/{}'.format(i + 1, j + 1))
xs = np.arange(len(means))
means = np.array(means)
stds = np.array(stds)
with plots.plot() as ax:
ax.plot(xs, means, color='#111111')
ax.fill_between(xs, means - stds, means + stds, color='#111111', alpha=0.7, lw=0)
ax.set_xticks(xs)
ax.set_xticklabels(labels)
if __name__ == '__main__':
climate.call(main)
|
|
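A self-contained sketch of the mean±std band drawn in the record above, using synthetic data so it runs without the experiment files:
import matplotlib.pyplot as plt
import numpy as np

rng = np.random.default_rng(0)
xs = np.arange(20)
samples = rng.normal(loc=10.0, scale=2.0, size=(50, xs.size))  # 50 fake trials

means = samples.mean(axis=0)
stds = samples.std(axis=0)

fig, ax = plt.subplots()
ax.plot(xs, means, color='#111111')
ax.fill_between(xs, means - stds, means + stds, color='#111111', alpha=0.7, lw=0)
ax.set_xlabel('block/trial')
ax.set_ylabel('duration')
plt.show()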
cb7a1654a3bf606a74c8f2cdbdceb4cd4a27d24c
|
tests/test_status_codes.py
|
tests/test_status_codes.py
|
from . import TheInternetTestCase
from helium.api import Link, find_all
from httplib import HTTPConnection
from urlparse import urlparse
class StatusCodesTest(TheInternetTestCase):
def get_page(self):
return "http://the-internet.herokuapp.com/status_codes"
def test_status_code_200(self):
self._test_status_code(200)
def test_status_code_301(self):
self._test_status_code(301)
def test_status_code_404(self):
self._test_status_code(404)
def test_status_code_500(self):
self._test_status_code(500)
def _test_status_code(self, status_code):
self.assertEqual(
self._get_status_code(
"http://the-internet.herokuapp.com/" + Link(str(status_code)).href
), status_code
)
def _get_status_code(self, url):
parsed_url = urlparse(url)
try:
conn = HTTPConnection(parsed_url.netloc)
conn.request("HEAD", parsed_url.path)
return conn.getresponse().status
except StandardError:
return None
|
Add test case for status codes.
|
Add test case for status codes.
|
Python
|
mit
|
bugfree-software/the-internet-solution-python
|
Add test case for status codes.
|
from . import TheInternetTestCase
from helium.api import Link, find_all
from httplib import HTTPConnection
from urlparse import urlparse
class StatusCodesTest(TheInternetTestCase):
def get_page(self):
return "http://the-internet.herokuapp.com/status_codes"
def test_status_code_200(self):
self._test_status_code(200)
def test_status_code_301(self):
self._test_status_code(301)
def test_status_code_404(self):
self._test_status_code(404)
def test_status_code_500(self):
self._test_status_code(500)
def _test_status_code(self, status_code):
self.assertEqual(
self._get_status_code(
"http://the-internet.herokuapp.com/" + Link(str(status_code)).href
), status_code
)
def _get_status_code(self, url):
parsed_url = urlparse(url)
try:
conn = HTTPConnection(parsed_url.netloc)
conn.request("HEAD", parsed_url.path)
return conn.getresponse().status
except StandardError:
return None
|
<commit_before><commit_msg>Add test case for status codes.<commit_after>
|
from . import TheInternetTestCase
from helium.api import Link, find_all
from httplib import HTTPConnection
from urlparse import urlparse
class StatusCodesTest(TheInternetTestCase):
def get_page(self):
return "http://the-internet.herokuapp.com/status_codes"
def test_status_code_200(self):
self._test_status_code(200)
def test_status_code_301(self):
self._test_status_code(301)
def test_status_code_404(self):
self._test_status_code(404)
def test_status_code_500(self):
self._test_status_code(500)
def _test_status_code(self, status_code):
self.assertEqual(
self._get_status_code(
"http://the-internet.herokuapp.com/" + Link(str(status_code)).href
), status_code
)
def _get_status_code(self, url):
parsed_url = urlparse(url)
try:
conn = HTTPConnection(parsed_url.netloc)
conn.request("HEAD", parsed_url.path)
return conn.getresponse().status
except StandardError:
return None
|
Add test case for status codes.from . import TheInternetTestCase
from helium.api import Link, find_all
from httplib import HTTPConnection
from urlparse import urlparse
class StatusCodesTest(TheInternetTestCase):
def get_page(self):
return "http://the-internet.herokuapp.com/status_codes"
def test_status_code_200(self):
self._test_status_code(200)
def test_status_code_301(self):
self._test_status_code(301)
def test_status_code_404(self):
self._test_status_code(404)
def test_status_code_500(self):
self._test_status_code(500)
def _test_status_code(self, status_code):
self.assertEqual(
self._get_status_code(
"http://the-internet.herokuapp.com/" + Link(str(status_code)).href
), status_code
)
def _get_status_code(self, url):
parsed_url = urlparse(url)
try:
conn = HTTPConnection(parsed_url.netloc)
conn.request("HEAD", parsed_url.path)
return conn.getresponse().status
except StandardError:
return None
|
<commit_before><commit_msg>Add test case for status codes.<commit_after>from . import TheInternetTestCase
from helium.api import Link, find_all
from httplib import HTTPConnection
from urlparse import urlparse
class StatusCodesTest(TheInternetTestCase):
def get_page(self):
return "http://the-internet.herokuapp.com/status_codes"
def test_status_code_200(self):
self._test_status_code(200)
def test_status_code_301(self):
self._test_status_code(301)
def test_status_code_404(self):
self._test_status_code(404)
def test_status_code_500(self):
self._test_status_code(500)
def _test_status_code(self, status_code):
self.assertEqual(
self._get_status_code(
"http://the-internet.herokuapp.com/" + Link(str(status_code)).href
), status_code
)
def _get_status_code(self, url):
parsed_url = urlparse(url)
try:
conn = HTTPConnection(parsed_url.netloc)
conn.request("HEAD", parsed_url.path)
return conn.getresponse().status
except StandardError:
return None
|
|
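The helper in the record above uses the Python 2 httplib and urlparse modules. A Python 3 sketch of the same HEAD-request status check:
from http.client import HTTPConnection
from urllib.parse import urlparse

def get_status_code(url):
    parsed = urlparse(url)
    try:
        conn = HTTPConnection(parsed.netloc)
        conn.request('HEAD', parsed.path or '/')
        return conn.getresponse().status
    except OSError:
        return None

print(get_status_code('http://the-internet.herokuapp.com/status_codes/200'))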
e519090181f330f218a155d3e96447c4330913a6
|
wrench/script/gen-many-images.py
|
wrench/script/gen-many-images.py
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
SIZE = 8
with open("../benchmarks/many-images.yaml", "w") as text_file:
text_file.write("root:\n")
text_file.write(" items:\n")
for y in range(0, 64):
yb = SIZE * y
for x in range(0, 128):
xb = SIZE * x
text_file.write(" - image: solid-color({0}, {1}, 0, 255, {2}, {2})\n".format(x, y, SIZE))
text_file.write(" bounds: {0} {1} {2} {2}\n".format(xb, yb, SIZE))
|
Add a python script to generate the many-images.yaml benchmark.
|
Add a python script to generate the many-images.yaml benchmark.
|
Python
|
mpl-2.0
|
servo/webrender,servo/webrender,servo/webrender,servo/webrender,servo/webrender,servo/webrender
|
Add a python script to generate the many-images.yaml benchmark.
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
SIZE = 8
with open("../benchmarks/many-images.yaml", "w") as text_file:
text_file.write("root:\n")
text_file.write(" items:\n")
for y in range(0, 64):
yb = SIZE * y
for x in range(0, 128):
xb = SIZE * x
text_file.write(" - image: solid-color({0}, {1}, 0, 255, {2}, {2})\n".format(x, y, SIZE))
text_file.write(" bounds: {0} {1} {2} {2}\n".format(xb, yb, SIZE))
|
<commit_before><commit_msg>Add a python script to generate the many-images.yaml benchmark.<commit_after>
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
SIZE = 8
with open("../benchmarks/many-images.yaml", "w") as text_file:
text_file.write("root:\n")
text_file.write(" items:\n")
for y in range(0, 64):
yb = SIZE * y
for x in range(0, 128):
xb = SIZE * x
text_file.write(" - image: solid-color({0}, {1}, 0, 255, {2}, {2})\n".format(x, y, SIZE))
text_file.write(" bounds: {0} {1} {2} {2}\n".format(xb, yb, SIZE))
|
Add a python script to generate the many-images.yaml benchmark.# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
SIZE = 8
with open("../benchmarks/many-images.yaml", "w") as text_file:
text_file.write("root:\n")
text_file.write(" items:\n")
for y in range(0, 64):
yb = SIZE * y
for x in range(0, 128):
xb = SIZE * x
text_file.write(" - image: solid-color({0}, {1}, 0, 255, {2}, {2})\n".format(x, y, SIZE))
text_file.write(" bounds: {0} {1} {2} {2}\n".format(xb, yb, SIZE))
|
<commit_before><commit_msg>Add a python script to generate the many-images.yaml benchmark.<commit_after># This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
SIZE = 8
with open("../benchmarks/many-images.yaml", "w") as text_file:
text_file.write("root:\n")
text_file.write(" items:\n")
for y in range(0, 64):
yb = SIZE * y
for x in range(0, 128):
xb = SIZE * x
text_file.write(" - image: solid-color({0}, {1}, 0, 255, {2}, {2})\n".format(x, y, SIZE))
text_file.write(" bounds: {0} {1} {2} {2}\n".format(xb, yb, SIZE))
|
|
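A parameterized variant of the generator in the record above: the 64x128 grid of 8px tiles becomes arguments, and the output path here is an assumption:
def write_many_images(path, rows=64, cols=128, size=8):
    with open(path, 'w') as out:
        out.write('root:\n')
        out.write('  items:\n')
        for y in range(rows):
            for x in range(cols):
                out.write('    - image: solid-color({0}, {1}, 0, 255, {2}, {2})\n'
                          .format(x, y, size))
                out.write('      bounds: {0} {1} {2} {2}\n'
                          .format(size * x, size * y, size))

write_many_images('many-images.yaml')  # 64 * 128 = 8192 items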
34113a2042bdd4bbb6480a7cc7c0d8ff9b6e7586
|
data/load_data.py
|
data/load_data.py
|
import csv
from chemtools.mol_name import get_exact_name
from models import DataPoint
with open("data/data.csv", "r") as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='"')
points = []
for row in reader:
if row == []:
continue
try:
band_gap = row[10]
if band_gap == '---':
band_gap = None
options = row[4]
try:
exact_name = get_exact_name(row[1])
except Exception:
exact_name = None
print row[1]
point = DataPoint(
name=row[1], options=row[4],
occupied=row[5], virtual=row[6],
homo_orbital=row[7], dipole=row[8],
energy=row[9], band_gap=band_gap,
exact_name=exact_name)
point.clean_fields()
points.append(point)
except Exception as e:
print e
DataPoint.objects.bulk_create(points)
|
Add script to load log parse data into the database
|
Add script to load log parse data into the database
|
Python
|
mit
|
crcollins/chemtools-webapp,crcollins/chemtools-webapp,crcollins/chemtools-webapp,crcollins/chemtools-webapp,crcollins/chemtools-webapp
|
Add script to load log parse data into the database
|
import csv
from chemtools.mol_name import get_exact_name
from models import DataPoint
with open("data/data.csv", "r") as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='"')
points = []
for row in reader:
if row == []:
continue
try:
band_gap = row[10]
if band_gap == '---':
band_gap = None
options = row[4]
try:
exact_name = get_exact_name(row[1])
except Exception:
exact_name = None
print row[1]
point = DataPoint(
name=row[1], options=row[4],
occupied=row[5], virtual=row[6],
homo_orbital=row[7], dipole=row[8],
energy=row[9], band_gap=band_gap,
exact_name=exact_name)
point.clean_fields()
points.append(point)
except Exception as e:
print e
DataPoint.objects.bulk_create(points)
|
<commit_before><commit_msg>Add script to load log parse data into the database<commit_after>
|
import csv
from chemtools.mol_name import get_exact_name
from models import DataPoint
with open("data/data.csv", "r") as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='"')
points = []
for row in reader:
if row == []:
continue
try:
band_gap = row[10]
if band_gap == '---':
band_gap = None
options = row[4]
try:
exact_name = get_exact_name(row[1])
except Exception:
exact_name = None
print row[1]
point = DataPoint(
name=row[1], options=row[4],
occupied=row[5], virtual=row[6],
homo_orbital=row[7], dipole=row[8],
energy=row[9], band_gap=band_gap,
exact_name=exact_name)
point.clean_fields()
points.append(point)
except Exception as e:
print e
DataPoint.objects.bulk_create(points)
|
Add script to load log parse data into the databaseimport csv
from chemtools.mol_name import get_exact_name
from models import DataPoint
with open("data/data.csv", "r") as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='"')
points = []
for row in reader:
if row == []:
continue
try:
band_gap = row[10]
if band_gap == '---':
band_gap = None
options = row[4]
try:
exact_name = get_exact_name(row[1])
except Exception:
exact_name = None
print row[1]
point = DataPoint(
name=row[1], options=row[4],
occupied=row[5], virtual=row[6],
homo_orbital=row[7], dipole=row[8],
energy=row[9], band_gap=band_gap,
exact_name=exact_name)
point.clean_fields()
points.append(point)
except Exception as e:
print e
DataPoint.objects.bulk_create(points)
|
<commit_before><commit_msg>Add script to load log parse data into the database<commit_after>import csv
from chemtools.mol_name import get_exact_name
from models import DataPoint
with open("data/data.csv", "r") as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='"')
points = []
for row in reader:
if row == []:
continue
try:
band_gap = row[10]
if band_gap == '---':
band_gap = None
options = row[4]
try:
exact_name = get_exact_name(row[1])
except Exception:
exact_name = None
print row[1]
point = DataPoint(
name=row[1], options=row[4],
occupied=row[5], virtual=row[6],
homo_orbital=row[7], dipole=row[8],
energy=row[9], band_gap=band_gap,
exact_name=exact_name)
point.clean_fields()
points.append(point)
except Exception as e:
print e
DataPoint.objects.bulk_create(points)
|
|
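bulk_create in the record above sends the whole list in one statement; for large CSV files Django's batch_size argument keeps each INSERT bounded. A one-line sketch, assuming the same DataPoint model and points list:
# Insert in chunks of 500 rows per statement instead of one giant INSERT.
DataPoint.objects.bulk_create(points, batch_size=500)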
0f7ae57f269fdce2691358a9a51a3f9e0bcaace0
|
config/trace_pox_l2_learning.py
|
config/trace_pox_l2_learning.py
|
from config.experiment_config_lib import ControllerConfig
from sts.topology import StarTopology, BufferedPatchPanel, MeshTopology, GridTopology, BinaryLeafTreeTopology
from sts.controller_manager import UserSpaceControllerPatchPanel
from sts.control_flow.fuzzer import Fuzzer
from sts.control_flow.interactive import Interactive
from sts.input_traces.input_logger import InputLogger
from sts.simulation_state import SimulationConfig
from sts.happensbefore.hb_logger import HappensBeforeLogger
from config.application_events import AppCircuitPusher
# Use POX as our controller
start_cmd = ('''./pox.py --verbose '''
'''forwarding.l2_learning '''
'''openflow.of_01 --address=__address__ --port=__port__ ''')
controllers = [ControllerConfig(start_cmd, cwd="pox/")]
num = 2
topology_class = StarTopology
topology_params = "num_hosts=%d" % num
# topology_class = MeshTopology
# topology_params = "num_switches=%d" % num
# topology_class = GridTopology
# topology_params = "num_rows=3, num_columns=3"
# Where should the output files be written to
results_dir = "traces/trace_pox_hb_l2_learning-%s%d" % (topology_class.__name__, num)
apps = None
# include all defaults
simulation_config = SimulationConfig(controller_configs=controllers,
topology_class=topology_class,
topology_params=topology_params,
patch_panel_class=BufferedPatchPanel,
controller_patch_panel_class=UserSpaceControllerPatchPanel,
dataplane_trace=None,
snapshot_service=None,
multiplex_sockets=False,
violation_persistence_threshold=None,
kill_controllers_on_exit=True,
interpose_on_controllers=False,
ignore_interposition=False,
hb_logger_class=HappensBeforeLogger,
hb_logger_params=results_dir,
apps=apps)
# Manual, interactive mode
# control_flow = Interactive(simulation_config, input_logger=InputLogger())
control_flow = Fuzzer(simulation_config,
input_logger=InputLogger(),
initialization_rounds=20,
send_all_to_all=False,
check_interval=10,
delay=0.1,
halt_on_violation=True,
steps=100,
# invariant_check_name="check_everything",
invariant_check_name="InvariantChecker.check_liveness",
apps=apps)
|
Add trace config for pox l2_learning switch
|
Add trace config for pox l2_learning switch
|
Python
|
apache-2.0
|
jmiserez/sts,jmiserez/sts
|
Add trace config for pox l2_learning switch
|
from config.experiment_config_lib import ControllerConfig
from sts.topology import StarTopology, BufferedPatchPanel, MeshTopology, GridTopology, BinaryLeafTreeTopology
from sts.controller_manager import UserSpaceControllerPatchPanel
from sts.control_flow.fuzzer import Fuzzer
from sts.control_flow.interactive import Interactive
from sts.input_traces.input_logger import InputLogger
from sts.simulation_state import SimulationConfig
from sts.happensbefore.hb_logger import HappensBeforeLogger
from config.application_events import AppCircuitPusher
# Use POX as our controller
start_cmd = ('''./pox.py --verbose '''
'''forwarding.l2_learning '''
'''openflow.of_01 --address=__address__ --port=__port__ ''')
controllers = [ControllerConfig(start_cmd, cwd="pox/")]
num = 2
topology_class = StarTopology
topology_params = "num_hosts=%d" % num
# topology_class = MeshTopology
# topology_params = "num_switches=%d" % num
# topology_class = GridTopology
# topology_params = "num_rows=3, num_columns=3"
# Where should the output files be written to
results_dir = "traces/trace_pox_hb_l2_learning-%s%d" % (topology_class.__name__, num)
apps = None
# include all defaults
simulation_config = SimulationConfig(controller_configs=controllers,
topology_class=topology_class,
topology_params=topology_params,
patch_panel_class=BufferedPatchPanel,
controller_patch_panel_class=UserSpaceControllerPatchPanel,
dataplane_trace=None,
snapshot_service=None,
multiplex_sockets=False,
violation_persistence_threshold=None,
kill_controllers_on_exit=True,
interpose_on_controllers=False,
ignore_interposition=False,
hb_logger_class=HappensBeforeLogger,
hb_logger_params=results_dir,
apps=apps)
# Manual, interactive mode
# control_flow = Interactive(simulation_config, input_logger=InputLogger())
control_flow = Fuzzer(simulation_config,
input_logger=InputLogger(),
initialization_rounds=20,
send_all_to_all=False,
check_interval=10,
delay=0.1,
halt_on_violation=True,
steps=100,
# invariant_check_name="check_everything",
invariant_check_name="InvariantChecker.check_liveness",
apps=apps)
|
<commit_before><commit_msg>Add trace config for pox l2_learning switch<commit_after>
|
from config.experiment_config_lib import ControllerConfig
from sts.topology import StarTopology, BufferedPatchPanel, MeshTopology, GridTopology, BinaryLeafTreeTopology
from sts.controller_manager import UserSpaceControllerPatchPanel
from sts.control_flow.fuzzer import Fuzzer
from sts.control_flow.interactive import Interactive
from sts.input_traces.input_logger import InputLogger
from sts.simulation_state import SimulationConfig
from sts.happensbefore.hb_logger import HappensBeforeLogger
from config.application_events import AppCircuitPusher
# Use POX as our controller
start_cmd = ('''./pox.py --verbose '''
'''forwarding.l2_learning '''
'''openflow.of_01 --address=__address__ --port=__port__ ''')
controllers = [ControllerConfig(start_cmd, cwd="pox/")]
num = 2
topology_class = StarTopology
topology_params = "num_hosts=%d" % num
# topology_class = MeshTopology
# topology_params = "num_switches=%d" % num
# topology_class = GridTopology
# topology_params = "num_rows=3, num_columns=3"
# Where should the output files be written to
results_dir = "traces/trace_pox_hb_l2_learning-%s%d" % (topology_class.__name__, num)
apps = None
# include all defaults
simulation_config = SimulationConfig(controller_configs=controllers,
topology_class=topology_class,
topology_params=topology_params,
patch_panel_class=BufferedPatchPanel,
controller_patch_panel_class=UserSpaceControllerPatchPanel,
dataplane_trace=None,
snapshot_service=None,
multiplex_sockets=False,
violation_persistence_threshold=None,
kill_controllers_on_exit=True,
interpose_on_controllers=False,
ignore_interposition=False,
hb_logger_class=HappensBeforeLogger,
hb_logger_params=results_dir,
apps=apps)
# Manual, interactive mode
# control_flow = Interactive(simulation_config, input_logger=InputLogger())
control_flow = Fuzzer(simulation_config,
input_logger=InputLogger(),
initialization_rounds=20,
send_all_to_all=False,
check_interval=10,
delay=0.1,
halt_on_violation=True,
steps=100,
# invariant_check_name="check_everything",
invariant_check_name="InvariantChecker.check_liveness",
apps=apps)
|
Add trace config for pox l2_learning switchfrom config.experiment_config_lib import ControllerConfig
from sts.topology import StarTopology, BufferedPatchPanel, MeshTopology, GridTopology, BinaryLeafTreeTopology
from sts.controller_manager import UserSpaceControllerPatchPanel
from sts.control_flow.fuzzer import Fuzzer
from sts.control_flow.interactive import Interactive
from sts.input_traces.input_logger import InputLogger
from sts.simulation_state import SimulationConfig
from sts.happensbefore.hb_logger import HappensBeforeLogger
from config.application_events import AppCircuitPusher
# Use POX as our controller
start_cmd = ('''./pox.py --verbose '''
'''forwarding.l2_learning '''
'''openflow.of_01 --address=__address__ --port=__port__ ''')
controllers = [ControllerConfig(start_cmd, cwd="pox/")]
num = 2
topology_class = StarTopology
topology_params = "num_hosts=%d" % num
# topology_class = MeshTopology
# topology_params = "num_switches=%d" % num
# topology_class = GridTopology
# topology_params = "num_rows=3, num_columns=3"
# Where should the output files be written to
results_dir = "traces/trace_pox_hb_l2_learning-%s%d" % (topology_class.__name__, num)
apps = None
# include all defaults
simulation_config = SimulationConfig(controller_configs=controllers,
topology_class=topology_class,
topology_params=topology_params,
patch_panel_class=BufferedPatchPanel,
controller_patch_panel_class=UserSpaceControllerPatchPanel,
dataplane_trace=None,
snapshot_service=None,
multiplex_sockets=False,
violation_persistence_threshold=None,
kill_controllers_on_exit=True,
interpose_on_controllers=False,
ignore_interposition=False,
hb_logger_class=HappensBeforeLogger,
hb_logger_params=results_dir,
apps=apps)
# Manual, interactive mode
# control_flow = Interactive(simulation_config, input_logger=InputLogger())
control_flow = Fuzzer(simulation_config,
input_logger=InputLogger(),
initialization_rounds=20,
send_all_to_all=False,
check_interval=10,
delay=0.1,
halt_on_violation=True,
steps=100,
# invariant_check_name="check_everything",
invariant_check_name="InvariantChecker.check_liveness",
apps=apps)
|
<commit_before><commit_msg>Add trace config for pox l2_learning switch<commit_after>from config.experiment_config_lib import ControllerConfig
from sts.topology import StarTopology, BufferedPatchPanel, MeshTopology, GridTopology, BinaryLeafTreeTopology
from sts.controller_manager import UserSpaceControllerPatchPanel
from sts.control_flow.fuzzer import Fuzzer
from sts.control_flow.interactive import Interactive
from sts.input_traces.input_logger import InputLogger
from sts.simulation_state import SimulationConfig
from sts.happensbefore.hb_logger import HappensBeforeLogger
from config.application_events import AppCircuitPusher
# Use POX as our controller
start_cmd = ('''./pox.py --verbose '''
'''forwarding.l2_learning '''
'''openflow.of_01 --address=__address__ --port=__port__ ''')
controllers = [ControllerConfig(start_cmd, cwd="pox/")]
num = 2
topology_class = StarTopology
topology_params = "num_hosts=%d" % num
# topology_class = MeshTopology
# topology_params = "num_switches=%d" % num
# topology_class = GridTopology
# topology_params = "num_rows=3, num_columns=3"
# Where should the output files be written to
results_dir = "traces/trace_pox_hb_l2_learning-%s%d" % (topology_class.__name__, num)
apps = None
# include all defaults
simulation_config = SimulationConfig(controller_configs=controllers,
topology_class=topology_class,
topology_params=topology_params,
patch_panel_class=BufferedPatchPanel,
controller_patch_panel_class=UserSpaceControllerPatchPanel,
dataplane_trace=None,
snapshot_service=None,
multiplex_sockets=False,
violation_persistence_threshold=None,
kill_controllers_on_exit=True,
interpose_on_controllers=False,
ignore_interposition=False,
hb_logger_class=HappensBeforeLogger,
hb_logger_params=results_dir,
apps=apps)
# Manual, interactive mode
# control_flow = Interactive(simulation_config, input_logger=InputLogger())
control_flow = Fuzzer(simulation_config,
input_logger=InputLogger(),
initialization_rounds=20,
send_all_to_all=False,
check_interval=10,
delay=0.1,
halt_on_violation=True,
steps=100,
# invariant_check_name="check_everything",
invariant_check_name="InvariantChecker.check_liveness",
apps=apps)
|
|
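The __address__ and __port__ tokens in start_cmd are placeholders that the harness fills in before launching POX. A hypothetical illustration of that substitution (render_start_cmd is not part of STS, where the replacement happens inside ControllerConfig; the address and port are example values):

start_cmd_template = ('./pox.py --verbose '
                      'forwarding.l2_learning '
                      'openflow.of_01 --address=__address__ --port=__port__')

def render_start_cmd(template, address, port):
    # Substitute the controller's listen address and port into the template.
    return (template
            .replace('__address__', address)
            .replace('__port__', str(port)))

print(render_start_cmd(start_cmd_template, '127.0.0.1', 6633))
# ./pox.py --verbose forwarding.l2_learning openflow.of_01 --address=127.0.0.1 --port=6633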
c59ccacfa46efd6dde167c5460c4d862e4658061
|
encmass.py
|
encmass.py
|
#!/usr/bin/env python
# -----------------------------------------------------------------------------
# GENHERNQUIST.ENCMASS
# Laura L Watkins [lauralwatkins@gmail.com]
# -----------------------------------------------------------------------------
from numpy import *
from astropy import units as u
from scipy import special
def encmass(r, norm, rs, alpha, beta, gamma):
"""
Enclosed mass profile of a generalised Hernquist model.
INPUTS
r : radial variable (requires unit)
norm : normalisation (requires unit)
rs : scale radius of model (requires unit)
alpha : sharpness of transition between inner and outer
beta : outer logarithmic slope
gamma : inner logarithmic slope
"""
a = (3.-gamma)/alpha
b = (gamma-beta)/alpha
y = (r/rs)**alpha
fn = lambda x: x**a * special.hyp2f1(a, -b, 1+a, -x)/a
encmass = (4*pi*norm*rs**3*fn(y.value)).to(u.Msun)
return encmass
|
Add routine to calculate enclosed mass.
|
Add routine to calculate enclosed mass.
|
Python
|
bsd-2-clause
|
lauralwatkins/genhernquist
|
Add routine to calculate enclosed mass.
|
#!/usr/bin/env python
# -----------------------------------------------------------------------------
# GENHERNQUIST.ENCMASS
# Laura L Watkins [lauralwatkins@gmail.com]
# -----------------------------------------------------------------------------
from numpy import *
from astropy import units as u
from scipy import special
def encmass(r, norm, rs, alpha, beta, gamma):
"""
Enclosed mass profile of a generalised Hernquist model.
INPUTS
r : radial variable (requires unit)
norm : normalisation (requires unit)
rs : scale radius of model (requires unit)
alpha : sharpness of transition between inner and outer
beta : outer logarithmic slope
gamma : inner logarithmic slope
"""
a = (3.-gamma)/alpha
b = (gamma-beta)/alpha
y = (r/rs)**alpha
fn = lambda x: x**a * special.hyp2f1(a, -b, 1+a, -x)/a
encmass = (4*pi*norm*rs**3*fn(y.value)).to(u.Msun)
return encmass
|
<commit_before><commit_msg>Add routine to calculate enclosed mass.<commit_after>
|
#!/usr/bin/env python
# -----------------------------------------------------------------------------
# GENHERNQUIST.ENCMASS
# Laura L Watkins [lauralwatkins@gmail.com]
# -----------------------------------------------------------------------------
from numpy import *
from astropy import units as u
from scipy import special
def encmass(r, norm, rs, alpha, beta, gamma):
"""
Enclosed mass profile of a generalised Hernquist model.
INPUTS
r : radial variable (requires unit)
norm : normalisation (requires unit)
rs : scale radius of model (requires unit)
alpha : sharpness of transition between inner and outer
beta : outer logarithmic slope
gamma : inner logarithmic slope
"""
a = (3.-gamma)/alpha
b = (gamma-beta)/alpha
y = (r/rs)**alpha
fn = lambda x: x**a * special.hyp2f1(a, -b, 1+a, -x)/a
encmass = (4*pi*norm*rs**3*fn(y.value)).to(u.Msun)
return encmass
|
Add routine to calculate enclosed mass.#!/usr/bin/env python
# -----------------------------------------------------------------------------
# GENHERNQUIST.ENCMASS
# Laura L Watkins [lauralwatkins@gmail.com]
# -----------------------------------------------------------------------------
from numpy import *
from astropy import units as u
from scipy import special
def encmass(r, norm, rs, alpha, beta, gamma):
"""
Enclosed mass profile of a generalised Hernquist model.
INPUTS
r : radial variable (requires unit)
norm : normalisation (requires unit)
rs : scale radius of model (requires unit)
alpha : sharpness of transition between inner and outer
beta : outer logarithmic slope
gamma : inner logarithmic slope
"""
a = (3.-gamma)/alpha
b = (gamma-beta)/alpha
y = (r/rs)**alpha
fn = lambda x: x**a * special.hyp2f1(a, -b, 1+a, -x)/a
encmass = (4*pi*norm*rs**3*fn(y.value)).to(u.Msun)
return encmass
|
<commit_before><commit_msg>Add routine to calculate enclosed mass.<commit_after>#!/usr/bin/env python
# -----------------------------------------------------------------------------
# GENHERNQUIST.ENCMASS
# Laura L Watkins [lauralwatkins@gmail.com]
# -----------------------------------------------------------------------------
from numpy import *
from astropy import units as u
from scipy import special
def encmass(r, norm, rs, alpha, beta, gamma):
"""
Enclosed mass profile of a generalised Hernquist model.
INPUTS
r : radial variable (requires unit)
norm : normalisation (requires unit)
rs : scale radius of model (requires unit)
alpha : sharpness of transition between inner and outer
beta : outer logarithmic slope
gamma : inner logarithmic slope
"""
a = (3.-gamma)/alpha
b = (gamma-beta)/alpha
y = (r/rs)**alpha
fn = lambda x: x**a * special.hyp2f1(a, -b, 1+a, -x)/a
encmass = (4*pi*norm*rs**3*fn(y.value)).to(u.Msun)
return encmass
|
|
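The closed form that encmass() evaluates can be written out explicitly. Assuming the usual Zhao-style (alpha, beta, gamma) density parameterisation (an assumption; the record gives only the code), the profile and enclosed mass are:

% Density of the generalised Hernquist (Zhao) model; `norm` plays the role of rho_0.
\rho(r) = \rho_0 \left(\frac{r}{r_s}\right)^{-\gamma}
          \left[1 + \left(\frac{r}{r_s}\right)^{\alpha}\right]^{(\gamma-\beta)/\alpha},
\qquad y \equiv \left(\frac{r}{r_s}\right)^{\alpha}

% Enclosed mass, matching a, b, y and fn in the code line for line.
M(<r) = 4\pi \rho_0 r_s^{3}\,\frac{y^{a}}{a}\,
        {}_2F_1\!\left(a,\,-b;\,1+a;\,-y\right),
\qquad a = \frac{3-\gamma}{\alpha},\quad b = \frac{\gamma-\beta}{\alpha}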
cbd8521891c97c1655a3b89863e1a2170d9edc6b
|
examples/missing_constants.py
|
examples/missing_constants.py
|
import numpy as np
from molml.features import Connectivity
from molml.constants import BOND_LENGTHS
# Currently, there are two recommended ways to work with elements that are not
# included in molml/constants.py. In this example, we will look at an iron
# complex (iron is not in the constants).
# Maybe at some point, molml will include more constants, but it seems outside
# of the scope of this library.
if __name__ == '__main__':
elements = ['Fe', 'H', 'H', 'H', 'H', 'H', 'H']
coords = np.array([
[0., 0., 0.],
[1.46, 0., 0.],
[0., 1.46, 0.],
[0., 0., 1.46],
[-1.46, 0., 0.],
[0., -1.46, 0.],
[0., 0., -1.46],
])
feat = Connectivity(depth=2)
# Notice the warning about missing elements.
print(feat.fit_transform([(elements, coords)]))
# 1) Modify the values in the constants module before your script.
BOND_LENGTHS['Fe'] = {'1': 1.32}
print(feat.fit_transform([(elements, coords)]))
del BOND_LENGTHS['Fe']
# 2) Include connectivity information in your data. The other instances
# where constants are used (electronegativity, element symbols, atomic
# numbers).
connections = {
0: {1: '1', 2: '1', 3: '1', 4: '1', 5: '1', 6: '1'},
1: {0: '1'},
2: {0: '1'},
3: {0: '1'},
4: {0: '1'},
5: {0: '1'},
6: {0: '1'},
}
print(feat.fit_transform([(elements, coords, connections)]))
|
Add an example script with missing constants
|
Add an example script with missing constants
|
Python
|
mit
|
crcollins/molml
|
Add an example script with missing constants
|
import numpy as np
from molml.features import Connectivity
from molml.constants import BOND_LENGTHS
# Currently, there are two recommended ways to work with elements that are not
# included in molml/constants.py. In this example, we will look at an iron
# complex (iron is not in the constants).
# Maybe at some point, molml will include more constants, but it seems outside
# of the scope of this library.
if __name__ == '__main__':
elements = ['Fe', 'H', 'H', 'H', 'H', 'H', 'H']
coords = np.array([
[0., 0., 0.],
[1.46, 0., 0.],
[0., 1.46, 0.],
[0., 0., 1.46],
[-1.46, 0., 0.],
[0., -1.46, 0.],
[0., 0., -1.46],
])
feat = Connectivity(depth=2)
# Notice the warning about missing elements.
print(feat.fit_transform([(elements, coords)]))
# 1) Modify the values in the constants module before your script.
BOND_LENGTHS['Fe'] = {'1': 1.32}
print(feat.fit_transform([(elements, coords)]))
del BOND_LENGTHS['Fe']
# 2) Include connectivity information in your data. The other instances
# where constants are used (electronegativity, element symbols, atomic
# numbers).
connections = {
0: {1: '1', 2: '1', 3: '1', 4: '1', 5: '1', 6: '1'},
1: {0: '1'},
2: {0: '1'},
3: {0: '1'},
4: {0: '1'},
5: {0: '1'},
6: {0: '1'},
}
print(feat.fit_transform([(elements, coords, connections)]))
|
<commit_before><commit_msg>Add an example script with missing constants<commit_after>
|
import numpy as np
from molml.features import Connectivity
from molml.constants import BOND_LENGTHS
# Currently, there are two recommended ways to work with elements that are not
# included in molml/constants.py. In this example, we will look at an iron
# complex (iron is not in the constants).
# Maybe at some point, molml will include more constants, but it seems outside
# of the scope of this library.
if __name__ == '__main__':
elements = ['Fe', 'H', 'H', 'H', 'H', 'H', 'H']
coords = np.array([
[0., 0., 0.],
[1.46, 0., 0.],
[0., 1.46, 0.],
[0., 0., 1.46],
[-1.46, 0., 0.],
[0., -1.46, 0.],
[0., 0., -1.46],
])
feat = Connectivity(depth=2)
# Notice the warning about missing elements.
print(feat.fit_transform([(elements, coords)]))
# 1) Modify the values in the constants module before your script.
BOND_LENGTHS['Fe'] = {'1': 1.32}
print(feat.fit_transform([(elements, coords)]))
del BOND_LENGTHS['Fe']
# 2) Include connectivity information in your data. The other instances
# where constants are used (electronegativity, element symbols, atomic
# numbers).
connections = {
0: {1: '1', 2: '1', 3: '1', 4: '1', 5: '1', 6: '1'},
1: {0: '1'},
2: {0: '1'},
3: {0: '1'},
4: {0: '1'},
5: {0: '1'},
6: {0: '1'},
}
print(feat.fit_transform([(elements, coords, connections)]))
|
Add an example script with missing constantsimport numpy as np
from molml.features import Connectivity
from molml.constants import BOND_LENGTHS
# Currently, there are two recommended ways to work with elements that are not
# included in molml/constants.py. In this example, we will look at an iron
# complex (iron is not in the constants).
# Maybe at some point, molml will include more constants, but it seems outside
# of the scope of this library.
if __name__ == '__main__':
elements = ['Fe', 'H', 'H', 'H', 'H', 'H', 'H']
coords = np.array([
[0., 0., 0.],
[1.46, 0., 0.],
[0., 1.46, 0.],
[0., 0., 1.46],
[-1.46, 0., 0.],
[0., -1.46, 0.],
[0., 0., -1.46],
])
feat = Connectivity(depth=2)
# Notice the warning about missing elements.
print(feat.fit_transform([(elements, coords)]))
# 1) Modify the values in the constants module before your script.
BOND_LENGTHS['Fe'] = {'1': 1.32}
print(feat.fit_transform([(elements, coords)]))
del BOND_LENGTHS['Fe']
# 2) Include connectivity information in your data. The other instances
# where constants are used (electronegativity, element symbols, atomic
# numbers).
connections = {
0: {1: '1', 2: '1', 3: '1', 4: '1', 5: '1', 6: '1'},
1: {0: '1'},
2: {0: '1'},
3: {0: '1'},
4: {0: '1'},
5: {0: '1'},
6: {0: '1'},
}
print(feat.fit_transform([(elements, coords, connections)]))
|
<commit_before><commit_msg>Add an example script with missing constants<commit_after>import numpy as np
from molml.features import Connectivity
from molml.constants import BOND_LENGTHS
# Currently, there are two recommended ways to work with elements that are not
# included in molml/constants.py. In this example, we will look at an iron
# complex (iron is not in the constants).
# Maybe at some point, molml will include more constants, but it seems outside
# of the scope of this library.
if __name__ == '__main__':
elements = ['Fe', 'H', 'H', 'H', 'H', 'H', 'H']
coords = np.array([
[0., 0., 0.],
[1.46, 0., 0.],
[0., 1.46, 0.],
[0., 0., 1.46],
[-1.46, 0., 0.],
[0., -1.46, 0.],
[0., 0., -1.46],
])
feat = Connectivity(depth=2)
# Notice the warning about missing elements.
print(feat.fit_transform([(elements, coords)]))
# 1) Modify the values in the constants module before your script.
BOND_LENGTHS['Fe'] = {'1': 1.32}
print(feat.fit_transform([(elements, coords)]))
del BOND_LENGTHS['Fe']
# 2) Include connectivity information in your data. The other instances
# where constants are used (electronegativity, element symbols, atomic
# numbers).
connections = {
0: {1: '1', 2: '1', 3: '1', 4: '1', 5: '1', 6: '1'},
1: {0: '1'},
2: {0: '1'},
3: {0: '1'},
4: {0: '1'},
5: {0: '1'},
6: {0: '1'},
}
print(feat.fit_transform([(elements, coords, connections)]))
|
|
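For stacks where writing the connections dict by hand is impractical, it can be generated from the coordinates with a simple distance cutoff. A hypothetical helper (build_connections and the 1.6 Angstrom default are not part of molml; the dict-of-dicts layout matches the example above):

import numpy as np

def build_connections(coords, cutoff=1.6):
    # Symmetric single-bond ('1') entries for every atom pair closer
    # than `cutoff`, in the layout the Connectivity example expects.
    n = len(coords)
    connections = {i: {} for i in range(n)}
    for i in range(n):
        for j in range(i + 1, n):
            if np.linalg.norm(coords[i] - coords[j]) < cutoff:
                connections[i][j] = '1'
                connections[j][i] = '1'
    return connections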
ce0aee46e26f10d247ffcfb15b62df77f507257f
|
software/control/src/qp_controller_input_snooper.py
|
software/control/src/qp_controller_input_snooper.py
|
#!/usr/bin/python
''' Listens to QP Controller Inputs and draws, in different but
order-consistent colors, the cubic splines being followed by each
body motion block. '''
import lcm
import drc
from drake import lcmt_qp_controller_input, lcmt_body_motion_data
import sys
import time
from bot_lcmgl import lcmgl, GL_LINES
import numpy as np
color_order = [[1.0, 0.1, 0.1], [0.1, 1.0, 0.1], [0.1, 0.1, 1.0], [1.0, 1.0, 0.1], [1.0, 0.1, 1.0], [0.1, 1.0, 1.0]];
def pval(coefs, t_off):
out = np.array([0.0]*6)
for j in range(0, 6):
out[j] = coefs[j, 0]*(t_off**3.0) + coefs[j, 1]*(t_off**2.0) + coefs[j, 2]*t_off + coefs[j, 3]
return out
def handle_qp_controller_input_msg(channel, data):
msg = lcmt_qp_controller_input.decode(data)
#print("received")
# draw spline segment for each tracked body
for i in range(0, msg.num_tracked_bodies):
bmd = msg.body_motion_data[i]
ts = bmd.ts;
tsdense = np.linspace(ts[0], ts[-1], 20);
coefs = np.array(bmd.coefs);
color = color_order[i%len(color_order)];
gl.glColor3f(color[0], color[1], color[2]);
gl.glLineWidth(5);
gl.glBegin(GL_LINES);
ps = np.array([pval(coefs, t-ts[0]) for t in tsdense]);
for j in range(0,tsdense.size-1):
gl.glVertex3f(ps[j,0], ps[j,1], ps[j,2]);
gl.glVertex3f(ps[j+1,0], ps[j+1,1], ps[j+1,2]);
gl.glEnd();
gl.switch_buffer()
lc = lcm.LCM()
gl = lcmgl('qp input bmd snoop', lc);
subscription = lc.subscribe("QP_CONTROLLER_INPUT", handle_qp_controller_input_msg)
subscription.set_queue_capacity(1);
try:
while True:
lc.handle()
except KeyboardInterrupt:
pass
|
Add qp controller input snooper
|
Add qp controller input snooper
|
Python
|
bsd-3-clause
|
openhumanoids/oh-distro,openhumanoids/oh-distro,openhumanoids/oh-distro,openhumanoids/oh-distro,openhumanoids/oh-distro,openhumanoids/oh-distro,openhumanoids/oh-distro,openhumanoids/oh-distro
|
Add qp controller input snooper
|
#!/usr/bin/python
''' Listens to QP Controller Inputs and draws, in different but
order-consistent colors, the cubic splines being followed by each
body motion block. '''
import lcm
import drc
from drake import lcmt_qp_controller_input, lcmt_body_motion_data
import sys
import time
from bot_lcmgl import lcmgl, GL_LINES
import numpy as np
color_order = [[1.0, 0.1, 0.1], [0.1, 1.0, 0.1], [0.1, 0.1, 1.0], [1.0, 1.0, 0.1], [1.0, 0.1, 1.0], [0.1, 1.0, 1.0]];
def pval(coefs, t_off):
out = np.array([0.0]*6)
for j in range(0, 6):
out[j] = coefs[j, 0]*(t_off**3.0) + coefs[j, 1]*(t_off**2.0) + coefs[j, 2]*t_off + coefs[j, 3]
return out
def handle_qp_controller_input_msg(channel, data):
msg = lcmt_qp_controller_input.decode(data)
#print("received")
# draw spline segment for each tracked body
for i in range(0, msg.num_tracked_bodies):
bmd = msg.body_motion_data[i]
ts = bmd.ts;
tsdense = np.linspace(ts[0], ts[-1], 20);
coefs = np.array(bmd.coefs);
color = color_order[i%len(color_order)];
gl.glColor3f(color[0], color[1], color[2]);
gl.glLineWidth(5);
gl.glBegin(GL_LINES);
ps = np.array([pval(coefs, t-ts[0]) for t in tsdense]);
for j in range(0,tsdense.size-1):
gl.glVertex3f(ps[j,0], ps[j,1], ps[j,2]);
gl.glVertex3f(ps[j+1,0], ps[j+1,1], ps[j+1,2]);
gl.glEnd();
gl.switch_buffer()
lc = lcm.LCM()
gl = lcmgl('qp input bmd snoop', lc);
subscription = lc.subscribe("QP_CONTROLLER_INPUT", handle_qp_controller_input_msg)
subscription.set_queue_capacity(1);
try:
while True:
lc.handle()
except KeyboardInterrupt:
pass
|
<commit_before><commit_msg>Add qp controller input snooper<commit_after>
|
#!/usr/bin/python
''' Listens to QP Controller Inputs and draws, in different but
order-consistent colors, the cubic splines being followed by each
body motion block. '''
import lcm
import drc
from drake import lcmt_qp_controller_input, lcmt_body_motion_data
import sys
import time
from bot_lcmgl import lcmgl, GL_LINES
import numpy as np
color_order = [[1.0, 0.1, 0.1], [0.1, 1.0, 0.1], [0.1, 0.1, 1.0], [1.0, 1.0, 0.1], [1.0, 0.1, 1.0], [0.1, 1.0, 1.0]];
def pval(coefs, t_off):
out = np.array([0.0]*6)
for j in range(0, 6):
out[j] = coefs[j, 0]*(t_off**3.0) + coefs[j, 1]*(t_off**2.0) + coefs[j, 2]*t_off + coefs[j, 3]
return out
def handle_qp_controller_input_msg(channel, data):
msg = lcmt_qp_controller_input.decode(data)
#print("received")
# draw spline segment for each tracked body
for i in range(0, msg.num_tracked_bodies):
bmd = msg.body_motion_data[i]
ts = bmd.ts;
tsdense = np.linspace(ts[0], ts[-1], 20);
coefs = np.array(bmd.coefs);
color = color_order[i%len(color_order)];
gl.glColor3f(color[0], color[1], color[2]);
gl.glLineWidth(5);
gl.glBegin(GL_LINES);
ps = np.array([pval(coefs, t-ts[0]) for t in tsdense]);
for j in range(0,tsdense.size-1):
gl.glVertex3f(ps[j,0], ps[j,1], ps[j,2]);
gl.glVertex3f(ps[j+1,0], ps[j+1,1], ps[j+1,2]);
gl.glEnd();
gl.switch_buffer()
lc = lcm.LCM()
gl = lcmgl('qp input bmd snoop', lc);
subscription = lc.subscribe("QP_CONTROLLER_INPUT", handle_qp_controller_input_msg)
subscription.set_queue_capacity(1);
try:
while True:
lc.handle()
except KeyboardInterrupt:
pass
|
Add qp controller input snooper#!/usr/bin/python
''' Listens to QP Controller Inputs and draws, in different but
order-consistent colors, the cubic splines being followed by each
body motion block. '''
import lcm
import drc
from drake import lcmt_qp_controller_input, lcmt_body_motion_data
import sys
import time
from bot_lcmgl import lcmgl, GL_LINES
import numpy as np
color_order = [[1.0, 0.1, 0.1], [0.1, 1.0, 0.1], [0.1, 0.1, 1.0], [1.0, 1.0, 0.1], [1.0, 0.1, 1.0], [0.1, 1.0, 1.0]];
def pval(coefs, t_off):
out = np.array([0.0]*6)
for j in range(0, 6):
out[j] = coefs[j, 0]*(t_off**3.0) + coefs[j, 1]*(t_off**2.0) + coefs[j, 2]*t_off + coefs[j, 3]
return out
def handle_qp_controller_input_msg(channel, data):
msg = lcmt_qp_controller_input.decode(data)
#print("received")
# draw spline segment for each tracked body
for i in range(0, msg.num_tracked_bodies):
bmd = msg.body_motion_data[i]
ts = bmd.ts;
tsdense = np.linspace(ts[0], ts[-1], 20);
coefs = np.array(bmd.coefs);
color = color_order[i%len(color_order)];
gl.glColor3f(color[0], color[1], color[2]);
gl.glLineWidth(5);
gl.glBegin(GL_LINES);
ps = np.array([pval(coefs, t-ts[0]) for t in tsdense]);
for j in range(0,tsdense.size-1):
gl.glVertex3f(ps[j,0], ps[j,1], ps[j,2]);
gl.glVertex3f(ps[j+1,0], ps[j+1,1], ps[j+1,2]);
gl.glEnd();
gl.switch_buffer()
lc = lcm.LCM()
gl = lcmgl('qp input bmd snoop', lc);
subscription = lc.subscribe("QP_CONTROLLER_INPUT", handle_qp_controller_input_msg)
subscription.set_queue_capacity(1);
try:
while True:
lc.handle()
except KeyboardInterrupt:
pass
|
<commit_before><commit_msg>Add qp controller input snooper<commit_after>#!/usr/bin/python
''' Listens to QP Controller Inputs and draws, in different but
order-consistent colors, the cubic splines being followed by each
body motion block. '''
import lcm
import drc
from drake import lcmt_qp_controller_input, lcmt_body_motion_data
import sys
import time
from bot_lcmgl import lcmgl, GL_LINES
import numpy as np
color_order = [[1.0, 0.1, 0.1], [0.1, 1.0, 0.1], [0.1, 0.1, 1.0], [1.0, 1.0, 0.1], [1.0, 0.1, 1.0], [0.1, 1.0, 1.0]];
def pval(coefs, t_off):
out = np.array([0.0]*6)
for j in range(0, 6):
out[j] = coefs[j, 0]*(t_off**3.0) + coefs[j, 1]*(t_off**2.0) + coefs[j, 2]*t_off + coefs[j, 3]
return out
def handle_qp_controller_input_msg(channel, data):
msg = lcmt_qp_controller_input.decode(data)
#print("received")
# draw spline segment for each tracked body
for i in range(0, msg.num_tracked_bodies):
bmd = msg.body_motion_data[i]
ts = bmd.ts;
tsdense = np.linspace(ts[0], ts[-1], 20);
coefs = np.array(bmd.coefs);
color = color_order[i%len(color_order)];
gl.glColor3f(color[0], color[1], color[2]);
gl.glLineWidth(5);
gl.glBegin(GL_LINES);
ps = np.array([pval(coefs, t-ts[0]) for t in tsdense]);
for j in range(0,tsdense.size-1):
gl.glVertex3f(ps[j,0], ps[j,1], ps[j,2]);
gl.glVertex3f(ps[j+1,0], ps[j+1,1], ps[j+1,2]);
gl.glEnd();
gl.switch_buffer()
lc = lcm.LCM()
gl = lcmgl('qp input bmd snoop', lc);
subscription = lc.subscribe("QP_CONTROLLER_INPUT", handle_qp_controller_input_msg)
subscription.set_queue_capacity(1);
try:
while True:
lc.handle()
except KeyboardInterrupt:
pass
|
|
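pval() above evaluates a cubic whose coefficients are stored highest order first (coefs[j,0]*t**3 + ... + coefs[j,3]), which is exactly numpy.polyval's convention. An equivalent sketch, assuming a (6, 4) coefficient array as in the handler:

import numpy as np

def pval_np(coefs, t_off):
    # Each row of coefs is [c3, c2, c1, c0]; np.polyval uses the same
    # highest-order-first layout, so no reordering is needed.
    return np.array([np.polyval(coefs[j, :4], t_off) for j in range(6)])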
0ebfe4a0777850aa851c7d7bc0f642d692a1515a
|
2016/qualification_round/revenge_of_the_pancakes.py
|
2016/qualification_round/revenge_of_the_pancakes.py
|
#!/usr/bin/env python
# Google Code Jam
# Google Code Jam 2016
# Qualification Round 2016
# Problem B. Revenge of the Pancakes
# Solved all test sets
from __future__ import print_function
def calc_min_flip_step(s):
grouped_height = 1 + s.count('-+') + s.count('+-')
if s.endswith('-'):
return grouped_height
else:
return grouped_height - 1
if __name__ == '__main__':
import os
samples = [
'-',
'-+',
'+-',
'+++',
'--+-'
]
for sample in samples:
print(calc_min_flip_step(sample))
data_files = ['B-small-practice',
'B-large-practice']
for f in data_files:
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
'{0}.in'.format(f)), 'r') as input_file:
lines = input_file.readlines()
input_count = int(lines[0].replace('\n' ,''))
inputs = [line.replace('\n', '') for line in lines[1:]]
i = 1
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
'{0}.out'.format(f)), 'w') as output_file:
for in_ in inputs:
output_file.write('Case #{0}: {1}\n'.format(i, calc_min_flip_step(in_)))
i += 1
|
Add revenge of the pancakes
|
Add revenge of the pancakes
|
Python
|
apache-2.0
|
laichunpongben/CodeJam
|
Add revenge of the pancakes
|
#!/usr/bin/env python
# Google Code Jam
# Google Code Jam 2016
# Qualification Round 2016
# Problem B. Revenge of the Pancakes
# Solved all test sets
from __future__ import print_function
def calc_min_flip_step(s):
grouped_height = 1 + s.count('-+') + s.count('+-')
if s.endswith('-'):
return grouped_height
else:
return grouped_height - 1
if __name__ == '__main__':
import os
samples = [
'-',
'-+',
'+-',
'+++',
'--+-'
]
for sample in samples:
print(calc_min_flip_step(sample))
data_files = ['B-small-practice',
'B-large-practice']
for f in data_files:
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
'{0}.in'.format(f)), 'r') as input_file:
lines = input_file.readlines()
input_count = int(lines[0].replace('\n' ,''))
inputs = [line.replace('\n', '') for line in lines[1:]]
i = 1
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
'{0}.out'.format(f)), 'w') as output_file:
for in_ in inputs:
output_file.write('Case #{0}: {1}\n'.format(i, calc_min_flip_step(in_)))
i += 1
|
<commit_before><commit_msg>Add revenge of the pancakes<commit_after>
|
#!/usr/bin/env python
# Google Code Jam
# Google Code Jam 2016
# Qualification Round 2016
# Problem B. Revenge of the Pancakes
# Solved all test sets
from __future__ import print_function
def calc_min_flip_step(s):
grouped_height = 1 + s.count('-+') + s.count('+-')
if s.endswith('-'):
return grouped_height
else:
return grouped_height - 1
if __name__ == '__main__':
import os
samples = [
'-',
'-+',
'+-',
'+++',
'--+-'
]
for sample in samples:
print(calc_min_flip_step(sample))
data_files = ['B-small-practice',
'B-large-practice']
for f in data_files:
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
'{0}.in'.format(f)), 'r') as input_file:
lines = input_file.readlines()
input_count = int(lines[0].replace('\n' ,''))
inputs = [line.replace('\n', '') for line in lines[1:]]
i = 1
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
'{0}.out'.format(f)), 'w') as output_file:
for in_ in inputs:
output_file.write('Case #{0}: {1}\n'.format(i, calc_min_flip_step(in_)))
i += 1
|
Add revenge of the pancakes#!/usr/bin/env python
# Google Code Jam
# Google Code Jam 2016
# Qualification Round 2016
# Problem B. Revenge of the Pancakes
# Solved all test sets
from __future__ import print_function
def calc_min_flip_step(s):
grouped_height = 1 + s.count('-+') + s.count('+-')
if s.endswith('-'):
return grouped_height
else:
return grouped_height - 1
if __name__ == '__main__':
import os
samples = [
'-',
'-+',
'+-',
'+++',
'--+-'
]
for sample in samples:
print(calc_min_flip_step(sample))
data_files = ['B-small-practice',
'B-large-practice']
for f in data_files:
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
'{0}.in'.format(f)), 'r') as input_file:
lines = input_file.readlines()
input_count = int(lines[0].replace('\n' ,''))
inputs = [line.replace('\n', '') for line in lines[1:]]
i = 1
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
'{0}.out'.format(f)), 'w') as output_file:
for in_ in inputs:
output_file.write('Case #{0}: {1}\n'.format(i, calc_min_flip_step(in_)))
i += 1
|
<commit_before><commit_msg>Add revenge of the pancakes<commit_after>#!/usr/bin/env python
# Google Code Jam
# Google Code Jam 2016
# Qualification Round 2016
# Problem B. Revenge of the Pancakes
# Solved all test sets
from __future__ import print_function
def calc_min_flip_step(s):
grouped_height = 1 + s.count('-+') + s.count('+-')
if s.endswith('-'):
return grouped_height
else:
return grouped_height - 1
if __name__ == '__main__':
import os
samples = [
'-',
'-+',
'+-',
'+++',
'--+-'
]
for sample in samples:
print(calc_min_flip_step(sample))
data_files = ['B-small-practice',
'B-large-practice']
for f in data_files:
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
'{0}.in'.format(f)), 'r') as input_file:
lines = input_file.readlines()
input_count = int(lines[0].replace('\n' ,''))
inputs = [line.replace('\n', '') for line in lines[1:]]
i = 1
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
'{0}.out'.format(f)), 'w') as output_file:
for in_ in inputs:
output_file.write('Case #{0}: {1}\n'.format(i, calc_min_flip_step(in_)))
i += 1
|
|
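The greedy formula above counts runs of consecutive equal symbols (1 plus the number of '-+' and '+-' boundaries): each flip can remove at most one boundary, and a stack already ending in '+' saves the final flip. A brute-force BFS cross-check for small stacks (brute_min_flips and flip are mine, not part of the solution; calc_min_flip_step is copied from the record):

from collections import deque

def calc_min_flip_step(s):  # copied from the record above
    grouped_height = 1 + s.count('-+') + s.count('+-')
    return grouped_height if s.endswith('-') else grouped_height - 1

def flip(s, k):
    # Flip the top k pancakes: reverse their order and toggle each side.
    return s[:k][::-1].translate(str.maketrans('+-', '-+')) + s[k:]

def brute_min_flips(s):
    goal = '+' * len(s)
    seen, queue = {s}, deque([(s, 0)])
    while queue:
        cur, d = queue.popleft()
        if cur == goal:
            return d
        for k in range(1, len(cur) + 1):
            nxt = flip(cur, k)
            if nxt not in seen:
                seen.add(nxt)
                queue.append((nxt, d + 1))

assert all(brute_min_flips(s) == calc_min_flip_step(s)
           for s in ['-', '-+', '+-', '+++', '--+-'])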
04d795a6f51a701e522bfbad86b948f1efcb3b68
|
globaleaks/tests/test_jobs.py
|
globaleaks/tests/test_jobs.py
|
from twisted.internet.defer import inlineCallbacks
from twisted.trial import unittest
from globaleaks.settings import transact
from globaleaks.jobs import delivery_sched
from globaleaks.tests import helpers
from globaleaks.handlers.receiver import get_receiver_tip_list
from globaleaks.handlers.submission import finalize_submission
class TestJobs(helpers.TestGL):
@inlineCallbacks
def setUp(self):
self.setUp_dummy()
yield self.initalize_db()
@inlineCallbacks
def test_tip_creation(self):
yield finalize_submission(self.dummySubmission['submission_gus'])
yield delivery_sched.tip_creation()
receiver_tips = yield get_receiver_tip_list(self.dummyReceiver['username'])
expected_keys = ['access_counter', 'creation_date', 'expressed_pertinence', 'id', 'last_acesss']
self.assertEqual(set(receiver_tips[0].keys()), set(expected_keys))
|
Add unittest for creation of tips
|
Add unittest for creation of tips
|
Python
|
agpl-3.0
|
vodkina/GlobaLeaks,vodkina/GlobaLeaks,vodkina/GlobaLeaks,vodkina/GlobaLeaks
|
Add unittest for creation of tips
|
from twisted.internet.defer import inlineCallbacks
from twisted.trial import unittest
from globaleaks.settings import transact
from globaleaks.jobs import delivery_sched
from globaleaks.tests import helpers
from globaleaks.handlers.receiver import get_receiver_tip_list
from globaleaks.handlers.submission import finalize_submission
class TestJobs(helpers.TestGL):
@inlineCallbacks
def setUp(self):
self.setUp_dummy()
yield self.initalize_db()
@inlineCallbacks
def test_tip_creation(self):
yield finalize_submission(self.dummySubmission['submission_gus'])
yield delivery_sched.tip_creation()
receiver_tips = yield get_receiver_tip_list(self.dummyReceiver['username'])
expected_keys = ['access_counter', 'creation_date', 'expressed_pertinence', 'id', 'last_acesss']
self.assertEqual(set(receiver_tips[0].keys()), set(expected_keys))
|
<commit_before><commit_msg>Add unittest for creation of tips<commit_after>
|
from twisted.internet.defer import inlineCallbacks
from twisted.trial import unittest
from globaleaks.settings import transact
from globaleaks.jobs import delivery_sched
from globaleaks.tests import helpers
from globaleaks.handlers.receiver import get_receiver_tip_list
from globaleaks.handlers.submission import finalize_submission
class TestJobs(helpers.TestGL):
@inlineCallbacks
def setUp(self):
self.setUp_dummy()
yield self.initalize_db()
@inlineCallbacks
def test_tip_creation(self):
yield finalize_submission(self.dummySubmission['submission_gus'])
yield delivery_sched.tip_creation()
receiver_tips = yield get_receiver_tip_list(self.dummyReceiver['username'])
expected_keys = ['access_counter', 'creation_date', 'expressed_pertinence', 'id', 'last_acesss']
self.assertEqual(set(receiver_tips[0].keys()), set(expected_keys))
|
Add unittest for creation of tipsfrom twisted.internet.defer import inlineCallbacks
from twisted.trial import unittest
from globaleaks.settings import transact
from globaleaks.jobs import delivery_sched
from globaleaks.tests import helpers
from globaleaks.handlers.receiver import get_receiver_tip_list
from globaleaks.handlers.submission import finalize_submission
class TestJobs(helpers.TestGL):
@inlineCallbacks
def setUp(self):
self.setUp_dummy()
yield self.initalize_db()
@inlineCallbacks
def test_tip_creation(self):
yield finalize_submission(self.dummySubmission['submission_gus'])
yield delivery_sched.tip_creation()
receiver_tips = yield get_receiver_tip_list(self.dummyReceiver['username'])
expected_keys = ['access_counter', 'creation_date', 'expressed_pertinence', 'id', 'last_acesss']
self.assertEqual(set(receiver_tips[0].keys()), set(expected_keys))
|
<commit_before><commit_msg>Add unittest for creation of tips<commit_after>from twisted.internet.defer import inlineCallbacks
from twisted.trial import unittest
from globaleaks.settings import transact
from globaleaks.jobs import delivery_sched
from globaleaks.tests import helpers
from globaleaks.handlers.receiver import get_receiver_tip_list
from globaleaks.handlers.submission import finalize_submission
class TestJobs(helpers.TestGL):
@inlineCallbacks
def setUp(self):
self.setUp_dummy()
yield self.initalize_db()
@inlineCallbacks
def test_tip_creation(self):
yield finalize_submission(self.dummySubmission['submission_gus'])
yield delivery_sched.tip_creation()
receiver_tips = yield get_receiver_tip_list(self.dummyReceiver['username'])
expected_keys = ['access_counter', 'creation_date', 'expressed_pertinence', 'id', 'last_acesss']
self.assertEqual(set(receiver_tips[0].keys()), set(expected_keys))
|
|
c526a5745fccddc5df8179969c52dceacf8c1db3
|
scripts/update_ora2_truncated_course_ids.py
|
scripts/update_ora2_truncated_course_ids.py
|
#!/usr/bin/env python
"""
Script to fix workflows with truncated course_ids from https://github.com/Stanford-Online/edx-ora2/pull/25.
AIClassifierSet, AIGradingWorkflow and AITrainingWorkflow excluded as they are not used by Stanford.
"""
from itertools import chain
import os
import django
from django.db.models.functions import Length
from openedx.core.djangoapps.monkey_patch import django_db_models_options
def main():
from openassessment.assessment.models import PeerWorkflow, StaffWorkflow, StudentTrainingWorkflow
from openassessment.workflow.models import AssessmentWorkflow
peer_workflows = PeerWorkflow.objects.annotate(course_id_len=Length('course_id')).filter(course_id_len=40)
staff_workflows = StaffWorkflow.objects.annotate(course_id_len=Length('course_id')).filter(course_id_len=40)
training_workflows = StudentTrainingWorkflow.objects.annotate(course_id_len=Length('course_id')).filter(course_id_len=40)
full_course_ids = {} # Keep local dict to avoid repeated database hits for the same course_id
for workflow in chain(peer_workflows, staff_workflows, training_workflows):
truncated_course = workflow.course_id
if truncated_course not in full_course_ids:
# Get full course_id from AssessmentWorkflow table
try:
assessment_workflow = AssessmentWorkflow.objects.filter(course_id__startswith=truncated_course)[:1].get()
full_course_ids[truncated_course] = assessment_workflow.course_id
except AssessmentWorkflow.DoesNotExist:
print("No assessment workflow matching truncated course_id: {}".format(truncated_course))
continue
workflow.course_id = full_course_ids[truncated_course]
workflow.save()
print("Script finished.")
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", 'openedx.stanford.lms.envs.aws')
os.environ.setdefault("SERVICE_VARIANT", 'lms')
django_db_models_options.patch()
django.setup()
main()
|
Add script to update ora2 workflows with truncated course_ids
|
Add script to update ora2 workflows with truncated course_ids
Stanford-Online/edx-ora2#25 expands course_id length.
Note this script does not account for ambiguous truncated course_ids
(i.e. multiple different full course_ids match the truncation).
This edge case does not happen on our instances.
|
Python
|
agpl-3.0
|
caesar2164/edx-platform,caesar2164/edx-platform,Stanford-Online/edx-platform,Stanford-Online/edx-platform,Stanford-Online/edx-platform,caesar2164/edx-platform,Stanford-Online/edx-platform,caesar2164/edx-platform
|
Add script to update ora2 workflows with truncated course_ids
Stanford-Online/edx-ora2#25 expands course_id length.
Note this script does not account for ambiguous truncated course_ids
(i.e. multiple different full course_ids match the truncation).
This edge case does not happen on our instances.
|
#!/usr/bin/env python
"""
Script to fix workflows with truncated course_ids from https://github.com/Stanford-Online/edx-ora2/pull/25.
AIClassifierSet, AIGradingWorkflow and AITrainingWorkflow excluded as they are not used by Stanford.
"""
from itertools import chain
import os
import django
from django.db.models.functions import Length
from openedx.core.djangoapps.monkey_patch import django_db_models_options
def main():
from openassessment.assessment.models import PeerWorkflow, StaffWorkflow, StudentTrainingWorkflow
from openassessment.workflow.models import AssessmentWorkflow
peer_workflows = PeerWorkflow.objects.annotate(course_id_len=Length('course_id')).filter(course_id_len=40)
staff_workflows = StaffWorkflow.objects.annotate(course_id_len=Length('course_id')).filter(course_id_len=40)
training_workflows = StudentTrainingWorkflow.objects.annotate(course_id_len=Length('course_id')).filter(course_id_len=40)
full_course_ids = {} # Keep local dict to avoid repeated database hits for the same course_id
for workflow in chain(peer_workflows, staff_workflows, training_workflows):
truncated_course = workflow.course_id
if truncated_course not in full_course_ids:
# Get full course_id from AssessmentWorkflow table
try:
assessment_workflow = AssessmentWorkflow.objects.filter(course_id__startswith=truncated_course)[:1].get()
full_course_ids[truncated_course] = assessment_workflow.course_id
except AssessmentWorkflow.DoesNotExist:
print("No assessment workflow matching truncated course_id: {}".format(truncated_course))
continue
workflow.course_id = full_course_ids[truncated_course]
workflow.save()
print("Script finished.")
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", 'openedx.stanford.lms.envs.aws')
os.environ.setdefault("SERVICE_VARIANT", 'lms')
django_db_models_options.patch()
django.setup()
main()
|
<commit_before><commit_msg>Add script to update ora2 workflows with truncated course_ids
Stanford-Online/edx-ora2#25 expands course_id length.
Note this script does not account for ambiguous truncated course_ids
(i.e. multiple different full course_ids match the truncation).
This edge case does not happen on our instances.<commit_after>
|
#!/usr/bin/env python
"""
Script to fix workflows with truncated course_ids from https://github.com/Stanford-Online/edx-ora2/pull/25.
AIClassifierSet, AIGradingWorkflow and AITrainingWorkflow excluded as they are not used by Stanford.
"""
from itertools import chain
import os
import django
from django.db.models.functions import Length
from openedx.core.djangoapps.monkey_patch import django_db_models_options
def main():
from openassessment.assessment.models import PeerWorkflow, StaffWorkflow, StudentTrainingWorkflow
from openassessment.workflow.models import AssessmentWorkflow
peer_workflows = PeerWorkflow.objects.annotate(course_id_len=Length('course_id')).filter(course_id_len=40)
staff_workflows = StaffWorkflow.objects.annotate(course_id_len=Length('course_id')).filter(course_id_len=40)
training_workflows = StudentTrainingWorkflow.objects.annotate(course_id_len=Length('course_id')).filter(course_id_len=40)
full_course_ids = {} # Keep local dict to avoid repeated database hits for the same course_id
for workflow in chain(peer_workflows, staff_workflows, training_workflows):
truncated_course = workflow.course_id
if truncated_course not in full_course_ids:
# Get full course_id from AssessmentWorkflow table
try:
assessment_workflow = AssessmentWorkflow.objects.filter(course_id__startswith=truncated_course)[:1].get()
full_course_ids[truncated_course] = assessment_workflow.course_id
except AssessmentWorkflow.DoesNotExist:
print("No assessment workflow matching truncated course_id: {}".format(truncated_course))
continue
workflow.course_id = full_course_ids[truncated_course]
workflow.save()
print("Script finished.")
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", 'openedx.stanford.lms.envs.aws')
os.environ.setdefault("SERVICE_VARIANT", 'lms')
django_db_models_options.patch()
django.setup()
main()
|
Add script to update ora2 workflows with truncated course_ids
Stanford-Online/edx-ora2#25 expands course_id length.
Note this script does not account for ambiguous truncated course_ids
(i.e. multiple different full course_ids match the truncation).
This edge case does not happen on our instances.#!/usr/bin/env python
"""
Script to fix workflows with truncated course_ids from https://github.com/Stanford-Online/edx-ora2/pull/25.
AIClassifierSet, AIGradingWorkflow and AITrainingWorkflow excluded as they are not used by Stanford.
"""
from itertools import chain
import os
import django
from django.db.models.functions import Length
from openedx.core.djangoapps.monkey_patch import django_db_models_options
def main():
from openassessment.assessment.models import PeerWorkflow, StaffWorkflow, StudentTrainingWorkflow
from openassessment.workflow.models import AssessmentWorkflow
peer_workflows = PeerWorkflow.objects.annotate(course_id_len=Length('course_id')).filter(course_id_len=40)
staff_workflows = StaffWorkflow.objects.annotate(course_id_len=Length('course_id')).filter(course_id_len=40)
training_workflows = StudentTrainingWorkflow.objects.annotate(course_id_len=Length('course_id')).filter(course_id_len=40)
full_course_ids = {} # Keep local dict to avoid repeated database hits for the same course_id
for workflow in chain(peer_workflows, staff_workflows, training_workflows):
truncated_course = workflow.course_id
if truncated_course not in full_course_ids:
# Get full course_id from AssessmentWorkflow table
try:
assessment_workflow = AssessmentWorkflow.objects.filter(course_id__startswith=truncated_course)[:1].get()
full_course_ids[truncated_course] = assessment_workflow.course_id
except AssessmentWorkflow.DoesNotExist:
print("No assessment workflow matching truncated course_id: {}".format(truncated_course))
continue
workflow.course_id = full_course_ids[truncated_course]
workflow.save()
print("Script finished.")
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", 'openedx.stanford.lms.envs.aws')
os.environ.setdefault("SERVICE_VARIANT", 'lms')
django_db_models_options.patch()
django.setup()
main()
|
<commit_before><commit_msg>Add script to update ora2 workflows with truncated course_ids
Stanford-Online/edx-ora2#25 expands course_id length.
Note this script does not account for ambiguous truncated course_ids
(i.e. multiple different full course_ids match the truncation).
This edge case does not happen on our instances.<commit_after>#!/usr/bin/env python
"""
Script to fix workflows with truncated course_ids from https://github.com/Stanford-Online/edx-ora2/pull/25.
AIClassifierSet, AIGradingWorkflow and AITrainingWorkflow excluded as they are not used by Stanford.
"""
from itertools import chain
import os
import django
from django.db.models.functions import Length
from openedx.core.djangoapps.monkey_patch import django_db_models_options
def main():
from openassessment.assessment.models import PeerWorkflow, StaffWorkflow, StudentTrainingWorkflow
from openassessment.workflow.models import AssessmentWorkflow
peer_workflows = PeerWorkflow.objects.annotate(course_id_len=Length('course_id')).filter(course_id_len=40)
staff_workflows = StaffWorkflow.objects.annotate(course_id_len=Length('course_id')).filter(course_id_len=40)
training_workflows = StudentTrainingWorkflow.objects.annotate(course_id_len=Length('course_id')).filter(course_id_len=40)
full_course_ids = {} # Keep local dict to avoid repeated database hits for the same course_id
for workflow in chain(peer_workflows, staff_workflows, training_workflows):
truncated_course = workflow.course_id
if truncated_course not in full_course_ids:
# Get full course_id from AssessmentWorkflow table
try:
assessment_workflow = AssessmentWorkflow.objects.filter(course_id__startswith=truncated_course)[:1].get()
full_course_ids[truncated_course] = assessment_workflow.course_id
except AssessmentWorkflow.DoesNotExist:
print("No assessment workflow matching truncated course_id: {}".format(truncated_course))
continue
workflow.course_id = full_course_ids[truncated_course]
workflow.save()
print("Script finished.")
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", 'openedx.stanford.lms.envs.aws')
os.environ.setdefault("SERVICE_VARIANT", 'lms')
django_db_models_options.patch()
django.setup()
main()
|
|
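The commit message notes the script does not handle ambiguous truncations, i.e. several full course_ids sharing one 40-character prefix. A hypothetical guard that would detect that case before rewriting (resolve_course_id is not in the script; the ORM calls are standard Django):

def resolve_course_id(truncated_course, AssessmentWorkflow):
    # Collect every distinct full id sharing this truncated prefix.
    full_ids = list(
        AssessmentWorkflow.objects
        .filter(course_id__startswith=truncated_course)
        .values_list('course_id', flat=True)
        .distinct())
    if len(full_ids) != 1:
        raise ValueError("Ambiguous or missing course_id for {!r}: {}"
                         .format(truncated_course, full_ids))
    return full_ids[0]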
bd11a0b7885b15ee6d7f54c9fe99c09ae0d13701
|
Crawer-GoogleNew.py
|
Crawer-GoogleNew.py
|
#!/usr/bin/env python3
#Crawer Google News
# coding=utf-8
# -*- coding: utf8 -*-
from urllib.request import urlopen
from bs4 import BeautifulSoup
res = urlopen("https://news.google.com")
soup = BeautifulSoup(res, "html.parser")
#print soup.select(".esc-body")
count = 1
for item in soup.select(".esc-body"):
print('======[',count,']=========')
news_title = item.select(".esc-lead-article-title")[0].text
news_url = item.select(".esc-lead-article-title")[0].find('a')['href']
print(news_title)
print(news_url)
count += 1
|
Add modify code Google News Crawer
|
Add modify code Google News Crawer
|
Python
|
epl-1.0
|
KuChanTung/Python
|
Add modify code Google News Crawer
|
#!/usr/bin/env python3
#Crawer Google News
# coding=utf-8
# -*- coding: utf8 -*-
from urllib.request import urlopen
from bs4 import BeautifulSoup
res = urlopen("https://news.google.com")
soup = BeautifulSoup(res, "html.parser")
#print soup.select(".esc-body")
count = 1
for item in soup.select(".esc-body"):
print('======[',count,']=========')
news_title = item.select(".esc-lead-article-title")[0].text
news_url = item.select(".esc-lead-article-title")[0].find('a')['href']
print(news_title)
print(news_url)
count += 1
|
<commit_before><commit_msg>Add modify code Google News Crawer<commit_after>
|
#!/usr/bin/env python3
#Crawer Google News
# coding=utf-8
# -*- coding: utf8 -*-
from urllib.request import urlopen
from bs4 import BeautifulSoup
res = urlopen("https://news.google.com")
soup = BeautifulSoup(res, "html.parser")
#print soup.select(".esc-body")
count = 1
for item in soup.select(".esc-body"):
print('======[',count,']=========')
news_title = item.select(".esc-lead-article-title")[0].text
news_url = item.select(".esc-lead-article-title")[0].find('a')['href']
print(news_title)
print(news_url)
count += 1
|
Add modify code Google News Crawer#!/usr/bin/env python3
#Crawer Google News
# coding=utf-8
# -*- coding: utf8 -*-
from urllib.request import urlopen
from bs4 import BeautifulSoup
res = urlopen("https://news.google.com")
soup = BeautifulSoup(res, "html.parser")
#print soup.select(".esc-body")
count = 1
for item in soup.select(".esc-body"):
print('======[',count,']=========')
news_title = item.select(".esc-lead-article-title")[0].text
news_url = item.select(".esc-lead-article-title")[0].find('a')['href']
print(news_title)
print(news_url)
count += 1
|
<commit_before><commit_msg>Add modify code Google News Crawer<commit_after>#!/usr/bin/env python3
#Crawer Google News
# coding=utf-8
# -*- coding: utf8 -*-
from urllib.request import urlopen
from bs4 import BeautifulSoup
res = urlopen("https://news.google.com")
soup = BeautifulSoup(res, "html.parser")
#print soup.select(".esc-body")
count = 1
for item in soup.select(".esc-body"):
print('======[',count,']=========')
news_title = item.select(".esc-lead-article-title")[0].text
news_url = item.select(".esc-lead-article-title")[0].find('a')['href']
print(news_title)
print(news_url)
count += 1
|
|
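Google News markup changes frequently, so the .esc-body and .esc-lead-article-title selectors can silently stop matching. A defensive sketch of the same loop (the selectors are taken from the record and may already be stale):

from urllib.request import urlopen
from bs4 import BeautifulSoup

soup = BeautifulSoup(urlopen("https://news.google.com"), "html.parser")
for count, item in enumerate(soup.select(".esc-body"), start=1):
    titles = item.select(".esc-lead-article-title")
    if not titles:                      # selector no longer matches
        continue
    link = titles[0].find('a')
    if link is None or not link.get('href'):
        continue                        # no usable anchor in this block
    print('======[', count, ']=========')
    print(titles[0].text)
    print(link['href'])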
8f6b308a9b0f2a4469bae8b844776245e69a8eaf
|
TWLight/users/migrations/0056_expire_all_sessions.py
|
TWLight/users/migrations/0056_expire_all_sessions.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2020-05-08 17:49
from __future__ import unicode_literals
from django.db import migrations
from django.core import management
from django.contrib.sessions.models import Session
from django.utils.timezone import now
def expire_all_sessions(apps, schema_editor):
# Clear any expired sessions.
management.call_command("clearsessions")
# Set any remaining sessions to expire.
sessions = Session.objects.all()
for session in sessions:
session.expire_date = now()
session.save()
# Clear those sessions too.
management.call_command("clearsessions")
class Migration(migrations.Migration):
dependencies = [("users", "0055_remove_authorization_partner")]
operations = [migrations.RunPython(expire_all_sessions)]
|
Add migration clearing all sessions.
|
Add migration clearing all sessions.
|
Python
|
mit
|
WikipediaLibrary/TWLight,WikipediaLibrary/TWLight,WikipediaLibrary/TWLight,WikipediaLibrary/TWLight,WikipediaLibrary/TWLight
|
Add migration clearing all sessions.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2020-05-08 17:49
from __future__ import unicode_literals
from django.db import migrations
from django.core import management
from django.contrib.sessions.models import Session
from django.utils.timezone import now
def expire_all_sessions(apps, schema_editor):
# Clear any expired sessions.
management.call_command("clearsessions")
# Set any remaining sessions to expire.
sessions = Session.objects.all()
for session in sessions:
session.expire_date = now()
session.save()
# Clear those sessions too.
management.call_command("clearsessions")
class Migration(migrations.Migration):
dependencies = [("users", "0055_remove_authorization_partner")]
operations = [migrations.RunPython(expire_all_sessions)]
|
<commit_before><commit_msg>Add migration clearing all sessions.<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2020-05-08 17:49
from __future__ import unicode_literals
from django.db import migrations
from django.core import management
from django.contrib.sessions.models import Session
from django.utils.timezone import now
def expire_all_sessions(apps, schema_editor):
# Clear any expired sessions.
management.call_command("clearsessions")
# Set any remaining sessions to expire.
sessions = Session.objects.all()
for session in sessions:
session.expire_date = now()
session.save()
# Clear those sessions too.
management.call_command("clearsessions")
class Migration(migrations.Migration):
dependencies = [("users", "0055_remove_authorization_partner")]
operations = [migrations.RunPython(expire_all_sessions)]
|
Add migration clearing all sessions.# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2020-05-08 17:49
from __future__ import unicode_literals
from django.db import migrations
from django.core import management
from django.contrib.sessions.models import Session
from django.utils.timezone import now
def expire_all_sessions(apps, schema_editor):
# Clear any expired sessions.
management.call_command("clearsessions")
# Set any remaining sessions to expire.
sessions = Session.objects.all()
for session in sessions:
session.expire_date = now()
session.save()
# Clear those sessions too.
management.call_command("clearsessions")
class Migration(migrations.Migration):
dependencies = [("users", "0055_remove_authorization_partner")]
operations = [migrations.RunPython(expire_all_sessions)]
|
<commit_before><commit_msg>Add migration clearing all sessions.<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2020-05-08 17:49
from __future__ import unicode_literals
from django.db import migrations
from django.core import management
from django.contrib.sessions.models import Session
from django.utils.timezone import now
def expire_all_sessions(apps, schema_editor):
# Clear any expired sessions.
management.call_command("clearsessions")
# Set any remaining sessions to expire.
sessions = Session.objects.all()
for session in sessions:
session.expire_date = now()
session.save()
# Clear those sessions too.
management.call_command("clearsessions")
class Migration(migrations.Migration):
dependencies = [("users", "0055_remove_authorization_partner")]
operations = [migrations.RunPython(expire_all_sessions)]
|
|
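As written, the data migration above has no backward operation, so migrating back past it would raise IrreversibleError. A sketch of the reversible form using Django's built-in no-op (expire_all_sessions is the function defined in the record; RunPython.noop is standard Django):

from django.db import migrations

class Migration(migrations.Migration):
    dependencies = [("users", "0055_remove_authorization_partner")]
    operations = [
        # Forward: expire and clear all sessions; backward: do nothing.
        migrations.RunPython(expire_all_sessions, migrations.RunPython.noop),
    ]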
f2e22271bab3b1fb9152f57003ee96c009afcc72
|
django/artists/tests/test_serializers.py
|
django/artists/tests/test_serializers.py
|
from unittest import TestCase
from ..models import Artist, Hyperlink
from ..serializers import ArtistSerializer, HyperlinkSerializer
class HyperlinkSerializerTest(TestCase):
"""Tests for Hyperlink serializer."""
def test_valid_fields(self):
id_ = 4
name = 'jamendo'
display_name = "Jamendo"
url = "http://www.jamendo.com/artist/1333"
link = Hyperlink(id=id_, name=name, url=url)
serializer = HyperlinkSerializer(link)
self.assertEqual(serializer.data, {
'id': id_,
'name': name,
'display_name': display_name,
'url': url,
})
class ArtistSerializerTest(TestCase):
"""Tests for Artist serializer."""
def test_no_links(self):
id_ = 2
name = "Brad Sucks"
artist = Artist(id=id_, name=name)
serializer = ArtistSerializer(artist)
self.assertEqual(serializer.data, {
'id': id_,
'name': name,
'links': [],
})
|
Add tests for artist serializers
|
Add tests for artist serializers
|
Python
|
bsd-3-clause
|
FreeMusicNinja/freemusic.ninja,FreeMusicNinja/freemusic.ninja
|
Add tests for artist serializers
|
from unittest import TestCase
from ..models import Artist, Hyperlink
from ..serializers import ArtistSerializer, HyperlinkSerializer
class HyperlinkSerializerTest(TestCase):
"""Tests for Hyperlink serializer."""
def test_valid_fields(self):
id_ = 4
name = 'jamendo'
display_name = "Jamendo"
url = "http://www.jamendo.com/artist/1333"
link = Hyperlink(id=id_, name=name, url=url)
serializer = HyperlinkSerializer(link)
self.assertEqual(serializer.data, {
'id': id_,
'name': name,
'display_name': display_name,
'url': url,
})
class ArtistSerializerTest(TestCase):
"""Tests for Artist serializer."""
def test_no_links(self):
id_ = 2
name = "Brad Sucks"
artist = Artist(id=id_, name=name)
serializer = ArtistSerializer(artist)
self.assertEqual(serializer.data, {
'id': id_,
'name': name,
'links': [],
})
|
<commit_before><commit_msg>Add tests for artist serializers<commit_after>
|
from unittest import TestCase
from ..models import Artist, Hyperlink
from ..serializers import ArtistSerializer, HyperlinkSerializer
class HyperlinkSerializerTest(TestCase):
"""Tests for Hyperlink serializer."""
def test_valid_fields(self):
id_ = 4
name = 'jamendo'
display_name = "Jamendo"
url = "http://www.jamendo.com/artist/1333"
link = Hyperlink(id=id_, name=name, url=url)
serializer = HyperlinkSerializer(link)
self.assertEqual(serializer.data, {
'id': id_,
'name': name,
'display_name': display_name,
'url': url,
})
class ArtistSerializerTest(TestCase):
"""Tests for Artist serializer."""
def test_no_links(self):
id_ = 2
name = "Brad Sucks"
artist = Artist(id=id_, name=name)
serializer = ArtistSerializer(artist)
self.assertEqual(serializer.data, {
'id': id_,
'name': name,
'links': [],
})
|
Add tests for artist serializersfrom unittest import TestCase
from ..models import Artist, Hyperlink
from ..serializers import ArtistSerializer, HyperlinkSerializer
class HyperlinkSerializerTest(TestCase):
"""Tests for Hyperlink serializer."""
def test_valid_fields(self):
id_ = 4
name = 'jamendo'
display_name = "Jamendo"
url = "http://www.jamendo.com/artist/1333"
link = Hyperlink(id=id_, name=name, url=url)
serializer = HyperlinkSerializer(link)
self.assertEqual(serializer.data, {
'id': id_,
'name': name,
'display_name': display_name,
'url': url,
})
class ArtistSerializerTest(TestCase):
"""Tests for Artist serializer."""
def test_no_links(self):
id_ = 2
name = "Brad Sucks"
artist = Artist(id=id_, name=name)
serializer = ArtistSerializer(artist)
self.assertEqual(serializer.data, {
'id': id_,
'name': name,
'links': [],
})
|
<commit_before><commit_msg>Add tests for artist serializers<commit_after>from unittest import TestCase
from ..models import Artist, Hyperlink
from ..serializers import ArtistSerializer, HyperlinkSerializer
class HyperlinkSerializerTest(TestCase):
"""Tests for Hyperlink serializer."""
def test_valid_fields(self):
id_ = 4
name = 'jamendo'
display_name = "Jamendo"
url = "http://www.jamendo.com/artist/1333"
link = Hyperlink(id=id_, name=name, url=url)
serializer = HyperlinkSerializer(link)
self.assertEqual(serializer.data, {
'id': id_,
'name': name,
'display_name': display_name,
'url': url,
})
class ArtistSerializerTest(TestCase):
"""Tests for Artist serializer."""
def test_no_links(self):
id_ = 2
name = "Brad Sucks"
artist = Artist(id=id_, name=name)
serializer = ArtistSerializer(artist)
self.assertEqual(serializer.data, {
'id': id_,
'name': name,
'links': [],
})
|
|
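The serializers under test are not included in the record above. A plausible shape that would satisfy both assertions, assuming Django REST Framework and a derived display_name attribute on the Hyperlink model, is sketched below; the real project's definitions may differ.

# Hypothetical serializers consistent with the tests above.
from rest_framework import serializers

class HyperlinkSerializer(serializers.Serializer):
    id = serializers.IntegerField()
    name = serializers.CharField()
    # Read-only because the test never sets display_name explicitly;
    # it is presumably computed from name on the model.
    display_name = serializers.CharField(read_only=True)
    url = serializers.URLField()

class ArtistSerializer(serializers.Serializer):
    id = serializers.IntegerField()
    name = serializers.CharField()
    links = HyperlinkSerializer(many=True, read_only=True)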
2274eb1fcc2330188d9b84da261f5a370da61092
|
indra/sources/hypothesis/annotator.py
|
indra/sources/hypothesis/annotator.py
|
from indra.assemblers.english import EnglishAssembler
from indra.databases import identifiers
from indra.statements.agent import get_grounding, default_ns_order
grounding_ns = default_ns_order + \
['NCIT', 'PUBCHEM', 'CHEMBL']
def statement_to_annotations(stmt, annotate_agents=True):
annotation_text = get_annotation_text(stmt,
annotate_agents=annotate_agents)
annotations = []
for ev in stmt.evidence:
annot = evidence_to_annotation(ev)
if annot is None:
continue
annot['annotation'] = annotation_text
annotations.append(annot)
return annotations
def evidence_to_annotation(evidence):
if not evidence.text:
return None
if 'PMCID' in evidence.text_refs:
url = 'https://www.ncbi.nlm.nih.gov/pmc/articles/%s/' % \
evidence.text_refs['PMCID']
elif evidence.pmid:
url = 'https://pubmed.ncbi.nlm.nih.gov/%s/' % evidence.pmid
else:
return None
return {
'url': url,
'target_text': evidence.text
}
def get_annotation_text(stmt, annotate_agents=True):
ea = EnglishAssembler(stmts=[stmt])
annotation_text = ea.make_model()
if annotate_agents:
inserts = []
for agent_wc in ea.stmt_agents[0]:
for insert_begin, insert_len in inserts:
if insert_begin < agent_wc.coords[0]:
agent_wc.update_coords(insert_len)
db_ns, db_id = get_grounding(agent_wc.db_refs)
if not db_ns:
continue
grounding_text = '[%s:%s]' % (db_ns, db_id)
inserts.append((agent_wc.coords[1], len(grounding_text)))
before_part = annotation_text[:agent_wc.coords[1]]
after_part = annotation_text[agent_wc.coords[1]:]
annotation_text = ''.join([before_part, grounding_text,
after_part])
return annotation_text
|
Implement more sophisticated annotation generation
|
Implement more sophisticated annotation generation
|
Python
|
bsd-2-clause
|
sorgerlab/belpy,bgyori/indra,johnbachman/indra,sorgerlab/indra,johnbachman/indra,bgyori/indra,sorgerlab/indra,bgyori/indra,johnbachman/indra,sorgerlab/indra,sorgerlab/belpy,sorgerlab/belpy
|
Implement more sophisticated annotation generation
|
from indra.assemblers.english import EnglishAssembler
from indra.databases import identifiers
from indra.statements.agent import get_grounding, default_ns_order
grounding_ns = default_ns_order + \
['NCIT', 'PUBCHEM', 'CHEMBL']
def statement_to_annotations(stmt, annotate_agents=True):
annotation_text = get_annotation_text(stmt,
annotate_agents=annotate_agents)
annotations = []
for ev in stmt.evidence:
annot = evidence_to_annotation(ev)
if annot is None:
continue
annot['annotation'] = annotation_text
annotations.append(annot)
return annotations
def evidence_to_annotation(evidence):
if not evidence.text:
return None
if 'PMCID' in evidence.text_refs:
url = 'https://www.ncbi.nlm.nih.gov/pmc/articles/%s/' % \
evidence.text_refs['PMCID']
elif evidence.pmid:
url = 'https://pubmed.ncbi.nlm.nih.gov/%s/' % evidence.pmid
else:
return None
return {
'url': url,
'target_text': evidence.text
}
def get_annotation_text(stmt, annotate_agents=True):
ea = EnglishAssembler(stmts=[stmt])
annotation_text = ea.make_model()
if annotate_agents:
inserts = []
for agent_wc in ea.stmt_agents[0]:
for insert_begin, insert_len in inserts:
if insert_begin < agent_wc.coords[0]:
agent_wc.update_coords(insert_len)
db_ns, db_id = get_grounding(agent_wc.db_refs)
if not db_ns:
continue
grounding_text = '[%s:%s]' % (db_ns, db_id)
inserts.append((agent_wc.coords[1], len(grounding_text)))
before_part = annotation_text[:agent_wc.coords[1]]
after_part = annotation_text[agent_wc.coords[1]:]
annotation_text = ''.join([before_part, grounding_text,
after_part])
return annotation_text
|
<commit_before><commit_msg>Implement more sophisticated annotation generation<commit_after>
|
from indra.assemblers.english import EnglishAssembler
from indra.databases import identifiers
from indra.statements.agent import get_grounding, default_ns_order
grounding_ns = default_ns_order + \
['NCIT', 'PUBCHEM', 'CHEMBL']
def statement_to_annotations(stmt, annotate_agents=True):
annotation_text = get_annotation_text(stmt,
annotate_agents=annotate_agents)
annotations = []
for ev in stmt.evidence:
annot = evidence_to_annotation(ev)
if annot is None:
continue
annot['annotation'] = annotation_text
annotations.append(annot)
return annotations
def evidence_to_annotation(evidence):
if not evidence.text:
return None
if 'PMCID' in evidence.text_refs:
url = 'https://www.ncbi.nlm.nih.gov/pmc/articles/%s/' % \
evidence.text_refs['PMCID']
elif evidence.pmid:
url = 'https://pubmed.ncbi.nlm.nih.gov/%s/' % evidence.pmid
else:
return None
return {
'url': url,
'target_text': evidence.text
}
def get_annotation_text(stmt, annotate_agents=True):
ea = EnglishAssembler(stmts=[stmt])
annotation_text = ea.make_model()
if annotate_agents:
inserts = []
for agent_wc in ea.stmt_agents[0]:
for insert_begin, insert_len in inserts:
if insert_begin < agent_wc.coords[0]:
agent_wc.update_coords(insert_len)
db_ns, db_id = get_grounding(agent_wc.db_refs)
if not db_ns:
continue
grounding_text = '[%s:%s]' % (db_ns, db_id)
inserts.append((agent_wc.coords[1], len(grounding_text)))
before_part = annotation_text[:agent_wc.coords[1]]
after_part = annotation_text[agent_wc.coords[1]:]
annotation_text = ''.join([before_part, grounding_text,
after_part])
return annotation_text
|
Implement more sophisticated annotation generationfrom indra.assemblers.english import EnglishAssembler
from indra.databases import identifiers
from indra.statements.agent import get_grounding, default_ns_order
grounding_ns = default_ns_order + \
['NCIT', 'PUBCHEM', 'CHEMBL']
def statement_to_annotations(stmt, annotate_agents=True):
annotation_text = get_annotation_text(stmt,
annotate_agents=annotate_agents)
annotations = []
for ev in stmt.evidence:
annot = evidence_to_annotation(ev)
if annot is None:
continue
annot['annotation'] = annotation_text
annotations.append(annot)
return annotations
def evidence_to_annotation(evidence):
if not evidence.text:
return None
if 'PMCID' in evidence.text_refs:
url = 'https://www.ncbi.nlm.nih.gov/pmc/articles/%s/' % \
evidence.text_refs['PMCID']
elif evidence.pmid:
url = 'https://pubmed.ncbi.nlm.nih.gov/%s/' % evidence.pmid
else:
return None
return {
'url': url,
'target_text': evidence.text
}
def get_annotation_text(stmt, annotate_agents=True):
ea = EnglishAssembler(stmts=[stmt])
annotation_text = ea.make_model()
if annotate_agents:
inserts = []
for agent_wc in ea.stmt_agents[0]:
for insert_begin, insert_len in inserts:
if insert_begin < agent_wc.coords[0]:
agent_wc.update_coords(insert_len)
db_ns, db_id = get_grounding(agent_wc.db_refs)
if not db_ns:
continue
grounding_text = '[%s:%s]' % (db_ns, db_id)
inserts.append((agent_wc.coords[1], len(grounding_text)))
before_part = annotation_text[:agent_wc.coords[1]]
after_part = annotation_text[agent_wc.coords[1]:]
annotation_text = ''.join([before_part, grounding_text,
after_part])
return annotation_text
|
<commit_before><commit_msg>Implement more sophisticated annotation generation<commit_after>from indra.assemblers.english import EnglishAssembler
from indra.databases import identifiers
from indra.statements.agent import get_grounding, default_ns_order
grounding_ns = default_ns_order + \
['NCIT', 'PUBCHEM', 'CHEMBL']
def statement_to_annotations(stmt, annotate_agents=True):
annotation_text = get_annotation_text(stmt,
annotate_agents=annotate_agents)
annotations = []
for ev in stmt.evidence:
annot = evidence_to_annotation(ev)
if annot is None:
continue
annot['annotation'] = annotation_text
annotations.append(annot)
return annotations
def evidence_to_annotation(evidence):
if not evidence.text:
return None
if 'PMCID' in evidence.text_refs:
url = 'https://www.ncbi.nlm.nih.gov/pmc/articles/%s/' % \
evidence.text_refs['PMCID']
elif evidence.pmid:
url = 'https://pubmed.ncbi.nlm.nih.gov/%s/' % evidence.pmid
else:
return None
return {
'url': url,
'target_text': evidence.text
}
def get_annotation_text(stmt, annotate_agents=True):
ea = EnglishAssembler(stmts=[stmt])
annotation_text = ea.make_model()
if annotate_agents:
inserts = []
for agent_wc in ea.stmt_agents[0]:
for insert_begin, insert_len in inserts:
if insert_begin < agent_wc.coords[0]:
agent_wc.update_coords(insert_len)
db_ns, db_id = get_grounding(agent_wc.db_refs)
if not db_ns:
continue
grounding_text = '[%s:%s]' % (db_ns, db_id)
inserts.append((agent_wc.coords[1], len(grounding_text)))
before_part = annotation_text[:agent_wc.coords[1]]
after_part = annotation_text[agent_wc.coords[1]:]
annotation_text = ''.join([before_part, grounding_text,
after_part])
return annotation_text
|
|
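A rough usage sketch for the annotator above. Phosphorylation, Agent and Evidence are standard INDRA statement classes, but the specific grounding values are made up for illustration.

from indra.statements import Phosphorylation, Agent, Evidence
from indra.sources.hypothesis.annotator import statement_to_annotations

stmt = Phosphorylation(
    Agent('MAP2K1', db_refs={'HGNC': '6840'}),
    Agent('MAPK1', db_refs={'HGNC': '6871'}),
    evidence=[Evidence(text='MEK1 phosphorylates ERK2.', pmid='12345')])
annotations = statement_to_annotations(stmt)
# Each entry pairs a PubMed/PMC URL with the evidence sentence to be
# highlighted, plus the assembled English annotation text.
for annot in annotations:
    print(annot['url'], annot['target_text'])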
d7c958299382ec3177318cf818d0c90d467a2369
|
migrations/006_add_indexes.py
|
migrations/006_add_indexes.py
|
import logging
import migrations
logging.basicConfig()
log = logging.getLogger()
conn = migrations.connect()
cur = conn.cursor()
def drop_index(table, idx):
cur.execute("SHOW INDEX FROM %s WHERE KEY_NAME='%s'" % (table, idx))
if cur.fetchone():
cur.execute("DROP INDEX %s ON %s" % (idx, table))
drop_index("planner_metrics", "idx_planner_metrics_root_wf_uuid")
cur.execute("create index idx_planner_metrics_root_wf_uuid on planner_metrics(root_wf_uuid)")
drop_index("planner_metrics", "idx_planner_metrics_ts")
cur.execute("create index idx_planner_metrics_ts on planner_metrics(ts)")
conn.commit()
cur.close()
|
Add migration script to add indexes
|
Add migration script to add indexes
|
Python
|
apache-2.0
|
pegasus-isi/pegasus-metrics,pegasus-isi/pegasus-metrics,pegasus-isi/pegasus-metrics
|
Add migration script to add indexes
|
import logging
import migrations
logging.basicConfig()
log = logging.getLogger()
conn = migrations.connect()
cur = conn.cursor()
def drop_index(table, idx):
cur.execute("SHOW INDEX FROM %s WHERE KEY_NAME='%s'" % (table, idx))
if cur.fetchone():
cur.execute("DROP INDEX %s ON %s" % (idx, table))
drop_index("planner_metrics", "idx_planner_metrics_root_wf_uuid")
cur.execute("create index idx_planner_metrics_root_wf_uuid on planner_metrics(root_wf_uuid)")
drop_index("planner_metrics", "idx_planner_metrics_ts")
cur.execute("create index idx_planner_metrics_ts on planner_metrics(ts)")
conn.commit()
cur.close()
|
<commit_before><commit_msg>Add migration script to add indexes<commit_after>
|
import logging
import migrations
logging.basicConfig()
log = logging.getLogger()
conn = migrations.connect()
cur = conn.cursor()
def drop_index(table, idx):
cur.execute("SHOW INDEX FROM %s WHERE KEY_NAME='%s'" % (table, idx))
if cur.fetchone():
cur.execute("DROP INDEX %s ON %s" % (idx, table))
drop_index("planner_metrics", "idx_planner_metrics_root_wf_uuid")
cur.execute("create index idx_planner_metrics_root_wf_uuid on planner_metrics(root_wf_uuid)")
drop_index("planner_metrics", "idx_planner_metrics_ts")
cur.execute("create index idx_planner_metrics_ts on planner_metrics(ts)")
conn.commit()
cur.close()
|
Add migration script to add indexesimport logging
import migrations
logging.basicConfig()
log = logging.getLogger()
conn = migrations.connect()
cur = conn.cursor()
def drop_index(table, idx):
cur.execute("SHOW INDEX FROM %s WHERE KEY_NAME='%s'" % (table, idx))
if cur.fetchone():
cur.execute("DROP INDEX %s ON %s" % (idx, table))
drop_index("planner_metrics", "idx_planner_metrics_root_wf_uuid")
cur.execute("create index idx_planner_metrics_root_wf_uuid on planner_metrics(root_wf_uuid)")
drop_index("planner_metrics", "idx_planner_metrics_ts")
cur.execute("create index idx_planner_metrics_ts on planner_metrics(ts)")
conn.commit()
cur.close()
|
<commit_before><commit_msg>Add migration script to add indexes<commit_after>import logging
import migrations
logging.basicConfig()
log = logging.getLogger()
conn = migrations.connect()
cur = conn.cursor()
def drop_index(table, idx):
cur.execute("SHOW INDEX FROM %s WHERE KEY_NAME='%s'" % (table, idx))
if cur.fetchone():
cur.execute("DROP INDEX %s ON %s" % (idx, table))
drop_index("planner_metrics", "idx_planner_metrics_root_wf_uuid")
cur.execute("create index idx_planner_metrics_root_wf_uuid on planner_metrics(root_wf_uuid)")
drop_index("planner_metrics", "idx_planner_metrics_ts")
cur.execute("create index idx_planner_metrics_ts on planner_metrics(ts)")
conn.commit()
cur.close()
|
|
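The script above repeats the drop-then-create pattern per index. A sketch folding it into one helper, so each future index is a single call; identifiers cannot be bound as query parameters, so table, index and column names must stay trusted constants exactly as in the record (cur is the cursor from migrations.connect()).

def recreate_index(cur, table, idx, column):
    # Drop the index if it already exists, then create it fresh.
    cur.execute("SHOW INDEX FROM %s WHERE KEY_NAME='%s'" % (table, idx))
    if cur.fetchone():
        cur.execute("DROP INDEX %s ON %s" % (idx, table))
    cur.execute("CREATE INDEX %s ON %s(%s)" % (idx, table, column))

recreate_index(cur, "planner_metrics", "idx_planner_metrics_root_wf_uuid", "root_wf_uuid")
recreate_index(cur, "planner_metrics", "idx_planner_metrics_ts", "ts")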
20b10fcedc6395dc693185e01fc4c56479660c52
|
scripts/release/rethreshold_family.py
|
scripts/release/rethreshold_family.py
|
"""
Copyright [2009-2019] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# ----------------------------------------------------------------------------------
import os
import sys
import subprocess
import argparse
# ------------------------------------- GLOBALS ------------------------------------
LSF_GROUP = "/family_srch"
MEMORY = 8000
# ----------------------------------------------------------------------------------
def checkout_family(rfam_acc):
"""
Checks out a family from Rfam based on a valid Rfam accession.
rfam_acc: A valid Rfam accession
return:
"""
cmd = "rfco.pl %s" % rfam_acc
subprocess.call(cmd, shell=True)
# add some checks here
# ----------------------------------------------------------------------------------
if __name__ == '__main__':
pass
|
Add function to checkout a family
|
Add function to checkout a family
|
Python
|
apache-2.0
|
Rfam/rfam-production,Rfam/rfam-production,Rfam/rfam-production
|
Add function to checkout a family
|
"""
Copyright [2009-2019] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# ----------------------------------------------------------------------------------
import os
import sys
import subprocess
import argparse
# ------------------------------------- GLOBALS ------------------------------------
LSF_GROUP = "/family_srch"
MEMORY = 8000
# ----------------------------------------------------------------------------------
def checkout_family(rfam_acc):
"""
Checks out a family from Rfam based on a valid Rfam accession.
rfam_acc: A valid Rfam accession
return:
"""
cmd = "rfco.pl %s" % rfam_acc
subprocess.call(cmd, shell=True)
# add some checks here
# ----------------------------------------------------------------------------------
if __name__ == '__main__':
pass
|
<commit_before><commit_msg>Add function to checkout a family<commit_after>
|
"""
Copyright [2009-2019] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# ----------------------------------------------------------------------------------
import os
import sys
import subprocess
import argparse
# ------------------------------------- GLOBALS ------------------------------------
LSF_GROUP = "/family_srch"
MEMORY = 8000
# ----------------------------------------------------------------------------------
def checkout_family(rfam_acc):
"""
Checks out a family from Rfam based on a valid Rfam accession.
rfam_acc: A valid Rfam accession
return:
"""
cmd = "rfco.pl %s" % rfam_acc
subprocess.call(cmd, shell=True)
# add some checks here
# ----------------------------------------------------------------------------------
if __name__ == '__main__':
pass
|
Add function to checkout a family"""
Copyright [2009-2019] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# ----------------------------------------------------------------------------------
import os
import sys
import subprocess
import argparse
# ------------------------------------- GLOBALS ------------------------------------
LSF_GROUP = "/family_srch"
MEMORY = 8000
# ----------------------------------------------------------------------------------
def checkout_family(rfam_acc):
"""
Checks out a family from Rfam based on a valid Rfam accession.
rfam_acc: A valid Rfam accession
return:
"""
cmd = "rfco.pl %s" % rfam_acc
subprocess.call(cmd, shell=True)
# add some checks here
# ----------------------------------------------------------------------------------
if __name__ == '__main__':
pass
|
<commit_before><commit_msg>Add function to checkout a family<commit_after>"""
Copyright [2009-2019] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# ----------------------------------------------------------------------------------
import os
import sys
import subprocess
import argparse
# ------------------------------------- GLOBALS ------------------------------------
LSF_GROUP = "/family_srch"
MEMORY = 8000
# ----------------------------------------------------------------------------------
def checkout_family(rfam_acc):
"""
Checks out a family from Rfam based on a valid Rfam accession.
rfam_acc: A valid Rfam accession
return:
"""
cmd = "rfco.pl %s" % rfam_acc
subprocess.call(cmd, shell=True)
# add some checks here
# ----------------------------------------------------------------------------------
if __name__ == '__main__':
pass
|
|
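One way to fill in the "add some checks here" note in the record above: fail loudly on a non-zero exit and verify the checkout directory appeared. The assumption that rfco.pl writes the family into ./<accession> is ours, not the record's.

import os
import subprocess

def checkout_family_checked(rfam_acc):
    rc = subprocess.call("rfco.pl %s" % rfam_acc, shell=True)
    if rc != 0:
        raise RuntimeError("rfco.pl exited with %d for %s" % (rc, rfam_acc))
    if not os.path.isdir(rfam_acc):
        raise RuntimeError("no checkout directory created for %s" % rfam_acc)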
b6b617fe24f39bf4000977d4171349aee2a729b5
|
ocradmin/plugins/abbyy_nodes.py
|
ocradmin/plugins/abbyy_nodes.py
|
"""
Abbyy Recogniser
"""
from nodetree import node, manager
from ocradmin import plugins
from ocradmin.plugins import stages, generic_nodes
import types
import os
import shutil
import tempfile
import subprocess as sp
NAME = "Abbyy"
class AbbyyRecognizerNode(generic_nodes.CommandLineRecognizerNode):
"""
    Recognize an image using Abbyy.
"""
name = "Abbyy::AbbyyRecognizer"
description = "Abbyy Native Text Recognizer"
binary = "abbyyocr"
stage = stages.RECOGNIZE
arity = 1
def get_command(self, outfile, image):
"""
        Abbyy command line. Simplified for now.
"""
return [self.binary, "-if", image, "-f", "XML", "-of", outfile]
def _eval(self):
"""
Convert a full page.
"""
from ocradmin.core.utils import FinereaderXmlParser
binary = self.get_input_data(0)
json = None
with tempfile.NamedTemporaryFile(delete=False) as tmp:
tmp.close()
with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as btmp:
btmp.close()
self.write_binary(btmp.name, binary)
args = self.get_command(tmp.name, btmp.name)
self.logger.debug("Running: '%s'", " ".join(args))
proc = sp.Popen(args, stderr=sp.PIPE)
err = proc.stderr.read()
if proc.wait() != 0:
return "!!! %s CONVERSION ERROR %d: %s !!!" % (
os.path.basename(self.binary).upper(),
proc.returncode, err)
json = FinereaderXmlParser().parsefile(tmp.name)
#os.unlink(tmp.name)
#os.unlink(btmp.name)
plugins.set_progress(self.logger, self.progress_func, 100, 100)
return json
class Manager(manager.StandardManager):
"""
    Handle Abbyy nodes.
"""
@classmethod
def get_node(self, name, **kwargs):
if name.find("::") != -1:
name = name.split("::")[-1]
if name == "AbbyyRecognizer":
return AbbyyRecognizerNode(**kwargs)
@classmethod
def get_nodes(cls, *oftypes):
return super(Manager, cls).get_nodes(
*oftypes, globals=globals())
if __name__ == "__main__":
for n in Manager.get_nodes():
print n
|
Add a quicky Abbyy recogniser node. Doesn't seem to work yet, but can't be sure 'cos our license has run out of pages...
|
Add a quicky Abbyy recogniser node. Doesn't seem to work yet, but can't be sure 'cos our license has run out of pages...
|
Python
|
apache-2.0
|
vitorio/ocropodium,vitorio/ocropodium,vitorio/ocropodium,vitorio/ocropodium
|
Add a quicky Abbyy recogniser node. Doesn't seem to work yet, but can't be sure 'cos our license has run out of pages...
|
"""
Abbyy Recogniser
"""
from nodetree import node, manager
from ocradmin import plugins
from ocradmin.plugins import stages, generic_nodes
import types
import os
import shutil
import tempfile
import subprocess as sp
NAME = "Abbyy"
class AbbyyRecognizerNode(generic_nodes.CommandLineRecognizerNode):
"""
    Recognize an image using Abbyy.
"""
name = "Abbyy::AbbyyRecognizer"
description = "Abbyy Native Text Recognizer"
binary = "abbyyocr"
stage = stages.RECOGNIZE
arity = 1
def get_command(self, outfile, image):
"""
        Abbyy command line. Simplified for now.
"""
return [self.binary, "-if", image, "-f", "XML", "-of", outfile]
def _eval(self):
"""
Convert a full page.
"""
from ocradmin.core.utils import FinereaderXmlParser
binary = self.get_input_data(0)
json = None
with tempfile.NamedTemporaryFile(delete=False) as tmp:
tmp.close()
with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as btmp:
btmp.close()
self.write_binary(btmp.name, binary)
args = self.get_command(tmp.name, btmp.name)
self.logger.debug("Running: '%s'", " ".join(args))
proc = sp.Popen(args, stderr=sp.PIPE)
err = proc.stderr.read()
if proc.wait() != 0:
return "!!! %s CONVERSION ERROR %d: %s !!!" % (
os.path.basename(self.binary).upper(),
proc.returncode, err)
json = FinereaderXmlParser().parsefile(tmp.name)
#os.unlink(tmp.name)
#os.unlink(btmp.name)
plugins.set_progress(self.logger, self.progress_func, 100, 100)
return json
class Manager(manager.StandardManager):
"""
    Handle Abbyy nodes.
"""
@classmethod
def get_node(self, name, **kwargs):
if name.find("::") != -1:
name = name.split("::")[-1]
if name == "AbbyyRecognizer":
return AbbyyRecognizerNode(**kwargs)
@classmethod
def get_nodes(cls, *oftypes):
return super(Manager, cls).get_nodes(
*oftypes, globals=globals())
if __name__ == "__main__":
for n in Manager.get_nodes():
print n
|
<commit_before><commit_msg>Add a quicky Abbyy recogniser node. Doesn't seem to work yet, but can't be sure 'cos our license has run out of pages...<commit_after>
|
"""
Abbyy Recogniser
"""
from nodetree import node, manager
from ocradmin import plugins
from ocradmin.plugins import stages, generic_nodes
import types
import os
import shutil
import tempfile
import subprocess as sp
NAME = "Abbyy"
class AbbyyRecognizerNode(generic_nodes.CommandLineRecognizerNode):
"""
    Recognize an image using Abbyy.
"""
name = "Abbyy::AbbyyRecognizer"
description = "Abbyy Native Text Recognizer"
binary = "abbyyocr"
stage = stages.RECOGNIZE
arity = 1
def get_command(self, outfile, image):
"""
        Abbyy command line. Simplified for now.
"""
return [self.binary, "-if", image, "-f", "XML", "-of", outfile]
def _eval(self):
"""
Convert a full page.
"""
from ocradmin.core.utils import FinereaderXmlParser
binary = self.get_input_data(0)
json = None
with tempfile.NamedTemporaryFile(delete=False) as tmp:
tmp.close()
with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as btmp:
btmp.close()
self.write_binary(btmp.name, binary)
args = self.get_command(tmp.name, btmp.name)
self.logger.debug("Running: '%s'", " ".join(args))
proc = sp.Popen(args, stderr=sp.PIPE)
err = proc.stderr.read()
if proc.wait() != 0:
return "!!! %s CONVERSION ERROR %d: %s !!!" % (
os.path.basename(self.binary).upper(),
proc.returncode, err)
json = FinereaderXmlParser().parsefile(tmp.name)
#os.unlink(tmp.name)
#os.unlink(btmp.name)
plugins.set_progress(self.logger, self.progress_func, 100, 100)
return json
class Manager(manager.StandardManager):
"""
    Handle Abbyy nodes.
"""
@classmethod
def get_node(self, name, **kwargs):
if name.find("::") != -1:
name = name.split("::")[-1]
if name == "AbbyyRecognizer":
return AbbyyRecognizerNode(**kwargs)
@classmethod
def get_nodes(cls, *oftypes):
return super(Manager, cls).get_nodes(
*oftypes, globals=globals())
if __name__ == "__main__":
for n in Manager.get_nodes():
print n
|
Add a quicky Abbyy recogniser node. Doesn't seem to work yet, but can't be sure 'cos our license has run out of pages..."""
Abbyy Recogniser
"""
from nodetree import node, manager
from ocradmin import plugins
from ocradmin.plugins import stages, generic_nodes
import types
import os
import shutil
import tempfile
import subprocess as sp
NAME = "Abbyy"
class AbbyyRecognizerNode(generic_nodes.CommandLineRecognizerNode):
"""
    Recognize an image using Abbyy.
"""
name = "Abbyy::AbbyyRecognizer"
description = "Abbyy Native Text Recognizer"
binary = "abbyyocr"
stage = stages.RECOGNIZE
arity = 1
def get_command(self, outfile, image):
"""
        Abbyy command line. Simplified for now.
"""
return [self.binary, "-if", image, "-f", "XML", "-of", outfile]
def _eval(self):
"""
Convert a full page.
"""
from ocradmin.core.utils import FinereaderXmlParser
binary = self.get_input_data(0)
json = None
with tempfile.NamedTemporaryFile(delete=False) as tmp:
tmp.close()
with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as btmp:
btmp.close()
self.write_binary(btmp.name, binary)
args = self.get_command(tmp.name, btmp.name)
self.logger.debug("Running: '%s'", " ".join(args))
proc = sp.Popen(args, stderr=sp.PIPE)
err = proc.stderr.read()
if proc.wait() != 0:
return "!!! %s CONVERSION ERROR %d: %s !!!" % (
os.path.basename(self.binary).upper(),
proc.returncode, err)
json = FinereaderXmlParser().parsefile(tmp.name)
#os.unlink(tmp.name)
#os.unlink(btmp.name)
plugins.set_progress(self.logger, self.progress_func, 100, 100)
return json
class Manager(manager.StandardManager):
"""
    Handle Abbyy nodes.
"""
@classmethod
def get_node(self, name, **kwargs):
if name.find("::") != -1:
name = name.split("::")[-1]
if name == "AbbyyRecognizer":
return AbbyyRecognizerNode(**kwargs)
@classmethod
def get_nodes(cls, *oftypes):
return super(Manager, cls).get_nodes(
*oftypes, globals=globals())
if __name__ == "__main__":
for n in Manager.get_nodes():
print n
|
<commit_before><commit_msg>Add a quicky Abbyy recogniser node. Doesn't seem to work yet, but can't be sure 'cos our license has run out of pages...<commit_after>"""
Abbyy Recogniser
"""
from nodetree import node, manager
from ocradmin import plugins
from ocradmin.plugins import stages, generic_nodes
import types
import os
import shutil
import tempfile
import subprocess as sp
NAME = "Abbyy"
class AbbyyRecognizerNode(generic_nodes.CommandLineRecognizerNode):
"""
    Recognize an image using Abbyy.
"""
name = "Abbyy::AbbyyRecognizer"
description = "Abbyy Native Text Recognizer"
binary = "abbyyocr"
stage = stages.RECOGNIZE
arity = 1
def get_command(self, outfile, image):
"""
        Abbyy command line. Simplified for now.
"""
return [self.binary, "-if", image, "-f", "XML", "-of", outfile]
def _eval(self):
"""
Convert a full page.
"""
from ocradmin.core.utils import FinereaderXmlParser
binary = self.get_input_data(0)
json = None
with tempfile.NamedTemporaryFile(delete=False) as tmp:
tmp.close()
with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as btmp:
btmp.close()
self.write_binary(btmp.name, binary)
args = self.get_command(tmp.name, btmp.name)
self.logger.debug("Running: '%s'", " ".join(args))
proc = sp.Popen(args, stderr=sp.PIPE)
err = proc.stderr.read()
if proc.wait() != 0:
return "!!! %s CONVERSION ERROR %d: %s !!!" % (
os.path.basename(self.binary).upper(),
proc.returncode, err)
json = FinereaderXmlParser().parsefile(tmp.name)
#os.unlink(tmp.name)
#os.unlink(btmp.name)
plugins.set_progress(self.logger, self.progress_func, 100, 100)
return json
class Manager(manager.StandardManager):
"""
    Handle Abbyy nodes.
"""
@classmethod
def get_node(self, name, **kwargs):
if name.find("::") != -1:
name = name.split("::")[-1]
if name == "AbbyyRecognizer":
return AbbyyRecognizerNode(**kwargs)
@classmethod
def get_nodes(cls, *oftypes):
return super(Manager, cls).get_nodes(
*oftypes, globals=globals())
if __name__ == "__main__":
for n in Manager.get_nodes():
print n
|
|
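One gap in the node above: the os.unlink calls are commented out, so every page leaves two files in the temp directory. A sketch of the same flow with cleanup in a finally block; write_binary, get_command and FinereaderXmlParser are assumed to behave as in the record.

import os
import tempfile
import subprocess as sp

def convert_page(node, binary):
    tmp = tempfile.NamedTemporaryFile(delete=False)
    tmp.close()
    btmp = tempfile.NamedTemporaryFile(delete=False, suffix=".png")
    btmp.close()
    try:
        node.write_binary(btmp.name, binary)
        proc = sp.Popen(node.get_command(tmp.name, btmp.name), stderr=sp.PIPE)
        err = proc.stderr.read()
        if proc.wait() != 0:
            raise RuntimeError("conversion failed: %s" % err)
        from ocradmin.core.utils import FinereaderXmlParser
        return FinereaderXmlParser().parsefile(tmp.name)
    finally:
        # Both scratch files go away whether or not OCR succeeded.
        os.unlink(tmp.name)
        os.unlink(btmp.name)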
48c942e89e343cbd7de009a87302396a077054b4
|
apps/domain/tests/test_routes/test_groups.py
|
apps/domain/tests/test_routes/test_groups.py
|
def test_create_group(client):
result = client.post("/groups/", data={"group_name": "group test", "members": ["239y94asd", "whor244123"]})
assert result.status_code == 200
assert result.get_json() == {"msg": "Group created succesfully!"}
def test_get_all_groups(client):
result = client.get("/groups/")
assert result.status_code == 200
assert result.get_json() == {"groups": ["GroupA", "GroupB", "GroupC"]}
def test_get_specific_group(client):
result = client.get("/groups/5484626")
assert result.status_code == 200
assert result.get_json() == {"group": {"name": "Group A", "id": "5484626"}}
def test_update_group(client):
result = client.put("/groups/546313", data={"group_configs": "{configs}"})
assert result.status_code == 200
assert result.get_json() == {"msg": "Group was updated succesfully!"}
def test_delete_group(client):
result = client.delete("/groups/546313")
assert result.status_code == 200
assert result.get_json() == {"msg": "Group was deleted succesfully!"}
|
ADD Domain groups unit tests
|
ADD Domain groups unit tests
|
Python
|
apache-2.0
|
OpenMined/PySyft,OpenMined/PySyft,OpenMined/PySyft,OpenMined/PySyft
|
ADD Domain groups unit tests
|
def test_create_group(client):
result = client.post("/groups/", data={"group_name": "group test", "members": ["239y94asd", "whor244123"]})
assert result.status_code == 200
assert result.get_json() == {"msg": "Group created succesfully!"}
def test_get_all_groups(client):
result = client.get("/groups/")
assert result.status_code == 200
assert result.get_json() == {"groups": ["GroupA", "GroupB", "GroupC"]}
def test_get_specific_group(client):
result = client.get("/groups/5484626")
assert result.status_code == 200
assert result.get_json() == {"group": {"name": "Group A", "id": "5484626"}}
def test_update_group(client):
result = client.put("/groups/546313", data={"group_configs": "{configs}"})
assert result.status_code == 200
assert result.get_json() == {"msg": "Group was updated succesfully!"}
def test_delete_group(client):
result = client.delete("/groups/546313")
assert result.status_code == 200
assert result.get_json() == {"msg": "Group was deleted succesfully!"}
|
<commit_before><commit_msg>ADD Domain groups unit tests<commit_after>
|
def test_create_group(client):
result = client.post("/groups/", data={"group_name": "group test", "members": ["239y94asd", "whor244123"]})
assert result.status_code == 200
assert result.get_json() == {"msg": "Group created succesfully!"}
def test_get_all_groups(client):
result = client.get("/groups/")
assert result.status_code == 200
assert result.get_json() == {"groups": ["GroupA", "GroupB", "GroupC"]}
def test_get_specific_group(client):
result = client.get("/groups/5484626")
assert result.status_code == 200
assert result.get_json() == {"group": {"name": "Group A", "id": "5484626"}}
def test_update_group(client):
result = client.put("/groups/546313", data={"group_configs": "{configs}"})
assert result.status_code == 200
assert result.get_json() == {"msg": "Group was updated succesfully!"}
def test_delete_group(client):
result = client.delete("/groups/546313")
assert result.status_code == 200
assert result.get_json() == {"msg": "Group was deleted succesfully!"}
|
ADD Domain groups unit tests
def test_create_group(client):
result = client.post("/groups/", data={"group_name": "group test", "members": ["239y94asd", "whor244123"]})
assert result.status_code == 200
assert result.get_json() == {"msg": "Group created succesfully!"}
def test_get_all_groups(client):
result = client.get("/groups/")
assert result.status_code == 200
assert result.get_json() == {"groups": ["GroupA", "GroupB", "GroupC"]}
def test_get_specific_group(client):
result = client.get("/groups/5484626")
assert result.status_code == 200
assert result.get_json() == {"group": {"name": "Group A", "id": "5484626"}}
def test_update_group(client):
result = client.put("/groups/546313", data={"group_configs": "{configs}"})
assert result.status_code == 200
assert result.get_json() == {"msg": "Group was updated succesfully!"}
def test_delete_group(client):
result = client.delete("/groups/546313")
assert result.status_code == 200
assert result.get_json() == {"msg": "Group was deleted succesfully!"}
|
<commit_before><commit_msg>ADD Domain groups unit tests<commit_after>
def test_create_group(client):
result = client.post("/groups/", data={"group_name": "group test", "members": ["239y94asd", "whor244123"]})
assert result.status_code == 200
assert result.get_json() == {"msg": "Group created succesfully!"}
def test_get_all_groups(client):
result = client.get("/groups/")
assert result.status_code == 200
assert result.get_json() == {"groups": ["GroupA", "GroupB", "GroupC"]}
def test_get_specific_group(client):
result = client.get("/groups/5484626")
assert result.status_code == 200
assert result.get_json() == {"group": {"name": "Group A", "id": "5484626"}}
def test_update_group(client):
result = client.put("/groups/546313", data={"group_configs": "{configs}"})
assert result.status_code == 200
assert result.get_json() == {"msg": "Group was updated succesfully!"}
def test_delete_group(client):
result = client.delete("/groups/546313")
assert result.status_code == 200
assert result.get_json() == {"msg": "Group was deleted succesfully!"}
|
|
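The tests above depend on a pytest fixture named client that the record does not show. A self-contained stand-in is sketched below; the real project presumably builds its actual domain app rather than this inline Flask instance.

import pytest
from flask import Flask

@pytest.fixture
def client():
    # Minimal app so the fixture is runnable in isolation.
    app = Flask(__name__)
    app.config["TESTING"] = True
    with app.test_client() as client:
        yield client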
9ebd1ed784bbd91ac06a401e3c0e4113a09436f1
|
test/test_regions.py
|
test/test_regions.py
|
import numpy as np
import pyfds as fds
def test_output():
out = fds.Output(fds.LineRegion([0, 1, 2], [0, 0.2], 'test output'))
out.signals = [np.linspace(0, 1) for _ in range(len(out.region.indices))]
assert np.allclose(out.mean_signal, np.linspace(0, 1))
|
Add test case for Output class.
|
Add test case for Output class.
|
Python
|
bsd-3-clause
|
emtpb/pyfds
|
Add test case for Output class.
|
import numpy as np
import pyfds as fds
def test_output():
out = fds.Output(fds.LineRegion([0, 1, 2], [0, 0.2], 'test output'))
out.signals = [np.linspace(0, 1) for _ in range(len(out.region.indices))]
assert np.allclose(out.mean_signal, np.linspace(0, 1))
|
<commit_before><commit_msg>Add test case for Output class.<commit_after>
|
import numpy as np
import pyfds as fds
def test_output():
out = fds.Output(fds.LineRegion([0, 1, 2], [0, 0.2], 'test output'))
out.signals = [np.linspace(0, 1) for _ in range(len(out.region.indices))]
assert np.allclose(out.mean_signal, np.linspace(0, 1))
|
Add test case for Output class.import numpy as np
import pyfds as fds
def test_output():
out = fds.Output(fds.LineRegion([0, 1, 2], [0, 0.2], 'test output'))
out.signals = [np.linspace(0, 1) for _ in range(len(out.region.indices))]
assert np.allclose(out.mean_signal, np.linspace(0, 1))
|
<commit_before><commit_msg>Add test case for Output class.<commit_after>import numpy as np
import pyfds as fds
def test_output():
out = fds.Output(fds.LineRegion([0, 1, 2], [0, 0.2], 'test output'))
out.signals = [np.linspace(0, 1) for _ in range(len(out.region.indices))]
assert np.allclose(out.mean_signal, np.linspace(0, 1))
|
|
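mean_signal itself is not shown in the record; given the assertion, it is presumably the element-wise mean across the region's per-index signals, along these lines (a guess, not the library's actual code):

import numpy as np

def mean_signal(signals):
    # Stack the per-index signals and average over the first axis.
    return np.mean(np.asarray(signals), axis=0)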
b1076f68a33b1358b0de1825e683537bfd58979e
|
bin/2000/shape_msa_block.py
|
bin/2000/shape_msa_block.py
|
"""shape_msa_block.py
Output one shapefile per MSA featuring all the blocks it contains
"""
import os
import csv
import fiona
#
# Import MSA to blockgroup crosswalk
#
msa_to_block = {}
with open('data/2000/crosswalks/msa_block.csv', 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
msa = rows[0]
block = rows[1]
if msa not in msa_to_block:
msa_to_block[msa] = []
msa_to_block[msa].append(block)
#
# Perform the extraction
#
for msa in msa_to_block:
states = list(set([b[:2] for b in msa_to_block[msa]]))
## Get all blockgroups
all_block = {}
for st in states:
with fiona.open('data/2000/shp/state/%s/blocks.shp'%st, 'r',
'ESRI Shapefile') as source:
source_crs = source.crs
for f in source:
all_block[f['properties']['BLKIDFP00']] = f['geometry']
## blockgroups within cbsa
msa_block = {block: all_block[block] for block in msa_to_block[msa]}
## Save
if not os.path.isdir('data/2000/shp/msa/%s'%msa):
os.mkdir('data/2000/shp/msa/%s'%msa)
schema = {'geometry': 'Polygon',
'properties': {'BLKIDFP00': 'str'}}
with fiona.open('data/2000/shp/msa/%s/blocks.shp'%msa, 'w',
'ESRI Shapefile',
crs = source_crs,
schema = schema) as output:
for block in msa_block:
rec = {'geometry':msa_block[block], 'properties':{'BLKIDFP00':block}}
output.write(rec)
|
Add script to extract the shape of all blocks contained in the 2000 MSA
|
Add script to extract the shape of all blocks contained in the 2000 MSA
|
Python
|
bsd-2-clause
|
scities/2000-us-metro-atlas
|
Add script to extract the shape of all blocks contained in the 2000 MSA
|
"""shape_msa_block.py
Output one shapefile per MSA featuring all the blocks it contains
"""
import os
import csv
import fiona
#
# Import MSA to blockgroup crosswalk
#
msa_to_block = {}
with open('data/2000/crosswalks/msa_block.csv', 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
msa = rows[0]
block = rows[1]
if msa not in msa_to_block:
msa_to_block[msa] = []
msa_to_block[msa].append(block)
#
# Perform the extraction
#
for msa in msa_to_block:
states = list(set([b[:2] for b in msa_to_block[msa]]))
## Get all blockgroups
all_block = {}
for st in states:
with fiona.open('data/2000/shp/state/%s/blocks.shp'%st, 'r',
'ESRI Shapefile') as source:
source_crs = source.crs
for f in source:
all_block[f['properties']['BLKIDFP00']] = f['geometry']
## blockgroups within cbsa
msa_block = {block: all_block[block] for block in msa_to_block[msa]}
## Save
if not os.path.isdir('data/2000/shp/msa/%s'%msa):
os.mkdir('data/2000/shp/msa/%s'%msa)
schema = {'geometry': 'Polygon',
'properties': {'BLKIDFP00': 'str'}}
with fiona.open('data/2000/shp/msa/%s/blocks.shp'%msa, 'w',
'ESRI Shapefile',
crs = source_crs,
schema = schema) as output:
for block in msa_block:
rec = {'geometry':msa_block[block], 'properties':{'BLKIDFP00':block}}
output.write(rec)
|
<commit_before><commit_msg>Add script to extract the shape of all blocks contained in the 2000 MSA<commit_after>
|
"""shape_msa_block.py
Output one shapefile per MSA featuring all the blocks it contains
"""
import os
import csv
import fiona
#
# Import MSA to blockgroup crosswalk
#
msa_to_block = {}
with open('data/2000/crosswalks/msa_block.csv', 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
msa = rows[0]
block = rows[1]
if msa not in msa_to_block:
msa_to_block[msa] = []
msa_to_block[msa].append(block)
#
# Perform the extraction
#
for msa in msa_to_block:
states = list(set([b[:2] for b in msa_to_block[msa]]))
## Get all blockgroups
all_block = {}
for st in states:
with fiona.open('data/2000/shp/state/%s/blocks.shp'%st, 'r',
'ESRI Shapefile') as source:
source_crs = source.crs
for f in source:
all_block[f['properties']['BLKIDFP00']] = f['geometry']
## blockgroups within cbsa
msa_block = {block: all_block[block] for block in msa_to_block[msa]}
## Save
if not os.path.isdir('data/2000/shp/msa/%s'%msa):
os.mkdir('data/2000/shp/msa/%s'%msa)
schema = {'geometry': 'Polygon',
'properties': {'BLKIDFP00': 'str'}}
with fiona.open('data/2000/shp/msa/%s/blocks.shp'%msa, 'w',
'ESRI Shapefile',
crs = source_crs,
schema = schema) as output:
for block in msa_block:
rec = {'geometry':msa_block[block], 'properties':{'BLKIDFP00':block}}
output.write(rec)
|
Add script to extract the shape of all blocks contained in the 2000 MSA"""shape_msa_block.py
Output one shapefile per MSA featuring all the blocks it contains
"""
import os
import csv
import fiona
#
# Import MSA to blockgroup crosswalk
#
msa_to_block = {}
with open('data/2000/crosswalks/msa_block.csv', 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
msa = rows[0]
block = rows[1]
if msa not in msa_to_block:
msa_to_block[msa] = []
msa_to_block[msa].append(block)
#
# Perform the extraction
#
for msa in msa_to_block:
states = list(set([b[:2] for b in msa_to_block[msa]]))
## Get all blockgroups
all_block = {}
for st in states:
with fiona.open('data/2000/shp/state/%s/blocks.shp'%st, 'r',
'ESRI Shapefile') as source:
source_crs = source.crs
for f in source:
all_block[f['properties']['BLKIDFP00']] = f['geometry']
## blockgroups within cbsa
msa_block = {block: all_block[block] for block in msa_to_block[msa]}
## Save
if not os.path.isdir('data/2000/shp/msa/%s'%msa):
os.mkdir('data/2000/shp/msa/%s'%msa)
schema = {'geometry': 'Polygon',
'properties': {'BLKIDFP00': 'str'}}
with fiona.open('data/2000/shp/msa/%s/blocks.shp'%msa, 'w',
'ESRI Shapefile',
crs = source_crs,
schema = schema) as output:
for block in msa_block:
rec = {'geometry':msa_block[block], 'properties':{'BLKIDFP00':block}}
output.write(rec)
|
<commit_before><commit_msg>Add script to extract the shape of all blocks contained in the 2000 MSA<commit_after>"""shape_msa_block.py
Output one shapefile per MSA featuring all the blocks it contains
"""
import os
import csv
import fiona
#
# Import MSA to blockgroup crosswalk
#
msa_to_block = {}
with open('data/2000/crosswalks/msa_block.csv', 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
msa = rows[0]
block = rows[1]
if msa not in msa_to_block:
msa_to_block[msa] = []
msa_to_block[msa].append(block)
#
# Perform the extraction
#
for msa in msa_to_block:
states = list(set([b[:2] for b in msa_to_block[msa]]))
## Get all blockgroups
all_block = {}
for st in states:
with fiona.open('data/2000/shp/state/%s/blocks.shp'%st, 'r',
'ESRI Shapefile') as source:
source_crs = source.crs
for f in source:
all_block[f['properties']['BLKIDFP00']] = f['geometry']
## blockgroups within cbsa
msa_block = {block: all_block[block] for block in msa_to_block[msa]}
## Save
if not os.path.isdir('data/2000/shp/msa/%s'%msa):
os.mkdir('data/2000/shp/msa/%s'%msa)
schema = {'geometry': 'Polygon',
'properties': {'BLKIDFP00': 'str'}}
with fiona.open('data/2000/shp/msa/%s/blocks.shp'%msa, 'w',
'ESRI Shapefile',
crs = source_crs,
schema = schema) as output:
for block in msa_block:
rec = {'geometry':msa_block[block], 'properties':{'BLKIDFP00':block}}
output.write(rec)
|
|
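reader.next() pins the crosswalk loader above to Python 2. The same loop in Python 3 form, with setdefault replacing the membership test:

import csv

msa_to_block = {}
with open('data/2000/crosswalks/msa_block.csv') as source:
    reader = csv.reader(source, delimiter='\t')
    next(reader)  # skip the header row
    for rows in reader:
        msa_to_block.setdefault(rows[0], []).append(rows[1])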
7b6081ac2918ca8a6c3400a3284129e1329c1cac
|
letsencrypt/client/apache_obj.py
|
letsencrypt/client/apache_obj.py
|
"""Module contains classes used by the Apache Configurator."""
class Addr(object):
"""Represents an Apache VirtualHost address."""
def __init__(self, addr):
""":param tuple addr: tuple of strings (ip, port)"""
self.tup = addr
@classmethod
def fromstring(cls, str_addr):
"""Initialize Addr from string."""
tup = str_addr.partition(':')
return cls((tup[0], tup[2]))
def __str__(self):
return ':'.join(self.tup)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.tup == other.tup
return False
def set_port(self, port):
"""Set the port of the address.
:param str port: new port
"""
self.tup = (self.tup[0], port)
def get_addr(self):
"""Return addr part of Addr object."""
return self.tup[0]
def get_port(self):
"""Return port."""
return self.tup[1]
    def get_ssl_addr_obj(self):
        """Return a new Addr for the standard https port (443)."""
        return self.__class__((self.tup[0], "443"))
    def get_80_addr_obj(self):
        """Return a new Addr for port 80."""
        return self.__class__((self.tup[0], "80"))
    def get_addr_obj(self, port):
        """Return a new Addr for the given port."""
        return self.__class__((self.tup[0], port))
class VH(object):
"""Represents an Apache Virtualhost.
:ivar str filep: file path of VH
:ivar str path: Augeas path to virtual host
:ivar set addrs: Virtual Host addresses (:class:`set` of :class:`Addr`)
:ivar set names: Server names/aliases of vhost
(:class:`list` of :class:`str`)
:ivar bool ssl: SSLEngine on in vhost
:ivar bool enabled: Virtual host is enabled
"""
def __init__(self, filep, path, addrs, ssl, enabled, names=None):
"""Initialize a VH."""
self.filep = filep
self.path = path
self.addrs = addrs
self.names = set() if names is None else names
self.ssl = ssl
self.enabled = enabled
def add_name(self, name):
"""Add name to vhost."""
self.names.add(name)
def __str__(self):
return ("file: %s\n"
"vh_path: %s\n"
"addrs: %s\n"
"names: %s\n"
"ssl: %s\n"
"enabled: %s" % (self.filep, self.path, self.addrs,
self.names, self.ssl, self.enabled))
def __eq__(self, other):
if isinstance(other, self.__class__):
return (self.filep == other.filep and self.path == other.path and
self.addrs == other.addrs and
self.names == other.names and
self.ssl == other.ssl and self.enabled == other.enabled)
return False
|
Move out Apache specific Objects
|
Move out Apache specific Objects
|
Python
|
apache-2.0
|
mrb/letsencrypt,dietsche/letsencrypt,tdfischer/lets-encrypt-preview,hlieberman/letsencrypt,lbeltrame/letsencrypt,ruo91/letsencrypt,rutsky/letsencrypt,skynet/letsencrypt,Sveder/letsencrypt,stweil/letsencrypt,g1franc/lets-encrypt-preview,rutsky/letsencrypt,BillKeenan/lets-encrypt-preview,jtl999/certbot,Jadaw1n/letsencrypt,hsduk/lets-encrypt-preview,beermix/letsencrypt,ruo91/letsencrypt,brentdax/letsencrypt,stweil/letsencrypt,mitnk/letsencrypt,Sveder/letsencrypt,jmhodges/letsencrypt,goofwear/letsencrypt,goofwear/letsencrypt,bsmr-misc-forks/letsencrypt,VladimirTyrin/letsencrypt,modulexcite/letsencrypt,riseofthetigers/letsencrypt,fmarier/letsencrypt,Jonadabe/letsencrypt,vcavallo/letsencrypt,rlustin/letsencrypt,TheBoegl/letsencrypt,deserted/letsencrypt,jsha/letsencrypt,bsmr-misc-forks/letsencrypt,martindale/letsencrypt,PeterMosmans/letsencrypt,ahojjati/letsencrypt,modulexcite/letsencrypt,Hasimir/letsencrypt,jmaurice/letsencrypt,TheBoegl/letsencrypt,jtl999/certbot,kuba/letsencrypt,deserted/letsencrypt,BillKeenan/lets-encrypt-preview,BKreisel/letsencrypt,thanatos/lets-encrypt-preview,diracdeltas/lets-encrypt-preview,stewnorriss/letsencrypt,mitnk/letsencrypt,beermix/letsencrypt,martindale/letsencrypt,rugk/letsencrypt,mrb/letsencrypt,stewnorriss/letsencrypt,ghyde/letsencrypt,ghyde/letsencrypt,twstrike/le_for_patching,letsencrypt/letsencrypt,diracdeltas/lets-encrypt-preview,solidgoldbomb/letsencrypt,rlustin/letsencrypt,Jadaw1n/letsencrypt,sjerdo/letsencrypt,luorenjin/letsencrypt,thanatos/lets-encrypt-preview,sapics/letsencrypt,kevinlondon/letsencrypt,kevinlondon/letsencrypt,armersong/letsencrypt,digideskio/lets-encrypt-preview,luorenjin/letsencrypt,sjerdo/letsencrypt,tdfischer/lets-encrypt-preview,DavidGarciaCat/letsencrypt,piru/letsencrypt,bestwpw/letsencrypt,jmaurice/letsencrypt,xgin/letsencrypt,VladimirTyrin/letsencrypt,fmarier/letsencrypt,lmcro/letsencrypt,rugk/letsencrypt,BKreisel/letsencrypt,dietsche/letsencrypt,wteiken/letsencrypt,jmhodges/letsencrypt,armersong/letsencrypt,lmcro/letsencrypt,twstrike/le_for_patching,sapics/letsencrypt,wteiken/letsencrypt,PeterMosmans/letsencrypt,hlieberman/letsencrypt,lbeltrame/letsencrypt,vcavallo/letsencrypt,tyagi-prashant/letsencrypt,kuba/letsencrypt,DavidGarciaCat/letsencrypt,piru/letsencrypt,Bachmann1234/letsencrypt,ahojjati/letsencrypt,Bachmann1234/letsencrypt,jsha/letsencrypt,Jonadabe/letsencrypt,Hasimir/letsencrypt,digideskio/lets-encrypt-preview,xgin/letsencrypt,hsduk/lets-encrypt-preview,g1franc/lets-encrypt-preview,skynet/letsencrypt,riseofthetigers/letsencrypt,bestwpw/letsencrypt,solidgoldbomb/letsencrypt,letsencrypt/letsencrypt,tyagi-prashant/letsencrypt,brentdax/letsencrypt
|
Move out Apache specific Objects
|
"""Module contains classes used by the Apache Configurator."""
class Addr(object):
"""Represents an Apache VirtualHost address."""
def __init__(self, addr):
""":param tuple addr: tuple of strings (ip, port)"""
self.tup = addr
@classmethod
def fromstring(cls, str_addr):
"""Initialize Addr from string."""
tup = str_addr.partition(':')
return cls((tup[0], tup[2]))
def __str__(self):
return ':'.join(self.tup)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.tup == other.tup
return False
def set_port(self, port):
"""Set the port of the address.
:param str port: new port
"""
self.tup = (self.tup[0], port)
def get_addr(self):
"""Return addr part of Addr object."""
return self.tup[0]
def get_port(self):
"""Return port."""
return self.tup[1]
    def get_ssl_addr_obj(self):
        """Return a new Addr for the standard https port (443)."""
        return self.__class__((self.tup[0], "443"))
    def get_80_addr_obj(self):
        """Return a new Addr for port 80."""
        return self.__class__((self.tup[0], "80"))
    def get_addr_obj(self, port):
        """Return a new Addr for the given port."""
        return self.__class__((self.tup[0], port))
class VH(object):
"""Represents an Apache Virtualhost.
:ivar str filep: file path of VH
:ivar str path: Augeas path to virtual host
:ivar set addrs: Virtual Host addresses (:class:`set` of :class:`Addr`)
:ivar set names: Server names/aliases of vhost
(:class:`list` of :class:`str`)
:ivar bool ssl: SSLEngine on in vhost
:ivar bool enabled: Virtual host is enabled
"""
def __init__(self, filep, path, addrs, ssl, enabled, names=None):
"""Initialize a VH."""
self.filep = filep
self.path = path
self.addrs = addrs
self.names = set() if names is None else names
self.ssl = ssl
self.enabled = enabled
def add_name(self, name):
"""Add name to vhost."""
self.names.add(name)
def __str__(self):
return ("file: %s\n"
"vh_path: %s\n"
"addrs: %s\n"
"names: %s\n"
"ssl: %s\n"
"enabled: %s" % (self.filep, self.path, self.addrs,
self.names, self.ssl, self.enabled))
def __eq__(self, other):
if isinstance(other, self.__class__):
return (self.filep == other.filep and self.path == other.path and
self.addrs == other.addrs and
self.names == other.names and
self.ssl == other.ssl and self.enabled == other.enabled)
return False
|
<commit_before><commit_msg>Move out Apache specific Objects<commit_after>
|
"""Module contains classes used by the Apache Configurator."""
class Addr(object):
"""Represents an Apache VirtualHost address."""
def __init__(self, addr):
""":param tuple addr: tuple of strings (ip, port)"""
self.tup = addr
@classmethod
def fromstring(cls, str_addr):
"""Initialize Addr from string."""
tup = str_addr.partition(':')
return cls((tup[0], tup[2]))
def __str__(self):
return ':'.join(self.tup)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.tup == other.tup
return False
def set_port(self, port):
"""Set the port of the address.
:param str port: new port
"""
self.tup = (self.tup[0], port)
def get_addr(self):
"""Return addr part of Addr object."""
return self.tup[0]
def get_port(self):
"""Return port."""
return self.tup[1]
    def get_ssl_addr_obj(self):
        """Return a new Addr for the standard https port (443)."""
        return self.__class__((self.tup[0], "443"))
    def get_80_addr_obj(self):
        """Return a new Addr for port 80."""
        return self.__class__((self.tup[0], "80"))
    def get_addr_obj(self, port):
        """Return a new Addr for the given port."""
        return self.__class__((self.tup[0], port))
class VH(object):
"""Represents an Apache Virtualhost.
:ivar str filep: file path of VH
:ivar str path: Augeas path to virtual host
:ivar set addrs: Virtual Host addresses (:class:`set` of :class:`Addr`)
:ivar set names: Server names/aliases of vhost
(:class:`list` of :class:`str`)
:ivar bool ssl: SSLEngine on in vhost
:ivar bool enabled: Virtual host is enabled
"""
def __init__(self, filep, path, addrs, ssl, enabled, names=None):
"""Initialize a VH."""
self.filep = filep
self.path = path
self.addrs = addrs
self.names = set() if names is None else names
self.ssl = ssl
self.enabled = enabled
def add_name(self, name):
"""Add name to vhost."""
self.names.add(name)
def __str__(self):
return ("file: %s\n"
"vh_path: %s\n"
"addrs: %s\n"
"names: %s\n"
"ssl: %s\n"
"enabled: %s" % (self.filep, self.path, self.addrs,
self.names, self.ssl, self.enabled))
def __eq__(self, other):
if isinstance(other, self.__class__):
return (self.filep == other.filep and self.path == other.path and
self.addrs == other.addrs and
self.names == other.names and
self.ssl == other.ssl and self.enabled == other.enabled)
return False
|
Move out Apache specific Objects"""Module contains classes used by the Apache Configurator."""
class Addr(object):
"""Represents an Apache VirtualHost address."""
def __init__(self, addr):
""":param tuple addr: tuple of strings (ip, port)"""
self.tup = addr
@classmethod
def fromstring(cls, str_addr):
"""Initialize Addr from string."""
tup = str_addr.partition(':')
return cls((tup[0], tup[2]))
def __str__(self):
return ':'.join(self.tup)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.tup == other.tup
return False
def set_port(self, port):
"""Set the port of the address.
:param str port: new port
"""
self.tup = (self.tup[0], port)
def get_addr(self):
"""Return addr part of Addr object."""
return self.tup[0]
def get_port(self):
"""Return port."""
return self.tup[1]
def get_ssl_addr_obj(self):
    """Return a new Addr on port 443 for the same host."""
    return self.__class__((self.tup[0], "443"))
def get_80_addr_obj(self):
    """Return a new Addr on port 80 for the same host."""
    return self.__class__((self.tup[0], "80"))
def get_addr_obj(self, port):
    """Return a new Addr on the given port for the same host."""
    return self.__class__((self.tup[0], port))
class VH(object):
"""Represents an Apache Virtualhost.
:ivar str filep: file path of VH
:ivar str path: Augeas path to virtual host
:ivar set addrs: Virtual Host addresses (:class:`set` of :class:`Addr`)
:ivar set names: Server names/aliases of vhost
(:class:`set` of :class:`str`)
:ivar bool ssl: SSLEngine on in vhost
:ivar bool enabled: Virtual host is enabled
"""
def __init__(self, filep, path, addrs, ssl, enabled, names=None):
"""Initialize a VH."""
self.filep = filep
self.path = path
self.addrs = addrs
self.names = set() if names is None else names
self.ssl = ssl
self.enabled = enabled
def add_name(self, name):
"""Add name to vhost."""
self.names.add(name)
def __str__(self):
return ("file: %s\n"
"vh_path: %s\n"
"addrs: %s\n"
"names: %s\n"
"ssl: %s\n"
"enabled: %s" % (self.filep, self.path, self.addrs,
self.names, self.ssl, self.enabled))
def __eq__(self, other):
if isinstance(other, self.__class__):
return (self.filep == other.filep and self.path == other.path and
self.addrs == other.addrs and
self.names == other.names and
self.ssl == other.ssl and self.enabled == other.enabled)
return False
|
<commit_before><commit_msg>Move out Apache specific Objects<commit_after>"""Module contains classes used by the Apache Configurator."""
class Addr(object):
"""Represents an Apache VirtualHost address."""
def __init__(self, addr):
""":param tuple addr: tuple of strings (ip, port)"""
self.tup = addr
@classmethod
def fromstring(cls, str_addr):
"""Initialize Addr from string."""
tup = str_addr.partition(':')
return cls((tup[0], tup[2]))
def __str__(self):
return ':'.join(self.tup)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.tup == other.tup
return False
def set_port(self, port):
"""Set the port of the address.
:param str port: new port
"""
self.tup = (self.tup[0], port)
def get_addr(self):
"""Return addr part of Addr object."""
return self.tup[0]
def get_port(self):
"""Return port."""
return self.tup[1]
def get_ssl_addr_obj(self):
    """Return a new Addr on port 443 for the same host."""
    return self.__class__((self.tup[0], "443"))
def get_80_addr_obj(self):
    """Return a new Addr on port 80 for the same host."""
    return self.__class__((self.tup[0], "80"))
def get_addr_obj(self, port):
    """Return a new Addr on the given port for the same host."""
    return self.__class__((self.tup[0], port))
class VH(object):
"""Represents an Apache Virtualhost.
:ivar str filep: file path of VH
:ivar str path: Augeas path to virtual host
:ivar set addrs: Virtual Host addresses (:class:`set` of :class:`Addr`)
:ivar set names: Server names/aliases of vhost
(:class:`set` of :class:`str`)
:ivar bool ssl: SSLEngine on in vhost
:ivar bool enabled: Virtual host is enabled
"""
def __init__(self, filep, path, addrs, ssl, enabled, names=None):
"""Initialize a VH."""
self.filep = filep
self.path = path
self.addrs = addrs
self.names = set() if names is None else names
self.ssl = ssl
self.enabled = enabled
def add_name(self, name):
"""Add name to vhost."""
self.names.add(name)
def __str__(self):
return ("file: %s\n"
"vh_path: %s\n"
"addrs: %s\n"
"names: %s\n"
"ssl: %s\n"
"enabled: %s" % (self.filep, self.path, self.addrs,
self.names, self.ssl, self.enabled))
def __eq__(self, other):
if isinstance(other, self.__class__):
return (self.filep == other.filep and self.path == other.path and
self.addrs == other.addrs and
self.names == other.names and
self.ssl == other.ssl and self.enabled == other.enabled)
return False
|
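A short usage sketch of the `Addr` and `VH` objects defined in this record. The module's import path is not shown here, so the `obj` import below is an assumption, as are the file and Augeas paths:

```
# Hypothetical usage of the Addr/VH classes above; import path assumed.
from obj import Addr, VH

addr = Addr.fromstring("127.0.0.1:80")
assert str(addr) == "127.0.0.1:80"
assert addr.get_port() == "80"

# Derived addresses are new objects; the original is left untouched.
ssl_addr = addr.get_ssl_addr_obj()
assert str(ssl_addr) == "127.0.0.1:443"

vhost = VH("/etc/apache2/sites-available/default",      # illustrative path
           "/files/etc/apache2/sites-available/default/VirtualHost",
           {addr}, ssl=False, enabled=True)
vhost.add_name("example.com")
assert "example.com" in vhost.names
```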
|
fba89c72c13279c0709b0967a597427ee177665b
|
tests/test_angles.py
|
tests/test_angles.py
|
import unittest
from flatlib import angle
class AngleTests(unittest.TestCase):
def setUp(self):
pass
def test_norm(self):
"""Tests angle normalizations."""
self.assertEqual(angle.norm(0), 0)
self.assertEqual(angle.norm(360), 0)
self.assertEqual(angle.norm(361), 1)
self.assertEqual(angle.norm(-1), 359)
def test_znorm(self):
"""Tests angle z-normalizations."""
self.assertEqual(angle.znorm(0), 0)
self.assertEqual(angle.znorm(90), 90)
self.assertEqual(angle.znorm(180), 180)
self.assertEqual(angle.znorm(181), -179)
self.assertEqual(angle.znorm(270), -90)
def test_distances(self):
"""Tests distances (counter-clockwise)"""
self.assertEqual(angle.distance(0, 0), 0)
self.assertEqual(angle.distance(0, 90), 90)
self.assertEqual(angle.distance(0, 180), 180)
self.assertEqual(angle.distance(0, -90), 270)
self.assertEqual(angle.distance(0, -180), 180)
def test_closest_distances(self):
"""Tests closest distances between two angles."""
self.assertEqual(angle.closestdistance(0, 0), 0)
self.assertEqual(angle.closestdistance(0, 90), 90)
self.assertEqual(angle.closestdistance(0, 180), 180)
self.assertEqual(angle.closestdistance(0, 270), -90)
self.assertEqual(angle.closestdistance(0, 359), -1)
|
Create tests for basic angle computations
|
Create tests for basic angle computations
|
Python
|
mit
|
flatangle/flatlib
|
Create tests for basic angle computations
|
import unittest
from flatlib import angle
class AngleTests(unittest.TestCase):
def setUp(self):
pass
def test_norm(self):
"""Tests angle normalizations."""
self.assertEqual(angle.norm(0), 0)
self.assertEqual(angle.norm(360), 0)
self.assertEqual(angle.norm(361), 1)
self.assertEqual(angle.norm(-1), 359)
def test_znorm(self):
"""Tests angle z-normalizations."""
self.assertEqual(angle.znorm(0), 0)
self.assertEqual(angle.znorm(90), 90)
self.assertEqual(angle.znorm(180), 180)
self.assertEqual(angle.znorm(181), -179)
self.assertEqual(angle.znorm(270), -90)
def test_distances(self):
"""Tests distances (counter-clockwise)"""
self.assertEqual(angle.distance(0, 0), 0)
self.assertEqual(angle.distance(0, 90), 90)
self.assertEqual(angle.distance(0, 180), 180)
self.assertEqual(angle.distance(0, -90), 270)
self.assertEqual(angle.distance(0, -180), 180)
def test_closest_distances(self):
"""Tests closest distances between two angles."""
self.assertEqual(angle.closestdistance(0, 0), 0)
self.assertEqual(angle.closestdistance(0, 90), 90)
self.assertEqual(angle.closestdistance(0, 180), 180)
self.assertEqual(angle.closestdistance(0, 270), -90)
self.assertEqual(angle.closestdistance(0, 359), -1)
|
<commit_before><commit_msg>Create tests for basic angle computations<commit_after>
|
import unittest
from flatlib import angle
class AngleTests(unittest.TestCase):
def setUp(self):
pass
def test_norm(self):
"""Tests angle normalizations."""
self.assertEqual(angle.norm(0), 0)
self.assertEqual(angle.norm(360), 0)
self.assertEqual(angle.norm(361), 1)
self.assertEqual(angle.norm(-1), 359)
def test_znorm(self):
"""Tests angle z-normalizations."""
self.assertEqual(angle.znorm(0), 0)
self.assertEqual(angle.znorm(90), 90)
self.assertEqual(angle.znorm(180), 180)
self.assertEqual(angle.znorm(181), -179)
self.assertEqual(angle.znorm(270), -90)
def test_distances(self):
"""Tests distances (counter-clockwise)"""
self.assertEqual(angle.distance(0, 0), 0)
self.assertEqual(angle.distance(0, 90), 90)
self.assertEqual(angle.distance(0, 180), 180)
self.assertEqual(angle.distance(0, -90), 270)
self.assertEqual(angle.distance(0, -180), 180)
def test_closest_distances(self):
"""Tests closest distances between two angles."""
self.assertEqual(angle.closestdistance(0, 0), 0)
self.assertEqual(angle.closestdistance(0, 90), 90)
self.assertEqual(angle.closestdistance(0, 180), 180)
self.assertEqual(angle.closestdistance(0, 270), -90)
self.assertEqual(angle.closestdistance(0, 359), -1)
|
Create tests for basic angle computationsimport unittest
from flatlib import angle
class AngleTests(unittest.TestCase):
def setUp(self):
pass
def test_norm(self):
"""Tests angle normalizations."""
self.assertEqual(angle.norm(0), 0)
self.assertEqual(angle.norm(360), 0)
self.assertEqual(angle.norm(361), 1)
self.assertEqual(angle.norm(-1), 359)
def test_znorm(self):
"""Tests angle z-normalizations."""
self.assertEqual(angle.znorm(0), 0)
self.assertEqual(angle.znorm(90), 90)
self.assertEqual(angle.znorm(180), 180)
self.assertEqual(angle.znorm(181), -179)
self.assertEqual(angle.znorm(270), -90)
def test_distances(self):
"""Tests distances (counter-clockwise)"""
self.assertEqual(angle.distance(0, 0), 0)
self.assertEqual(angle.distance(0, 90), 90)
self.assertEqual(angle.distance(0, 180), 180)
self.assertEqual(angle.distance(0, -90), 270)
self.assertEqual(angle.distance(0, -180), 180)
def test_closest_distances(self):
"""Tests closest distances between two angles."""
self.assertEqual(angle.closestdistance(0, 0), 0)
self.assertEqual(angle.closestdistance(0, 90), 90)
self.assertEqual(angle.closestdistance(0, 180), 180)
self.assertEqual(angle.closestdistance(0, 270), -90)
self.assertEqual(angle.closestdistance(0, 359), -1)
|
<commit_before><commit_msg>Create tests for basic angle computations<commit_after>import unittest
from flatlib import angle
class AngleTests(unittest.TestCase):
def setUp(self):
pass
def test_norm(self):
"""Tests angle normalizations."""
self.assertEqual(angle.norm(0), 0)
self.assertEqual(angle.norm(360), 0)
self.assertEqual(angle.norm(361), 1)
self.assertEqual(angle.norm(-1), 359)
def test_znorm(self):
"""Tests angle z-normalizations."""
self.assertEqual(angle.znorm(0), 0)
self.assertEqual(angle.znorm(90), 90)
self.assertEqual(angle.znorm(180), 180)
self.assertEqual(angle.znorm(181), -179)
self.assertEqual(angle.znorm(270), -90)
def test_distances(self):
"""Tests distances (counter-clockwise)"""
self.assertEqual(angle.distance(0, 0), 0)
self.assertEqual(angle.distance(0, 90), 90)
self.assertEqual(angle.distance(0, 180), 180)
self.assertEqual(angle.distance(0, -90), 270)
self.assertEqual(angle.distance(0, -180), 180)
def test_closest_distances(self):
"""Tests closest distances between two angles."""
self.assertEqual(angle.closestdistance(0, 0), 0)
self.assertEqual(angle.closestdistance(0, 90), 90)
self.assertEqual(angle.closestdistance(0, 180), 180)
self.assertEqual(angle.closestdistance(0, 270), -90)
self.assertEqual(angle.closestdistance(0, 359), -1)
|
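The tests above pin down the expected semantics of `angle.norm`, `angle.znorm`, `angle.distance` and `angle.closestdistance`. A minimal sketch of implementations consistent with every assertion in this record — the real flatlib module may differ — is:

```
def norm(a):
    # Normalize an angle to the range [0, 360).
    return a % 360

def znorm(a):
    # Normalize an angle to the range (-180, 180].
    a = a % 360
    return a if a <= 180 else a - 360

def distance(a, b):
    # Counter-clockwise distance from a to b, in [0, 360).
    return norm(b - a)

def closestdistance(a, b):
    # Signed distance from a to b along the shorter arc.
    return znorm(b - a)
```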
|
8e221ddbd84b11364208fd505c9a7854aca1c854
|
mistral/db/sqlalchemy/migration/alembic_migrations/versions/015_add_unique_keys_for_non_locking_model.py
|
mistral/db/sqlalchemy/migration/alembic_migrations/versions/015_add_unique_keys_for_non_locking_model.py
|
# Copyright 2016 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""add_unique_keys_for_non_locking_model
Revision ID: 015
Revises: 014
Create Date: 2016-08-08 11:05:20.109380
"""
# revision identifiers, used by Alembic.
revision = '015'
down_revision = '014'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column(
'delayed_calls_v2',
sa.Column('unique_key', sa.String(length=80), nullable=True)
)
op.create_unique_constraint(
None,
'delayed_calls_v2',
['unique_key', 'processing']
)
op.add_column(
'task_executions_v2',
sa.Column('unique_key', sa.String(length=80), nullable=True)
)
op.create_unique_constraint(
None,
'task_executions_v2',
['unique_key']
)
|
Add unique keys for non locking model
|
Add unique keys for non locking model
The unique key columns are part of the new non-locking model.
The migration script adds 2 unique key columns to 2 tables.
One of the new columns is already in the model and the other is
under review.
Change-Id: Icd352ed709f378a8141bcf6264bc9abba0f8db9a
Closes-Bug: #1610269
|
Python
|
apache-2.0
|
StackStorm/mistral,openstack/mistral,openstack/mistral,StackStorm/mistral
|
Add unique keys for non locking model
The unique key columns are part of the new non-locking model.
The migration script adds 2 unique key columns to 2 tables.
One of the new columns is already in the model and the other is
under review.
Change-Id: Icd352ed709f378a8141bcf6264bc9abba0f8db9a
Closes-Bug: #1610269
|
# Copyright 2016 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""add_unique_keys_for_non_locking_model
Revision ID: 015
Revises: 014
Create Date: 2016-08-08 11:05:20.109380
"""
# revision identifiers, used by Alembic.
revision = '015'
down_revision = '014'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column(
'delayed_calls_v2',
sa.Column('unique_key', sa.String(length=80), nullable=True)
)
op.create_unique_constraint(
None,
'delayed_calls_v2',
['unique_key', 'processing']
)
op.add_column(
'task_executions_v2',
sa.Column('unique_key', sa.String(length=80), nullable=True)
)
op.create_unique_constraint(
None,
'task_executions_v2',
['unique_key']
)
|
<commit_before><commit_msg>Add unique keys for non locking model
The unique key columns are part of the new non-locking model.
The migration script adds 2 unique key columns to 2 tables.
One of the new columns is already in the model and the other is
under review.
Change-Id: Icd352ed709f378a8141bcf6264bc9abba0f8db9a
Closes-Bug: #1610269<commit_after>
|
# Copyright 2016 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""add_unique_keys_for_non_locking_model
Revision ID: 015
Revises: 014
Create Date: 2016-08-08 11:05:20.109380
"""
# revision identifiers, used by Alembic.
revision = '015'
down_revision = '014'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column(
'delayed_calls_v2',
sa.Column('unique_key', sa.String(length=80), nullable=True)
)
op.create_unique_constraint(
None,
'delayed_calls_v2',
['unique_key', 'processing']
)
op.add_column(
'task_executions_v2',
sa.Column('unique_key', sa.String(length=80), nullable=True)
)
op.create_unique_constraint(
None,
'task_executions_v2',
['unique_key']
)
|
Add unique keys for non locking model
The unique key columns are part of the new non-locking model.
The migration script adds 2 unique key columns to 2 tables.
One of the new columns is already in the model and the other is
under review.
Change-Id: Icd352ed709f378a8141bcf6264bc9abba0f8db9a
Closes-Bug: #1610269# Copyright 2016 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""add_unique_keys_for_non_locking_model
Revision ID: 015
Revises: 014
Create Date: 2016-08-08 11:05:20.109380
"""
# revision identifiers, used by Alembic.
revision = '015'
down_revision = '014'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column(
'delayed_calls_v2',
sa.Column('unique_key', sa.String(length=80), nullable=True)
)
op.create_unique_constraint(
None,
'delayed_calls_v2',
['unique_key', 'processing']
)
op.add_column(
'task_executions_v2',
sa.Column('unique_key', sa.String(length=80), nullable=True)
)
op.create_unique_constraint(
None,
'task_executions_v2',
['unique_key']
)
|
<commit_before><commit_msg>Add unique keys for non locking model
The unique key columns are part of the new non-locking model.
The migration script adds 2 unique key columns to 2 tables.
One of the new columns is already in the model and the other is
under review.
Change-Id: Icd352ed709f378a8141bcf6264bc9abba0f8db9a
Closes-Bug: #1610269<commit_after># Copyright 2016 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""add_unique_keys_for_non_locking_model
Revision ID: 015
Revises: 014
Create Date: 2016-08-08 11:05:20.109380
"""
# revision identifiers, used by Alembic.
revision = '015'
down_revision = '014'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column(
'delayed_calls_v2',
sa.Column('unique_key', sa.String(length=80), nullable=True)
)
op.create_unique_constraint(
None,
'delayed_calls_v2',
['unique_key', 'processing']
)
op.add_column(
'task_executions_v2',
sa.Column('unique_key', sa.String(length=80), nullable=True)
)
op.create_unique_constraint(
None,
'task_executions_v2',
['unique_key']
)
|
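The migration in this record defines `upgrade()` only. A hedged sketch of the matching `downgrade()` — the constraint names below are guesses, since `create_unique_constraint(None, ...)` lets SQLAlchemy derive a name from the active naming convention and backend:

```
def downgrade():
    # Constraint names are assumptions based on SQLAlchemy's default
    # "uq_<table>_<column>" convention; verify against the live schema.
    op.drop_constraint('uq_delayed_calls_v2_unique_key',
                       'delayed_calls_v2', type_='unique')
    op.drop_column('delayed_calls_v2', 'unique_key')
    op.drop_constraint('uq_task_executions_v2_unique_key',
                       'task_executions_v2', type_='unique')
    op.drop_column('task_executions_v2', 'unique_key')
```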
|
674721cc3e0f7fac030cf75fcdcfc4ee3d2f539f
|
runtime/python/test/test_err.py
|
runtime/python/test/test_err.py
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import clownfish
import sys
class TestErr(unittest.TestCase):
def testErrorHandling(self):
vec = clownfish.Vector()
try:
vec.grow(sys.maxsize)
except RuntimeError as e:
self.assertTrue(str(e).find("overflow") != -1)
if __name__ == '__main__':
unittest.main()
|
Test err handling in Py glue.
|
Test err handling in Py glue.
Check that a Clownfish exception gets transformed into a Python
exception.
|
Python
|
apache-2.0
|
rectang/lucy-clownfish,apache/lucy-clownfish,rectang/lucy-clownfish,apache/lucy-clownfish,nwellnhof/lucy-clownfish,rectang/lucy-clownfish,nwellnhof/lucy-clownfish,apache/lucy-clownfish,nwellnhof/lucy-clownfish,apache/lucy-clownfish,nwellnhof/lucy-clownfish,apache/lucy-clownfish,apache/lucy-clownfish,rectang/lucy-clownfish,nwellnhof/lucy-clownfish,rectang/lucy-clownfish,apache/lucy-clownfish,nwellnhof/lucy-clownfish,nwellnhof/lucy-clownfish,nwellnhof/lucy-clownfish,rectang/lucy-clownfish,rectang/lucy-clownfish,rectang/lucy-clownfish,rectang/lucy-clownfish,apache/lucy-clownfish
|
Test err handling in Py glue.
Check that a Clownfish exception gets transformed into a Python
exception.
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import clownfish
import sys
class TestErr(unittest.TestCase):
def testErrorHandling(self):
vec = clownfish.Vector()
try:
vec.grow(sys.maxsize)
except RuntimeError as e:
self.assertTrue(str(e).find("overflow") != -1)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Test err handling in Py glue.
Check that a Clownfish exception gets transformed into a Python
exception.<commit_after>
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import clownfish
import sys
class TestErr(unittest.TestCase):
def testErrorHandling(self):
vec = clownfish.Vector()
try:
vec.grow(sys.maxsize)
except RuntimeError as e:
self.assertTrue(str(e).find("overflow") != -1)
if __name__ == '__main__':
unittest.main()
|
Test err handling in Py glue.
Check that a Clownfish exception gets transformed into a Python
exception.# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import clownfish
import sys
class TestErr(unittest.TestCase):
def testErrorHandling(self):
vec = clownfish.Vector()
try:
vec.grow(sys.maxsize)
except RuntimeError as e:
self.assertTrue(str(e).find("overflow") != -1)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Test err handling in Py glue.
Check that a Clownfish exception gets transformed into a Python
exception.<commit_after># Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import clownfish
import sys
class TestErr(unittest.TestCase):
def testErrorHandling(self):
vec = clownfish.Vector()
try:
vec.grow(sys.maxsize)
except RuntimeError as e:
self.assertTrue(str(e).find("overflow") != -1)
if __name__ == '__main__':
unittest.main()
|
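One caveat about the test above: if `Vector.grow` ever stops raising, the `try`/`except` body falls through and the test still passes. A stricter variant of the same check, using `assertRaisesRegex` so a missing exception fails the test:

```
import sys
import unittest

import clownfish

class TestErrStrict(unittest.TestCase):
    def testErrorHandling(self):
        vec = clownfish.Vector()
        # Fails if no exception is raised or if the message changes.
        with self.assertRaisesRegex(RuntimeError, "overflow"):
            vec.grow(sys.maxsize)
```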
|
59c7a33693bfc45eca95a0bd3bd271f6ec904a3d
|
libs/playback/config/add_swap.py
|
libs/playback/config/add_swap.py
|
# The MIT License (MIT)
#
# Copyright (c) 2015 Taio Jia (jiasir) <jiasir@icloud.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
__author__ = 'jiasir'
usage = """
add_swap.py [ubuntu@controller01] [ubuntu@controller02] [...]
"""
swap_space = '24G'
from fabric.api import *
from fabric.contrib import files
import sys
def add_swap(space):
sudo('fallocate -l {space} /mnt/{space}.swap'.format(space=space))
sudo('mkswap /mnt/{space}.swap'.format(space=space))
sudo('swapon /mnt/{space}.swap'.format(space=space))
files.append('/etc/fstab', '/mnt/{space}.swap none swap sw 0 0'.format(space=space), use_sudo=True)
sudo('swapon -s')
sudo('chmod 600 /mnt/{space}.swap'.format(space=space))
def main():
env.hosts = sys.argv[1:]
for host in env.hosts:
env.host_string = host
add_swap(swap_space)
if __name__ == '__main__':
main()
|
Add compute swap when needed
|
Add compute swap when needed
|
Python
|
mit
|
nofdev/playback,jiasir/playback
|
Add compute swap when needed
|
# The MIT License (MIT)
#
# Copyright (c) 2015 Taio Jia (jiasir) <jiasir@icloud.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
__author__ = 'jiasir'
usage = """
add_swap.py [ubuntu@controller01] [ubuntu@controller02] [...]
"""
swap_space = '24G'
from fabric.api import *
from fabric.contrib import files
import sys
def add_swap(space):
sudo('fallocate -l {space} /mnt/{space}.swap'.format(space=space))
sudo('mkswap /mnt/{space}.swap'.format(space=space))
sudo('swapon /mnt/{space}.swap'.format(space=space))
files.append('/etc/fstab', '/mnt/{space}.swap none swap sw 0 0'.format(space=space), use_sudo=True)
sudo('swapon -s')
sudo('chmod 600 /mnt/{space}.swap'.format(space=space))
def main():
env.hosts = sys.argv[1:]
for host in env.hosts:
env.host_string = host
add_swap(swap_space)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add compute swap when needed<commit_after>
|
# The MIT License (MIT)
#
# Copyright (c) 2015 Taio Jia (jiasir) <jiasir@icloud.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
__author__ = 'jiasir'
usage = """
add_swap.py [ubuntu@controller01] [ubuntu@controller02] [...]
"""
swap_space = '24G'
from fabric.api import *
from fabric.contrib import files
import sys
def add_swap(space):
sudo('fallocate -l {space} /mnt/{space}.swap'.format(space=space))
sudo('mkswap /mnt/{space}.swap'.format(space=space))
sudo('swapon /mnt/{space}.swap'.format(space=space))
files.append('/etc/fstab', '/mnt/{space}.swap none swap sw 0 0'.format(space=space), use_sudo=True)
sudo('swapon -s')
sudo('chmod 600 /mnt/{space}.swap'.format(space=space))
def main():
env.hosts = sys.argv[1:]
for host in env.hosts:
env.host_string = host
add_swap(swap_space)
if __name__ == '__main__':
main()
|
Add compute swap when needed# The MIT License (MIT)
#
# Copyright (c) 2015 Taio Jia (jiasir) <jiasir@icloud.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
__author__ = 'jiasir'
usage = """
add_swap.py [ubuntu@controller01] [ubuntu@controller02] [...]
"""
swap_space = '24G'
from fabric.api import *
from fabric.contrib import files
import sys
def add_swap(space):
sudo('fallocate -l {space} /mnt/{space}.swap'.format(space=space))
sudo('mkswap /mnt/{space}.swap'.format(space=space))
sudo('swapon /mnt/{space}.swap'.format(space=space))
files.append('/etc/fstab', '/mnt/{space}.swap none swap sw 0 0'.format(space=space), use_sudo=True)
sudo('swapon -s')
sudo('chmod 600 /mnt/{space}.swap'.format(space=space))
def main():
env.hosts = sys.argv[1:]
for host in env.hosts:
env.host_string = host
add_swap(swap_space)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add compute swap when needed<commit_after># The MIT License (MIT)
#
# Copyright (c) 2015 Taio Jia (jiasir) <jiasir@icloud.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
__author__ = 'jiasir'
usage = """
add_swap.py [ubuntu@controller01] [ubuntu@controller02] [...]
"""
swap_space = '24G'
from fabric.api import *
from fabric.contrib import files
import sys
def add_swap(space):
sudo('fallocate -l {space} /mnt/{space}.swap'.format(space=space))
sudo('mkswap /mnt/{space}.swap'.format(space=space))
sudo('swapon /mnt/{space}.swap'.format(space=space))
files.append('/etc/fstab', '/mnt/{space}.swap none swap sw 0 0'.format(space=space), use_sudo=True)
sudo('swapon -s')
sudo('chmod 600 /mnt/{space}.swap'.format(space=space))
def main():
env.hosts = sys.argv[1:]
for host in env.hosts:
env.host_string = host
add_swap(swap_space)
if __name__ == '__main__':
main()
|
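The script above is not idempotent: a second run fails on the existing swap file and appends a duplicate fstab line. A hedged sketch of a guard reusing the same fabric helpers (`files.exists` is part of `fabric.contrib.files` in fabric 1.x):

```
from fabric.contrib import files

def add_swap_once(space):
    # Skip hosts that already got a swap file from a previous run.
    if files.exists('/mnt/{space}.swap'.format(space=space)):
        print('swap already configured, skipping')
        return
    add_swap(space)
```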
|
c1f01676e63280c379bd6b39f30d8b42750bda68
|
mpu9255.py
|
mpu9255.py
|
# mpu9255.py MicroPython driver for the InvenSense MPU9255 inertial measurement unit
# Authors Peter Hinch, Sebastian Plamauer
# V0.1 7th October 2022
'''
mpu9255 is a micropython module for the InvenSense MPU9255 sensor.
It measures acceleration, turn rate and the magnetic field in three axes.
The MPU9255 sensor is functionally equivalent to the MPU9250 except for the
device ID or chip ID which is 0x73.
The MIT License (MIT)
Copyright (c) 2014 Sebastian Plamauer, oeplse@gmail.com, Peter Hinch
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
from mpu9250 import MPU9250
class MPU9255(MPU9250):
_chip_id = 0x73
|
Fix bad chip ID error when using MPU9250 class with MPU9255 sensor
|
Fix bad chip ID error when using MPU9250 class with MPU9255 sensor
|
Python
|
mit
|
micropython-IMU/micropython-mpu9x50
|
Fix bad chip ID error when using MPU9250 class with MPU9255 sensor
|
# mpu9255.py MicroPython driver for the InvenSense MPU9255 inertial measurement unit
# Authors Peter Hinch, Sebastian Plamauer
# V0.1 7th October 2022
'''
mpu9255 is a micropython module for the InvenSense MPU9255 sensor.
It measures acceleration, turn rate and the magnetic field in three axes.
The MPU9255 sensor is functionally equivalent to the MPU9250 except for the
device ID or chip ID which is 0x73.
The MIT License (MIT)
Copyright (c) 2014 Sebastian Plamauer, oeplse@gmail.com, Peter Hinch
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
from mpu9250 import MPU9250
class MPU9255(MPU9250):
_chip_id = 0x73
|
<commit_before><commit_msg>Fix bad chip ID error when using MPU9250 class with MPU9255 sensor<commit_after>
|
# mpu9255.py MicroPython driver for the InvenSense MPU9255 inertial measurement unit
# Authors Peter Hinch, Sebastian Plamauer
# V0.1 7th October 2022
'''
mpu9255 is a micropython module for the InvenSense MPU9255 sensor.
It measures acceleration, turn rate and the magnetic field in three axes.
The MPU9255 sensor is functionally equivalent to the MPU9250 except for the
device ID or chip ID which is 0x73.
The MIT License (MIT)
Copyright (c) 2014 Sebastian Plamauer, oeplse@gmail.com, Peter Hinch
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
from mpu9250 import MPU9250
class MPU9255(MPU9250):
_chip_id = 0x73
|
Fix bad chip ID error when using MPU9250 class with MPU9255 sensor# mpu9255.py MicroPython driver for the InvenSense MPU9255 inertial measurement unit
# Authors Peter Hinch, Sebastian Plamauer
# V0.1 7th October 2022
'''
mpu9255 is a micropython module for the InvenSense MPU9255 sensor.
It measures acceleration, turn rate and the magnetic field in three axes.
The MPU9255 sensor is functionally equivalent to the MPU9250 except for the
device ID or chip ID which is 0x73.
The MIT License (MIT)
Copyright (c) 2014 Sebastian Plamauer, oeplse@gmail.com, Peter Hinch
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
from mpu9250 import MPU9250
class MPU9255(MPU9250):
_chip_id = 0x73
|
<commit_before><commit_msg>Fix bad chip ID error when using MPU9250 class with MPU9255 sensor<commit_after># mpu9255.py MicroPython driver for the InvenSense MPU9255 inertial measurement unit
# Authors Peter Hinch, Sebastian Plamauer
# V0.1 7th October 2022
'''
mpu9255 is a micropython module for the InvenSense MPU9255 sensor.
It measures acceleration, turn rate and the magnetic field in three axes.
The MPU9255 sensor is functionally equivalent to the MPU9250 except for the
device ID or chip ID which is 0x73.
The MIT License (MIT)
Copyright (c) 2014 Sebastian Plamauer, oeplse@gmail.com, Peter Hinch
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
from mpu9250 import MPU9250
class MPU9255(MPU9250):
_chip_id = 0x73
|
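A minimal usage sketch for the subclass above. The constructor argument and accessor names are assumptions carried over from the mpu9250 base driver, which is not part of this record:

```
from machine import I2C

from mpu9255 import MPU9255

i2c = I2C(1)          # bus number is board-specific
imu = MPU9255(i2c)    # chip ID 0x73 now passes the base class's ID check
print(imu.accel.xyz)  # accessor name assumed from the MPU9250 driver
```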
|
8651febe42d49d535095d07c27143331b0285b93
|
numba/cuda/tests/cudapy/test_datetime.py
|
numba/cuda/tests/cudapy/test_datetime.py
|
from __future__ import print_function
import numpy as np
from numba import cuda
from numba import unittest_support as unittest
from numba.tests.support import TestCase
from numba.cuda.testing import SerialMixin
class TestCudaAutojit(SerialMixin, TestCase):
def test_basic_datetime_kernel(self):
@cuda.jit
def foo(start, end, delta):
for i in range(cuda.grid(1), delta.size, cuda.gridsize(1)):
delta[i] = end[i] - start[i]
arr1 = np.arange('2005-02', '2006-02', dtype='datetime64[D]')
arr2 = arr1 + np.random.randint(0, 10000, arr1.size)
delta = np.zeros_like(arr1, dtype='timedelta64[D]')
foo[1, 32](arr1, arr2, delta)
self.assertPreciseEqual(delta, arr2 - arr1)
if __name__ == '__main__':
unittest.main()
|
Add test for gpu datetime
|
Add test for gpu datetime
|
Python
|
bsd-2-clause
|
seibert/numba,stonebig/numba,IntelLabs/numba,stonebig/numba,sklam/numba,cpcloud/numba,cpcloud/numba,seibert/numba,IntelLabs/numba,jriehl/numba,IntelLabs/numba,gmarkall/numba,stonebig/numba,sklam/numba,cpcloud/numba,sklam/numba,stuartarchibald/numba,numba/numba,jriehl/numba,gmarkall/numba,stuartarchibald/numba,gmarkall/numba,seibert/numba,gmarkall/numba,jriehl/numba,jriehl/numba,stuartarchibald/numba,stuartarchibald/numba,seibert/numba,IntelLabs/numba,cpcloud/numba,stonebig/numba,sklam/numba,stonebig/numba,IntelLabs/numba,numba/numba,jriehl/numba,numba/numba,seibert/numba,gmarkall/numba,cpcloud/numba,numba/numba,sklam/numba,stuartarchibald/numba,numba/numba
|
Add test for gpu datetime
|
from __future__ import print_function
import numpy as np
from numba import cuda
from numba import unittest_support as unittest
from numba.tests.support import TestCase
from numba.cuda.testing import SerialMixin
class TestCudaAutojit(SerialMixin, TestCase):
def test_basic_datetime_kernel(self):
@cuda.jit
def foo(start, end, delta):
for i in range(cuda.grid(1), delta.size, cuda.gridsize(1)):
delta[i] = end[i] - start[i]
arr1 = np.arange('2005-02', '2006-02', dtype='datetime64[D]')
arr2 = arr1 + np.random.randint(0, 10000, arr1.size)
delta = np.zeros_like(arr1, dtype='timedelta64[D]')
foo[1, 32](arr1, arr2, delta)
self.assertPreciseEqual(delta, arr2 - arr1)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test for gpu datetime<commit_after>
|
from __future__ import print_function
import numpy as np
from numba import cuda
from numba import unittest_support as unittest
from numba.tests.support import TestCase
from numba.cuda.testing import SerialMixin
class TestCudaAutojit(SerialMixin, TestCase):
def test_basic_datetime_kernel(self):
@cuda.jit
def foo(start, end, delta):
for i in range(cuda.grid(1), delta.size, cuda.gridsize(1)):
delta[i] = end[i] - start[i]
arr1 = np.arange('2005-02', '2006-02', dtype='datetime64[D]')
arr2 = arr1 + np.random.randint(0, 10000, arr1.size)
delta = np.zeros_like(arr1, dtype='timedelta64[D]')
foo[1, 32](arr1, arr2, delta)
self.assertPreciseEqual(delta, arr2 - arr1)
if __name__ == '__main__':
unittest.main()
|
Add test for gpu datetimefrom __future__ import print_function
import numpy as np
from numba import cuda
from numba import unittest_support as unittest
from numba.tests.support import TestCase
from numba.cuda.testing import SerialMixin
class TestCudaAutojit(SerialMixin, TestCase):
def test_basic_datetime_kernel(self):
@cuda.jit
def foo(start, end, delta):
for i in range(cuda.grid(1), delta.size, cuda.gridsize(1)):
delta[i] = end[i] - start[i]
arr1 = np.arange('2005-02', '2006-02', dtype='datetime64[D]')
arr2 = arr1 + np.random.randint(0, 10000, arr1.size)
delta = np.zeros_like(arr1, dtype='timedelta64[D]')
foo[1, 32](arr1, arr2, delta)
self.assertPreciseEqual(delta, arr2 - arr1)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test for gpu datetime<commit_after>from __future__ import print_function
import numpy as np
from numba import cuda
from numba import unittest_support as unittest
from numba.tests.support import TestCase
from numba.cuda.testing import SerialMixin
class TestCudaAutojit(SerialMixin, TestCase):
def test_basic_datetime_kernel(self):
@cuda.jit
def foo(start, end, delta):
for i in range(cuda.grid(1), delta.size, cuda.gridsize(1)):
delta[i] = end[i] - start[i]
arr1 = np.arange('2005-02', '2006-02', dtype='datetime64[D]')
arr2 = arr1 + np.random.randint(0, 10000, arr1.size)
delta = np.zeros_like(arr1, dtype='timedelta64[D]')
foo[1, 32](arr1, arr2, delta)
self.assertPreciseEqual(delta, arr2 - arr1)
if __name__ == '__main__':
unittest.main()
|
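The kernel above uses a grid-stride loop, so the `[1, 32]` launch need not cover all 365 elements of the array. A pure-Python sketch of the same indexing pattern, handy for sanity-checking which elements each thread touches (no GPU required):

```
def grid_stride_indices(thread_id, n, gridsize):
    # Mirrors range(cuda.grid(1), delta.size, cuda.gridsize(1)).
    return list(range(thread_id, n, gridsize))

# With foo[1, 32] and a 365-day array, thread 0 handles 0, 32, 64, ...
# and thread 31 handles 31, 63, 95, ...; together they cover every index.
assert grid_stride_indices(0, 365, 32)[:3] == [0, 32, 64]
assert grid_stride_indices(31, 365, 32)[:2] == [31, 63]
covered = sum((grid_stride_indices(t, 365, 32) for t in range(32)), [])
assert sorted(covered) == list(range(365))
```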
|
5fab5508f5710c764f88bc27378711128df5f962
|
src/dbus_python_client_gen/_errors.py
|
src/dbus_python_client_gen/_errors.py
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
Exception hierarchy for this package.
"""
class DPClientError(Exception):
"""
Top-level error.
"""
pass
class DPClientGenerationError(DPClientError):
"""
Exception during generation of classes.
"""
pass
|
Add a few error classes
|
Add a few error classes
Signed-off-by: mulhern <7b51bcf507bcd7afb72bf8663752c0ddbeb517f6@redhat.com>
|
Python
|
mpl-2.0
|
mulkieran/dbus-python-client-gen
|
Add a few error classes
Signed-off-by: mulhern <7b51bcf507bcd7afb72bf8663752c0ddbeb517f6@redhat.com>
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
Exception hierarchy for this package.
"""
class DPClientError(Exception):
"""
Top-level error.
"""
pass
class DPClientGenerationError(DPClientError):
"""
Exception during generation of classes.
"""
pass
|
<commit_before><commit_msg>Add a few error classes
Signed-off-by: mulhern <7b51bcf507bcd7afb72bf8663752c0ddbeb517f6@redhat.com><commit_after>
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
Exception hierarchy for this package.
"""
class DPClientError(Exception):
"""
Top-level error.
"""
pass
class DPClientGenerationError(DPClientError):
"""
Exception during generation of classes.
"""
pass
|
Add a few error classes
Signed-off-by: mulhern <7b51bcf507bcd7afb72bf8663752c0ddbeb517f6@redhat.com># This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
Exception hierarchy for this package.
"""
class DPClientError(Exception):
"""
Top-level error.
"""
pass
class DPClientGenerationError(DPClientError):
"""
Exception during generation of classes.
"""
pass
|
<commit_before><commit_msg>Add a few error classes
Signed-off-by: mulhern <7b51bcf507bcd7afb72bf8663752c0ddbeb517f6@redhat.com><commit_after># This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
Exception hierarchy for this package.
"""
class DPClientError(Exception):
"""
Top-level error.
"""
pass
class DPClientGenerationError(DPClientError):
"""
Exception during generation of classes.
"""
pass
|
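Rooting `DPClientGenerationError` under `DPClientError` lets callers catch everything the package raises with one handler; a brief sketch (the import path mirrors this record's file location):

```
from dbus_python_client_gen._errors import (
    DPClientError,
    DPClientGenerationError,
)

try:
    raise DPClientGenerationError("could not generate class")
except DPClientError as err:
    # One handler covers every error the package defines.
    print("dbus-python-client-gen failed: %s" % err)
```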
|
01b0afde57aa109f7b5fa6fccce0d67cdeeec7dc
|
belpy/tests/test_biopax.py
|
belpy/tests/test_biopax.py
|
from belpy.biopax import biopax_api
from belpy.pysb_assembler import PysbAssembler
def test_hyphenated_agent_names():
"""This query should contain reactions with agent names RAF1-BRAF,
which need to be canonicalized to Python-compatible names before
model assembly."""
bp = biopax_api.process_pc_neighborhood(['BRAF'])
bp.get_phosphorylation()
pa = PysbAssembler()
pa.add_statements(bp.statements)
pa.make_model()
|
Test for handling hyphenated agents in biopax
|
Test for handling hyphenated agents in biopax
Also agents including spaces, etc.
|
Python
|
bsd-2-clause
|
sorgerlab/belpy,sorgerlab/indra,johnbachman/belpy,pvtodorov/indra,decarlin/indra,johnbachman/indra,sorgerlab/indra,johnbachman/indra,jmuhlich/indra,bgyori/indra,decarlin/indra,sorgerlab/indra,pvtodorov/indra,pvtodorov/indra,johnbachman/belpy,sorgerlab/belpy,bgyori/indra,bgyori/indra,pvtodorov/indra,jmuhlich/indra,decarlin/indra,johnbachman/belpy,jmuhlich/indra,johnbachman/indra,sorgerlab/belpy
|
Test for handling hyphenated agents in biopax
Also agents including spaces, etc.
|
from belpy.biopax import biopax_api
from belpy.pysb_assembler import PysbAssembler
def test_hyphenated_agent_names():
"""This query should contain reactions with agent names RAF1-BRAF,
which need to be canonicalized to Python-compatible names before
model assembly."""
bp = biopax_api.process_pc_neighborhood(['BRAF'])
bp.get_phosphorylation()
pa = PysbAssembler()
pa.add_statements(bp.statements)
pa.make_model()
|
<commit_before><commit_msg>Test for handling hyphenated agents in biopax
Also agents including spaces, etc.<commit_after>
|
from belpy.biopax import biopax_api
from belpy.pysb_assembler import PysbAssembler
def test_hyphenated_agent_names():
"""This query should contain reactions with agent names RAF1-BRAF,
which need to be canonicalized to Python-compatible names before
model assembly."""
bp = biopax_api.process_pc_neighborhood(['BRAF'])
bp.get_phosphorylation()
pa = PysbAssembler()
pa.add_statements(bp.statements)
pa.make_model()
|
Test for handling hyphenated agents in biopax
Also agents including spaces, etc.from belpy.biopax import biopax_api
from belpy.pysb_assembler import PysbAssembler
def test_hyphenated_agent_names():
"""This query should contain reactions with agent names RAF1-BRAF,
which need to be canonicalized to Python-compatible names before
model assembly."""
bp = biopax_api.process_pc_neighborhood(['BRAF'])
bp.get_phosphorylation()
pa = PysbAssembler()
pa.add_statements(bp.statements)
pa.make_model()
|
<commit_before><commit_msg>Test for handling hyphenated agents in biopax
Also agents including spaces, etc.<commit_after>from belpy.biopax import biopax_api
from belpy.pysb_assembler import PysbAssembler
def test_hyphenated_agent_names():
"""This query should contain reactions with agent names RAF1-BRAF,
which need to be canonicalized to Python-compatible names before
model assembly."""
bp = biopax_api.process_pc_neighborhood(['BRAF'])
bp.get_phosphorylation()
pa = PysbAssembler()
pa.add_statements(bp.statements)
pa.make_model()
|
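The docstring above says agent names such as RAF1-BRAF must be canonicalized into Python-compatible identifiers before model assembly. A minimal sketch of what that canonicalization might do — the real PysbAssembler logic is not shown in this record:

```
import re

def canonicalize_agent_name(name):
    # Replace characters that Python/PySB identifiers cannot contain,
    # then guard against a leading digit.
    safe = re.sub(r'[^0-9a-zA-Z_]', '_', name)
    if safe and safe[0].isdigit():
        safe = '_' + safe
    return safe

assert canonicalize_agent_name('RAF1-BRAF') == 'RAF1_BRAF'
assert canonicalize_agent_name('14-3-3') == '_14_3_3'
```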
|
f99682315fd5a50523fc28baf89710fe9a0ec18e
|
cluster/__init__.py
|
cluster/__init__.py
|
#!/usr/bin/env python2
def is_this_chef():
from socket import gethostname
return gethostname() == 'chef.compbio.ucsf.edu'
def require_chef():
if not is_this_chef():
raise SystemExit("This script must be run on chef.")
|
Add a function to say whether or not chef is the current host.
|
Add a function to say whether or not chef is the current host.
|
Python
|
mit
|
Kortemme-Lab/klab,Kortemme-Lab/klab,Kortemme-Lab/klab,Kortemme-Lab/klab
|
Add a function to say whether or not chef is the current host.
|
#!/usr/bin/env python2
def is_this_chef():
from socket import gethostname
return gethostname() == 'chef.compbio.ucsf.edu'
def require_chef():
if not is_this_chef():
raise SystemExit("This script must be run on chef.")
|
<commit_before><commit_msg>Add a function to say whether or not chef is the current host.<commit_after>
|
#!/usr/bin/env python2
def is_this_chef():
from socket import gethostname
return gethostname() == 'chef.compbio.ucsf.edu'
def require_chef():
if not is_this_chef():
raise SystemExit("This script must be run on chef.")
|
Add a function to say whether or not chef is the current host.#!/usr/bin/env python2
def is_this_chef():
from socket import gethostname
return gethostname() == 'chef.compbio.ucsf.edu'
def require_chef():
if not is_this_chef():
raise SystemExit("This script must be run on chef.")
|
<commit_before><commit_msg>Add a function to say whether or not chef is the current host.<commit_after>#!/usr/bin/env python2
def is_this_chef():
from socket import gethostname
return gethostname() == 'chef.compbio.ucsf.edu'
def require_chef():
if not is_this_chef():
raise SystemExit("This script must be run on chef.")
|
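A one-line usage sketch for the helpers above, guarding a hypothetical cluster-submission script:

```
from cluster import require_chef

def main():
    # Abort early on any machine other than chef.compbio.ucsf.edu.
    require_chef()
    # ... submit jobs here ...

if __name__ == '__main__':
    main()
```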
|
6501a871bc26981e274b3780206175be9db5c027
|
migrations/versions/0059_add_letter_template_type.py
|
migrations/versions/0059_add_letter_template_type.py
|
"""empty message
Revision ID: f266fb67597a
Revises: 0058_add_letters_flag
Create Date: 2016-11-07 16:13:18.961527
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '0059_add_letter_template_type'
down_revision = '0058_add_letters_flag'
name = 'template_type'
tmp_name = 'tmp_' + name
old_options = ('sms', 'email')
new_options = old_options + ('letter',)
new_type = sa.Enum(*new_options, name=name)
old_type = sa.Enum(*old_options, name=name)
tcr = sa.sql.table(
'templates',
sa.Column('template_type', new_type, nullable=False)
)
def upgrade():
op.execute('ALTER TYPE ' + name + ' RENAME TO ' + tmp_name)
new_type.create(op.get_bind())
op.execute(
'ALTER TABLE templates ALTER COLUMN template_type ' +
'TYPE ' + name + ' USING template_type::text::' + name
)
op.execute('DROP TYPE ' + tmp_name)
def downgrade():
# Convert 'letter' template into 'email'
op.execute(
tcr.update().where(tcr.c.template_type=='letter').values(template_type='email')
)
op.execute('ALTER TYPE ' + name + ' RENAME TO ' + tmp_name)
old_type.create(op.get_bind())
op.execute(
'ALTER TABLE templates ALTER COLUMN template_type ' +
'TYPE ' + name + ' USING template_type::text::' + name
)
op.execute('DROP TYPE ' + tmp_name)
|
Add letter to possible template types in DB
|
Add letter to possible template types in DB
A letter type was added to the `enum` in the `Template` model at the
same time it was added to the `Notification` model. But the migration was
only done for the `notifications` table, not the `templates` table.
See: https://github.com/alphagov/notifications-api/commit/25db1bce#diff-516aab258e161fc65e7564dabd2c625aR19
This commit does the migration to add `letter` as a possible value for
the `template_type` column, which is a bit fiddly because of `enum`s.
Before:
```
notification_api=# select enum_range(null::template_type);
enum_range
-------------
{sms,email}
(1 row)
```
After upgrade:
```
notification_api=# select enum_range(null::template_type);
enum_range
--------------------
{sms,email,letter}
(1 row)
```
After downgrade
```
notification_api=# select enum_range(null::template_type);
enum_range
-------------
{sms,email}
(1 row)
```
|
Python
|
mit
|
alphagov/notifications-api,alphagov/notifications-api
|
Add letter to possible template types in DB
A letter type was added to the `enum` in the `Template` model at the
same time it was added to the `Notification` model. But the migration was
only done for the `notifications` table, not the `templates` table.
See: https://github.com/alphagov/notifications-api/commit/25db1bce#diff-516aab258e161fc65e7564dabd2c625aR19
This commit does the migration to add `letter` as a possible value for
the `template_type` column, which is a bit fiddly because of `enum`s.
Before:
```
notification_api=# select enum_range(null::template_type);
enum_range
-------------
{sms,email}
(1 row)
```
After upgrade:
```
notification_api=# select enum_range(null::template_type);
enum_range
--------------------
{sms,email,letter}
(1 row)
```
After downgrade
```
notification_api=# select enum_range(null::template_type);
enum_range
-------------
{sms,email}
(1 row)
```
|
"""empty message
Revision ID: f266fb67597a
Revises: 0058_add_letters_flag
Create Date: 2016-11-07 16:13:18.961527
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '0059_add_letter_template_type'
down_revision = '0058_add_letters_flag'
name = 'template_type'
tmp_name = 'tmp_' + name
old_options = ('sms', 'email')
new_options = old_options + ('letter',)
new_type = sa.Enum(*new_options, name=name)
old_type = sa.Enum(*old_options, name=name)
tcr = sa.sql.table(
'templates',
sa.Column('template_type', new_type, nullable=False)
)
def upgrade():
op.execute('ALTER TYPE ' + name + ' RENAME TO ' + tmp_name)
new_type.create(op.get_bind())
op.execute(
'ALTER TABLE templates ALTER COLUMN template_type ' +
'TYPE ' + name + ' USING template_type::text::' + name
)
op.execute('DROP TYPE ' + tmp_name)
def downgrade():
# Convert 'letter' template into 'email'
op.execute(
tcr.update().where(tcr.c.template_type=='letter').values(template_type='email')
)
op.execute('ALTER TYPE ' + name + ' RENAME TO ' + tmp_name)
old_type.create(op.get_bind())
op.execute(
'ALTER TABLE templates ALTER COLUMN template_type ' +
'TYPE ' + name + ' USING template_type::text::' + name
)
op.execute('DROP TYPE ' + tmp_name)
|
<commit_before><commit_msg>Add letter to possible template types in DB
A letter type was added to the `enum` in the `Template` model at the
same time it was added to the `Notification` model. But the migration was
only done for the `notifications` table, not the `templates` table.
See: https://github.com/alphagov/notifications-api/commit/25db1bce#diff-516aab258e161fc65e7564dabd2c625aR19
This commit does the migration to add `letter` as a possible value for
the `template_type` column, which is a bit fiddly because of `enum`s.
Before:
```
notification_api=# select enum_range(null::template_type);
enum_range
-------------
{sms,email}
(1 row)
```
After upgrade:
```
notification_api=# select enum_range(null::template_type);
enum_range
--------------------
{sms,email,letter}
(1 row)
```
After downgrade
```
notification_api=# select enum_range(null::template_type);
enum_range
-------------
{sms,email}
(1 row)
```<commit_after>
|
"""empty message
Revision ID: f266fb67597a
Revises: 0058_add_letters_flag
Create Date: 2016-11-07 16:13:18.961527
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '0059_add_letter_template_type'
down_revision = '0058_add_letters_flag'
name = 'template_type'
tmp_name = 'tmp_' + name
old_options = ('sms', 'email')
new_options = old_options + ('letter',)
new_type = sa.Enum(*new_options, name=name)
old_type = sa.Enum(*old_options, name=name)
tcr = sa.sql.table(
'templates',
sa.Column('template_type', new_type, nullable=False)
)
def upgrade():
op.execute('ALTER TYPE ' + name + ' RENAME TO ' + tmp_name)
new_type.create(op.get_bind())
op.execute(
'ALTER TABLE templates ALTER COLUMN template_type ' +
'TYPE ' + name + ' USING template_type::text::' + name
)
op.execute('DROP TYPE ' + tmp_name)
def downgrade():
# Convert 'letter' template into 'email'
op.execute(
tcr.update().where(tcr.c.template_type=='letter').values(template_type='email')
)
op.execute('ALTER TYPE ' + name + ' RENAME TO ' + tmp_name)
old_type.create(op.get_bind())
op.execute(
'ALTER TABLE templates ALTER COLUMN template_type ' +
'TYPE ' + name + ' USING template_type::text::' + name
)
op.execute('DROP TYPE ' + tmp_name)
|
Add letter to possible template types in DB
A letter type was added to the `enum` in the `Template` model at the
same time it was added to the `Notification` model. But the migration was
only done for the `notifications` table, not the `templates` table.
See: https://github.com/alphagov/notifications-api/commit/25db1bce#diff-516aab258e161fc65e7564dabd2c625aR19
This commit does the migration to add `letter` as a possible value for
the `template_type` column, which is a bit fiddly because of `enum`s.
Before:
```
notification_api=# select enum_range(null::template_type);
enum_range
-------------
{sms,email}
(1 row)
```
After upgrade:
```
notification_api=# select enum_range(null::template_type);
enum_range
--------------------
{sms,email,letter}
(1 row)
```
After downgrade
```
notification_api=# select enum_range(null::template_type);
enum_range
-------------
{sms,email}
(1 row)
```"""empty message
Revision ID: f266fb67597a
Revises: 0058_add_letters_flag
Create Date: 2016-11-07 16:13:18.961527
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '0059_add_letter_template_type'
down_revision = '0058_add_letters_flag'
name = 'template_type'
tmp_name = 'tmp_' + name
old_options = ('sms', 'email')
new_options = old_options + ('letter',)
new_type = sa.Enum(*new_options, name=name)
old_type = sa.Enum(*old_options, name=name)
tcr = sa.sql.table(
'templates',
sa.Column('template_type', new_type, nullable=False)
)
def upgrade():
op.execute('ALTER TYPE ' + name + ' RENAME TO ' + tmp_name)
new_type.create(op.get_bind())
op.execute(
'ALTER TABLE templates ALTER COLUMN template_type ' +
'TYPE ' + name + ' USING template_type::text::' + name
)
op.execute('DROP TYPE ' + tmp_name)
def downgrade():
# Convert 'letter' template into 'email'
op.execute(
tcr.update().where(tcr.c.template_type=='letter').values(template_type='email')
)
op.execute('ALTER TYPE ' + name + ' RENAME TO ' + tmp_name)
old_type.create(op.get_bind())
op.execute(
'ALTER TABLE templates ALTER COLUMN template_type ' +
'TYPE ' + name + ' USING template_type::text::' + name
)
op.execute('DROP TYPE ' + tmp_name)
|
<commit_before><commit_msg>Add letter to possible template types in DB
A letter type was added to the `enum` in the `Template` model at the
same time it was added to the `Notification` model. But the migration was
only done for the `notifications` table, not the `templates` table.
See: https://github.com/alphagov/notifications-api/commit/25db1bce#diff-516aab258e161fc65e7564dabd2c625aR19
This commit does the migration to add `letter` as a possible value for
the `template_type` column, which is a bit fiddly because of `enum`s.
Before:
```
notification_api=# select enum_range(null::template_type);
enum_range
-------------
{sms,email}
(1 row)
```
After upgrade:
```
notification_api=# select enum_range(null::template_type);
enum_range
--------------------
{sms,email,letter}
(1 row)
```
After downgrade
```
notification_api=# select enum_range(null::template_type);
enum_range
-------------
{sms,email}
(1 row)
```<commit_after>"""empty message
Revision ID: f266fb67597a
Revises: 0058_add_letters_flag
Create Date: 2016-11-07 16:13:18.961527
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '0059_add_letter_template_type'
down_revision = '0058_add_letters_flag'
name = 'template_type'
tmp_name = 'tmp_' + name
old_options = ('sms', 'email')
new_options = old_options + ('letter',)
new_type = sa.Enum(*new_options, name=name)
old_type = sa.Enum(*old_options, name=name)
tcr = sa.sql.table(
'templates',
sa.Column('template_type', new_type, nullable=False)
)
def upgrade():
op.execute('ALTER TYPE ' + name + ' RENAME TO ' + tmp_name)
new_type.create(op.get_bind())
op.execute(
'ALTER TABLE templates ALTER COLUMN template_type ' +
'TYPE ' + name + ' USING template_type::text::' + name
)
op.execute('DROP TYPE ' + tmp_name)
def downgrade():
# Convert 'letter' template into 'email'
op.execute(
tcr.update().where(tcr.c.template_type=='letter').values(template_type='email')
)
op.execute('ALTER TYPE ' + name + ' RENAME TO ' + tmp_name)
old_type.create(op.get_bind())
op.execute(
'ALTER TABLE templates ALTER COLUMN template_type ' +
'TYPE ' + name + ' USING template_type::text::' + name
)
op.execute('DROP TYPE ' + tmp_name)
|
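The upgrade and downgrade above follow the standard rename-then-recreate dance for Postgres enums. A rough generalization of that pattern is sketched below; the helper is illustrative only, not part of the migration, and assumes it runs inside an Alembic `upgrade()`.

```python
import sqlalchemy as sa
from alembic import op

def extend_enum(table, column, enum_name, new_values):
    # Sketch of the rename/recreate pattern for adding values to a
    # Postgres enum: park the old type under a temp name, create the
    # new type, retype the column through text, then drop the old type.
    tmp_name = 'tmp_' + enum_name
    op.execute('ALTER TYPE %s RENAME TO %s' % (enum_name, tmp_name))
    sa.Enum(*new_values, name=enum_name).create(op.get_bind())
    op.execute(
        'ALTER TABLE %s ALTER COLUMN %s TYPE %s USING %s::text::%s'
        % (table, column, enum_name, column, enum_name)
    )
    op.execute('DROP TYPE ' + tmp_name)
```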
|
fbc7622ffba8a527c45efed11f7044580deb3072
|
mvp/extensions.py
|
mvp/extensions.py
|
import os
from . import hooks
from .viewport import playblast
def default_handler(data, options):
_, ext = os.path.splitext(data['filename'])
is_qt = ext == '.mov'
kwargs = dict(
camera=data['camera'],
state=data['state'],
width=data['width'],
height=data['height'],
)
if is_qt:
kwargs.update(
format='qt',
compression='H.264',
filename=data['filename'],
sound=data['sound'],
)
else:
# PNG settings
# No sound
# .png extension removed
kwargs.update(
format='image',
compression='png',
filename=data['filename'].rsplit('.', 1)[0],
)
playblast(**kwargs)
hooks.register_extension(
name='h.264',
ext='.mov',
handler=default_handler,
)
hooks.register_extension(
name='png',
ext='.png',
handler=default_handler,
)
|
Implement h264 and png as ext hooks
|
Implement h264 and png as ext hooks
|
Python
|
mit
|
danbradham/mvp
|
Implement h264 and png as ext hooks
|
import os
from . import hooks
from .viewport import playblast
def default_handler(data, options):
_, ext = os.path.splitext(data['filename'])
is_qt = ext == '.mov'
kwargs = dict(
camera=data['camera'],
state=data['state'],
width=data['width'],
height=data['height'],
)
if is_qt:
kwargs.update(
format='qt',
compression='H.264',
filename=data['filename'],
sound=data['sound'],
)
else:
# PNG settings
# No sound
# .png extension removed
kwargs.update(
format='image',
compression='png',
filename=data['filename'].rsplit('.', 1)[0],
)
playblast(**kwargs)
hooks.register_extension(
name='h.264',
ext='.mov',
handler=default_handler,
)
hooks.register_extension(
name='png',
ext='.png',
handler=default_handler,
)
|
<commit_before><commit_msg>Implement h264 and png as ext hooks<commit_after>
|
import os
from . import hooks
from .viewport import playblast
def default_handler(data, options):
_, ext = os.path.splitext(data['filename'])
is_qt = ext == '.mov'
kwargs = dict(
camera=data['camera'],
state=data['state'],
width=data['width'],
height=data['height'],
)
if is_qt:
kwargs.update(
format='qt',
compression='H.264',
filename=data['filename'],
sound=data['sound'],
)
else:
# PNG settings
# No sound
# .png extension removed
kwargs.update(
format='image',
compression='png',
filename=data['filename'].rsplit('.', 1)[0],
)
playblast(**kwargs)
hooks.register_extension(
name='h.264',
ext='.mov',
handler=default_handler,
)
hooks.register_extension(
name='png',
ext='.png',
handler=default_handler,
)
|
Implement h264 and png as ext hooksimport os
from . import hooks
from .viewport import playblast
def default_handler(data, options):
_, ext = os.path.splitext(data['filename'])
is_qt = ext == '.mov'
kwargs = dict(
camera=data['camera'],
state=data['state'],
width=data['width'],
height=data['height'],
)
if is_qt:
kwargs.update(
format='qt',
compression='H.264',
filename=data['filename'],
sound=data['sound'],
)
else:
# PNG settings
# No sound
# .png extension removed
kwargs.update(
format='image',
compression='png',
filename=data['filename'].rsplit('.', 1)[0],
)
playblast(**kwargs)
hooks.register_extension(
name='h.264',
ext='.mov',
handler=default_handler,
)
hooks.register_extension(
name='png',
ext='.png',
handler=default_handler,
)
|
<commit_before><commit_msg>Implement h264 and png as ext hooks<commit_after>import os
from . import hooks
from .viewport import playblast
def default_handler(data, options):
_, ext = os.path.splitext(data['filename'])
is_qt = ext == '.mov'
kwargs = dict(
camera=data['camera'],
state=data['state'],
width=data['width'],
height=data['height'],
)
if is_qt:
kwargs.update(
format='qt',
compression='H.264',
filename=data['filename'],
sound=data['sound'],
)
else:
# PNG settings
# No sound
# .png extension removed
kwargs.update(
format='image',
compression='png',
filename=data['filename'].rsplit('.', 1)[0],
)
playblast(**kwargs)
hooks.register_extension(
name='h.264',
ext='.mov',
handler=default_handler,
)
hooks.register_extension(
name='png',
ext='.png',
handler=default_handler,
)
|
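Registering further formats works the same way through `hooks.register_extension`. A hypothetical JPEG handler is sketched below for illustration; the compression value and settings are assumptions, not part of the commit.

```python
from mvp import hooks
from mvp.viewport import playblast

def jpeg_handler(data, options):
    # Illustrative handler mirroring the PNG branch above, but writing
    # JPEG frames; like PNG, no sound and the extension is stripped.
    playblast(
        camera=data['camera'],
        state=data['state'],
        width=data['width'],
        height=data['height'],
        format='image',
        compression='jpg',
        filename=data['filename'].rsplit('.', 1)[0],
    )

hooks.register_extension(name='jpeg', ext='.jpg', handler=jpeg_handler)
```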
|
bc0f4bccf07d87bcc50469b5b20db44679f30128
|
scripts/add-neuron-catalogue-concepts.py
|
scripts/add-neuron-catalogue-concepts.py
|
#!/usr/bin/env python
from common import db_connection
import sys
if len(sys.argv) != 3:
    print >> sys.stderr, "Usage: %s <PROJECT-ID> <USER-ID>" % sys.argv[0]
sys.exit(1)
project_id = int(sys.argv[1])
user_id = int(sys.argv[2])
c = db_connection.cursor()
for class_name in ('driver_line', 'cell_body_location'):
c.execute("SELECT * FROM class WHERE project_id = %s AND class_name = %s",
(project_id, class_name))
if c.fetchall():
print >> sys.stderr, "The class '%s' has already been inserted" % (class_name,)
else:
c.execute("INSERT INTO class (user_id, project_id, class_name) "+
"VALUES (%s, %s, %s)",
(user_id, project_id, class_name))
for relation_name in ('expresses_in', 'has_cell_body'):
c.execute("SELECT * FROM relation WHERE project_id = %s AND relation_name = %s",
(project_id, relation_name))
if c.fetchall():
print >> sys.stderr, "The relation '%s' has already been inserted" % (relation_name,)
else:
c.execute("INSERT INTO relation (user_id, project_id, relation_name) "+
"VALUES (%s, %s, %s)",
(user_id, project_id, relation_name))
db_connection.commit()
c.close()
db_connection.close()
|
Add the concepts required for the neuron catalogue Django application
|
Add the concepts required for the neuron catalogue Django application
|
Python
|
agpl-3.0
|
fzadow/CATMAID,fzadow/CATMAID,htem/CATMAID,htem/CATMAID,htem/CATMAID,fzadow/CATMAID,fzadow/CATMAID,htem/CATMAID
|
Add the concepts required for the neuron catalogue Django application
|
#!/usr/bin/env python
from common import db_connection
import sys
if len(sys.argv) != 3:
    print >> sys.stderr, "Usage: %s <PROJECT-ID> <USER-ID>" % sys.argv[0]
sys.exit(1)
project_id = int(sys.argv[1])
user_id = int(sys.argv[2])
c = db_connection.cursor()
for class_name in ('driver_line', 'cell_body_location'):
c.execute("SELECT * FROM class WHERE project_id = %s AND class_name = %s",
(project_id, class_name))
if c.fetchall():
print >> sys.stderr, "The class '%s' has already been inserted" % (class_name,)
else:
c.execute("INSERT INTO class (user_id, project_id, class_name) "+
"VALUES (%s, %s, %s)",
(user_id, project_id, class_name))
for relation_name in ('expresses_in', 'has_cell_body'):
c.execute("SELECT * FROM relation WHERE project_id = %s AND relation_name = %s",
(project_id, relation_name))
if c.fetchall():
print >> sys.stderr, "The relation '%s' has already been inserted" % (relation_name,)
else:
c.execute("INSERT INTO relation (user_id, project_id, relation_name) "+
"VALUES (%s, %s, %s)",
(user_id, project_id, relation_name))
db_connection.commit()
c.close()
db_connection.close()
|
<commit_before><commit_msg>Add the concepts required for the neuron catalogue Django application<commit_after>
|
#!/usr/bin/env python
from common import db_connection
import sys
if len(sys.argv) != 3:
    print >> sys.stderr, "Usage: %s <PROJECT-ID> <USER-ID>" % sys.argv[0]
sys.exit(1)
project_id = int(sys.argv[1])
user_id = int(sys.argv[2])
c = db_connection.cursor()
for class_name in ('driver_line', 'cell_body_location'):
c.execute("SELECT * FROM class WHERE project_id = %s AND class_name = %s",
(project_id, class_name))
if c.fetchall():
print >> sys.stderr, "The class '%s' has already been inserted" % (class_name,)
else:
c.execute("INSERT INTO class (user_id, project_id, class_name) "+
"VALUES (%s, %s, %s)",
(user_id, project_id, class_name))
for relation_name in ('expresses_in', 'has_cell_body'):
c.execute("SELECT * FROM relation WHERE project_id = %s AND relation_name = %s",
(project_id, relation_name))
if c.fetchall():
print >> sys.stderr, "The relation '%s' has already been inserted" % (relation_name,)
else:
c.execute("INSERT INTO relation (user_id, project_id, relation_name) "+
"VALUES (%s, %s, %s)",
(user_id, project_id, relation_name))
db_connection.commit()
c.close()
db_connection.close()
|
Add the concepts required for the neuron catalogue Django application#!/usr/bin/env python
from common import db_connection
import sys
if len(sys.argv) != 3:
    print >> sys.stderr, "Usage: %s <PROJECT-ID> <USER-ID>" % sys.argv[0]
sys.exit(1)
project_id = int(sys.argv[1])
user_id = int(sys.argv[2])
c = db_connection.cursor()
for class_name in ('driver_line', 'cell_body_location'):
c.execute("SELECT * FROM class WHERE project_id = %s AND class_name = %s",
(project_id, class_name))
if c.fetchall():
print >> sys.stderr, "The class '%s' has already been inserted" % (class_name,)
else:
c.execute("INSERT INTO class (user_id, project_id, class_name) "+
"VALUES (%s, %s, %s)",
(user_id, project_id, class_name))
for relation_name in ('expresses_in', 'has_cell_body'):
c.execute("SELECT * FROM relation WHERE project_id = %s AND relation_name = %s",
(project_id, relation_name))
if c.fetchall():
print >> sys.stderr, "The relation '%s' has already been inserted" % (relation_name,)
else:
c.execute("INSERT INTO relation (user_id, project_id, relation_name) "+
"VALUES (%s, %s, %s)",
(user_id, project_id, relation_name))
db_connection.commit()
c.close()
db_connection.close()
|
<commit_before><commit_msg>Add the concepts required for the neuron catalogue Django application<commit_after>#!/usr/bin/env python
from common import db_connection
import sys
if len(sys.argv) != 3:
    print >> sys.stderr, "Usage: %s <PROJECT-ID> <USER-ID>" % sys.argv[0]
sys.exit(1)
project_id = int(sys.argv[1])
user_id = int(sys.argv[2])
c = db_connection.cursor()
for class_name in ('driver_line', 'cell_body_location'):
c.execute("SELECT * FROM class WHERE project_id = %s AND class_name = %s",
(project_id, class_name))
if c.fetchall():
print >> sys.stderr, "The class '%s' has already been inserted" % (class_name,)
else:
c.execute("INSERT INTO class (user_id, project_id, class_name) "+
"VALUES (%s, %s, %s)",
(user_id, project_id, class_name))
for relation_name in ('expresses_in', 'has_cell_body'):
c.execute("SELECT * FROM relation WHERE project_id = %s AND relation_name = %s",
(project_id, relation_name))
if c.fetchall():
print >> sys.stderr, "The relation '%s' has already been inserted" % (relation_name,)
else:
c.execute("INSERT INTO relation (user_id, project_id, relation_name) "+
"VALUES (%s, %s, %s)",
(user_id, project_id, relation_name))
db_connection.commit()
c.close()
db_connection.close()
|
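The script repeats an identical insert-if-absent check for classes and relations. A sketch of that pattern factored into a helper (illustrative only; it interpolates trusted table/column names and keeps user values parameterized):

```python
def insert_if_absent(cursor, table, name_column, name, user_id, project_id):
    # Only insert the concept if it is not already present for the project.
    cursor.execute(
        "SELECT 1 FROM %s WHERE project_id = %%s AND %s = %%s"
        % (table, name_column),
        (project_id, name))
    if not cursor.fetchall():
        cursor.execute(
            "INSERT INTO %s (user_id, project_id, %s) VALUES (%%s, %%s, %%s)"
            % (table, name_column),
            (user_id, project_id, name))
```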
|
1eced53f26bffd42df81fc7c655da46591204136
|
examples/decryption_test.py
|
examples/decryption_test.py
|
"""
This test demonstrates the use of encryption/decryption.
(Technically, obfuscation/unobfuscation.)
"""
from seleniumbase import BaseCase
from seleniumbase import encryption
class MyTestClass(BaseCase):
    def test_decryption(self):
self.open("https://www.saucedemo.com/")
self.update_text("#user-name", "standard_user")
encrypted_password = "$^*ENCRYPT=S3BDTAdCWzMmKEY8Gjg=?&#$"
print("Encrypted Password = %s" % encrypted_password)
password = encryption.decrypt(encrypted_password)
print("Password = %s" % password)
self.update_text("#password", password)
self.click('input[type="submit"]')
self.assert_text("Products", "div.product_label")
self.assert_element("#inventory_container")
|
Add a new example test
|
Add a new example test
|
Python
|
mit
|
seleniumbase/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase,seleniumbase/SeleniumBase
|
Add a new example test
|
"""
This test demonstrates the use of encryption/decryption.
(Technically, obfuscation/unobfuscation.)
"""
from seleniumbase import BaseCase
from seleniumbase import encryption
class MyTestClass(BaseCase):
    def test_decryption(self):
self.open("https://www.saucedemo.com/")
self.update_text("#user-name", "standard_user")
encrypted_password = "$^*ENCRYPT=S3BDTAdCWzMmKEY8Gjg=?&#$"
print("Encrypted Password = %s" % encrypted_password)
password = encryption.decrypt(encrypted_password)
print("Password = %s" % password)
self.update_text("#password", password)
self.click('input[type="submit"]')
self.assert_text("Products", "div.product_label")
self.assert_element("#inventory_container")
|
<commit_before><commit_msg>Add a new example test<commit_after>
|
"""
This test demonstrates the use of encryption/decryption.
(Technically, obfuscation/unobfuscation.)
"""
from seleniumbase import BaseCase
from seleniumbase import encryption
class MyTestClass(BaseCase):
    def test_decryption(self):
self.open("https://www.saucedemo.com/")
self.update_text("#user-name", "standard_user")
encrypted_password = "$^*ENCRYPT=S3BDTAdCWzMmKEY8Gjg=?&#$"
print("Encrypted Password = %s" % encrypted_password)
password = encryption.decrypt(encrypted_password)
print("Password = %s" % password)
self.update_text("#password", password)
self.click('input[type="submit"]')
self.assert_text("Products", "div.product_label")
self.assert_element("#inventory_container")
|
Add a new example test"""
This test demonstrates the use of encryption/decryption.
(Technically, obfuscation/unobfuscation.)
"""
from seleniumbase import BaseCase
from seleniumbase import encryption
class MyTestClass(BaseCase):
    def test_decryption(self):
self.open("https://www.saucedemo.com/")
self.update_text("#user-name", "standard_user")
encrypted_password = "$^*ENCRYPT=S3BDTAdCWzMmKEY8Gjg=?&#$"
print("Encrypted Password = %s" % encrypted_password)
password = encryption.decrypt(encrypted_password)
print("Password = %s" % password)
self.update_text("#password", password)
self.click('input[type="submit"]')
self.assert_text("Products", "div.product_label")
self.assert_element("#inventory_container")
|
<commit_before><commit_msg>Add a new example test<commit_after>"""
This test demonstrates the use of encryption/decryption.
(Technically, obfuscation/unobfuscation.)
"""
from seleniumbase import BaseCase
from seleniumbase import encryption
class MyTestClass(BaseCase):
    def test_decryption(self):
self.open("https://www.saucedemo.com/")
self.update_text("#user-name", "standard_user")
encrypted_password = "$^*ENCRYPT=S3BDTAdCWzMmKEY8Gjg=?&#$"
print("Encrypted Password = %s" % encrypted_password)
password = encryption.decrypt(encrypted_password)
print("Password = %s" % password)
self.update_text("#password", password)
self.click('input[type="submit"]')
self.assert_text("Products", "div.product_label")
self.assert_element("#inventory_container")
|
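To produce a string like the `encrypted_password` used above, one would obfuscate the plaintext first, assuming the module also exposes an `encrypt()` counterpart to `decrypt()`. The snippet below is an illustrative sketch and the plaintext is a placeholder.

```python
from seleniumbase import encryption

# Hypothetical one-off run: obfuscate a plaintext password, then paste
# the printed token into a test as encrypted_password.
token = encryption.encrypt("secret_sauce")  # placeholder plaintext
print("Encrypted Password = %s" % token)
```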
|
5782fc5cb505ff6af8d20411004a05ad53e82b90
|
fabfile/testbeds/testbed_mlab.py
|
fabfile/testbeds/testbed_mlab.py
|
from fabric.api import env
#Management ip addresses of hosts in the cluster
host1 = 'root@10.84.19.42'
host2 = 'root@10.84.19.43'
host3 = 'root@10.84.19.44'
host4 = 'root@10.84.19.45'
host5 = 'root@10.84.19.46'
#External routers if any
#for eg.
#ext_routers = [('mx1', '10.204.216.253')]
ext_routers = []
#Autonomous system number
router_asn = 64512
#Host from which the fab commands are triggered to install and provision
host_build = 'root@10.84.5.31'
#Role definition of the hosts.
env.roledefs = {
'all': [host1, host2, host3, host4, host5],
'cfgm': [host1],
'openstack': [host1],
'control': [host2, host3],
'compute': [host4, host5],
'collector': [host1, host2, host3],
'webui': [host1, host2, host3],
'database': [host1, host2, host3],
'build': [host_build],
}
env.hostnames = {
'all': ['z0', 'z1', 'z2','10.84.19.45', '10.84.19.46']
}
#Openstack admin password
env.openstack_admin_password = 'chei9APh'
env.password = 'c0ntrail123'
#Passwords of each host
env.passwords = {
host1: 'c0ntrail123',
host2: 'c0ntrail123',
host3: 'c0ntrail123',
host4: 'c0ntrail123',
host5: 'c0ntrail123',
host_build: 'c0ntrail123',
}
#For reimage purpose
env.ostypes = {
host1: 'ubuntu',
host2: 'ubuntu',
host3: 'ubuntu',
host4: 'ubuntu',
host5: 'ubuntu',
}
env.test_repo_dir='/root/contrail-sanity/contrail-test'
|
Add testbed file for mlab
|
Add testbed file for mlab
|
Python
|
apache-2.0
|
Juniper/contrail-fabric-utils,Juniper/contrail-fabric-utils
|
Add testbed file for mlab
|
from fabric.api import env
#Management ip addresses of hosts in the cluster
host1 = 'root@10.84.19.42'
host2 = 'root@10.84.19.43'
host3 = 'root@10.84.19.44'
host4 = 'root@10.84.19.45'
host5 = 'root@10.84.19.46'
#External routers if any
#for eg.
#ext_routers = [('mx1', '10.204.216.253')]
ext_routers = []
#Autonomous system number
router_asn = 64512
#Host from which the fab commands are triggered to install and provision
host_build = 'root@10.84.5.31'
#Role definition of the hosts.
env.roledefs = {
'all': [host1, host2, host3, host4, host5],
'cfgm': [host1],
'openstack': [host1],
'control': [host2, host3],
'compute': [host4, host5],
'collector': [host1, host2, host3],
'webui': [host1, host2, host3],
'database': [host1, host2, host3],
'build': [host_build],
}
env.hostnames = {
'all': ['z0', 'z1', 'z2','10.84.19.45', '10.84.19.46']
}
#Openstack admin password
env.openstack_admin_password = 'chei9APh'
env.password = 'c0ntrail123'
#Passwords of each host
env.passwords = {
host1: 'c0ntrail123',
host2: 'c0ntrail123',
host3: 'c0ntrail123',
host4: 'c0ntrail123',
host5: 'c0ntrail123',
host_build: 'c0ntrail123',
}
#For reimage purpose
env.ostypes = {
host1: 'ubuntu',
host2: 'ubuntu',
host3: 'ubuntu',
host4: 'ubuntu',
host5: 'ubuntu',
}
env.test_repo_dir='/root/contrail-sanity/contrail-test'
|
<commit_before><commit_msg>Add testbed file for mlab<commit_after>
|
from fabric.api import env
#Management ip addresses of hosts in the cluster
host1 = 'root@10.84.19.42'
host2 = 'root@10.84.19.43'
host3 = 'root@10.84.19.44'
host4 = 'root@10.84.19.45'
host5 = 'root@10.84.19.46'
#External routers if any
#for eg.
#ext_routers = [('mx1', '10.204.216.253')]
ext_routers = []
#Autonomous system number
router_asn = 64512
#Host from which the fab commands are triggered to install and provision
host_build = 'root@10.84.5.31'
#Role definition of the hosts.
env.roledefs = {
'all': [host1, host2, host3, host4, host5],
'cfgm': [host1],
'openstack': [host1],
'control': [host2, host3],
'compute': [host4, host5],
'collector': [host1, host2, host3],
'webui': [host1, host2, host3],
'database': [host1, host2, host3],
'build': [host_build],
}
env.hostnames = {
'all': ['z0', 'z1', 'z2','10.84.19.45', '10.84.19.46']
}
#Openstack admin password
env.openstack_admin_password = 'chei9APh'
env.password = 'c0ntrail123'
#Passwords of each host
env.passwords = {
host1: 'c0ntrail123',
host2: 'c0ntrail123',
host3: 'c0ntrail123',
host4: 'c0ntrail123',
host5: 'c0ntrail123',
host_build: 'c0ntrail123',
}
#For reimage purpose
env.ostypes = {
host1: 'ubuntu',
host2: 'ubuntu',
host3: 'ubuntu',
host4: 'ubuntu',
host5: 'ubuntu',
}
env.test_repo_dir='/root/contrail-sanity/contrail-test'
|
Add testbed file for mlabfrom fabric.api import env
#Management ip addresses of hosts in the cluster
host1 = 'root@10.84.19.42'
host2 = 'root@10.84.19.43'
host3 = 'root@10.84.19.44'
host4 = 'root@10.84.19.45'
host5 = 'root@10.84.19.46'
#External routers if any
#for eg.
#ext_routers = [('mx1', '10.204.216.253')]
ext_routers = []
#Autonomous system number
router_asn = 64512
#Host from which the fab commands are triggered to install and provision
host_build = 'root@10.84.5.31'
#Role definition of the hosts.
env.roledefs = {
'all': [host1, host2, host3, host4, host5],
'cfgm': [host1],
'openstack': [host1],
'control': [host2, host3],
'compute': [host4, host5],
'collector': [host1, host2, host3],
'webui': [host1, host2, host3],
'database': [host1, host2, host3],
'build': [host_build],
}
env.hostnames = {
'all': ['z0', 'z1', 'z2','10.84.19.45', '10.84.19.46']
}
#Openstack admin password
env.openstack_admin_password = 'chei9APh'
env.password = 'c0ntrail123'
#Passwords of each host
env.passwords = {
host1: 'c0ntrail123',
host2: 'c0ntrail123',
host3: 'c0ntrail123',
host4: 'c0ntrail123',
host5: 'c0ntrail123',
host_build: 'c0ntrail123',
}
#For reimage purpose
env.ostypes = {
host1: 'ubuntu',
host2: 'ubuntu',
host3: 'ubuntu',
host4: 'ubuntu',
host5: 'ubuntu',
}
env.test_repo_dir='/root/contrail-sanity/contrail-test'
|
<commit_before><commit_msg>Add testbed file for mlab<commit_after>from fabric.api import env
#Management ip addresses of hosts in the cluster
host1 = 'root@10.84.19.42'
host2 = 'root@10.84.19.43'
host3 = 'root@10.84.19.44'
host4 = 'root@10.84.19.45'
host5 = 'root@10.84.19.46'
#External routers if any
#for eg.
#ext_routers = [('mx1', '10.204.216.253')]
ext_routers = []
#Autonomous system number
router_asn = 64512
#Host from which the fab commands are triggered to install and provision
host_build = 'root@10.84.5.31'
#Role definition of the hosts.
env.roledefs = {
'all': [host1, host2, host3, host4, host5],
'cfgm': [host1],
'openstack': [host1],
'control': [host2, host3],
'compute': [host4, host5],
'collector': [host1, host2, host3],
'webui': [host1, host2, host3],
'database': [host1, host2, host3],
'build': [host_build],
}
env.hostnames = {
'all': ['z0', 'z1', 'z2','10.84.19.45', '10.84.19.46']
}
#Openstack admin password
env.openstack_admin_password = 'chei9APh'
env.password = 'c0ntrail123'
#Passwords of each host
env.passwords = {
host1: 'c0ntrail123',
host2: 'c0ntrail123',
host3: 'c0ntrail123',
host4: 'c0ntrail123',
host5: 'c0ntrail123',
host_build: 'c0ntrail123',
}
#For reimage purpose
env.ostypes = {
host1: 'ubuntu',
host2: 'ubuntu',
host3: 'ubuntu',
host4: 'ubuntu',
host5: 'ubuntu',
}
env.test_repo_dir='/root/contrail-sanity/contrail-test'
|
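For context, a sketch of how Fabric tasks typically consume these role definitions; the task below is hypothetical and not part of the testbed file.

```python
from fabric.api import roles, run

@roles('compute')
def check_compute_nodes():
    # Fabric runs this on each host in env.roledefs['compute']
    # (host4 and host5 in the testbed above).
    run('uname -a')
```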
|
4002e0088a50d1a4ac5c20e52425c9f78d6761c6
|
tests/unit/proposals/test_models.py
|
tests/unit/proposals/test_models.py
|
# Third Party Stuff
import pytest
from tests import factories as f
@pytest.mark.parametrize('method', [
'get_absolute_url',
'get_delete_url',
'get_down_vote_url',
'get_hashid',
'get_remove_vote_url',
'get_review_url',
'get_slug',
'get_up_vote_url',
'get_update_url',
'get_vote_url',
'__str__',
])
def test_proposal_model_method_works(db, method):
proposal = f.ProposalFactory()
assert getattr(proposal, method)()
|
Add unit tests for proposal model getter methods
|
Add unit tests for proposal model getter methods
closes #448
|
Python
|
mit
|
ChillarAnand/junction,ChillarAnand/junction,pythonindia/junction,ChillarAnand/junction,ChillarAnand/junction,pythonindia/junction,pythonindia/junction,pythonindia/junction
|
Add unit tests for proposal model getter methods
closes #448
|
# Third Party Stuff
import pytest
from tests import factories as f
@pytest.mark.parametrize('method', [
'get_absolute_url',
'get_delete_url',
'get_down_vote_url',
'get_hashid',
'get_remove_vote_url',
'get_review_url',
'get_slug',
'get_up_vote_url',
'get_update_url',
'get_vote_url',
'__str__',
])
def test_proposal_model_method_works(db, method):
proposal = f.ProposalFactory()
assert getattr(proposal, method)()
|
<commit_before><commit_msg>Add unit tests for proposal model getter methods
closes #448<commit_after>
|
# Third Party Stuff
import pytest
from tests import factories as f
@pytest.mark.parametrize('method', [
'get_absolute_url',
'get_delete_url',
'get_down_vote_url',
'get_hashid',
'get_remove_vote_url',
'get_review_url',
'get_slug',
'get_up_vote_url',
'get_update_url',
'get_vote_url',
'__str__',
])
def test_proposal_model_method_works(db, method):
proposal = f.ProposalFactory()
assert getattr(proposal, method)()
|
Add unit tests for proposal model getter methods
closes #448# Third Party Stuff
import pytest
from tests import factories as f
@pytest.mark.parametrize('method', [
'get_absolute_url',
'get_delete_url',
'get_down_vote_url',
'get_hashid',
'get_remove_vote_url',
'get_review_url',
'get_slug',
'get_up_vote_url',
'get_update_url',
'get_vote_url',
'__str__',
])
def test_proposal_model_method_works(db, method):
proposal = f.ProposalFactory()
assert getattr(proposal, method)()
|
<commit_before><commit_msg>Add unit tests for proposal model getter methods
closes #448<commit_after># Third Party Stuff
import pytest
from tests import factories as f
@pytest.mark.parametrize('method', [
'get_absolute_url',
'get_delete_url',
'get_down_vote_url',
'get_hashid',
'get_remove_vote_url',
'get_review_url',
'get_slug',
'get_up_vote_url',
'get_update_url',
'get_vote_url',
'__str__',
])
def test_proposal_model_method_works(db, method):
proposal = f.ProposalFactory()
assert getattr(proposal, method)()
|
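The parametrized test above is a smoke test over zero-argument getter methods. The same idea, generalized into a plain helper (an illustrative sketch, not project code):

```python
def assert_getters_truthy(obj, method_names):
    # Call each named zero-argument method and require a truthy result.
    for name in method_names:
        assert getattr(obj, name)(), '%s() returned a falsy value' % name
```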
|
8f0443c35e25c69ec4216e6d5c6717b3c069ebd0
|
scripts/remove_wiki_title_forward_slashes.py
|
scripts/remove_wiki_title_forward_slashes.py
|
"""
Remove forward slashes from wiki page titles, since they are no longer allowed characters and
break validation.
"""
import logging
import sys
from framework.mongo import database as db
from framework.transactions.context import TokuTransaction
from website.app import init_app
from website.project.model import Node
from scripts import utils as script_utils
logger = logging.getLogger(__name__)
def main():
wiki_pages = db.nodewikipage.find({}, {'_id': True, 'page_name': True, 'node': True})
wiki_pages = wiki_pages.batch_size(200)
fix_wiki_titles(wiki_pages)
def fix_wiki_titles(wiki_pages):
for i, wiki in enumerate(wiki_pages):
if '/' in wiki['page_name']:
old_name = wiki['page_name']
new_name = wiki['page_name'].replace('/', '')
# update wiki page name
db.nodewikipage.update({'_id': wiki['_id']}, {'$set': {'page_name': new_name}})
logger.info('Updated wiki {} title to {}'.format(wiki['_id'], new_name))
node = Node.load(wiki['node'])
if not node:
logger.info('Invalid node {} for wiki {}'.format(node, wiki['_id']))
continue
# update node wiki page records
if old_name in node.wiki_pages_versions:
node.wiki_pages_versions[new_name] = node.wiki_pages_versions[old_name]
del node.wiki_pages_versions[old_name]
if old_name in node.wiki_pages_current:
node.wiki_pages_current[new_name] = node.wiki_pages_current[old_name]
del node.wiki_pages_current[old_name]
if old_name in node.wiki_private_uuids:
node.wiki_private_uuids[new_name] = node.wiki_private_uuids[old_name]
del node.wiki_private_uuids[old_name]
if __name__ == '__main__':
dry = '--dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
init_app(routes=False, set_backends=True)
with TokuTransaction():
main()
if dry:
raise Exception('Dry Run -- Aborting Transaction')
|
Remove forward slashes from wiki page names
|
Remove forward slashes from wiki page names
|
Python
|
apache-2.0
|
binoculars/osf.io,cslzchen/osf.io,RomanZWang/osf.io,alexschiller/osf.io,TomBaxter/osf.io,leb2dg/osf.io,rdhyee/osf.io,baylee-d/osf.io,wearpants/osf.io,hmoco/osf.io,HalcyonChimera/osf.io,felliott/osf.io,acshi/osf.io,aaxelb/osf.io,acshi/osf.io,DanielSBrown/osf.io,doublebits/osf.io,monikagrabowska/osf.io,TomHeatwole/osf.io,wearpants/osf.io,samchrisinger/osf.io,RomanZWang/osf.io,saradbowman/osf.io,amyshi188/osf.io,binoculars/osf.io,icereval/osf.io,hmoco/osf.io,zachjanicki/osf.io,caseyrollins/osf.io,icereval/osf.io,samchrisinger/osf.io,abought/osf.io,abought/osf.io,caneruguz/osf.io,amyshi188/osf.io,erinspace/osf.io,zachjanicki/osf.io,Nesiehr/osf.io,HalcyonChimera/osf.io,jnayak1/osf.io,caneruguz/osf.io,adlius/osf.io,doublebits/osf.io,cwisecarver/osf.io,TomHeatwole/osf.io,mluke93/osf.io,alexschiller/osf.io,monikagrabowska/osf.io,pattisdr/osf.io,chrisseto/osf.io,caseyrollins/osf.io,hmoco/osf.io,kch8qx/osf.io,amyshi188/osf.io,DanielSBrown/osf.io,felliott/osf.io,brianjgeiger/osf.io,acshi/osf.io,doublebits/osf.io,kwierman/osf.io,chrisseto/osf.io,Johnetordoff/osf.io,rdhyee/osf.io,pattisdr/osf.io,jnayak1/osf.io,RomanZWang/osf.io,adlius/osf.io,binoculars/osf.io,alexschiller/osf.io,icereval/osf.io,mattclark/osf.io,mluo613/osf.io,samchrisinger/osf.io,TomHeatwole/osf.io,mluo613/osf.io,adlius/osf.io,rdhyee/osf.io,mluke93/osf.io,brianjgeiger/osf.io,Johnetordoff/osf.io,caneruguz/osf.io,leb2dg/osf.io,Nesiehr/osf.io,Johnetordoff/osf.io,kwierman/osf.io,samchrisinger/osf.io,mfraezz/osf.io,zamattiac/osf.io,kch8qx/osf.io,mattclark/osf.io,erinspace/osf.io,adlius/osf.io,aaxelb/osf.io,wearpants/osf.io,hmoco/osf.io,felliott/osf.io,CenterForOpenScience/osf.io,aaxelb/osf.io,crcresearch/osf.io,cslzchen/osf.io,rdhyee/osf.io,wearpants/osf.io,Johnetordoff/osf.io,cslzchen/osf.io,laurenrevere/osf.io,SSJohns/osf.io,erinspace/osf.io,RomanZWang/osf.io,doublebits/osf.io,kch8qx/osf.io,chrisseto/osf.io,chennan47/osf.io,baylee-d/osf.io,emetsger/osf.io,emetsger/osf.io,Nesiehr/osf.io,laurenrevere/osf.io,mluke93/osf.io,mfraezz/osf.io,emetsger/osf.io,TomHeatwole/osf.io,mfraezz/osf.io,leb2dg/osf.io,cwisecarver/osf.io,zachjanicki/osf.io,TomBaxter/osf.io,monikagrabowska/osf.io,felliott/osf.io,saradbowman/osf.io,jnayak1/osf.io,abought/osf.io,kwierman/osf.io,baylee-d/osf.io,zamattiac/osf.io,kch8qx/osf.io,jnayak1/osf.io,zachjanicki/osf.io,monikagrabowska/osf.io,zamattiac/osf.io,SSJohns/osf.io,sloria/osf.io,acshi/osf.io,amyshi188/osf.io,leb2dg/osf.io,Nesiehr/osf.io,mluo613/osf.io,SSJohns/osf.io,brianjgeiger/osf.io,mattclark/osf.io,aaxelb/osf.io,alexschiller/osf.io,chennan47/osf.io,mluo613/osf.io,abought/osf.io,chennan47/osf.io,zamattiac/osf.io,TomBaxter/osf.io,crcresearch/osf.io,cwisecarver/osf.io,sloria/osf.io,monikagrabowska/osf.io,caneruguz/osf.io,mluke93/osf.io,mluo613/osf.io,chrisseto/osf.io,cwisecarver/osf.io,CenterForOpenScience/osf.io,acshi/osf.io,RomanZWang/osf.io,pattisdr/osf.io,cslzchen/osf.io,crcresearch/osf.io,DanielSBrown/osf.io,alexschiller/osf.io,sloria/osf.io,mfraezz/osf.io,caseyrollins/osf.io,laurenrevere/osf.io,SSJohns/osf.io,HalcyonChimera/osf.io,brianjgeiger/osf.io,kwierman/osf.io,CenterForOpenScience/osf.io,HalcyonChimera/osf.io,kch8qx/osf.io,emetsger/osf.io,CenterForOpenScience/osf.io,DanielSBrown/osf.io,doublebits/osf.io
|
Remove forward slashes from wiki page names
|
"""
Remove forward slashes from wiki page titles, since they are no longer allowed characters and
break validation.
"""
import logging
import sys
from framework.mongo import database as db
from framework.transactions.context import TokuTransaction
from website.app import init_app
from website.project.model import Node
from scripts import utils as script_utils
logger = logging.getLogger(__name__)
def main():
wiki_pages = db.nodewikipage.find({}, {'_id': True, 'page_name': True, 'node': True})
wiki_pages = wiki_pages.batch_size(200)
fix_wiki_titles(wiki_pages)
def fix_wiki_titles(wiki_pages):
for i, wiki in enumerate(wiki_pages):
if '/' in wiki['page_name']:
old_name = wiki['page_name']
new_name = wiki['page_name'].replace('/', '')
# update wiki page name
db.nodewikipage.update({'_id': wiki['_id']}, {'$set': {'page_name': new_name}})
logger.info('Updated wiki {} title to {}'.format(wiki['_id'], new_name))
node = Node.load(wiki['node'])
if not node:
logger.info('Invalid node {} for wiki {}'.format(node, wiki['_id']))
continue
# update node wiki page records
if old_name in node.wiki_pages_versions:
node.wiki_pages_versions[new_name] = node.wiki_pages_versions[old_name]
del node.wiki_pages_versions[old_name]
if old_name in node.wiki_pages_current:
node.wiki_pages_current[new_name] = node.wiki_pages_current[old_name]
del node.wiki_pages_current[old_name]
if old_name in node.wiki_private_uuids:
node.wiki_private_uuids[new_name] = node.wiki_private_uuids[old_name]
del node.wiki_private_uuids[old_name]
if __name__ == '__main__':
dry = '--dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
init_app(routes=False, set_backends=True)
with TokuTransaction():
main()
if dry:
raise Exception('Dry Run -- Aborting Transaction')
|
<commit_before><commit_msg>Remove forward slashes from wiki page names<commit_after>
|
"""
Remove forward slashes from wiki page titles, since they are no longer allowed characters and
break validation.
"""
import logging
import sys
from framework.mongo import database as db
from framework.transactions.context import TokuTransaction
from website.app import init_app
from website.project.model import Node
from scripts import utils as script_utils
logger = logging.getLogger(__name__)
def main():
wiki_pages = db.nodewikipage.find({}, {'_id': True, 'page_name': True, 'node': True})
wiki_pages = wiki_pages.batch_size(200)
fix_wiki_titles(wiki_pages)
def fix_wiki_titles(wiki_pages):
for i, wiki in enumerate(wiki_pages):
if '/' in wiki['page_name']:
old_name = wiki['page_name']
new_name = wiki['page_name'].replace('/', '')
# update wiki page name
db.nodewikipage.update({'_id': wiki['_id']}, {'$set': {'page_name': new_name}})
logger.info('Updated wiki {} title to {}'.format(wiki['_id'], new_name))
node = Node.load(wiki['node'])
if not node:
logger.info('Invalid node {} for wiki {}'.format(node, wiki['_id']))
continue
# update node wiki page records
if old_name in node.wiki_pages_versions:
node.wiki_pages_versions[new_name] = node.wiki_pages_versions[old_name]
del node.wiki_pages_versions[old_name]
if old_name in node.wiki_pages_current:
node.wiki_pages_current[new_name] = node.wiki_pages_current[old_name]
del node.wiki_pages_current[old_name]
if old_name in node.wiki_private_uuids:
node.wiki_private_uuids[new_name] = node.wiki_private_uuids[old_name]
del node.wiki_private_uuids[old_name]
if __name__ == '__main__':
dry = '--dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
init_app(routes=False, set_backends=True)
with TokuTransaction():
main()
if dry:
raise Exception('Dry Run -- Aborting Transaction')
|
Remove forward slashes from wiki page names"""
Remove forward slashes from wiki page titles, since they are no longer allowed characters and
break validation.
"""
import logging
import sys
from framework.mongo import database as db
from framework.transactions.context import TokuTransaction
from website.app import init_app
from website.project.model import Node
from scripts import utils as script_utils
logger = logging.getLogger(__name__)
def main():
wiki_pages = db.nodewikipage.find({}, {'_id': True, 'page_name': True, 'node': True})
wiki_pages = wiki_pages.batch_size(200)
fix_wiki_titles(wiki_pages)
def fix_wiki_titles(wiki_pages):
for i, wiki in enumerate(wiki_pages):
if '/' in wiki['page_name']:
old_name = wiki['page_name']
new_name = wiki['page_name'].replace('/', '')
# update wiki page name
db.nodewikipage.update({'_id': wiki['_id']}, {'$set': {'page_name': new_name}})
logger.info('Updated wiki {} title to {}'.format(wiki['_id'], new_name))
node = Node.load(wiki['node'])
if not node:
logger.info('Invalid node {} for wiki {}'.format(node, wiki['_id']))
continue
# update node wiki page records
if old_name in node.wiki_pages_versions:
node.wiki_pages_versions[new_name] = node.wiki_pages_versions[old_name]
del node.wiki_pages_versions[old_name]
if old_name in node.wiki_pages_current:
node.wiki_pages_current[new_name] = node.wiki_pages_current[old_name]
del node.wiki_pages_current[old_name]
if old_name in node.wiki_private_uuids:
node.wiki_private_uuids[new_name] = node.wiki_private_uuids[old_name]
del node.wiki_private_uuids[old_name]
if __name__ == '__main__':
dry = '--dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
init_app(routes=False, set_backends=True)
with TokuTransaction():
main()
if dry:
raise Exception('Dry Run -- Aborting Transaction')
|
<commit_before><commit_msg>Remove forward slashes from wiki page names<commit_after>"""
Remove forward slashes from wiki page titles, since they are no longer allowed characters and
break validation.
"""
import logging
import sys
from framework.mongo import database as db
from framework.transactions.context import TokuTransaction
from website.app import init_app
from website.project.model import Node
from scripts import utils as script_utils
logger = logging.getLogger(__name__)
def main():
wiki_pages = db.nodewikipage.find({}, {'_id': True, 'page_name': True, 'node': True})
wiki_pages = wiki_pages.batch_size(200)
fix_wiki_titles(wiki_pages)
def fix_wiki_titles(wiki_pages):
for i, wiki in enumerate(wiki_pages):
if '/' in wiki['page_name']:
old_name = wiki['page_name']
new_name = wiki['page_name'].replace('/', '')
# update wiki page name
db.nodewikipage.update({'_id': wiki['_id']}, {'$set': {'page_name': new_name}})
logger.info('Updated wiki {} title to {}'.format(wiki['_id'], new_name))
node = Node.load(wiki['node'])
if not node:
logger.info('Invalid node {} for wiki {}'.format(node, wiki['_id']))
continue
# update node wiki page records
if old_name in node.wiki_pages_versions:
node.wiki_pages_versions[new_name] = node.wiki_pages_versions[old_name]
del node.wiki_pages_versions[old_name]
if old_name in node.wiki_pages_current:
node.wiki_pages_current[new_name] = node.wiki_pages_current[old_name]
del node.wiki_pages_current[old_name]
if old_name in node.wiki_private_uuids:
node.wiki_private_uuids[new_name] = node.wiki_private_uuids[old_name]
del node.wiki_private_uuids[old_name]
if __name__ == '__main__':
dry = '--dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
init_app(routes=False, set_backends=True)
with TokuTransaction():
main()
if dry:
raise Exception('Dry Run -- Aborting Transaction')
|
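The migration repeats the same dictionary key-rename three times for the wiki mappings. A sketch of that pattern as a helper (illustrative only, not part of the script):

```python
def rename_key(mapping, old_key, new_key):
    # Move the value stored under old_key to new_key, if present.
    if old_key in mapping:
        mapping[new_key] = mapping.pop(old_key)
```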
|
7295c18179870db944f0496d521a7031e483d1e2
|
skan/test/test_skeleton_class.py
|
skan/test/test_skeleton_class.py
|
import os, sys
from collections import defaultdict
import numpy as np
from numpy.testing import assert_equal, assert_allclose
from skan.csr import Skeleton
from skan._testdata import (tinycycle, tinyline, skeleton0, skeleton1,
skeleton2, skeleton3d, topograph1d, skeleton4)
def test_skeleton1_stats():
skeleton = Skeleton(skeleton1)
assert skeleton.paths.shape == (4, np.sum(skeleton1) + 1)
paths_list = skeleton.paths_list()
reference_paths = [
[8, 6, 1, 2, 3, 4, 5, 7, 11, 10, 13],
[8, 9, 13],
[8, 12, 14],
[13, 15, 16, 17]
]
d0 = 1 + np.sqrt(2)
reference_distances = [5 * d0, d0, d0, 1 + d0]
for path in reference_paths:
assert path in paths_list or path[::-1] in paths_list
assert_allclose(sorted(skeleton.path_lengths()),
sorted(reference_distances))
def test_tip_junction_edges():
skeleton = Skeleton(skeleton4)
reference_paths = [[1, 2], [2, 4, 5], [2, 7]]
paths_list = skeleton.paths_list()
for path in reference_paths:
assert path in paths_list or path[::-1] in paths_list
|
Add tests for new skeleton class
|
Add tests for new skeleton class
|
Python
|
bsd-3-clause
|
jni/skan
|
Add tests for new skeleton class
|
import os, sys
from collections import defaultdict
import numpy as np
from numpy.testing import assert_equal, assert_allclose
from skan.csr import Skeleton
from skan._testdata import (tinycycle, tinyline, skeleton0, skeleton1,
skeleton2, skeleton3d, topograph1d, skeleton4)
def test_skeleton1_stats():
skeleton = Skeleton(skeleton1)
assert skeleton.paths.shape == (4, np.sum(skeleton1) + 1)
paths_list = skeleton.paths_list()
reference_paths = [
[8, 6, 1, 2, 3, 4, 5, 7, 11, 10, 13],
[8, 9, 13],
[8, 12, 14],
[13, 15, 16, 17]
]
d0 = 1 + np.sqrt(2)
reference_distances = [5 * d0, d0, d0, 1 + d0]
for path in reference_paths:
assert path in paths_list or path[::-1] in paths_list
assert_allclose(sorted(skeleton.path_lengths()),
sorted(reference_distances))
def test_tip_junction_edges():
skeleton = Skeleton(skeleton4)
reference_paths = [[1, 2], [2, 4, 5], [2, 7]]
paths_list = skeleton.paths_list()
for path in reference_paths:
assert path in paths_list or path[::-1] in paths_list
|
<commit_before><commit_msg>Add tests for new skeleton class<commit_after>
|
import os, sys
from collections import defaultdict
import numpy as np
from numpy.testing import assert_equal, assert_allclose
from skan.csr import Skeleton
from skan._testdata import (tinycycle, tinyline, skeleton0, skeleton1,
skeleton2, skeleton3d, topograph1d, skeleton4)
def test_skeleton1_stats():
skeleton = Skeleton(skeleton1)
assert skeleton.paths.shape == (4, np.sum(skeleton1) + 1)
paths_list = skeleton.paths_list()
reference_paths = [
[8, 6, 1, 2, 3, 4, 5, 7, 11, 10, 13],
[8, 9, 13],
[8, 12, 14],
[13, 15, 16, 17]
]
d0 = 1 + np.sqrt(2)
reference_distances = [5 * d0, d0, d0, 1 + d0]
for path in reference_paths:
assert path in paths_list or path[::-1] in paths_list
assert_allclose(sorted(skeleton.path_lengths()),
sorted(reference_distances))
def test_tip_junction_edges():
skeleton = Skeleton(skeleton4)
reference_paths = [[1, 2], [2, 4, 5], [2, 7]]
paths_list = skeleton.paths_list()
for path in reference_paths:
assert path in paths_list or path[::-1] in paths_list
|
Add tests for new skeleton classimport os, sys
from collections import defaultdict
import numpy as np
from numpy.testing import assert_equal, assert_allclose
from skan.csr import Skeleton
from skan._testdata import (tinycycle, tinyline, skeleton0, skeleton1,
skeleton2, skeleton3d, topograph1d, skeleton4)
def test_skeleton1_stats():
skeleton = Skeleton(skeleton1)
assert skeleton.paths.shape == (4, np.sum(skeleton1) + 1)
paths_list = skeleton.paths_list()
reference_paths = [
[8, 6, 1, 2, 3, 4, 5, 7, 11, 10, 13],
[8, 9, 13],
[8, 12, 14],
[13, 15, 16, 17]
]
d0 = 1 + np.sqrt(2)
reference_distances = [5 * d0, d0, d0, 1 + d0]
for path in reference_paths:
assert path in paths_list or path[::-1] in paths_list
assert_allclose(sorted(skeleton.path_lengths()),
sorted(reference_distances))
def test_tip_junction_edges():
skeleton = Skeleton(skeleton4)
reference_paths = [[1, 2], [2, 4, 5], [2, 7]]
paths_list = skeleton.paths_list()
for path in reference_paths:
assert path in paths_list or path[::-1] in paths_list
|
<commit_before><commit_msg>Add tests for new skeleton class<commit_after>import os, sys
from collections import defaultdict
import numpy as np
from numpy.testing import assert_equal, assert_allclose
from skan.csr import Skeleton
from skan._testdata import (tinycycle, tinyline, skeleton0, skeleton1,
skeleton2, skeleton3d, topograph1d, skeleton4)
def test_skeleton1_stats():
skeleton = Skeleton(skeleton1)
assert skeleton.paths.shape == (4, np.sum(skeleton1) + 1)
paths_list = skeleton.paths_list()
reference_paths = [
[8, 6, 1, 2, 3, 4, 5, 7, 11, 10, 13],
[8, 9, 13],
[8, 12, 14],
[13, 15, 16, 17]
]
d0 = 1 + np.sqrt(2)
reference_distances = [5 * d0, d0, d0, 1 + d0]
for path in reference_paths:
assert path in paths_list or path[::-1] in paths_list
assert_allclose(sorted(skeleton.path_lengths()),
sorted(reference_distances))
def test_tip_junction_edges():
skeleton = Skeleton(skeleton4)
reference_paths = [[1, 2], [2, 4, 5], [2, 7]]
paths_list = skeleton.paths_list()
for path in reference_paths:
assert path in paths_list or path[::-1] in paths_list
|
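Both tests check reference paths without caring about traversal direction, since a skeleton path may be reported either way. That check, factored into a helper for illustration (not part of the test module):

```python
def assert_paths_present(reference_paths, paths_list):
    # Accept a path in either orientation: forward or reversed.
    for path in reference_paths:
        assert path in paths_list or path[::-1] in paths_list
```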
|
118576da487634c16a2ea3907d12cc452b67cc0f
|
src/detectors/detector_utils.py
|
src/detectors/detector_utils.py
|
"""
Detector utilities
"""
__license__ = """
This file is part of RAPD
Copyright (C) 2016, Cornell University
All rights reserved.
RAPD is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, version 3.
RAPD is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
__created__ = "2016-11-21"
__maintainer__ = "Frank Murphy"
__email__ = "fmurphy@anl.gov"
__status__ = "Development"
# Standard imports
import sys
# RAPD imports
import utils.text as text
def print_detector_info(image):
"""
Print out information on the detector given an image
"""
from iotbx.detectors import ImageFactory
i = ImageFactory(image)
print "%20s: %s" % ("vendortype", str(i.vendortype))
print "\nParameters:"
print "==========="
for key, val in i.parameters.iteritems():
print "%20s: %s" % (key, val)
if __name__ == "__main__":
# Get image name from the commandline
if len(sys.argv) > 1:
test_image = sys.argv[1]
else:
print text.red + "No input image" + text.stop
sys.exit(9)
print_detector_info(test_image)
|
Add utility t print out detector information from an image file
|
Add utility t print out detector information from an image file
|
Python
|
agpl-3.0
|
RAPD/RAPD,RAPD/RAPD,RAPD/RAPD,RAPD/RAPD,RAPD/RAPD
|
Add utility t print out detector information from an image file
|
"""
Detector utilities
"""
__license__ = """
This file is part of RAPD
Copyright (C) 2016, Cornell University
All rights reserved.
RAPD is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, version 3.
RAPD is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
__created__ = "2016-11-21"
__maintainer__ = "Frank Murphy"
__email__ = "fmurphy@anl.gov"
__status__ = "Development"
# Standard imports
import sys
# RAPD imports
import utils.text as text
def print_detector_info(image):
"""
Print out information on the detector given an image
"""
from iotbx.detectors import ImageFactory
i = ImageFactory(image)
print "%20s: %s" % ("vendortype", str(i.vendortype))
print "\nParameters:"
print "==========="
for key, val in i.parameters.iteritems():
print "%20s: %s" % (key, val)
if __name__ == "__main__":
# Get image name from the commandline
if len(sys.argv) > 1:
test_image = sys.argv[1]
else:
print text.red + "No input image" + text.stop
sys.exit(9)
print_detector_info(test_image)
|
<commit_before><commit_msg>Add utility to print out detector information from an image file<commit_after>
|
"""
Detector utilities
"""
__license__ = """
This file is part of RAPD
Copyright (C) 2016, Cornell University
All rights reserved.
RAPD is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, version 3.
RAPD is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
__created__ = "2016-11-21"
__maintainer__ = "Frank Murphy"
__email__ = "fmurphy@anl.gov"
__status__ = "Development"
# Standard imports
import sys
# RAPD imports
import utils.text as text
def print_detector_info(image):
"""
Print out information on the detector given an image
"""
from iotbx.detectors import ImageFactory
i = ImageFactory(image)
print "%20s: %s" % ("vendortype", str(i.vendortype))
print "\nParameters:"
print "==========="
for key, val in i.parameters.iteritems():
print "%20s: %s" % (key, val)
if __name__ == "__main__":
# Get image name from the commandline
if len(sys.argv) > 1:
test_image = sys.argv[1]
else:
print text.red + "No input image" + text.stop
sys.exit(9)
print_detector_info(test_image)
|
Add utility to print out detector information from an image file"""
Detector utilities
"""
__license__ = """
This file is part of RAPD
Copyright (C) 2016, Cornell University
All rights reserved.
RAPD is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, version 3.
RAPD is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
__created__ = "2016-11-21"
__maintainer__ = "Frank Murphy"
__email__ = "fmurphy@anl.gov"
__status__ = "Development"
# Standard imports
import sys
# RAPD imports
import utils.text as text
def print_detector_info(image):
"""
Print out information on the detector given an image
"""
from iotbx.detectors import ImageFactory
i = ImageFactory(image)
print "%20s: %s" % ("vendortype", str(i.vendortype))
print "\nParameters:"
print "==========="
for key, val in i.parameters.iteritems():
print "%20s: %s" % (key, val)
if __name__ == "__main__":
# Get image name from the commandline
if len(sys.argv) > 1:
test_image = sys.argv[1]
else:
print text.red + "No input image" + text.stop
sys.exit(9)
print_detector_info(test_image)
|
<commit_before><commit_msg>Add utility to print out detector information from an image file<commit_after>"""
Detector utilities
"""
__license__ = """
This file is part of RAPD
Copyright (C) 2016, Cornell University
All rights reserved.
RAPD is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, version 3.
RAPD is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
__created__ = "2016-11-21"
__maintainer__ = "Frank Murphy"
__email__ = "fmurphy@anl.gov"
__status__ = "Development"
# Standard imports
import sys
# RAPD imports
import utils.text as text
def print_detector_info(image):
"""
Print out information on the detector given an image
"""
from iotbx.detectors import ImageFactory
i = ImageFactory(image)
print "%20s: %s" % ("vendortype", str(i.vendortype))
print "\nParameters:"
print "==========="
for key, val in i.parameters.iteritems():
print "%20s: %s" % (key, val)
if __name__ == "__main__":
# Get image name from the commandline
if len(sys.argv) > 1:
test_image = sys.argv[1]
else:
print text.red + "No input image" + text.stop
sys.exit(9)
print_detector_info(test_image)
|
|
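A minimal usage sketch for the detector utility above, assuming iotbx is importable and the file is saved as detector_utils.py (the module name and image path are illustrative, not taken from the record):

import detector_utils  # hypothetical module name for the file above
detector_utils.print_detector_info("/data/images/test_0001.img")  # illustrative image path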
e976c426bef01cc9abdc985e36eae3695d8ee384
|
tests/test_frozen_at_when_copying_dataset.py
|
tests/test_frozen_at_when_copying_dataset.py
|
"""
Tests for https://github.com/jic-dtool/dtoolcore/issues/20
"""
import os
import time
from . import tmp_dir_fixture # NOQA
import dtoolcore as dc
def test_frozen_at_value_when_copying_dataset(tmp_dir_fixture): # NOQA
with dc.DataSetCreator("delete-me", tmp_dir_fixture) as ds_creator:
src_uri = ds_creator.uri
dest_base_uri = os.path.join(tmp_dir_fixture, "dest")
os.mkdir(dest_base_uri)
src_dataset = dc.DataSet.from_uri(src_uri)
src_frozen_at = src_dataset._admin_metadata["frozen_at"]
time.sleep(0.1)
dest_uri = dc.copy(src_uri, dest_base_uri)
dest_dataset = dc.DataSet.from_uri(dest_uri)
dest_frozen_at = dest_dataset._admin_metadata["frozen_at"]
assert src_frozen_at == dest_frozen_at
|
Add missing test from functionality in previous commit
|
Add missing test from functionality in previous commit
|
Python
|
mit
|
JIC-CSB/dtoolcore
|
Add missing test from functionality in previous commit
|
"""
Tests for https://github.com/jic-dtool/dtoolcore/issues/20
"""
import os
import time
from . import tmp_dir_fixture # NOQA
import dtoolcore as dc
def test_frozen_at_value_when_copying_dataset(tmp_dir_fixture): # NOQA
with dc.DataSetCreator("delete-me", tmp_dir_fixture) as ds_creator:
src_uri = ds_creator.uri
dest_base_uri = os.path.join(tmp_dir_fixture, "dest")
os.mkdir(dest_base_uri)
src_dataset = dc.DataSet.from_uri(src_uri)
src_frozen_at = src_dataset._admin_metadata["frozen_at"]
time.sleep(0.1)
dest_uri = dc.copy(src_uri, dest_base_uri)
dest_dataset = dc.DataSet.from_uri(dest_uri)
dest_frozen_at = dest_dataset._admin_metadata["frozen_at"]
assert src_frozen_at == dest_frozen_at
|
<commit_before><commit_msg>Add missing test from functionality in previous commit<commit_after>
|
"""
Tests for https://github.com/jic-dtool/dtoolcore/issues/20
"""
import os
import time
from . import tmp_dir_fixture # NOQA
import dtoolcore as dc
def test_frozen_at_value_when_copying_dataset(tmp_dir_fixture): # NOQA
with dc.DataSetCreator("delete-me", tmp_dir_fixture) as ds_creator:
src_uri = ds_creator.uri
dest_base_uri = os.path.join(tmp_dir_fixture, "dest")
os.mkdir(dest_base_uri)
src_dataset = dc.DataSet.from_uri(src_uri)
src_frozen_at = src_dataset._admin_metadata["frozen_at"]
time.sleep(0.1)
dest_uri = dc.copy(src_uri, dest_base_uri)
dest_dataset = dc.DataSet.from_uri(dest_uri)
dest_frozen_at = dest_dataset._admin_metadata["frozen_at"]
assert src_frozen_at == dest_frozen_at
|
Add missing test from functionality in previous commit"""
Tests for https://github.com/jic-dtool/dtoolcore/issues/20
"""
import os
import time
from . import tmp_dir_fixture # NOQA
import dtoolcore as dc
def test_frozen_at_value_when_copying_dataset(tmp_dir_fixture): # NOQA
with dc.DataSetCreator("delete-me", tmp_dir_fixture) as ds_creator:
src_uri = ds_creator.uri
dest_base_uri = os.path.join(tmp_dir_fixture, "dest")
os.mkdir(dest_base_uri)
src_dataset = dc.DataSet.from_uri(src_uri)
src_frozen_at = src_dataset._admin_metadata["frozen_at"]
time.sleep(0.1)
dest_uri = dc.copy(src_uri, dest_base_uri)
dest_dataset = dc.DataSet.from_uri(dest_uri)
dest_frozen_at = dest_dataset._admin_metadata["frozen_at"]
assert src_frozen_at == dest_frozen_at
|
<commit_before><commit_msg>Add missing test from functionality in previous commit<commit_after>"""
Tests for https://github.com/jic-dtool/dtoolcore/issues/20
"""
import os
import time
from . import tmp_dir_fixture # NOQA
import dtoolcore as dc
def test_frozen_at_value_when_copying_dataset(tmp_dir_fixture): # NOQA
with dc.DataSetCreator("delete-me", tmp_dir_fixture) as ds_creator:
src_uri = ds_creator.uri
dest_base_uri = os.path.join(tmp_dir_fixture, "dest")
os.mkdir(dest_base_uri)
src_dataset = dc.DataSet.from_uri(src_uri)
src_frozen_at = src_dataset._admin_metadata["frozen_at"]
time.sleep(0.1)
dest_uri = dc.copy(src_uri, dest_base_uri)
dest_dataset = dc.DataSet.from_uri(dest_uri)
dest_frozen_at = dest_dataset._admin_metadata["frozen_at"]
assert src_frozen_at == dest_frozen_at
|
|
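The regression test above hinges on the frozen_at value stored in a dataset's admin metadata; a hedged sketch of inspecting it directly (the URI is illustrative):

import dtoolcore as dc
ds = dc.DataSet.from_uri("file:///tmp/my-dataset")  # illustrative URI of a frozen dataset
print(ds._admin_metadata["frozen_at"])  # timestamp recorded when the dataset was frozen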
0457c1d56e210db960f84f78d90ead3736056eb4
|
test/functional/test_cli.py
|
test/functional/test_cli.py
|
# Copyright (c) 2013, Sascha Peilicke <saschpe@gmx.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program (see the file COPYING); if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
import os
import sys
import unittest
from rapport import __version__
from rapport.util import silent_popen
class CLIFunctionalTestCase(unittest.TestCase):
def setUp(self):
self.result = "rapport {0}\n".format(__version__)
def test_call_rapport_cli(self):
args = [sys.executable, "rapport/cli.py", "--version"]
self.assertEqual(silent_popen(args), self.result)
def test_call_script_wrapper(self):
args = ["scripts/rapport", "--version"]
# The script is meant to be used with rapport installed, i.e. not
# from the (development) tree. Thus we have to adjust PYTHONPATH:
env = os.environ.copy()
env.update({"PYTHONPATH": "."})
self.assertEqual(silent_popen(args, env=env), self.result)
|
Add functional test for command-line interface
|
Add functional test for command-line interface
|
Python
|
apache-2.0
|
saschpe/rapport
|
Add functional test for command-line interface
|
# Copyright (c) 2013, Sascha Peilicke <saschpe@gmx.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program (see the file COPYING); if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
import os
import sys
import unittest
from rapport import __version__
from rapport.util import silent_popen
class CLIFunctionalTestCase(unittest.TestCase):
def setUp(self):
self.result = "rapport {0}\n".format(__version__)
def test_call_rapport_cli(self):
args = [sys.executable, "rapport/cli.py", "--version"]
self.assertEqual(silent_popen(args), self.result)
def test_call_script_wrapper(self):
args = ["scripts/rapport", "--version"]
# The script is meant to be used with rapport installed, i.e. not
# from the (development) tree. Thus we have to adjust PYTHONPATH:
env = os.environ.copy()
env.update({"PYTHONPATH": "."})
self.assertEqual(silent_popen(args, env=env), self.result)
|
<commit_before><commit_msg>Add functional test for command-line interface<commit_after>
|
# Copyright (c) 2013, Sascha Peilicke <saschpe@gmx.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program (see the file COPYING); if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
import os
import sys
import unittest
from rapport import __version__
from rapport.util import silent_popen
class CLIFunctionalTestCase(unittest.TestCase):
def setUp(self):
self.result = "rapport {0}\n".format(__version__)
def test_call_rapport_cli(self):
args = [sys.executable, "rapport/cli.py", "--version"]
self.assertEqual(silent_popen(args), self.result)
def test_call_script_wrapper(self):
args = ["scripts/rapport", "--version"]
# The script is meant to be used with rapport installed, i.e. not
# from the (development) tree. Thus we have to adjust PYTHONPATH:
env = os.environ.copy()
env.update({"PYTHONPATH": "."})
self.assertEqual(silent_popen(args, env=env), self.result)
|
Add functional test for command-line interface# Copyright (c) 2013, Sascha Peilicke <saschpe@gmx.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program (see the file COPYING); if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
import os
import sys
import unittest
from rapport import __version__
from rapport.util import silent_popen
class CLIFunctionalTestCase(unittest.TestCase):
def setUp(self):
self.result = "rapport {0}\n".format(__version__)
def test_call_rapport_cli(self):
args = [sys.executable, "rapport/cli.py", "--version"]
self.assertEqual(silent_popen(args), self.result)
def test_call_script_wrapper(self):
args = ["scripts/rapport", "--version"]
# The script is meant to be used with rapport installed, i.e. not
# from the (development) tree. Thus we have to adjust PYTHONPATH:
env = os.environ.copy()
env.update({"PYTHONPATH": "."})
self.assertEqual(silent_popen(args, env=env), self.result)
|
<commit_before><commit_msg>Add functional test for command-line interface<commit_after># Copyright (c) 2013, Sascha Peilicke <saschpe@gmx.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program (see the file COPYING); if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
import os
import sys
import unittest
from rapport import __version__
from rapport.util import silent_popen
class CLIFunctionalTestCase(unittest.TestCase):
def setUp(self):
self.result = "rapport {0}\n".format(__version__)
def test_call_rapport_cli(self):
args = [sys.executable, "rapport/cli.py", "--version"]
self.assertEqual(silent_popen(args), self.result)
def test_call_script_wrapper(self):
args = ["scripts/rapport", "--version"]
# The script is meant to be used with rapport installed, i.e. not
# from the (development) tree. Thus we have to adjust PYTHONPATH:
env = os.environ.copy()
env.update({"PYTHONPATH": "."})
self.assertEqual(silent_popen(args, env=env), self.result)
|
|
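A minimal sketch of the PYTHONPATH adjustment that the script-wrapper test relies on, assuming the repository root is the current working directory:

import os
import subprocess
env = os.environ.copy()
env.update({"PYTHONPATH": "."})  # lets scripts/rapport import the in-tree rapport package
subprocess.call(["scripts/rapport", "--version"], env=env)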
e02b7e4dc0d6ba94f63ee2ed48db120c52eb11cd
|
utilities/src/d1_util/download_sciobj.py
|
utilities/src/d1_util/download_sciobj.py
|
#!/usr/bin/env python
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Download Science Objects from a Member Node or Coordinating Node.
This is an example of how to use the DataONE Client and Common libraries for Python. It
shows how to:
- Download a Science Object from a MN or CN.
"""
import logging
import sys
import d1_common.utils.filesystem
import d1_client.cnclient_2_0
import d1_client.command_line
log = logging.getLogger(__name__)
def main():
parser = d1_client.command_line.get_standard_arg_parser(__doc__)
parser.add_argument("pid", help="PID of object to download")
parser.add_argument(
"path",
type=str,
nargs="?",
help=(
"Optional save path for downloaded object. "
"If not specified, saved in current directory "
"with name derived from the PID"
),
)
args = parser.parse_args()
d1_client.command_line.log_setup(args)
client = d1_client.cnclient_2_0.CoordinatingNodeClient_2_0(
**d1_client.command_line.args_adapter(args)
)
path = args.path or d1_common.utils.filesystem.gen_safe_path_element(args.pid)
try:
with open(path, "wb") as f:
client.get_and_save(args.pid, f)
except Exception as e:
log.error("Download failed: {}".format(str(e)))
return 1
log.info("Downloaded successfully to: {}".format(path))
return 0
if __name__ == "__main__":
sys.exit(main())
|
Add utility/example for downloading single sciobj from CN or MN
|
Add utility/example for downloading single sciobj from CN or MN
|
Python
|
apache-2.0
|
DataONEorg/d1_python,DataONEorg/d1_python,DataONEorg/d1_python,DataONEorg/d1_python
|
Add utility/example for downloading single sciobj from CN or MN
|
#!/usr/bin/env python
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Download Science Objects from a Member Node or Coordinating Node.
This is an example of how to use the DataONE Client and Common libraries for Python. It
shows how to:
- Download a Science Object from a MN or CN.
"""
import logging
import sys
import d1_common.utils.filesystem
import d1_client.cnclient_2_0
import d1_client.command_line
log = logging.getLogger(__name__)
def main():
parser = d1_client.command_line.get_standard_arg_parser(__doc__)
parser.add_argument("pid", help="PID of object to download")
parser.add_argument(
"path",
type=str,
nargs="?",
help=(
"Optional save path for downloaded object. "
"If not specified, saved in current directory "
"with name derived from the PID"
),
)
args = parser.parse_args()
d1_client.command_line.log_setup(args)
client = d1_client.cnclient_2_0.CoordinatingNodeClient_2_0(
**d1_client.command_line.args_adapter(args)
)
path = args.path or d1_common.utils.filesystem.gen_safe_path_element(args.pid)
try:
with open(path, "wb") as f:
client.get_and_save(args.pid, f)
except Exception as e:
log.error("Download failed: {}".format(str(e)))
return 1
log.info("Downloaded successfully to: {}".format(path))
return 0
if __name__ == "__main__":
sys.exit(main())
|
<commit_before><commit_msg>Add utility/example for downloading single sciobj from CN or MN<commit_after>
|
#!/usr/bin/env python
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Download Science Objects from a Member Node or Coordinating Node.
This is an example of how to use the DataONE Client and Common libraries for Python. It
shows how to:
- Download a Science Object from a MN or CN.
"""
import logging
import sys
import d1_common.utils.filesystem
import d1_client.cnclient_2_0
import d1_client.command_line
log = logging.getLogger(__name__)
def main():
parser = d1_client.command_line.get_standard_arg_parser(__doc__)
parser.add_argument("pid", help="PID of object to download")
parser.add_argument(
"path",
type=str,
nargs="?",
help=(
"Optional save path for downloaded object. "
"If not specified, saved in current directory "
"with name derived from the PID"
),
)
args = parser.parse_args()
d1_client.command_line.log_setup(args)
client = d1_client.cnclient_2_0.CoordinatingNodeClient_2_0(
**d1_client.command_line.args_adapter(args)
)
path = args.path or d1_common.utils.filesystem.gen_safe_path_element(args.pid)
try:
with open(path, "wb") as f:
client.get_and_save(args.pid, f)
except Exception as e:
log.error("Download failed: {}".format(str(e)))
return 1
log.info("Downloaded successfully to: {}".format(path))
return 0
if __name__ == "__main__":
sys.exit(main())
|
Add utility/example for downloading single sciobj from CN or MN#!/usr/bin/env python
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Download Science Objects from a Member Node or Coordinating Node.
This is an example of how to use the DataONE Client and Common libraries for Python. It
shows how to:
- Download a Science Object from a MN or CN.
"""
import logging
import sys
import d1_common.utils.filesystem
import d1_client.cnclient_2_0
import d1_client.command_line
log = logging.getLogger(__name__)
def main():
parser = d1_client.command_line.get_standard_arg_parser(__doc__)
parser.add_argument("pid", help="PID of object to download")
parser.add_argument(
"path",
type=str,
nargs="?",
help=(
"Optional save path for downloaded object. "
"If not specified, saved in current directory "
"with name derived from the PID"
),
)
args = parser.parse_args()
d1_client.command_line.log_setup(args)
client = d1_client.cnclient_2_0.CoordinatingNodeClient_2_0(
**d1_client.command_line.args_adapter(args)
)
path = args.path or d1_common.utils.filesystem.gen_safe_path_element(args.pid)
try:
with open(path, "wb") as f:
client.get_and_save(args.pid, f)
except Exception as e:
log.error("Download failed: {}".format(str(e)))
return 1
log.info("Downloaded successfully to: {}".format(path))
return 0
if __name__ == "__main__":
sys.exit(main())
|
<commit_before><commit_msg>Add utility/example for downloading single sciobj from CN or MN<commit_after>#!/usr/bin/env python
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Download Science Objects from a Member Node or Coordinating Node.
This is an example of how to use the DataONE Client and Common libraries for Python. It
shows how to:
- Download a Science Object from a MN or CN.
"""
import logging
import sys
import d1_common.utils.filesystem
import d1_client.cnclient_2_0
import d1_client.command_line
log = logging.getLogger(__name__)
def main():
parser = d1_client.command_line.get_standard_arg_parser(__doc__)
parser.add_argument("pid", help="PID of object to download")
parser.add_argument(
"path",
type=str,
nargs="?",
help=(
"Optional save path for downloaded object. "
"If not specified, saved in current directory "
"with name derived from the PID"
),
)
args = parser.parse_args()
d1_client.command_line.log_setup(args)
client = d1_client.cnclient_2_0.CoordinatingNodeClient_2_0(
**d1_client.command_line.args_adapter(args)
)
path = args.path or d1_common.utils.filesystem.gen_safe_path_element(args.pid)
try:
with open(path, "wb") as f:
client.get_and_save(args.pid, f)
except Exception as e:
log.error("Download failed: {}".format(str(e)))
return 1
log.info("Downloaded successfully to: {}".format(path))
return 0
if __name__ == "__main__":
sys.exit(main())
|
|
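A hedged usage sketch for the downloader above; the PID and output path are illustrative, and constructing the client without arguments assumes a usable default Coordinating Node base URL:

import d1_client.cnclient_2_0
client = d1_client.cnclient_2_0.CoordinatingNodeClient_2_0()  # assumes a default base URL
with open("sciobj.bin", "wb") as f:
    client.get_and_save("doi:10.5063/EXAMPLE", f)  # illustrative PID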
2048ec26a2218e6060e1fd03ab876b59bd576a07
|
tests/test_hybrid_properties.py
|
tests/test_hybrid_properties.py
|
from pytest import raises
import sqlalchemy as sa
from sqlalchemy.ext.hybrid import hybrid_property
from wtforms_alchemy import ModelForm
from wtforms_alchemy.exc import AttributeTypeException
from tests import ModelFormTestCase
class TestHybridProperties(ModelFormTestCase):
def test_hybrid_property_returning_column_property(self):
class ModelTest(self.base):
__tablename__ = 'model_test'
id = sa.Column(sa.Integer, primary_key=True)
_test_column = sa.Column('test_column', sa.Boolean, nullable=False)
@hybrid_property
def test_column_hybrid(self):
return self._test_column
@test_column_hybrid.setter
def test_column_hybrid(self, value):
self._test_column = value
class ModelTestForm(ModelForm):
class Meta:
model = ModelTest
not_null_str_validator = None
not_null_validator = None
include = ('test_column_hybrid', )
exclude = ('_test_column', )
form = ModelTestForm()
assert form.test_column_hybrid
def test_hybrid_property_returning_expression(self):
class ModelTest(self.base):
__tablename__ = 'model_test'
id = sa.Column(sa.Integer, primary_key=True)
_test_column = sa.Column('test_column', sa.Boolean, nullable=False)
@hybrid_property
def test_column_hybrid(self):
return self._test_column + self._test_column
@test_column_hybrid.setter
def test_column_hybrid(self, value):
self._test_column = value
with raises(AttributeTypeException):
class ModelTestForm(ModelForm):
class Meta:
model = ModelTest
not_null_str_validator = None
not_null_validator = None
include = ('test_column_hybrid', )
exclude = ('_test_column', )
|
Add tests for hybrid properties
|
Add tests for hybrid properties
|
Python
|
bsd-3-clause
|
kelvinhammond/wtforms-alchemy,williamwu0220/wtforms-alchemy,quantus/wtforms-alchemy
|
Add tests for hybrid properties
|
from pytest import raises
import sqlalchemy as sa
from sqlalchemy.ext.hybrid import hybrid_property
from wtforms_alchemy import ModelForm
from wtforms_alchemy.exc import AttributeTypeException
from tests import ModelFormTestCase
class TestHybridProperties(ModelFormTestCase):
def test_hybrid_property_returning_column_property(self):
class ModelTest(self.base):
__tablename__ = 'model_test'
id = sa.Column(sa.Integer, primary_key=True)
_test_column = sa.Column('test_column', sa.Boolean, nullable=False)
@hybrid_property
def test_column_hybrid(self):
return self._test_column
@test_column_hybrid.setter
def test_column_hybrid(self, value):
self._test_column = value
class ModelTestForm(ModelForm):
class Meta:
model = ModelTest
not_null_str_validator = None
not_null_validator = None
include = ('test_column_hybrid', )
exclude = ('_test_column', )
form = ModelTestForm()
assert form.test_column_hybrid
def test_hybrid_property_returning_expression(self):
class ModelTest(self.base):
__tablename__ = 'model_test'
id = sa.Column(sa.Integer, primary_key=True)
_test_column = sa.Column('test_column', sa.Boolean, nullable=False)
@hybrid_property
def test_column_hybrid(self):
return self._test_column + self._test_column
@test_column_hybrid.setter
def test_column_hybrid(self, value):
self._test_column = value
with raises(AttributeTypeException):
class ModelTestForm(ModelForm):
class Meta:
model = ModelTest
not_null_str_validator = None
not_null_validator = None
include = ('test_column_hybrid', )
exclude = ('_test_column', )
|
<commit_before><commit_msg>Add tests for hybrid properties<commit_after>
|
from pytest import raises
import sqlalchemy as sa
from sqlalchemy.ext.hybrid import hybrid_property
from wtforms_alchemy import ModelForm
from wtforms_alchemy.exc import AttributeTypeException
from tests import ModelFormTestCase
class TestHybridProperties(ModelFormTestCase):
def test_hybrid_property_returning_column_property(self):
class ModelTest(self.base):
__tablename__ = 'model_test'
id = sa.Column(sa.Integer, primary_key=True)
_test_column = sa.Column('test_column', sa.Boolean, nullable=False)
@hybrid_property
def test_column_hybrid(self):
return self._test_column
@test_column_hybrid.setter
def test_column_hybrid(self, value):
self._test_column = value
class ModelTestForm(ModelForm):
class Meta:
model = ModelTest
not_null_str_validator = None
not_null_validator = None
include = ('test_column_hybrid', )
exclude = ('_test_column', )
form = ModelTestForm()
assert form.test_column_hybrid
def test_hybrid_property_returning_expression(self):
class ModelTest(self.base):
__tablename__ = 'model_test'
id = sa.Column(sa.Integer, primary_key=True)
_test_column = sa.Column('test_column', sa.Boolean, nullable=False)
@hybrid_property
def test_column_hybrid(self):
return self._test_column + self._test_column
@test_column_hybrid.setter
def test_column_hybrid(self, value):
self._test_column = value
with raises(AttributeTypeException):
class ModelTestForm(ModelForm):
class Meta:
model = ModelTest
not_null_str_validator = None
not_null_validator = None
include = ('test_column_hybrid', )
exclude = ('_test_column', )
|
Add tests for hybrid propertiesfrom pytest import raises
import sqlalchemy as sa
from sqlalchemy.ext.hybrid import hybrid_property
from wtforms_alchemy import ModelForm
from wtforms_alchemy.exc import AttributeTypeException
from tests import ModelFormTestCase
class TestHybridProperties(ModelFormTestCase):
def test_hybrid_property_returning_column_property(self):
class ModelTest(self.base):
__tablename__ = 'model_test'
id = sa.Column(sa.Integer, primary_key=True)
_test_column = sa.Column('test_column', sa.Boolean, nullable=False)
@hybrid_property
def test_column_hybrid(self):
return self._test_column
@test_column_hybrid.setter
def test_column_hybrid(self, value):
self._test_column = value
class ModelTestForm(ModelForm):
class Meta:
model = ModelTest
not_null_str_validator = None
not_null_validator = None
include = ('test_column_hybrid', )
exclude = ('_test_column', )
form = ModelTestForm()
assert form.test_column_hybrid
def test_hybrid_property_returning_expression(self):
class ModelTest(self.base):
__tablename__ = 'model_test'
id = sa.Column(sa.Integer, primary_key=True)
_test_column = sa.Column('test_column', sa.Boolean, nullable=False)
@hybrid_property
def test_column_hybrid(self):
return self._test_column + self._test_column
@test_column_hybrid.setter
def test_column_hybrid(self, value):
self._test_column = value
with raises(AttributeTypeException):
class ModelTestForm(ModelForm):
class Meta:
model = ModelTest
not_null_str_validator = None
not_null_validator = None
include = ('test_column_hybrid', )
exclude = ('_test_column', )
|
<commit_before><commit_msg>Add tests for hybrid properties<commit_after>from pytest import raises
import sqlalchemy as sa
from sqlalchemy.ext.hybrid import hybrid_property
from wtforms_alchemy import ModelForm
from wtforms_alchemy.exc import AttributeTypeException
from tests import ModelFormTestCase
class TestHybridProperties(ModelFormTestCase):
def test_hybrid_property_returning_column_property(self):
class ModelTest(self.base):
__tablename__ = 'model_test'
id = sa.Column(sa.Integer, primary_key=True)
_test_column = sa.Column('test_column', sa.Boolean, nullable=False)
@hybrid_property
def test_column_hybrid(self):
return self._test_column
@test_column_hybrid.setter
def test_column_hybrid(self, value):
self._test_column = value
class ModelTestForm(ModelForm):
class Meta:
model = ModelTest
not_null_str_validator = None
not_null_validator = None
include = ('test_column_hybrid', )
exclude = ('_test_column', )
form = ModelTestForm()
assert form.test_column_hybrid
def test_hybrid_property_returning_expression(self):
class ModelTest(self.base):
__tablename__ = 'model_test'
id = sa.Column(sa.Integer, primary_key=True)
_test_column = sa.Column('test_column', sa.Boolean, nullable=False)
@hybrid_property
def test_column_hybrid(self):
return self._test_column + self._test_column
@test_column_hybrid.setter
def test_column_hybrid(self, value):
self._test_column = value
with raises(AttributeTypeException):
class ModelTestForm(ModelForm):
class Meta:
model = ModelTest
not_null_str_validator = None
not_null_validator = None
include = ('test_column_hybrid', )
exclude = ('_test_column', )
|
|
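A hedged sketch of exercising the form generated in the passing case above; the field value is illustrative, and passing data as keyword arguments relies on standard WTForms constructor behavior:

form = ModelTestForm(test_column_hybrid=True)  # keyword args populate field data in WTForms
assert "test_column_hybrid" in form._fields  # the hybrid property was mapped to a form field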
7c4d13d1f2591ae0a3eb8dc7ffa5fa03258c7662
|
django/applications/catmaid/management/commands/catmaid_set_user_profiles_to_default.py
|
django/applications/catmaid/management/commands/catmaid_set_user_profiles_to_default.py
|
from django.conf import settings
from django.contrib.auth.models import User
from django.core.management.base import NoArgsCommand, CommandError
class Command(NoArgsCommand):
help = "Set the user profile settings of every user to the defaults"
def handle_noargs(self, **options):
for u in User.objects.all():
up = u.userprofile
# Expect user profiles to be there and add all default settings
up.inverse_mouse_wheel = settings.PROFILE_DEFAULT_INVERSE_MOUSE_WHEEL
up.show_text_label_tool = settings.PROFILE_SHOW_TEXT_LABEL_TOOL
up.show_tagging_tool = settings.PROFILE_SHOW_TAGGING_TOOL
up.show_cropping_tool = settings.PROFILE_SHOW_CROPPING_TOOL
up.show_segmentation_tool = settings.PROFILE_SHOW_SEGMENTATION_TOOL
up.show_tracing_tool = settings.PROFILE_SHOW_TRACING_TOOL
# Save the changes
up.save()
|
Add management command to set all user profiles to defaults
|
Add management command to set all user profiles to defaults
|
Python
|
agpl-3.0
|
htem/CATMAID,htem/CATMAID,fzadow/CATMAID,fzadow/CATMAID,fzadow/CATMAID,htem/CATMAID,htem/CATMAID,fzadow/CATMAID
|
Add management command to set all user profiles to defaults
|
from django.conf import settings
from django.contrib.auth.models import User
from django.core.management.base import NoArgsCommand, CommandError
class Command(NoArgsCommand):
help = "Set the user profile settings of every user to the defaults"
def handle_noargs(self, **options):
for u in User.objects.all():
up = u.userprofile
# Expect user profiles to be there and add all default settings
up.inverse_mouse_wheel = settings.PROFILE_DEFAULT_INVERSE_MOUSE_WHEEL
up.show_text_label_tool = settings.PROFILE_SHOW_TEXT_LABEL_TOOL
up.show_tagging_tool = settings.PROFILE_SHOW_TAGGING_TOOL
up.show_cropping_tool = settings.PROFILE_SHOW_CROPPING_TOOL
up.show_segmentation_tool = settings.PROFILE_SHOW_SEGMENTATION_TOOL
up.show_tracing_tool = settings.PROFILE_SHOW_TRACING_TOOL
# Save the changes
up.save()
|
<commit_before><commit_msg>Add management command to set all user profiles to defaults<commit_after>
|
from django.conf import settings
from django.contrib.auth.models import User
from django.core.management.base import NoArgsCommand, CommandError
class Command(NoArgsCommand):
help = "Set the user profile settings of every user to the defaults"
def handle_noargs(self, **options):
for u in User.objects.all():
up = u.userprofile
# Expect user profiles to be there and add all default settings
up.inverse_mouse_wheel = settings.PROFILE_DEFAULT_INVERSE_MOUSE_WHEEL
up.show_text_label_tool = settings.PROFILE_SHOW_TEXT_LABEL_TOOL
up.show_tagging_tool = settings.PROFILE_SHOW_TAGGING_TOOL
up.show_cropping_tool = settings.PROFILE_SHOW_CROPPING_TOOL
up.show_segmentation_tool = settings.PROFILE_SHOW_SEGMENTATION_TOOL
up.show_tracing_tool = settings.PROFILE_SHOW_TRACING_TOOL
# Save the changes
up.save()
|
Add management command to set all user profiles to defaultsfrom django.conf import settings
from django.contrib.auth.models import User
from django.core.management.base import NoArgsCommand, CommandError
class Command(NoArgsCommand):
help = "Set the user profile settings of every user to the defaults"
def handle_noargs(self, **options):
for u in User.objects.all():
up = u.userprofile
# Expect user profiles to be there and add all default settings
up.inverse_mouse_wheel = settings.PROFILE_DEFAULT_INVERSE_MOUSE_WHEEL
up.show_text_label_tool = settings.PROFILE_SHOW_TEXT_LABEL_TOOL
up.show_tagging_tool = settings.PROFILE_SHOW_TAGGING_TOOL
up.show_cropping_tool = settings.PROFILE_SHOW_CROPPING_TOOL
up.show_segmentation_tool = settings.PROFILE_SHOW_SEGMENTATION_TOOL
up.show_tracing_tool = settings.PROFILE_SHOW_TRACING_TOOL
# Save the changes
up.save()
|
<commit_before><commit_msg>Add management command to set all user profiles to defaults<commit_after>from django.conf import settings
from django.contrib.auth.models import User
from django.core.management.base import NoArgsCommand, CommandError
class Command(NoArgsCommand):
help = "Set the user profile settings of every user to the defaults"
def handle_noargs(self, **options):
for u in User.objects.all():
up = u.userprofile
# Expect user profiles to be there and add all default settings
up.inverse_mouse_wheel = settings.PROFILE_DEFAULT_INVERSE_MOUSE_WHEEL
up.show_text_label_tool = settings.PROFILE_SHOW_TEXT_LABEL_TOOL
up.show_tagging_tool = settings.PROFILE_SHOW_TAGGING_TOOL
up.show_cropping_tool = settings.PROFILE_SHOW_CROPPING_TOOL
up.show_segmentation_tool = settings.PROFILE_SHOW_SEGMENTATION_TOOL
up.show_tracing_tool = settings.PROFILE_SHOW_TRACING_TOOL
# Save the changes
up.save()
|
|
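The command above would normally be run through manage.py; a hedged sketch of invoking it programmatically, assuming a configured CATMAID Django project (the command name follows from the file name):

from django.core.management import call_command
call_command("catmaid_set_user_profiles_to_default")  # resets every user profile to the defaults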
66931b591cc40dcafecf4c8daff95b8fc17771f3
|
tools/ui-languages-check.py
|
tools/ui-languages-check.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os.path
import sys
import subprocess
from txclib import utils
from txclib.project import Project
try:
from json import loads as parse_json
except ImportError:
from simplejson import loads as parse_json
class UiLanguagesCheck(Project):
def get_ui_langs(self):
our_path = os.path.dirname(os.path.realpath(__file__))
core_file = our_path + '/../app/config/core.php'
php_script = ('class Cache {'
' function Config() {}'
'}'
'class Configure {'
' function write($var, $val) {'
' if ($var == "UI.languages") {'
' print json_encode(array_map('
' function($v) {'
' return $v[0];'
' },'
' $val'
' ));'
' }'
' }'
'}'
'include "' + core_file + '";')
php_cmd = "php -r '" + php_script + "'"
proc = subprocess.Popen(php_cmd, shell=True, stdout=subprocess.PIPE)
return parse_json(proc.stdout.read())
def run(self, resource=None):
if not resource:
resource = self.get_resource_list()[0]
try:
project_slug, resource_slug = resource.split('.', 1)
except ValueError:
sys.stderr.write("Invalid resource name: {}\n".format(resource))
sys.exit(1)
lang_map = self.get_resource_lang_mapping(resource)
ui_langs = self.get_ui_langs()
self.url_info = {
'host': self.get_resource_host(None),
'project': project_slug,
'resource': resource_slug
}
all_stats = self._get_stats_for_resource()
stats_iter = sorted(all_stats.iteritems(), key=lambda (k,v): int(v['completed'][:-1]))
print("{:3s} [{}]".format('lang', 'is included in core.php'))
for tx_code, lang_stats in stats_iter:
try:
our_code = lang_map[tx_code]
except KeyError:
continue
available = our_code in ui_langs
print("{:3s}: {:>4s} [{}]".format(our_code, lang_stats['completed'], 'X' if available else ' '))
def main(argv):
path_to_tx = utils.find_dot_tx()
check = UiLanguagesCheck(path_to_tx)
check.run()
if __name__ == "__main__":
main(sys.argv[1:])
|
Add script to check UI languages coverage
|
Add script to check UI languages coverage
|
Python
|
agpl-3.0
|
shashank19gaurav/tatoeba2,shashank19gaurav/tatoeba2,Tatoeba/tatoeba2,Tatoeba/tatoeba2,Tatoeba/tatoeba2,shashank19gaurav/tatoeba2,shashank19gaurav/tatoeba2,Tatoeba/tatoeba2,Tatoeba/tatoeba2
|
Add script to check UI languages coverage
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os.path
import sys
import subprocess
from txclib import utils
from txclib.project import Project
try:
from json import loads as parse_json
except ImportError:
from simplejson import loads as parse_json
class UiLanguagesCheck(Project):
def get_ui_langs(self):
our_path = os.path.dirname(os.path.realpath(__file__))
core_file = our_path + '/../app/config/core.php'
php_script = ('class Cache {'
' function Config() {}'
'}'
'class Configure {'
' function write($var, $val) {'
' if ($var == "UI.languages") {'
' print json_encode(array_map('
' function($v) {'
' return $v[0];'
' },'
' $val'
' ));'
' }'
' }'
'}'
'include "' + core_file + '";')
php_cmd = "php -r '" + php_script + "'"
proc = subprocess.Popen(php_cmd, shell=True, stdout=subprocess.PIPE)
return parse_json(proc.stdout.read())
def run(self, resource=None):
if not resource:
resource = self.get_resource_list()[0]
try:
project_slug, resource_slug = resource.split('.', 1)
except ValueError:
sys.stderr.write("Invalid resource name: {}\n".format(resource))
sys.exit(1)
lang_map = self.get_resource_lang_mapping(resource)
ui_langs = self.get_ui_langs()
self.url_info = {
'host': self.get_resource_host(None),
'project': project_slug,
'resource': resource_slug
}
all_stats = self._get_stats_for_resource()
stats_iter = sorted(all_stats.iteritems(), key=lambda (k,v): int(v['completed'][:-1]))
print("{:3s} [{}]".format('lang', 'is included in core.php'))
for tx_code, lang_stats in stats_iter:
try:
our_code = lang_map[tx_code]
except KeyError:
continue
available = our_code in ui_langs
print("{:3s}: {:>4s} [{}]".format(our_code, lang_stats['completed'], 'X' if available else ' '))
def main(argv):
path_to_tx = utils.find_dot_tx()
check = UiLanguagesCheck(path_to_tx)
check.run()
if __name__ == "__main__":
main(sys.argv[1:])
|
<commit_before><commit_msg>Add script to check UI languages coverage<commit_after>
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os.path
import sys
import subprocess
from txclib import utils
from txclib.project import Project
try:
from json import loads as parse_json
except ImportError:
from simplejson import loads as parse_json
class UiLanguagesCheck(Project):
def get_ui_langs(self):
our_path = os.path.dirname(os.path.realpath(__file__))
core_file = our_path + '/../app/config/core.php'
php_script = ('class Cache {'
' function Config() {}'
'}'
'class Configure {'
' function write($var, $val) {'
' if ($var == "UI.languages") {'
' print json_encode(array_map('
' function($v) {'
' return $v[0];'
' },'
' $val'
' ));'
' }'
' }'
'}'
'include "' + core_file + '";')
php_cmd = "php -r '" + php_script + "'"
proc = subprocess.Popen(php_cmd, shell=True, stdout=subprocess.PIPE)
return parse_json(proc.stdout.read())
def run(self, resource=None):
if not resource:
resource = self.get_resource_list()[0]
try:
project_slug, resource_slug = resource.split('.', 1)
except ValueError:
sys.stderr.write("Invalid resource name: {}\n".format(resource))
sys.exit(1)
lang_map = self.get_resource_lang_mapping(resource)
ui_langs = self.get_ui_langs()
self.url_info = {
'host': self.get_resource_host(None),
'project': project_slug,
'resource': resource_slug
}
all_stats = self._get_stats_for_resource()
stats_iter = sorted(all_stats.iteritems(), key=lambda (k,v): int(v['completed'][:-1]))
print("{:3s} [{}]".format('lang', 'is included in core.php'))
for tx_code, lang_stats in stats_iter:
try:
our_code = lang_map[tx_code]
except KeyError:
continue
available = our_code in ui_langs
print("{:3s}: {:>4s} [{}]".format(our_code, lang_stats['completed'], 'X' if available else ' '))
def main(argv):
path_to_tx = utils.find_dot_tx()
check = UiLanguagesCheck(path_to_tx)
check.run()
if __name__ == "__main__":
main(sys.argv[1:])
|
Add script to check UI languages coverage#!/usr/bin/python
# -*- coding: utf-8 -*-
import os.path
import sys
import subprocess
from txclib import utils
from txclib.project import Project
try:
from json import loads as parse_json
except ImportError:
from simplejson import loads as parse_json
class UiLanguagesCheck(Project):
def get_ui_langs(self):
our_path = os.path.dirname(os.path.realpath(__file__))
core_file = our_path + '/../app/config/core.php'
php_script = ('class Cache {'
' function Config() {}'
'}'
'class Configure {'
' function write($var, $val) {'
' if ($var == "UI.languages") {'
' print json_encode(array_map('
' function($v) {'
' return $v[0];'
' },'
' $val'
' ));'
' }'
' }'
'}'
'include "' + core_file + '";')
php_cmd = "php -r '" + php_script + "'"
proc = subprocess.Popen(php_cmd, shell=True, stdout=subprocess.PIPE)
return parse_json(proc.stdout.read())
def run(self, resource=None):
if not resource:
resource = self.get_resource_list()[0]
try:
project_slug, resource_slug = resource.split('.', 1)
except ValueError:
sys.stderr.write("Invalid resource name: {}\n".format(resource))
sys.exit(1)
lang_map = self.get_resource_lang_mapping(resource)
ui_langs = self.get_ui_langs()
self.url_info = {
'host': self.get_resource_host(None),
'project': project_slug,
'resource': resource_slug
}
all_stats = self._get_stats_for_resource()
stats_iter = sorted(all_stats.iteritems(), key=lambda (k,v): int(v['completed'][:-1]))
print("{:3s} [{}]".format('lang', 'is included in core.php'))
for tx_code, lang_stats in stats_iter:
try:
our_code = lang_map[tx_code]
except KeyError:
continue
available = our_code in ui_langs
print("{:3s}: {:>4s} [{}]".format(our_code, lang_stats['completed'], 'X' if available else ' '))
def main(argv):
path_to_tx = utils.find_dot_tx()
check = UiLanguagesCheck(path_to_tx)
check.run()
if __name__ == "__main__":
main(sys.argv[1:])
|
<commit_before><commit_msg>Add script to check UI languages coverage<commit_after>#!/usr/bin/python
# -*- coding: utf-8 -*-
import os.path
import sys
import subprocess
from txclib import utils
from txclib.project import Project
try:
from json import loads as parse_json
except ImportError:
from simplejson import loads as parse_json
class UiLanguagesCheck(Project):
def get_ui_langs(self):
our_path = os.path.dirname(os.path.realpath(__file__))
core_file = our_path + '/../app/config/core.php'
php_script = ('class Cache {'
' function Config() {}'
'}'
'class Configure {'
' function write($var, $val) {'
' if ($var == "UI.languages") {'
' print json_encode(array_map('
' function($v) {'
' return $v[0];'
' },'
' $val'
' ));'
' }'
' }'
'}'
'include "' + core_file + '";')
php_cmd = "php -r '" + php_script + "'"
proc = subprocess.Popen(php_cmd, shell=True, stdout=subprocess.PIPE)
return parse_json(proc.stdout.read())
def run(self, resource=None):
if not resource:
resource = self.get_resource_list()[0]
try:
project_slug, resource_slug = resource.split('.', 1)
except ValueError:
sys.stderr.write("Invalid resource name: {}\n".format(resource))
sys.exit(1)
lang_map = self.get_resource_lang_mapping(resource)
ui_langs = self.get_ui_langs()
self.url_info = {
'host': self.get_resource_host(None),
'project': project_slug,
'resource': resource_slug
}
all_stats = self._get_stats_for_resource()
stats_iter = sorted(all_stats.iteritems(), key=lambda (k,v): int(v['completed'][:-1]))
print("{:3s} [{}]".format('lang', 'is included in core.php'))
for tx_code, lang_stats in stats_iter:
try:
our_code = lang_map[tx_code]
except KeyError:
continue
available = our_code in ui_langs
print("{:3s}: {:>4s} [{}]".format(our_code, lang_stats['completed'], 'X' if available else ' '))
def main(argv):
path_to_tx = utils.find_dot_tx()
check = UiLanguagesCheck(path_to_tx)
check.run()
if __name__ == "__main__":
main(sys.argv[1:])
|
|
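A worked example of the completion-sort key used in run() above, which strips the trailing percent sign before comparing; like the script itself this is Python 2 syntax (iteritems and a tuple-unpacking lambda):

stats = {"fr": {"completed": "87%"}, "de": {"completed": "5%"}}  # illustrative stats
ordered = sorted(stats.iteritems(), key=lambda (k, v): int(v["completed"][:-1]))
# -> [("de", ...), ("fr", ...)], i.e. the least complete language first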
9e3d460d797b9ab3032a8be0c92b88efe136458b
|
openquake/commands/webui.py
|
openquake/commands/webui.py
|
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2016, GEM Foundation
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
import sys
import subprocess
from openquake.risklib import valid
from openquake.commonlib import sap
from openquake.engine import config
from openquake.server.dbserver import get_status
from openquake.commands.dbserver import runserver
def rundjango(subcmd):
subprocess.call([sys.executable, '-m', 'openquake.server.manage', subcmd])
def webui(cmd):
"""
start the webui server in the foreground or perform other operations on the
Django application
"""
dbstatus = get_status()
if dbstatus == 'not-running':
if valid.boolean(config.get('dbserver', 'multi_user')):
sys.exit('Please start the DbServer: '
'see the documentation for details')
runserver()
if cmd == 'start':
rundjango('runserver')
elif cmd == 'syncdb':
rundjango(cmd)
parser = sap.Parser(webui)
parser.arg('cmd', 'webui command',
choices='start syncdb'.split())
|
Add a command to start the WebUI using oq
|
Add a command to start the WebUI using oq
The Django development server is started in the foreground
on localhost:8800. Meant to be used with multi_user = false,
but it also works if true.
Former-commit-id: 5df6e6b3ec523376906abeb8a52eb6c5dc5d46ec [formerly bbfc5549fb632d535ed1934e0d2bd1226ccd4507]
Former-commit-id: 97050796d5de66127acc07596b2f85c1544047f6
|
Python
|
agpl-3.0
|
gem/oq-engine,gem/oq-engine,gem/oq-engine,gem/oq-engine,gem/oq-engine
|
Add a command to start the WebUI using oq
The Django development server is started in the foreground
on localhost:8800. Meant to be used with multi_user = false,
but it also works if true.
Former-commit-id: 5df6e6b3ec523376906abeb8a52eb6c5dc5d46ec [formerly bbfc5549fb632d535ed1934e0d2bd1226ccd4507]
Former-commit-id: 97050796d5de66127acc07596b2f85c1544047f6
|
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2016, GEM Foundation
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
import sys
import subprocess
from openquake.risklib import valid
from openquake.commonlib import sap
from openquake.engine import config
from openquake.server.dbserver import get_status
from openquake.commands.dbserver import runserver
def rundjango(subcmd):
subprocess.call([sys.executable, '-m', 'openquake.server.manage', subcmd])
def webui(cmd):
"""
start the webui server in the foreground or perform other operations on the
Django application
"""
dbstatus = get_status()
if dbstatus == 'not-running':
if valid.boolean(config.get('dbserver', 'multi_user')):
sys.exit('Please start the DbServer: '
'see the documentation for details')
runserver()
if cmd == 'start':
rundjango('runserver')
elif cmd == 'syncdb':
rundjango(cmd)
parser = sap.Parser(webui)
parser.arg('cmd', 'webui command',
choices='start syncdb'.split())
|
<commit_before><commit_msg>Add a command to start the WebUI using oq
The Django development server is started in the foreground
on localhost:8800. Meant to be used with multi_user = false,
but it also works if true.
Former-commit-id: 5df6e6b3ec523376906abeb8a52eb6c5dc5d46ec [formerly bbfc5549fb632d535ed1934e0d2bd1226ccd4507]
Former-commit-id: 97050796d5de66127acc07596b2f85c1544047f6<commit_after>
|
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2016, GEM Foundation
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
import sys
import subprocess
from openquake.risklib import valid
from openquake.commonlib import sap
from openquake.engine import config
from openquake.server.dbserver import get_status
from openquake.commands.dbserver import runserver
def rundjango(subcmd):
subprocess.call([sys.executable, '-m', 'openquake.server.manage', subcmd])
def webui(cmd):
"""
start the webui server in the foreground or perform other operations on the
Django application
"""
dbstatus = get_status()
if dbstatus == 'not-running':
if valid.boolean(config.get('dbserver', 'multi_user')):
sys.exit('Please start the DbServer: '
'see the documentation for details')
runserver()
if cmd == 'start':
rundjango('runserver')
elif cmd == 'syncdb':
rundjango(cmd)
parser = sap.Parser(webui)
parser.arg('cmd', 'webui command',
choices='start syncdb'.split())
|
Add a command to start the WebUI using oq
The Django development server is started in the foreground
on localhost:8800. Meant to be used with multi_user = false,
but it also works if true.
Former-commit-id: 5df6e6b3ec523376906abeb8a52eb6c5dc5d46ec [formerly bbfc5549fb632d535ed1934e0d2bd1226ccd4507]
Former-commit-id: 97050796d5de66127acc07596b2f85c1544047f6# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2016, GEM Foundation
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
import sys
import subprocess
from openquake.risklib import valid
from openquake.commonlib import sap
from openquake.engine import config
from openquake.server.dbserver import get_status
from openquake.commands.dbserver import runserver
def rundjango(subcmd):
subprocess.call([sys.executable, '-m', 'openquake.server.manage', subcmd])
def webui(cmd):
"""
start the webui server in the foreground or perform other operations on the
Django application
"""
dbstatus = get_status()
if dbstatus == 'not-running':
if valid.boolean(config.get('dbserver', 'multi_user')):
sys.exit('Please start the DbServer: '
'see the documentation for details')
runserver()
if cmd == 'start':
rundjango('runserver')
elif cmd == 'syncdb':
rundjango(cmd)
parser = sap.Parser(webui)
parser.arg('cmd', 'webui command',
choices='start syncdb'.split())
|
<commit_before><commit_msg>Add a command to start the WebUI using oq
The Django development server is started in the foreground
on localhost:8800. Meant to be used with multi_user = false,
but it also works if true.
Former-commit-id: 5df6e6b3ec523376906abeb8a52eb6c5dc5d46ec [formerly bbfc5549fb632d535ed1934e0d2bd1226ccd4507]
Former-commit-id: 97050796d5de66127acc07596b2f85c1544047f6<commit_after># -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2016, GEM Foundation
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
import sys
import subprocess
from openquake.risklib import valid
from openquake.commonlib import sap
from openquake.engine import config
from openquake.server.dbserver import get_status
from openquake.commands.dbserver import runserver
def rundjango(subcmd):
subprocess.call([sys.executable, '-m', 'openquake.server.manage', subcmd])
def webui(cmd):
"""
start the webui server in the foreground or perform other operations
on the Django application
"""
dbstatus = get_status()
if dbstatus == 'not-running':
if valid.boolean(config.get('dbserver', 'multi_user')):
sys.exit('Please start the DbServer: '
'see the documentation for details')
runserver()
if cmd == 'start':
rundjango('runserver')
elif cmd == 'syncdb':
rundjango(cmd)
parser = sap.Parser(webui)
parser.arg('cmd', 'webui command',
choices='start syncdb'.split())
|
|
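A note on the record above: the DbServer gating in webui() is easy to exercise in isolation once pulled into a pure helper. A minimal sketch, assuming nothing beyond the logic shown (the helper name is ours, not part of openquake):

def should_start_local_dbserver(dbstatus, multi_user):
    """Return True when the webui must spawn its own DbServer.

    Mirrors webui() above: a multi-user install must not auto-start
    the DbServer, so we bail out with the same message instead.
    """
    if dbstatus != 'not-running':
        return False
    if multi_user:
        raise SystemExit('Please start the DbServer: '
                         'see the documentation for details')
    return True

# Example: single-user install with no DbServer running -> start one.
print(should_start_local_dbserver('not-running', False))  # True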
c08b4d2922d366e6078e6d6dda542de79031918d
|
examples/translations/korean_test_1.py
|
examples/translations/korean_test_1.py
|
# Korean Language Test - Python 3 Only!
from seleniumbase.translate.korean import 셀레늄_테스트_케이스  # noqa
class 테스트_클래스(셀레늄_테스트_케이스):  # noqa
def test_실시예_1(self):
self.URL_열기("https://ko.wikipedia.org/wiki/")
self.텍스트_확인("위키백과")
self.요소_확인('[title="위키백과:소개"]')
self.텍스트를_업데이트("#searchInput", "김치")
self.클릭("#searchButton")
self.텍스트_확인("김치", "#firstHeading")
self.요소_확인('img[alt="Various kimchi.jpg"]')
self.텍스트를_업데이트("#searchInput", "비빔밥")
self.클릭("#searchButton")
self.텍스트_확인("비빔밥", "#firstHeading")
self.요소_확인('img[alt="Dolsot-bibimbap.jpg"]')
|
Add a Korean example test
|
Add a Korean example test
|
Python
|
mit
|
seleniumbase/SeleniumBase,seleniumbase/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase
|
Add a Korean example test
|
# Korean Language Test - Python 3 Only!
from seleniumbase.translate.korean import 셀레늄_테스트_케이스  # noqa
class 테스트_클래스(셀레늄_테스트_케이스):  # noqa
def test_실시예_1(self):
self.URL_열기("https://ko.wikipedia.org/wiki/")
self.텍스트_확인("위키백과")
self.요소_확인('[title="위키백과:소개"]')
self.텍스트를_업데이트("#searchInput", "김치")
self.클릭("#searchButton")
self.텍스트_확인("김치", "#firstHeading")
self.요소_확인('img[alt="Various kimchi.jpg"]')
self.텍스트를_업데이트("#searchInput", "비빔밥")
self.클릭("#searchButton")
self.텍스트_확인("비빔밥", "#firstHeading")
self.요소_확인('img[alt="Dolsot-bibimbap.jpg"]')
|
<commit_before><commit_msg>Add a Korean example test<commit_after>
|
# Korean Language Test - Python 3 Only!
from seleniumbase.translate.korean import 셀레늄_테스트_케이스  # noqa
class 테스트_클래스(셀레늄_테스트_케이스):  # noqa
def test_실시예_1(self):
self.URL_열기("https://ko.wikipedia.org/wiki/")
self.텍스트_확인("위키백과")
self.요소_확인('[title="위키백과:소개"]')
self.텍스트를_업데이트("#searchInput", "김치")
self.클릭("#searchButton")
self.텍스트_확인("김치", "#firstHeading")
self.요소_확인('img[alt="Various kimchi.jpg"]')
self.텍스트를_업데이트("#searchInput", "비빔밥")
self.클릭("#searchButton")
self.텍스트_확인("비빔밥", "#firstHeading")
self.요소_확인('img[alt="Dolsot-bibimbap.jpg"]')
|
Add a Korean example test# Korean Language Test - Python 3 Only!
from seleniumbase.translate.korean import 셀레늄_테스트_케이스  # noqa
class 테스트_클래스(셀레늄_테스트_케이스):  # noqa
def test_실시예_1(self):
self.URL_열기("https://ko.wikipedia.org/wiki/")
self.텍스트_확인("위키백과")
self.요소_확인('[title="위키백과:소개"]')
self.텍스트를_업데이트("#searchInput", "김치")
self.클릭("#searchButton")
self.텍스트_확인("김치", "#firstHeading")
self.요소_확인('img[alt="Various kimchi.jpg"]')
self.텍스트를_업데이트("#searchInput", "비빔밥")
self.클릭("#searchButton")
self.텍스트_확인("비빔밥", "#firstHeading")
self.요소_확인('img[alt="Dolsot-bibimbap.jpg"]')
|
<commit_before><commit_msg>Add a Korean example test<commit_after># Korean Language Test - Python 3 Only!
from seleniumbase.translate.korean import 셀레늄_테스트_케이스  # noqa
class 테스트_클래스(셀레늄_테스트_케이스):  # noqa
def test_실시예_1(self):
self.URL_열기("https://ko.wikipedia.org/wiki/")
self.텍스트_확인("위키백과")
self.요소_확인('[title="위키백과:소개"]')
self.텍스트를_업데이트("#searchInput", "김치")
self.클릭("#searchButton")
self.텍스트_확인("김치", "#firstHeading")
self.요소_확인('img[alt="Various kimchi.jpg"]')
self.텍스트를_업데이트("#searchInput", "비빔밥")
self.클릭("#searchButton")
self.텍스트_확인("비빔밥", "#firstHeading")
self.요소_확인('img[alt="Dolsot-bibimbap.jpg"]')
|
|
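For readers puzzled by the record above: seleniumbase's translation modules expose the English BaseCase API under localized method names, which is why the Korean test reads like an ordinary SeleniumBase test. A minimal sketch of the aliasing idea, with illustrative internals (this is not the actual seleniumbase code):

class BaseCase:
    def open(self, url):
        print("open", url)

    def assert_text(self, text, selector="html"):
        print("assert", text, "in", selector)

class 셀레늄_테스트_케이스(BaseCase):  # Korean alias layer
    def URL_열기(self, url):
        self.open(url)

    def 텍스트_확인(self, text, selector="html"):
        self.assert_text(text, selector)

# The translated names dispatch straight to the English methods.
셀레늄_테스트_케이스().URL_열기("https://ko.wikipedia.org/wiki/")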
c6e628fe12397a57afa9c7744a3feb6667315987
|
locations/spiders/loves.py
|
locations/spiders/loves.py
|
# -*- coding: utf-8 -*-
import scrapy
import json
from locations.items import GeojsonPointItem
HEADERS = {'Content-Type': 'application/json'}
class LovesSpider(scrapy.Spider):
name = "loves"
allowed_domains = ["www.loves.com"]
download_delay = 0.2
def start_requests(self):
payload = json.dumps({"StoreTypes":[],"Amenities":[],"Restaurants":[],"FoodConcepts":[],"State":"All","City":"All","Highway":"All"})
yield scrapy.Request(
"https://www.loves.com/api/sitecore/StoreSearch/SearchStores",
method='POST',
body=payload,
headers=HEADERS
)
def parse(self, response):
stores = json.loads(response.body_as_unicode())
for store in stores[0]['Points']:
yield GeojsonPointItem(
name=store['Name'],
ref=store['SiteId'],
addr_full=store['Address1'],
city=store['City'],
state=store['State'],
postcode=store['Zip'],
phone=store['PhoneNumber'],
lat=float(store['Latitude']),
lon=float(store['Longitude']),
)
|
Add spider for Loves Travel Stops
|
Add spider for Loves Travel Stops
|
Python
|
mit
|
iandees/all-the-places,iandees/all-the-places,iandees/all-the-places
|
Add spider for Loves Travel Stops
|
# -*- coding: utf-8 -*-
import scrapy
import json
from locations.items import GeojsonPointItem
HEADERS = {'Content-Type': 'application/json'}
class LovesSpider(scrapy.Spider):
name = "loves"
allowed_domains = ["www.loves.com"]
download_delay = 0.2
def start_requests(self):
payload = json.dumps({"StoreTypes":[],"Amenities":[],"Restaurants":[],"FoodConcepts":[],"State":"All","City":"All","Highway":"All"})
yield scrapy.Request(
"https://www.loves.com/api/sitecore/StoreSearch/SearchStores",
method='POST',
body=payload,
headers=HEADERS
)
def parse(self, response):
stores = json.loads(response.body_as_unicode())
for store in stores[0]['Points']:
yield GeojsonPointItem(
name=store['Name'],
ref=store['SiteId'],
addr_full=store['Address1'],
city=store['City'],
state=store['State'],
postcode=store['Zip'],
phone=store['PhoneNumber'],
lat=float(store['Latitude']),
lon=float(store['Longitude']),
)
|
<commit_before><commit_msg>Add spider for Loves Travel Stops<commit_after>
|
# -*- coding: utf-8 -*-
import scrapy
import json
from locations.items import GeojsonPointItem
HEADERS = {'Content-Type': 'application/json'}
class LovesSpider(scrapy.Spider):
name = "loves"
allowed_domains = ["www.loves.com"]
download_delay = 0.2
def start_requests(self):
payload = json.dumps({"StoreTypes":[],"Amenities":[],"Restaurants":[],"FoodConcepts":[],"State":"All","City":"All","Highway":"All"})
yield scrapy.Request(
"https://www.loves.com/api/sitecore/StoreSearch/SearchStores",
method='POST',
body=payload,
headers=HEADERS
)
def parse(self, response):
stores = json.loads(response.body_as_unicode())
for store in stores[0]['Points']:
yield GeojsonPointItem(
name=store['Name'],
ref=store['SiteId'],
addr_full=store['Address1'],
city=store['City'],
state=store['State'],
postcode=store['Zip'],
phone=store['PhoneNumber'],
lat=float(store['Latitude']),
lon=float(store['Longitude']),
)
|
Add spider for Loves Travel Stops# -*- coding: utf-8 -*-
import scrapy
import json
from locations.items import GeojsonPointItem
HEADERS = {'Content-Type': 'application/json'}
class LovesSpider(scrapy.Spider):
name = "loves"
allowed_domains = ["www.loves.com"]
download_delay = 0.2
def start_requests(self):
payload = json.dumps({"StoreTypes":[],"Amenities":[],"Restaurants":[],"FoodConcepts":[],"State":"All","City":"All","Highway":"All"})
yield scrapy.Request(
"https://www.loves.com/api/sitecore/StoreSearch/SearchStores",
method='POST',
body=payload,
headers=HEADERS
)
def parse(self, response):
stores = json.loads(response.body_as_unicode())
for store in stores[0]['Points']:
yield GeojsonPointItem(
name=store['Name'],
ref=store['SiteId'],
addr_full=store['Address1'],
city=store['City'],
state=store['State'],
postcode=store['Zip'],
phone=store['PhoneNumber'],
lat=float(store['Latitude']),
lon=float(store['Longitude']),
)
|
<commit_before><commit_msg>Add spider for Loves Travel Stops<commit_after># -*- coding: utf-8 -*-
import scrapy
import json
from locations.items import GeojsonPointItem
HEADERS = {'Content-Type': 'application/json'}
class LovesSpider(scrapy.Spider):
name = "loves"
allowed_domains = ["www.loves.com"]
download_delay = 0.2
def start_requests(self):
payload = json.dumps({"StoreTypes":[],"Amenities":[],"Restaurants":[],"FoodConcepts":[],"State":"All","City":"All","Highway":"All"})
yield scrapy.Request(
"https://www.loves.com/api/sitecore/StoreSearch/SearchStores",
method='POST',
body=payload,
headers=HEADERS
)
def parse(self, response):
stores = json.loads(response.body_as_unicode())
for store in stores[0]['Points']:
yield GeojsonPointItem(
name=store['Name'],
ref=store['SiteId'],
addr_full=store['Address1'],
city=store['City'],
state=store['State'],
postcode=store['Zip'],
phone=store['PhoneNumber'],
lat=float(store['Latitude']),
lon=float(store['Longitude']),
)
|
|
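The POST the spider above sends can be reproduced outside Scrapy to inspect the response shape; a hedged sketch with requests (the endpoint, payload, and field names are taken from the spider itself, not independently verified):

import json
import requests

payload = json.dumps({
    "StoreTypes": [], "Amenities": [], "Restaurants": [],
    "FoodConcepts": [], "State": "All", "City": "All", "Highway": "All",
})
resp = requests.post(
    "https://www.loves.com/api/sitecore/StoreSearch/SearchStores",
    data=payload,
    headers={"Content-Type": "application/json"},
)
stores = resp.json()
# parse() above reads the first element's 'Points' list.
for store in stores[0]["Points"][:3]:
    print(store["SiteId"], store["Name"], store["Latitude"], store["Longitude"])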
6ef4254e1167185598a31bfb6c0deb52388fa42a
|
HammingDistance/Python/HammingDistance.py
|
HammingDistance/Python/HammingDistance.py
|
def HammingDistance(s1, s2):
if len(s1) != len(s2):
raise ValueError("ERROR: Strings must have the same length")
return sum(c1 != c2 for c1, c2 in zip(s1, s2))
|
Add Hamming Distance in Python
|
Add Hamming Distance in Python
|
Python
|
apache-2.0
|
maazsq/Algorithms_Example,alok760/Algorithms_Example,churrizo/Algorithms_Example,Thuva4/Algorithms_Example,maazsq/Algorithms_Example,AtoMc/Algorithms_Example,pranjalrai/Algorithms_Example,AtoMc/Algorithms_Example,maazsq/Algorithms_Example,Astrophilic/Algorithms_Example,Anat-Port/Algorithms_Example,Astrophilic/Algorithms_Example,Anat-Port/Algorithms_Example,alok760/Algorithms_Example,pranjalrai/Algorithms_Example,AtoMc/Algorithms_Example,Thuva4/Algorithms_Example,AtoMc/Algorithms_Example,xiroV/Algorithms_Example,Anat-Port/Algorithms_Example,Thuva4/Algorithms_Example,AtoMc/Algorithms_Example,xiroV/Algorithms_Example,AtoMc/Algorithms_Example,alok760/Algorithms_Example,churrizo/Algorithms_Example,maazsq/Algorithms_Example,Thuva4/Algorithms_Example,pranjalrai/Algorithms_Example,Thuva4/Algorithms_Example,churrizo/Algorithms_Example,alok760/Algorithms_Example,xiroV/Algorithms_Example,xiroV/Algorithms_Example,Anat-Port/Algorithms_Example,alok760/Algorithms_Example,Anat-Port/Algorithms_Example,maazsq/Algorithms_Example,maazsq/Algorithms_Example,Thuva4/Algorithms_Example,Thuva4/Algorithms_Example,xiroV/Algorithms_Example,alok760/Algorithms_Example,maazsq/Algorithms_Example,alok760/Algorithms_Example,Anat-Port/Algorithms_Example,churrizo/Algorithms_Example,maazsq/Algorithms_Example,pranjalrai/Algorithms_Example,pranjalrai/Algorithms_Example,AtoMc/Algorithms_Example,pranjalrai/Algorithms_Example,xiroV/Algorithms_Example,Astrophilic/Algorithms_Example,Anat-Port/Algorithms_Example,Thuva4/Algorithms_Example,AtoMc/Algorithms_Example,xiroV/Algorithms_Example,maazsq/Algorithms_Example,AtoMc/Algorithms_Example,Thuva4/Algorithms_Example,pranjalrai/Algorithms_Example,Thuva4/Algorithms_Example,xiroV/Algorithms_Example,AtoMc/Algorithms_Example,Astrophilic/Algorithms_Example,AtoMc/Algorithms_Example,alok760/Algorithms_Example,Thuva4/Algorithms_Example,pranjalrai/Algorithms_Example,alok760/Algorithms_Example,Anat-Port/Algorithms_Example,alok760/Algorithms_Example,churrizo/Algorithms_Example,maazsq/Algorithms_Example,churrizo/Algorithms_Example,Anat-Port/Algorithms_Example,Astrophilic/Algorithms_Example,pranjalrai/Algorithms_Example,churrizo/Algorithms_Example,pranjalrai/Algorithms_Example,xiroV/Algorithms_Example,alok760/Algorithms_Example,churrizo/Algorithms_Example,AtoMc/Algorithms_Example,maazsq/Algorithms_Example,Anat-Port/Algorithms_Example,alok760/Algorithms_Example,Astrophilic/Algorithms_Example,Astrophilic/Algorithms_Example,churrizo/Algorithms_Example,Astrophilic/Algorithms_Example,churrizo/Algorithms_Example,xiroV/Algorithms_Example,churrizo/Algorithms_Example,Thuva4/Algorithms_Example,xiroV/Algorithms_Example,maazsq/Algorithms_Example,Anat-Port/Algorithms_Example,pranjalrai/Algorithms_Example,Astrophilic/Algorithms_Example,Astrophilic/Algorithms_Example,churrizo/Algorithms_Example,Astrophilic/Algorithms_Example
|
Add Hamming Distance in Python
|
def HammingDistance(s1, s2):
if len(s1) != len(s2):
raise ValueError("ERROR: Strings must have the same length")
return sum(c1 != c2 for c1, c2 in zip(s1, s2))
|
<commit_before><commit_msg>Add Hamming Distance in Python<commit_after>
|
def HammingDistance(s1, s2):
if len(s1) != len(s2):
raise ValueError("ERROR: Strings must have the same length")
return sum(c1 != c2 for c1, c2 in zip(s1, s2))
|
Add Hamming Distance in Pythondef HammingDistance(s1, s2):
if len(s1) != len(s2):
raise ValueError("ERROR: Strings must have the same length")
return sum(c1 != c2 for c1, c2 in zip(s1, s2))
|
<commit_before><commit_msg>Add Hamming Distance in Python<commit_after>def HammingDistance(s1, s2):
if len(s1) != len(s2):
raise ValueError("ERROR: Strings must have the same length")
return sum(c1 != c2 for c1, c2 in zip(s1, s2))
|
|
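A quick sanity check of the function above (repeated here so the snippet runs on its own; the expected values were verified by hand):

def HammingDistance(s1, s2):
    if len(s1) != len(s2):
        raise ValueError("ERROR: Strings must have the same length")
    return sum(c1 != c2 for c1, c2 in zip(s1, s2))

assert HammingDistance("karolin", "kathrin") == 3  # r/t, o/h, l/r differ
assert HammingDistance("1011101", "1001001") == 2
try:
    HammingDistance("abc", "ab")
except ValueError as err:
    print(err)  # ERROR: Strings must have the same length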
cb62cea248d217504947ff76bdd0af9306f508ce
|
temperature_measurement_to_db.py
|
temperature_measurement_to_db.py
|
import os
import glob
import time
import datetime
import sys
import MySQLdb as mdb
os.system('modprobe w1-gpio')
os.system('modprobe w1-therm')
base_dir = '/sys/bus/w1/devices/'
device_folder = glob.glob(base_dir + '28*')[0]
device_file = device_folder + '/w1_slave'
def read_temp_raw():
f = open(device_file, 'r')
lines = f.readlines()
f.close()
return lines
def read_temp():
lines = read_temp_raw()
while lines[0].strip()[-3:] != 'YES':
time.sleep(10)
lines = read_temp_raw()
equals_pos = lines[1].find('t=')
if equals_pos != -1:
temp_string = lines[1][equals_pos+2:]
temp_c = float(temp_string) / 1000.0
### temp_f = temp_c * 9.0 / 5.0 + 32.0
return temp_c ###, temp_f
while True:
try:
time.sleep(5*60)
pi_temp = read_temp()
dtime = datetime.datetime.now()
### ans_time = time.mktime(dtime.timetuple())
print "datetime : %s temp : %s" % (dtime, pi_temp)
con = mdb.connect('localhost', 'logger', 'password', 'temperatures');
cur = con.cursor()
cur.execute("""INSERT INTO temperaturedata(dateandtime, temperature) VALUES(%s, %s)""", (dtime, pi_temp))
con.commit()
except mdb.Error, e:
con.rollback()
print "Error %d: %s" % (e.args[0],e.args[1])
sys.exit(1)
finally:
if con:
con.close()
time.sleep(10)
|
Add python script to project
|
Add python script to project
Temperature measurement using Raspberry Pi. Temperatures are saved to a MySQL database.
|
Python
|
mit
|
tnisula/IoT_Kurssi,tnisula/IoT_Kurssi,tnisula/IoT_Kurssi,tnisula/IoT_Kurssi
|
Add python script to project
Temperature measurement using Raspberry Pi. Temperatures are saved to a MySQL database.
|
import os
import glob
import time
import datetime
import sys
import MySQLdb as mdb
os.system('modprobe w1-gpio')
os.system('modprobe w1-therm')
base_dir = '/sys/bus/w1/devices/'
device_folder = glob.glob(base_dir + '28*')[0]
device_file = device_folder + '/w1_slave'
def read_temp_raw():
f = open(device_file, 'r')
lines = f.readlines()
f.close()
return lines
def read_temp():
lines = read_temp_raw()
while lines[0].strip()[-3:] != 'YES':
time.sleep(10)
lines = read_temp_raw()
equals_pos = lines[1].find('t=')
if equals_pos != -1:
temp_string = lines[1][equals_pos+2:]
temp_c = float(temp_string) / 1000.0
### temp_f = temp_c * 9.0 / 5.0 + 32.0
return temp_c ###, temp_f
while True:
try:
time.sleep(5*60)
pi_temp = read_temp()
dtime = datetime.datetime.now()
### ans_time = time.mktime(dtime.timetuple())
print "datetime : %s temp : %s" % (dtime, pi_temp)
con = mdb.connect('localhost', 'logger', 'password', 'temperatures');
cur = con.cursor()
cur.execute("""INSERT INTO temperaturedata(dateandtime, temperature) VALUES(%s, %s)""", (dtime, pi_temp))
con.commit()
except mdb.Error, e:
con.rollback()
print "Error %d: %s" % (e.args[0],e.args[1])
sys.exit(1)
finally:
if con:
con.close()
time.sleep(10)
|
<commit_before><commit_msg>Add python script to project
Temperature measurement using Raspberry Pi. Temperatures are saved to a MySQL database.<commit_after>
|
import os
import glob
import time
import datetime
import sys
import MySQLdb as mdb
os.system('modprobe w1-gpio')
os.system('modprobe w1-therm')
base_dir = '/sys/bus/w1/devices/'
device_folder = glob.glob(base_dir + '28*')[0]
device_file = device_folder + '/w1_slave'
def read_temp_raw():
f = open(device_file, 'r')
lines = f.readlines()
f.close()
return lines
def read_temp():
lines = read_temp_raw()
while lines[0].strip()[-3:] != 'YES':
time.sleep(10)
lines = read_temp_raw()
equals_pos = lines[1].find('t=')
if equals_pos != -1:
temp_string = lines[1][equals_pos+2:]
temp_c = float(temp_string) / 1000.0
### temp_f = temp_c * 9.0 / 5.0 + 32.0
return temp_c ###, temp_f
while True:
try:
time.sleep(5*60)
pi_temp = read_temp()
dtime = datetime.datetime.now()
### ans_time = time.mktime(dtime.timetuple())
print "datetime : %s temp : %s" % (dtime, pi_temp)
con = mdb.connect('localhost', 'logger', 'password', 'temperatures');
cur = con.cursor()
cur.execute("""INSERT INTO temperaturedata(dateandtime, temperature) VALUES(%s, %s)""", (dtime, pi_temp))
con.commit()
except mdb.Error, e:
con.rollback()
print "Error %d: %s" % (e.args[0],e.args[1])
sys.exit(1)
finally:
if con:
con.close()
time.sleep(10)
|
Add python script to project
Temperature measurement using Raspberry Pi. Temperatures are saved to a MySQL database.import os
import glob
import time
import datetime
import sys
import MySQLdb as mdb
os.system('modprobe w1-gpio')
os.system('modprobe w1-therm')
base_dir = '/sys/bus/w1/devices/'
device_folder = glob.glob(base_dir + '28*')[0]
device_file = device_folder + '/w1_slave'
def read_temp_raw():
f = open(device_file, 'r')
lines = f.readlines()
f.close()
return lines
def read_temp():
lines = read_temp_raw()
while lines[0].strip()[-3:] != 'YES':
time.sleep(10)
lines = read_temp_raw()
equals_pos = lines[1].find('t=')
if equals_pos != -1:
temp_string = lines[1][equals_pos+2:]
temp_c = float(temp_string) / 1000.0
### temp_f = temp_c * 9.0 / 5.0 + 32.0
return temp_c ###, temp_f
while True:
try:
time.sleep(5*60)
pi_temp = read_temp()
dtime = datetime.datetime.now()
### ans_time = time.mktime(dtime.timetuple())
print "datetime : %s temp : %s" % (dtime, pi_temp)
con = mdb.connect('localhost', 'logger', 'password', 'temperatures');
cur = con.cursor()
cur.execute("""INSERT INTO temperaturedata(dateandtime, temperature) VALUES(%s, %s)""", (dtime, pi_temp))
con.commit()
except mdb.Error, e:
con.rollback()
print "Error %d: %s" % (e.args[0],e.args[1])
sys.exit(1)
finally:
if con:
con.close()
time.sleep(10)
|
<commit_before><commit_msg>Add python script to project
Temperature measurement using Raspberry Pi. Temperatures are saved to a MySQL database.<commit_after>import os
import glob
import time
import datetime
import sys
import MySQLdb as mdb
os.system('modprobe w1-gpio')
os.system('modprobe w1-therm')
base_dir = '/sys/bus/w1/devices/'
device_folder = glob.glob(base_dir + '28*')[0]
device_file = device_folder + '/w1_slave'
def read_temp_raw():
f = open(device_file, 'r')
lines = f.readlines()
f.close()
return lines
def read_temp():
lines = read_temp_raw()
while lines[0].strip()[-3:] != 'YES':
time.sleep(10)
lines = read_temp_raw()
equals_pos = lines[1].find('t=')
if equals_pos != -1:
temp_string = lines[1][equals_pos+2:]
temp_c = float(temp_string) / 1000.0
### temp_f = temp_c * 9.0 / 5.0 + 32.0
return temp_c ###, temp_f
while True:
try:
time.sleep(5*60)
pi_temp = read_temp()
dtime = datetime.datetime.now()
### ans_time = time.mktime(dtime.timetuple())
print "datetime : %s temp : %s" % (dtime, pi_temp)
con = mdb.connect('localhost', 'logger', 'password', 'temperatures');
cur = con.cursor()
cur.execute("""INSERT INTO temperaturedata(dateandtime, temperature) VALUES(%s, %s)""", (dtime, pi_temp))
con.commit()
except mdb.Error, e:
con.rollback()
print "Error %d: %s" % (e.args[0],e.args[1])
sys.exit(1)
finally:
if con:
con.close()
time.sleep(10)
|
|
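The sensor-read path in the script above can be tested without a DS18B20 by feeding it a captured w1_slave dump; a minimal Python 3 sketch of the same parsing rule (the sample bytes are illustrative):

SAMPLE = (
    "72 01 4b 46 7f ff 0e 10 57 : crc=57 YES\n"
    "72 01 4b 46 7f ff 0e 10 57 t=23125\n"
)

def parse_w1_slave(text):
    # Same rule as read_temp() above: accept the reading only when the
    # CRC line ends in YES, then convert the millidegrees after 't='.
    lines = text.splitlines()
    if len(lines) < 2 or not lines[0].strip().endswith("YES"):
        return None
    pos = lines[1].find("t=")
    if pos == -1:
        return None
    return float(lines[1][pos + 2:]) / 1000.0

print(parse_w1_slave(SAMPLE))  # 23.125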
e71f3ab92ffe0e6a32833855347f1ada9e773ede
|
salt/modules/pkg.py
|
salt/modules/pkg.py
|
'''
Top level package command wrapper, used to translate the os detected by facter
to the correct package manager
'''
import salt.modules.pacman
import salt.modules.yum
#import salt.modules.apt
factmap = {
'Archlinux': 'pacman',
'Fedora': 'yum',
'RedHat': 'yum',
#'Debian': 'apt',
#'Ubuntu': 'apt',
}
def _map_cmd(cmd, args=[]):
'''
Map the passed data to the correct function
'''
if args:
args = [args]
pro = factmap[__facter__['operatingsystem']]
# getattr takes an object, not a dotted string: resolve the module, then the function
return getattr(getattr(salt.modules, pro), cmd)(*args)
def list_pkgs():
'''
List installed packages
CLI Example:
salt '*' pkg.list_pkgs
'''
return _map_cmd('list_pkgs')
def refresh_db():
'''
Refresh the package database
CLI Example:
salt '*' pkg.refresh_db
'''
return _map_cmd('refresh_db')
def install(pkg_name):
'''
Install the desired package
CLI Example:
salt '*' pkg.install <package name>
'''
return _map_cmd('install', pkg_name)
def upgrade():
'''
Upgrade the entire system
CLI Example:
salt '*' pkg.upgrade
'''
return _map_cmd('upgrade')
def remove(pkg_name):
'''
Remove the desired package
CLI Example:
salt '*' pkg.remove <package name>
'''
return _map_cmd('remove', pkg_name)
def purge(pkg_name):
'''
Purge the desired package
CLI Example:
salt '*' pkg.purge <package name>
'''
return _map_cmd('purge', pkg_name)
|
Set up module to map to the correct package manager for the OS
|
Set up module to map to the correct package manager for the OS
|
Python
|
apache-2.0
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
Set up module to map to the correct package manager for the OS
|
'''
Top level package command wrapper, used to translate the os detected by facter
to the correct package manager
'''
import salt.modules.pacman
import salt.modules.yum
#import salt.modules.apt
factmap = {
'Archlinux': 'pacman',
'Fedora': 'yum',
'RedHat': 'yum',
#'Debian': 'apt',
#'Ubuntu': 'apt',
}
def _map_cmd(cmd, args=[]):
'''
Map the passed data to the correct function
'''
if args:
args = [args]
pro = factmap[__facter__['operatingsystem']]
# getattr takes an object, not a dotted string: resolve the module, then the function
return getattr(getattr(salt.modules, pro), cmd)(*args)
def list_pkgs():
'''
List installed packages
CLI Example:
salt '*' pkg.list_pkgs
'''
return _map_cmd('list_pkgs')
def refresh_db():
'''
Refresh the package database
CLI Example:
salt '*' pkg.refresh_db
'''
return _map_cmd('refresh_db')
def install(pkg_name):
'''
Install the desired package
CLI Example:
salt '*' pkg.install <package name>
'''
return _map_cmd('install', pkg_name)
def upgrade():
'''
Upgrade the entire system
CLI Example:
salt '*' pkg.upgrade
'''
return _map_cmd('upgrade')
def remove(pkg_name):
'''
Remove the desired package
CLI Example:
salt '*' pkg.remove <package name>
'''
return _map_cmd('remove', pkg_name)
def purge(pkg_name):
'''
Purge the desired package
CLI Example:
salt '*' pkg.purge <package name>
'''
return _map_cmd('purge', pkg_name)
|
<commit_before><commit_msg>Set up module to map to the correct package manager for the OS<commit_after>
|
'''
Top level package command wrapper, used to translate the os detected by facter
to the correct package manager
'''
import salt.modules.pacman
import salt.modules.yum
#import salt.modules.apt
factmap = {
'Archlinux': 'pacman',
'Fedora': 'yum',
'RedHat': 'yum',
#'Debian': 'apt',
#'Ubuntu': 'apt',
}
def _map_cmd(cmd, args=[]):
'''
Map the passed data to the correct function
'''
if args:
args = [args]
pro = factmap[__facter__['operatingsystem']]
# getattr takes an object, not a dotted string: resolve the module, then the function
return getattr(getattr(salt.modules, pro), cmd)(*args)
def list_pkgs():
'''
List installed packages
CLI Example:
salt '*' pkg.list_pkgs
'''
return _map_cmd('list_pkgs')
def refresh_db():
'''
Refresh the package database
CLI Example:
salt '*' pkg.refresh_db
'''
return _map_cmd('refresh_db')
def install(pkg_name):
'''
Install the desired package
CLI Example:
salt '*' pkg.install <package name>
'''
return _map_cmd('install', pkg_name)
def upgrade():
'''
Upgrade the entire system
CLI Example:
salt '*' pkg.upgrade
'''
return _map_cmd('upgrade')
def remove(pkg_name):
'''
Remove the desired package
CLI Example:
salt '*' pkg.remove <package name>
'''
return _map_cmd('remove', pkg_name)
def purge(pkg_name):
'''
Purge the desired package
CLI Example:
salt '*' pkg.purge <package name>
'''
return _map_cmd('purge', pkg_name)
|
Set up module to map to the correct package manager for the OS'''
Top level package command wrapper, used to translate the os detected by facter
to the correct package manager
'''
import salt.modules.pacman
import salt.modules.yum
#import salt.modules.apt
factmap = {
'Archlinux': 'pacman',
'Fedora': 'yum',
'RedHat': 'yum',
#'Debian': 'apt',
#'Ubuntu': 'apt',
}
def _map_cmd(cmd, args=[]):
'''
Map the passed data to the correct function
'''
if args:
args = [args]
pro = factmap[__facter__['operatingsystem']]
# getattr takes an object, not a dotted string: resolve the module, then the function
return getattr(getattr(salt.modules, pro), cmd)(*args)
def list_pkgs():
'''
List installed packages
CLI Example:
salt '*' pkg.list_pkgs
'''
return _map_cmd('list_pkgs')
def refresh_db():
'''
Refresh the package database
CLI Example:
salt '*' pkg.refresh_db
'''
return _map_cmd('refresh_db')
def install(pkg_name):
'''
Install the desired package
CLI Example:
salt '*' pkg.install <package name>
'''
return _map_cmd('install', pkg_name)
def upgrade():
'''
Upgrade the entire system
CLI Example:
salt '*' pkg.upgrade
'''
return _map_cmd('upgrade')
def remove(pkg_name):
'''
Remove the desired package
CLI Example:
salt '*' pkg.remove <package name>
'''
return _map_cmd('remove', pkg_name)
def purge(pkg_name):
'''
Purge the desired package
CLI Example:
salt '*' pkg.purge <package name>
'''
return _map_cmd('purge', pkg_name)
|
<commit_before><commit_msg>Set up module to map to the correct package manager for the OS<commit_after>'''
Top level package command wrapper, used to translate the os detected by facter
to the correct package manager
'''
import salt.modules.pacman
import salt.modules.yum
#import salt.modules.apt
factmap = {
'Archlinux': 'pacman',
'Fedora': 'yum',
'RedHat': 'yum',
#'Debian': 'apt',
#'Ubuntu': 'apt',
}
def _map_cmd(cmd, args=[]):
'''
Map the passed data to the correct function
'''
if args:
args = [args]
pro = factmap[__facter__['operatingsystem']]
# getattr takes an object, not a dotted string: resolve the module, then the function
return getattr(getattr(salt.modules, pro), cmd)(*args)
def list_pkgs():
'''
List installed packages
CLI Example:
salt '*' pkg.list_pkgs
'''
return _map_cmd('list_pkgs')
def refresh_db():
'''
Refresh the package database
CLI Example:
salt '*' pkg.refresh_db
'''
return _map_cmd('refresh_db')
def install(pkg_name):
'''
Install the desired package
CLI Example:
salt '*' pkg.install <package name>
'''
return _map_cmd('install', pkg_name)
def upgrade():
'''
Upgrade the entire system
CLI Example:
salt '*' pkg.upgrade
'''
return _map_cmd('upgrade')
def remove(pkg_name):
'''
Remove the desired package
CLI Example:
salt '*' pkg.remove <package name>
'''
return _map_cmd('remove', pkg_name)
def purge(pkg_name):
'''
Purge the desired package
CLI Example:
salt '*' pkg.purge <package name>
'''
return _map_cmd('purge', pkg_name)
|
|
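The fact-keyed dispatch in _map_cmd() above can be demonstrated stand-alone; a minimal sketch with fake provider modules (the pacman/yum stand-ins are ours):

class _FakePacman:
    @staticmethod
    def install(pkg):
        return "pacman -S " + pkg

class _FakeYum:
    @staticmethod
    def install(pkg):
        return "yum install " + pkg

PROVIDERS = {"Archlinux": _FakePacman, "Fedora": _FakeYum, "RedHat": _FakeYum}

def dispatch(os_name, cmd, *args):
    # Resolve the provider for the detected OS, then the command on it,
    # mirroring the factmap lookup in _map_cmd() above.
    return getattr(PROVIDERS[os_name], cmd)(*args)

print(dispatch("Archlinux", "install", "vim"))  # pacman -S vim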
2d7e9a353cb19e49e072615c09ca9c590eabace2
|
astrobin/management/commands/message_all.py
|
astrobin/management/commands/message_all.py
|
from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
import persistent_messages
class Command(BaseCommand):
help = "Sends a message to all users."
def handle(self, *args, **options):
if len(args) < 2:
print "Need two arbuments: subject and body."
subject = args[0]
body = args[1]
sender = User.objects.get(username = 'astrobin')
for recipient in User.objects.all():
if recipient.username != 'astrobin':
persistent_messages.add_message_without_storage(
recipient,
sender,
persistent_messages.SUCCESS,
body,
subject = subject)
|
Add command to message everybody.
|
Add command to message everybody.
|
Python
|
agpl-3.0
|
astrobin/astrobin,astrobin/astrobin,astrobin/astrobin,astrobin/astrobin
|
Add command to message everybody.
|
from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
import persistent_messages
class Command(BaseCommand):
help = "Sends a message to all users."
def handle(self, *args, **options):
if len(args) < 2:
print "Need two arbuments: subject and body."
subject = args[0]
body = args[1]
sender = User.objects.get(username = 'astrobin')
for recipient in User.objects.all():
if recipient.username != 'astrobin':
persistent_messages.add_message_without_storage(
recipient,
sender,
persistent_messages.SUCCESS,
body,
subject = subject)
|
<commit_before><commit_msg>Add command to message everybody.<commit_after>
|
from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
import persistent_messages
class Command(BaseCommand):
help = "Sends a message to all users."
def handle(self, *args, **options):
if len(args) < 2:
print "Need two arbuments: subject and body."
subject = args[0]
body = args[1]
sender = User.objects.get(username = 'astrobin')
for recipient in User.objects.all():
if recipient.username != 'astrobin':
persistent_messages.add_message_without_storage(
recipient,
sender,
persistent_messages.SUCCESS,
body,
subject = subject)
|
Add command to message everybody.from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
import persistent_messages
class Command(BaseCommand):
help = "Sends a message to all users."
def handle(self, *args, **options):
if len(args) < 2:
print "Need two arbuments: subject and body."
subject = args[0]
body = args[1]
sender = User.objects.get(username = 'astrobin')
for recipient in User.objects.all():
if recipient.username != 'astrobin':
persistent_messages.add_message_without_storage(
recipient,
sender,
persistent_messages.SUCCESS,
body,
subject = subject)
|
<commit_before><commit_msg>Add command to message everybody.<commit_after>from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
import persistent_messages
class Command(BaseCommand):
help = "Sends a message to all users."
def handle(self, *args, **options):
if len(args) < 2:
print "Need two arbuments: subject and body."
subject = args[0]
body = args[1]
sender = User.objects.get(username = 'astrobin')
for recipient in User.objects.all():
if recipient.username != 'astrobin':
persistent_messages.add_message_without_storage(
recipient,
sender,
persistent_messages.SUCCESS,
body,
subject = subject)
|
|
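Once installed in an app's management/commands directory, the command above can be invoked from code as well as the CLI; a hedged sketch using Django's call_command (the positional args match the handle() signature above):

from django.core.management import call_command

# Equivalent to: python manage.py message_all "<subject>" "<body>"
call_command(
    "message_all",
    "Scheduled maintenance",
    "AstroBin will be briefly unavailable tonight.",
)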
41e2cd0802a5fc8fb247a5063b08aaec5e701797
|
scripts/fix_data.py
|
scripts/fix_data.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2014 Google Inc. All Rights Reserved.
import models
import uma
def CorrectPropertyName(bucket_id):
if bucket_id in uma.CSS_PROPERTY_BUCKETS:
return uma.CSS_PROPERTY_BUCKETS[bucket_id]
return None
def FetchAllPropertiesWithError(bucket_id=None):
q = models.StableInstance.all()
if bucket_id:
q.filter('bucket_id =', bucket_id)
q.filter('property_name =', 'ERROR')
props = q.fetch(None)
# Bucket 1 for CSS properties is total pages visited
props = [p for p in props if p.bucket_id > 1]
return props
if __name__ == '__main__':
props = FetchAllPropertiesWithError()
print 'Found', str(len(props)), 'properties tagged "ERROR"'
need_correcting = {}
for p in props:
correct_name = CorrectPropertyName(p.bucket_id)
if correct_name is not None:
need_correcting[p.bucket_id] = correct_name
for p in props:
if p.bucket_id in need_correcting:
new_name = need_correcting[p.bucket_id]
print p.bucket_id, p.property_name, '->', new_name
p.property_name = new_name
p.put()
print 'Done'
|
Add db data correcting script
|
Add db data correcting script
|
Python
|
apache-2.0
|
mdittmer/chromium-dashboard,modulexcite/chromium-dashboard,beaufortfrancois/chromium-dashboard,jeffposnick/chromium-dashboard,modulexcite/chromium-dashboard,beaufortfrancois/chromium-dashboard,GoogleChrome/chromium-dashboard,GoogleChrome/chromium-dashboard,jeffposnick/chromium-dashboard,jeffposnick/chromium-dashboard,GoogleChrome/chromium-dashboard,mdittmer/chromium-dashboard,beaufortfrancois/chromium-dashboard,beaufortfrancois/chromium-dashboard,GoogleChrome/chromium-dashboard,jeffposnick/chromium-dashboard,modulexcite/chromium-dashboard,mdittmer/chromium-dashboard,beaufortfrancois/chromium-dashboard,mdittmer/chromium-dashboard,modulexcite/chromium-dashboard,mdittmer/chromium-dashboard,modulexcite/chromium-dashboard,GoogleChrome/chromium-dashboard,jeffposnick/chromium-dashboard
|
Add db data correcting script
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2014 Google Inc. All Rights Reserved.
import models
import uma
def CorrectPropertyName(bucket_id):
if bucket_id in uma.CSS_PROPERTY_BUCKETS:
return uma.CSS_PROPERTY_BUCKETS[bucket_id]
return None
def FetchAllPropertiesWithError(bucket_id=None):
q = models.StableInstance.all()
if bucket_id:
q.filter('bucket_id =', bucket_id)
q.filter('property_name =', 'ERROR')
props = q.fetch(None)
# Bucket 1 for CSS properties is total pages visited
props = [p for p in props if p.bucket_id > 1]
return props
if __name__ == '__main__':
props = FetchAllPropertiesWithError()
print 'Found', str(len(props)), 'properties tagged "ERROR"'
need_correcting = {}
for p in props:
correct_name = CorrectPropertyName(p.bucket_id)
if correct_name is not None:
need_correcting[p.bucket_id] = correct_name
for p in props:
if p.bucket_id in need_correcting:
new_name = need_correcting[p.bucket_id]
print p.bucket_id, p.property_name, '->', new_name
p.property_name = new_name
p.put()
print 'Done'
|
<commit_before><commit_msg>Add db data correcting script<commit_after>
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2014 Google Inc. All Rights Reserved.
import models
import uma
def CorrectPropertyName(bucket_id):
if bucket_id in uma.CSS_PROPERTY_BUCKETS:
return uma.CSS_PROPERTY_BUCKETS[bucket_id]
return None
def FetchAllPropertiesWithError(bucket_id=None):
q = models.StableInstance.all()
if bucket_id:
q.filter('bucket_id =', bucket_id)
q.filter('property_name =', 'ERROR')
props = q.fetch(None)
# Bucket 1 for CSS properties is total pages visited
props = [p for p in props if p.bucket_id > 1]
return props
if __name__ == '__main__':
props = FetchAllPropertiesWithError()
print 'Found', str(len(props)), 'properties tagged "ERROR"'
need_correcting = {}
for p in props:
correct_name = CorrectPropertyName(p.bucket_id)
if correct_name is not None:
need_correcting[p.bucket_id] = correct_name
for p in props:
if p.bucket_id in need_correcting:
new_name = need_correcting[p.bucket_id]
print p.bucket_id, p.property_name, '->', new_name
p.property_name = new_name
p.put()
print 'Done'
|
Add db data correcting script#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2014 Google Inc. All Rights Reserved.
import models
import uma
def CorrectPropertyName(bucket_id):
if bucket_id in uma.CSS_PROPERTY_BUCKETS:
return uma.CSS_PROPERTY_BUCKETS[bucket_id]
return None
def FetchAllPropertiesWithError(bucket_id=None):
q = models.StableInstance.all()
if bucket_id:
q.filter('bucket_id =', bucket_id)
q.filter('property_name =', 'ERROR')
props = q.fetch(None)
# Bucket 1 for CSS properties is total pages visited
props = [p for p in props if p.bucket_id > 1]
return props
if __name__ == '__main__':
props = FetchAllPropertiesWithError()
print 'Found', str(len(props)), 'properties tagged "ERROR"'
need_correcting = {}
for p in props:
correct_name = CorrectPropertyName(p.bucket_id)
if correct_name is not None:
need_correcting[p.bucket_id] = correct_name
for p in props:
if p.bucket_id in need_correcting:
new_name = need_correcting[p.bucket_id]
print p.bucket_id, p.property_name, '->', new_name
p.property_name = new_name
p.put()
print 'Done'
|
<commit_before><commit_msg>Add db data correcting script<commit_after>#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2014 Google Inc. All Rights Reserved.
import models
import uma
def CorrectPropertyName(bucket_id):
if bucket_id in uma.CSS_PROPERTY_BUCKETS:
return uma.CSS_PROPERTY_BUCKETS[bucket_id]
return None
def FetchAllPropertiesWithError(bucket_id=None):
q = models.StableInstance.all()
if bucket_id:
q.filter('bucket_id =', bucket_id)
q.filter('property_name =', 'ERROR')
props = q.fetch(None)
# Bucket 1 for CSS properties is total pages visited
props = [p for p in props if p.bucket_id > 1]
return props
if __name__ == '__main__':
props = FetchAllPropertiesWithError()
print 'Found', str(len(props)), 'properties tagged "ERROR"'
need_correcting = {}
for p in props:
correct_name = CorrectPropertyName(p.bucket_id)
if correct_name is not None:
need_correcting[p.bucket_id] = correct_name
for p in props:
if p.bucket_id in need_correcting:
new_name = need_correcting[p.bucket_id]
print p.bucket_id, p.property_name, '->', new_name
p.property_name = new_name
p.put()
print 'Done'
|
|
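The correction pass above reduces to a dictionary lookup keyed by UMA bucket id; a self-contained illustration (the bucket values here are made up, not real uma.CSS_PROPERTY_BUCKETS entries):

CSS_PROPERTY_BUCKETS = {2: "color", 3: "display"}  # illustrative mapping

def correct_property_name(bucket_id):
    # Same contract as CorrectPropertyName() above: the proper name for
    # a known bucket, otherwise None (and the record is left alone).
    return CSS_PROPERTY_BUCKETS.get(bucket_id)

records = [(2, "ERROR"), (3, "ERROR"), (99, "ERROR")]
for bucket_id, name in records:
    fixed = correct_property_name(bucket_id)
    print(bucket_id, name, "->", fixed if fixed else "(left as-is)")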
3e4305259b9f7b0842bac99c4d3a70014e01ac15
|
tests_asyncio/test_dataloader.py
|
tests_asyncio/test_dataloader.py
|
from collections import namedtuple
from unittest.mock import Mock
from pytest import mark
from aiodataloader import DataLoader
from graphene import ObjectType, String, Schema, Field, List
CHARACTERS = {
"1": {"name": "Luke Skywalker", "sibling": "3"},
"2": {"name": "Darth Vader", "sibling": None},
"3": {"name": "Leia Organa", "sibling": "1"},
}
get_character = Mock(side_effect=lambda character_id: CHARACTERS[character_id])
class CharacterType(ObjectType):
name = String()
sibling = Field(lambda: CharacterType)
async def resolve_sibling(character, info):
if character["sibling"]:
return await info.context.character_loader.load(character["sibling"])
return None
class Query(ObjectType):
skywalker_family = List(CharacterType)
async def resolve_skywalker_family(_, info):
return await info.context.character_loader.load_many(["1", "2", "3"])
mock_batch_load_fn = Mock(
side_effect=lambda character_ids: [get_character(id) for id in character_ids]
)
class CharacterLoader(DataLoader):
async def batch_load_fn(self, character_ids):
return mock_batch_load_fn(character_ids)
Context = namedtuple("Context", "character_loader")
@mark.asyncio
async def test_basic_dataloader():
schema = Schema(query=Query)
character_loader = CharacterLoader()
context = Context(character_loader=character_loader)
query = """
{
skywalkerFamily {
name
sibling {
name
}
}
}
"""
result = await schema.execute_async(query, context=context)
assert not result.errors
assert result.data == {
"skywalkerFamily": [
{"name": "Luke Skywalker", "sibling": {"name": "Leia Organa"}},
{"name": "Darth Vader", "sibling": None},
{"name": "Leia Organa", "sibling": {"name": "Luke Skywalker"}},
]
}
assert mock_batch_load_fn.call_count == 1
assert get_character.call_count == 3
|
Add basic test for aiodataloader
|
Add basic test for aiodataloader
|
Python
|
mit
|
graphql-python/graphene,graphql-python/graphene
|
Add basic test for aiodataloader
|
from collections import namedtuple
from unittest.mock import Mock
from pytest import mark
from aiodataloader import DataLoader
from graphene import ObjectType, String, Schema, Field, List
CHARACTERS = {
"1": {"name": "Luke Skywalker", "sibling": "3"},
"2": {"name": "Darth Vader", "sibling": None},
"3": {"name": "Leia Organa", "sibling": "1"},
}
get_character = Mock(side_effect=lambda character_id: CHARACTERS[character_id])
class CharacterType(ObjectType):
name = String()
sibling = Field(lambda: CharacterType)
async def resolve_sibling(character, info):
if character["sibling"]:
return await info.context.character_loader.load(character["sibling"])
return None
class Query(ObjectType):
skywalker_family = List(CharacterType)
async def resolve_skywalker_family(_, info):
return await info.context.character_loader.load_many(["1", "2", "3"])
mock_batch_load_fn = Mock(
side_effect=lambda character_ids: [get_character(id) for id in character_ids]
)
class CharacterLoader(DataLoader):
async def batch_load_fn(self, character_ids):
return mock_batch_load_fn(character_ids)
Context = namedtuple("Context", "character_loader")
@mark.asyncio
async def test_basic_dataloader():
schema = Schema(query=Query)
character_loader = CharacterLoader()
context = Context(character_loader=character_loader)
query = """
{
skywalkerFamily {
name
sibling {
name
}
}
}
"""
result = await schema.execute_async(query, context=context)
assert not result.errors
assert result.data == {
"skywalkerFamily": [
{"name": "Luke Skywalker", "sibling": {"name": "Leia Organa"}},
{"name": "Darth Vader", "sibling": None},
{"name": "Leia Organa", "sibling": {"name": "Luke Skywalker"}},
]
}
assert mock_batch_load_fn.call_count == 1
assert get_character.call_count == 3
|
<commit_before><commit_msg>Add basic test for aiodataloader<commit_after>
|
from collections import namedtuple
from unittest.mock import Mock
from pytest import mark
from aiodataloader import DataLoader
from graphene import ObjectType, String, Schema, Field, List
CHARACTERS = {
"1": {"name": "Luke Skywalker", "sibling": "3"},
"2": {"name": "Darth Vader", "sibling": None},
"3": {"name": "Leia Organa", "sibling": "1"},
}
get_character = Mock(side_effect=lambda character_id: CHARACTERS[character_id])
class CharacterType(ObjectType):
name = String()
sibling = Field(lambda: CharacterType)
async def resolve_sibling(character, info):
if character["sibling"]:
return await info.context.character_loader.load(character["sibling"])
return None
class Query(ObjectType):
skywalker_family = List(CharacterType)
async def resolve_skywalker_family(_, info):
return await info.context.character_loader.load_many(["1", "2", "3"])
mock_batch_load_fn = Mock(
side_effect=lambda character_ids: [get_character(id) for id in character_ids]
)
class CharacterLoader(DataLoader):
async def batch_load_fn(self, character_ids):
return mock_batch_load_fn(character_ids)
Context = namedtuple("Context", "character_loader")
@mark.asyncio
async def test_basic_dataloader():
schema = Schema(query=Query)
character_loader = CharacterLoader()
context = Context(character_loader=character_loader)
query = """
{
skywalkerFamily {
name
sibling {
name
}
}
}
"""
result = await schema.execute_async(query, context=context)
assert not result.errors
assert result.data == {
"skywalkerFamily": [
{"name": "Luke Skywalker", "sibling": {"name": "Leia Organa"}},
{"name": "Darth Vader", "sibling": None},
{"name": "Leia Organa", "sibling": {"name": "Luke Skywalker"}},
]
}
assert mock_batch_load_fn.call_count == 1
assert get_character.call_count == 3
|
Add basic test for aiodataloaderfrom collections import namedtuple
from unittest.mock import Mock
from pytest import mark
from aiodataloader import DataLoader
from graphene import ObjectType, String, Schema, Field, List
CHARACTERS = {
"1": {"name": "Luke Skywalker", "sibling": "3"},
"2": {"name": "Darth Vader", "sibling": None},
"3": {"name": "Leia Organa", "sibling": "1"},
}
get_character = Mock(side_effect=lambda character_id: CHARACTERS[character_id])
class CharacterType(ObjectType):
name = String()
sibling = Field(lambda: CharacterType)
async def resolve_sibling(character, info):
if character["sibling"]:
return await info.context.character_loader.load(character["sibling"])
return None
class Query(ObjectType):
skywalker_family = List(CharacterType)
async def resolve_skywalker_family(_, info):
return await info.context.character_loader.load_many(["1", "2", "3"])
mock_batch_load_fn = Mock(
side_effect=lambda character_ids: [get_character(id) for id in character_ids]
)
class CharacterLoader(DataLoader):
async def batch_load_fn(self, character_ids):
return mock_batch_load_fn(character_ids)
Context = namedtuple("Context", "character_loader")
@mark.asyncio
async def test_basic_dataloader():
schema = Schema(query=Query)
character_loader = CharacterLoader()
context = Context(character_loader=character_loader)
query = """
{
skywalkerFamily {
name
sibling {
name
}
}
}
"""
result = await schema.execute_async(query, context=context)
assert not result.errors
assert result.data == {
"skywalkerFamily": [
{"name": "Luke Skywalker", "sibling": {"name": "Leia Organa"}},
{"name": "Darth Vader", "sibling": None},
{"name": "Leia Organa", "sibling": {"name": "Luke Skywalker"}},
]
}
assert mock_batch_load_fn.call_count == 1
assert get_character.call_count == 3
|
<commit_before><commit_msg>Add basic test for aiodataloader<commit_after>from collections import namedtuple
from unittest.mock import Mock
from pytest import mark
from aiodataloader import DataLoader
from graphene import ObjectType, String, Schema, Field, List
CHARACTERS = {
"1": {"name": "Luke Skywalker", "sibling": "3"},
"2": {"name": "Darth Vader", "sibling": None},
"3": {"name": "Leia Organa", "sibling": "1"},
}
get_character = Mock(side_effect=lambda character_id: CHARACTERS[character_id])
class CharacterType(ObjectType):
name = String()
sibling = Field(lambda: CharacterType)
async def resolve_sibling(character, info):
if character["sibling"]:
return await info.context.character_loader.load(character["sibling"])
return None
class Query(ObjectType):
skywalker_family = List(CharacterType)
async def resolve_skywalker_family(_, info):
return await info.context.character_loader.load_many(["1", "2", "3"])
mock_batch_load_fn = Mock(
side_effect=lambda character_ids: [get_character(id) for id in character_ids]
)
class CharacterLoader(DataLoader):
async def batch_load_fn(self, character_ids):
return mock_batch_load_fn(character_ids)
Context = namedtuple("Context", "character_loader")
@mark.asyncio
async def test_basic_dataloader():
schema = Schema(query=Query)
character_loader = CharacterLoader()
context = Context(character_loader=character_loader)
query = """
{
skywalkerFamily {
name
sibling {
name
}
}
}
"""
result = await schema.execute_async(query, context=context)
assert not result.errors
assert result.data == {
"skywalkerFamily": [
{"name": "Luke Skywalker", "sibling": {"name": "Leia Organa"}},
{"name": "Darth Vader", "sibling": None},
{"name": "Leia Organa", "sibling": {"name": "Luke Skywalker"}},
]
}
assert mock_batch_load_fn.call_count == 1
assert get_character.call_count == 3
|
|
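The single-batch guarantee the test above asserts (one batch_load_fn call for three concurrent loads) can be observed with aiodataloader alone; a minimal sketch:

import asyncio
from aiodataloader import DataLoader

class EchoLoader(DataLoader):
    async def batch_load_fn(self, keys):
        print("batch:", keys)  # fires once for all loads in the same tick
        return [k.upper() for k in keys]

async def main():
    loader = EchoLoader()
    results = await asyncio.gather(
        loader.load("a"), loader.load("b"), loader.load("c"))
    print(results)  # ['A', 'B', 'C']

asyncio.run(main())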
edf2ebfec6b6a5ddde418590ad8d775a393675bc
|
tests/test_manifold_elliptope.py
|
tests/test_manifold_elliptope.py
|
import unittest
import numpy as np
import numpy.linalg as la
import numpy.random as rnd
import numpy.testing as np_testing
from pymanopt.manifolds import Elliptope
class TestElliptopeManifold(unittest.TestCase):
def setUp(self):
self.n = n = 50
self.k = k = 10
self.man = Elliptope(n, k)
# def test_dim(self):
# def test_typicaldist(self):
# def test_dist(self):
# def test_inner(self):
# def test_proj(self):
# def test_ehess2rhess(self):
# def test_retr(self):
# def test_egrad2rgrad(self):
# def test_norm(self):
# def test_rand(self):
# def test_randvec(self):
# def test_transp(self):
# def test_exp_log_inverse(self):
# s = self.man
# X = s.rand()
# U = s.randvec(X)
# Uexplog = s.exp(X, s.log(X, U))
# np_testing.assert_array_almost_equal(U, Uexplog)
# def test_log_exp_inverse(self):
# s = self.man
# X = s.rand()
# U = s.randvec(X)
# Ulogexp = s.log(X, s.exp(X, U))
# np_testing.assert_array_almost_equal(U, Ulogexp)
# def test_pairmean(self):
# s = self.man
# X = s.rand()
# Y = s.rand()
# Z = s.pairmean(X, Y)
# np_testing.assert_array_almost_equal(s.dist(X, Z), s.dist(Y, Z))
|
Add elliptope manifold test skeleton.
|
Add elliptope manifold test skeleton.
|
Python
|
bsd-3-clause
|
nkoep/pymanopt,tingelst/pymanopt,nkoep/pymanopt,pymanopt/pymanopt,nkoep/pymanopt,pymanopt/pymanopt,j-towns/pymanopt
|
Add elliptope manifold test skeleton.
|
import unittest
import numpy as np
import numpy.linalg as la
import numpy.random as rnd
import numpy.testing as np_testing
from pymanopt.manifolds import Elliptope
class TestElliptopeManifold(unittest.TestCase):
def setUp(self):
self.n = n = 50
self.k = k = 10
self.man = Elliptope(n, k)
# def test_dim(self):
# def test_typicaldist(self):
# def test_dist(self):
# def test_inner(self):
# def test_proj(self):
# def test_ehess2rhess(self):
# def test_retr(self):
# def test_egrad2rgrad(self):
# def test_norm(self):
# def test_rand(self):
# def test_randvec(self):
# def test_transp(self):
# def test_exp_log_inverse(self):
# s = self.man
# X = s.rand()
# U = s.randvec(X)
# Uexplog = s.exp(X, s.log(X, U))
# np_testing.assert_array_almost_equal(U, Uexplog)
# def test_log_exp_inverse(self):
# s = self.man
# X = s.rand()
# U = s.randvec(X)
# Ulogexp = s.log(X, s.exp(X, U))
# np_testing.assert_array_almost_equal(U, Ulogexp)
# def test_pairmean(self):
# s = self.man
# X = s.rand()
# Y = s.rand()
# Z = s.pairmean(X, Y)
# np_testing.assert_array_almost_equal(s.dist(X, Z), s.dist(Y, Z))
|
<commit_before><commit_msg>Add elliptope manifold test skeleton.<commit_after>
|
import unittest
import numpy as np
import numpy.linalg as la
import numpy.random as rnd
import numpy.testing as np_testing
from pymanopt.manifolds import Elliptope
class TestElliptopeManifold(unittest.TestCase):
def setUp(self):
self.n = n = 50
self.k = k = 10
self.man = Elliptope(n, k)
# def test_dim(self):
# def test_typicaldist(self):
# def test_dist(self):
# def test_inner(self):
# def test_proj(self):
# def test_ehess2rhess(self):
# def test_retr(self):
# def test_egrad2rgrad(self):
# def test_norm(self):
# def test_rand(self):
# def test_randvec(self):
# def test_transp(self):
# def test_exp_log_inverse(self):
# s = self.man
# X = s.rand()
# U = s.randvec(X)
# Uexplog = s.exp(X, s.log(X, U))
# np_testing.assert_array_almost_equal(U, Uexplog)
# def test_log_exp_inverse(self):
# s = self.man
# X = s.rand()
# U = s.randvec(X)
# Ulogexp = s.log(X, s.exp(X, U))
# np_testing.assert_array_almost_equal(U, Ulogexp)
# def test_pairmean(self):
# s = self.man
# X = s.rand()
# Y = s.rand()
# Z = s.pairmean(X, Y)
# np_testing.assert_array_almost_equal(s.dist(X, Z), s.dist(Y, Z))
|
Add elliptope manifold test skeleton.import unittest
import numpy as np
import numpy.linalg as la
import numpy.random as rnd
import numpy.testing as np_testing
from pymanopt.manifolds import Elliptope
class TestElliptopeManifold(unittest.TestCase):
def setUp(self):
self.n = n = 50
self.k = k = 10
self.man = Elliptope(n, k)
# def test_dim(self):
# def test_typicaldist(self):
# def test_dist(self):
# def test_inner(self):
# def test_proj(self):
# def test_ehess2rhess(self):
# def test_retr(self):
# def test_egrad2rgrad(self):
# def test_norm(self):
# def test_rand(self):
# def test_randvec(self):
# def test_transp(self):
# def test_exp_log_inverse(self):
# s = self.man
# X = s.rand()
# U = s.randvec(X)
# Uexplog = s.exp(X, s.log(X, U))
# np_testing.assert_array_almost_equal(U, Uexplog)
# def test_log_exp_inverse(self):
# s = self.man
# X = s.rand()
# U = s.randvec(X)
# Ulogexp = s.log(X, s.exp(X, U))
# np_testing.assert_array_almost_equal(U, Ulogexp)
# def test_pairmean(self):
# s = self.man
# X = s.rand()
# Y = s.rand()
# Z = s.pairmean(X, Y)
# np_testing.assert_array_almost_equal(s.dist(X, Z), s.dist(Y, Z))
|
<commit_before><commit_msg>Add elliptope manifold test skeleton.<commit_after>import unittest
import numpy as np
import numpy.linalg as la
import numpy.random as rnd
import numpy.testing as np_testing
from pymanopt.manifolds import Elliptope
class TestElliptopeManifold(unittest.TestCase):
def setUp(self):
self.n = n = 50
self.k = k = 10
self.man = Elliptope(n, k)
# def test_dim(self):
# def test_typicaldist(self):
# def test_dist(self):
# def test_inner(self):
# def test_proj(self):
# def test_ehess2rhess(self):
# def test_retr(self):
# def test_egrad2rgrad(self):
# def test_norm(self):
# def test_rand(self):
# def test_randvec(self):
# def test_transp(self):
# def test_exp_log_inverse(self):
# s = self.man
# X = s.rand()
# U = s.randvec(X)
# Uexplog = s.exp(X, s.log(X, U))
# np_testing.assert_array_almost_equal(U, Uexplog)
# def test_log_exp_inverse(self):
# s = self.man
# X = s.rand()
# U = s.randvec(X)
# Ulogexp = s.log(X, s.exp(X, U))
# np_testing.assert_array_almost_equal(U, Ulogexp)
# def test_pairmean(self):
# s = self.man
# X = s.rand()
# Y = s.rand()
# Z = s.pairmean(X, Y)
# np_testing.assert_array_almost_equal(s.dist(X, Z), s.dist(Y, Z))
|
|
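As the stubs above get filled in, a natural first assertion is the defining constraint of the elliptope: points are n x k factors Y whose Gram matrix Y Y^T has unit diagonal. A hedged sketch of such a method for the test class above (this assumes the factor representation used by pymanopt's Elliptope; verify against the manifold's rand() before relying on it):

def test_rand_satisfies_constraint(self):
    # Y is an n x k factor; X = Y Y^T should have ones on the diagonal.
    Y = self.man.rand()
    X = Y.dot(Y.T)
    np_testing.assert_allclose(np.diag(X), np.ones(self.n), atol=1e-10)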
837e35e1ecd8c4ab62a7011471d56c7b9283fb4d
|
media/mojo/scripts/run_mojo_media_renderer.py
|
media/mojo/scripts/run_mojo_media_renderer.py
|
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# The script follows mojo/services/html_viewer/view_url.py, modified to
# test the mojo media renderer. The page will be rendered in windowless mode.
#
# TODO(xhwang): Explore the possibility of running this with the Kiosk window
# manager.
import argparse
import os
import subprocess
import sys
root_path = os.path.realpath(
os.path.join(
os.path.dirname(
os.path.realpath(__file__)),
os.pardir,
os.pardir,
os.pardir))
def _BuildShellCommand(args):
sdk_version = subprocess.check_output(["cat",
"third_party/mojo/src/mojo/public/VERSION"], cwd=root_path)
build_dir = os.path.join(root_path, args.build_dir)
shell_command = [os.path.join(build_dir, "mojo_shell")]
options = []
options.append(
"--origin=https://storage.googleapis.com/mojo/services/linux-x64/%s" %
sdk_version)
options.append("--url-mappings=mojo:html_viewer=file://%s/html_viewer.mojo,"
"mojo:media=file://%s/media.mojo" % (build_dir, build_dir))
args_for_html_viewer = "--enable-mojo-media-renderer "
if args.verbose:
args_for_html_viewer += \
"--vmodule=pipeline*=3,*renderer_impl*=3,*mojo_demuxer*=3"
options.append("--args-for=mojo:html_viewer %s" % args_for_html_viewer)
full_command = shell_command + options + [args.url]
if args.verbose:
print full_command
return full_command
def main():
parser = argparse.ArgumentParser(
description="View a URL with HTMLViewer with mojo media renderer. "
"You must have built //mojo/services/html_viewer, "
"//mojo/services/network and //media/mojo/services first. "
" Note that this will currently often fail spectacularly due "
" to lack of binary stability in Mojo.")
parser.add_argument(
"--build-dir",
help="Path to the dir containing the linux-x64 binaries relative to the "
"repo root (default: %(default)s)",
default="out/Release")
parser.add_argument("--verbose", help="Increase output verbosity.",
action="store_true")
parser.add_argument("url",
help="The URL to be viewed")
args = parser.parse_args()
return subprocess.call(_BuildShellCommand(args))
if __name__ == '__main__':
sys.exit(main())
|
Add script to run/test the mojo media renderer.
|
Add script to run/test the mojo media renderer.
The script follows mojo/services/html_viewer/view_url.py. See the script for
how to use it.
BUG=410451
TEST=Run the script to test.
Review URL: https://codereview.chromium.org/991963003
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#319741}
|
Python
|
bsd-3-clause
|
chuan9/chromium-crosswalk,Pluto-tv/chromium-crosswalk,hgl888/chromium-crosswalk,axinging/chromium-crosswalk,Fireblend/chromium-crosswalk,Pluto-tv/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,chuan9/chromium-crosswalk,fujunwei/chromium-crosswalk,ltilve/chromium,hgl888/chromium-crosswalk,axinging/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,hgl888/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,Pluto-tv/chromium-crosswalk,axinging/chromium-crosswalk,Fireblend/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,axinging/chromium-crosswalk,Just-D/chromium-1,fujunwei/chromium-crosswalk,hgl888/chromium-crosswalk,hgl888/chromium-crosswalk,Just-D/chromium-1,TheTypoMaster/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,Pluto-tv/chromium-crosswalk,Chilledheart/chromium,Pluto-tv/chromium-crosswalk,Fireblend/chromium-crosswalk,Just-D/chromium-1,Chilledheart/chromium,Chilledheart/chromium,ltilve/chromium,fujunwei/chromium-crosswalk,Chilledheart/chromium,ltilve/chromium,Fireblend/chromium-crosswalk,hgl888/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,hgl888/chromium-crosswalk,fujunwei/chromium-crosswalk,axinging/chromium-crosswalk,fujunwei/chromium-crosswalk,Fireblend/chromium-crosswalk,axinging/chromium-crosswalk,Chilledheart/chromium,hgl888/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,fujunwei/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,Pluto-tv/chromium-crosswalk,Fireblend/chromium-crosswalk,Pluto-tv/chromium-crosswalk,Just-D/chromium-1,PeterWangIntel/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,chuan9/chromium-crosswalk,Fireblend/chromium-crosswalk,axinging/chromium-crosswalk,Just-D/chromium-1,PeterWangIntel/chromium-crosswalk,Pluto-tv/chromium-crosswalk,ltilve/chromium,chuan9/chromium-crosswalk,Just-D/chromium-1,Pluto-tv/chromium-crosswalk,hgl888/chromium-crosswalk,Just-D/chromium-1,axinging/chromium-crosswalk,Chilledheart/chromium,ltilve/chromium,Chilledheart/chromium,Fireblend/chromium-crosswalk,chuan9/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,ltilve/chromium,fujunwei/chromium-crosswalk,fujunwei/chromium-crosswalk,ltilve/chromium,chuan9/chromium-crosswalk,Chilledheart/chromium,axinging/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,fujunwei/chromium-crosswalk,axinging/chromium-crosswalk,ltilve/chromium,chuan9/chromium-crosswalk,chuan9/chromium-crosswalk,Fireblend/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,ltilve/chromium,PeterWangIntel/chromium-crosswalk,axinging/chromium-crosswalk,Chilledheart/chromium,Just-D/chromium-1,Just-D/chromium-1,chuan9/chromium-crosswalk
|
Add script to run/test the mojo media renderer.
The script follows mojo/services/html_viewer/view_url.py. See the script for
how to use it.
BUG=410451
TEST=Run the script to test.
Review URL: https://codereview.chromium.org/991963003
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#319741}
|
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# The script follows mojo/services/html_viewer/view_url.py, modified to
# test the mojo media renderer. The page will be rendered in windowless mode.
#
# TODO(xhwang): Explore the possibility of running this with the Kiosk window
# manager.
import argparse
import os
import subprocess
import sys
root_path = os.path.realpath(
os.path.join(
os.path.dirname(
os.path.realpath(__file__)),
os.pardir,
os.pardir,
os.pardir))
def _BuildShellCommand(args):
sdk_version = subprocess.check_output(["cat",
"third_party/mojo/src/mojo/public/VERSION"], cwd=root_path)
build_dir = os.path.join(root_path, args.build_dir)
shell_command = [os.path.join(build_dir, "mojo_shell")]
options = []
options.append(
"--origin=https://storage.googleapis.com/mojo/services/linux-x64/%s" %
sdk_version)
options.append("--url-mappings=mojo:html_viewer=file://%s/html_viewer.mojo,"
"mojo:media=file://%s/media.mojo" % (build_dir, build_dir))
args_for_html_viewer = "--enable-mojo-media-renderer "
if args.verbose:
args_for_html_viewer += \
"--vmodule=pipeline*=3,*renderer_impl*=3,*mojo_demuxer*=3"
options.append("--args-for=mojo:html_viewer %s" % args_for_html_viewer)
full_command = shell_command + options + [args.url]
if args.verbose:
print full_command
return full_command
def main():
parser = argparse.ArgumentParser(
      description="View a URL with HTMLViewer with mojo media renderer. "
                  "You must have built //mojo/services/html_viewer, "
                  "//mojo/services/network and //media/mojo/services first. "
                  "Note that this will currently often fail spectacularly due "
                  "to lack of binary stability in Mojo.")
parser.add_argument(
"--build-dir",
help="Path to the dir containing the linux-x64 binaries relative to the "
"repo root (default: %(default)s)",
default="out/Release")
parser.add_argument("--verbose", help="Increase output verbosity.",
action="store_true")
parser.add_argument("url",
help="The URL to be viewed")
args = parser.parse_args()
return subprocess.call(_BuildShellCommand(args))
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>Add script to run/test the mojo media renderer.
The script follows mojo/services/html_viewer/view_url.py. See the script for
how to use it.
BUG=410451
TEST=Run the script to test.
Review URL: https://codereview.chromium.org/991963003
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#319741}<commit_after>
|
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# The script follows mojo/services/html_viewer/view_url.py, modified to
# test the mojo media renderer. The page will be rendered in windowless mode.
#
# TODO(xhwang): Explore the possibility of running this with the Kiosk window
# manager.
import argparse
import os
import subprocess
import sys
root_path = os.path.realpath(
os.path.join(
os.path.dirname(
os.path.realpath(__file__)),
os.pardir,
os.pardir,
os.pardir))
def _BuildShellCommand(args):
sdk_version = subprocess.check_output(["cat",
"third_party/mojo/src/mojo/public/VERSION"], cwd=root_path)
build_dir = os.path.join(root_path, args.build_dir)
shell_command = [os.path.join(build_dir, "mojo_shell")]
options = []
options.append(
"--origin=https://storage.googleapis.com/mojo/services/linux-x64/%s" %
sdk_version)
options.append("--url-mappings=mojo:html_viewer=file://%s/html_viewer.mojo,"
"mojo:media=file://%s/media.mojo" % (build_dir, build_dir))
args_for_html_viewer = "--enable-mojo-media-renderer "
if args.verbose:
args_for_html_viewer += \
"--vmodule=pipeline*=3,*renderer_impl*=3,*mojo_demuxer*=3"
options.append("--args-for=mojo:html_viewer %s" % args_for_html_viewer)
full_command = shell_command + options + [args.url]
if args.verbose:
print full_command
return full_command
def main():
parser = argparse.ArgumentParser(
      description="View a URL with HTMLViewer with mojo media renderer. "
                  "You must have built //mojo/services/html_viewer, "
                  "//mojo/services/network and //media/mojo/services first. "
                  "Note that this will currently often fail spectacularly due "
                  "to lack of binary stability in Mojo.")
parser.add_argument(
"--build-dir",
help="Path to the dir containing the linux-x64 binaries relative to the "
"repo root (default: %(default)s)",
default="out/Release")
parser.add_argument("--verbose", help="Increase output verbosity.",
action="store_true")
parser.add_argument("url",
help="The URL to be viewed")
args = parser.parse_args()
return subprocess.call(_BuildShellCommand(args))
if __name__ == '__main__':
sys.exit(main())
|
Add script to run/test the mojo media renderer.
The script follows mojo/services/html_viewer/view_url.py. See the script for
how to use it.
BUG=410451
TEST=Run the script to test.
Review URL: https://codereview.chromium.org/991963003
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#319741}#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# The script follows mojo/services/html_viewer/view_url.py, modified to
# test the mojo media renderer. The page will be rendered in windowless mode.
#
# TODO(xhwang): Explore the possibility of running this with the Kiosk window
# manager.
import argparse
import os
import subprocess
import sys
root_path = os.path.realpath(
os.path.join(
os.path.dirname(
os.path.realpath(__file__)),
os.pardir,
os.pardir,
os.pardir))
def _BuildShellCommand(args):
sdk_version = subprocess.check_output(["cat",
"third_party/mojo/src/mojo/public/VERSION"], cwd=root_path)
build_dir = os.path.join(root_path, args.build_dir)
shell_command = [os.path.join(build_dir, "mojo_shell")]
options = []
options.append(
"--origin=https://storage.googleapis.com/mojo/services/linux-x64/%s" %
sdk_version)
options.append("--url-mappings=mojo:html_viewer=file://%s/html_viewer.mojo,"
"mojo:media=file://%s/media.mojo" % (build_dir, build_dir))
args_for_html_viewer = "--enable-mojo-media-renderer "
if args.verbose:
args_for_html_viewer += \
"--vmodule=pipeline*=3,*renderer_impl*=3,*mojo_demuxer*=3"
options.append("--args-for=mojo:html_viewer %s" % args_for_html_viewer)
full_command = shell_command + options + [args.url]
if args.verbose:
print full_command
return full_command
def main():
parser = argparse.ArgumentParser(
      description="View a URL with HTMLViewer with mojo media renderer. "
                  "You must have built //mojo/services/html_viewer, "
                  "//mojo/services/network and //media/mojo/services first. "
                  "Note that this will currently often fail spectacularly due "
                  "to lack of binary stability in Mojo.")
parser.add_argument(
"--build-dir",
help="Path to the dir containing the linux-x64 binaries relative to the "
"repo root (default: %(default)s)",
default="out/Release")
parser.add_argument("--verbose", help="Increase output verbosity.",
action="store_true")
parser.add_argument("url",
help="The URL to be viewed")
args = parser.parse_args()
return subprocess.call(_BuildShellCommand(args))
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>Add script to run/test the mojo media renderer.
The script follows mojo/services/html_viewer/view_url.py. See the script for
how to use it.
BUG=410451
TEST=Run the script to test.
Review URL: https://codereview.chromium.org/991963003
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#319741}<commit_after>#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# The script follows mojo/services/html_viewer/view_url.py, modified to
# test the mojo media renderer. The page will be rendered in windowless mode.
#
# TODO(xhwang): Explore the possibility of running this with the Kiosk window
# manager.
import argparse
import os
import subprocess
import sys
root_path = os.path.realpath(
os.path.join(
os.path.dirname(
os.path.realpath(__file__)),
os.pardir,
os.pardir,
os.pardir))
def _BuildShellCommand(args):
sdk_version = subprocess.check_output(["cat",
"third_party/mojo/src/mojo/public/VERSION"], cwd=root_path)
build_dir = os.path.join(root_path, args.build_dir)
shell_command = [os.path.join(build_dir, "mojo_shell")]
options = []
options.append(
"--origin=https://storage.googleapis.com/mojo/services/linux-x64/%s" %
sdk_version)
options.append("--url-mappings=mojo:html_viewer=file://%s/html_viewer.mojo,"
"mojo:media=file://%s/media.mojo" % (build_dir, build_dir))
args_for_html_viewer = "--enable-mojo-media-renderer "
if args.verbose:
args_for_html_viewer += \
"--vmodule=pipeline*=3,*renderer_impl*=3,*mojo_demuxer*=3"
options.append("--args-for=mojo:html_viewer %s" % args_for_html_viewer)
full_command = shell_command + options + [args.url]
if args.verbose:
print full_command
return full_command
def main():
parser = argparse.ArgumentParser(
      description="View a URL with HTMLViewer with mojo media renderer. "
                  "You must have built //mojo/services/html_viewer, "
                  "//mojo/services/network and //media/mojo/services first. "
                  "Note that this will currently often fail spectacularly due "
                  "to lack of binary stability in Mojo.")
parser.add_argument(
"--build-dir",
help="Path to the dir containing the linux-x64 binaries relative to the "
"repo root (default: %(default)s)",
default="out/Release")
parser.add_argument("--verbose", help="Increase output verbosity.",
action="store_true")
parser.add_argument("url",
help="The URL to be viewed")
args = parser.parse_args()
return subprocess.call(_BuildShellCommand(args))
if __name__ == '__main__':
sys.exit(main())
|
|
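As a quick illustration of how _BuildShellCommand assembles its output, here is a self-contained sketch that needs no Chromium checkout; build_dir and sdk_version are hypothetical stand-ins for values the real script reads from the tree:

import argparse

def build_shell_command(url, build_dir="out/Release", sdk_version="0.1.0"):
    # Mirrors the assembly order above: shell binary, options, then the URL.
    shell_command = ["%s/mojo_shell" % build_dir]
    options = [
        "--origin=https://storage.googleapis.com/mojo/services/linux-x64/%s"
        % sdk_version,
        "--url-mappings=mojo:html_viewer=file://%s/html_viewer.mojo,"
        "mojo:media=file://%s/media.mojo" % (build_dir, build_dir),
        "--args-for=mojo:html_viewer --enable-mojo-media-renderer",
    ]
    return shell_command + options + [url]

parser = argparse.ArgumentParser()
parser.add_argument("url", help="The URL to be viewed")
args = parser.parse_args(["http://example.com/video.html"])
print(build_shell_command(args.url))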
fca7a5923a412745e428fc9435fb2d7c7d376d11
|
recipes/earthpy/run_test.py
|
recipes/earthpy/run_test.py
|
import earthpy.io as eio
import rasterio as rio
with rio.open(eio.path_to_example('rmnp-dem.tif')) as src:
dem = src.read()
|
Add script for a simple test
|
Add script for a simple test
|
Python
|
bsd-3-clause
|
dschreij/staged-recipes,chrisburr/staged-recipes,isuruf/staged-recipes,scopatz/staged-recipes,ocefpaf/staged-recipes,ReimarBauer/staged-recipes,birdsarah/staged-recipes,birdsarah/staged-recipes,Juanlu001/staged-recipes,mcs07/staged-recipes,igortg/staged-recipes,igortg/staged-recipes,johanneskoester/staged-recipes,kwilcox/staged-recipes,kwilcox/staged-recipes,chrisburr/staged-recipes,mariusvniekerk/staged-recipes,synapticarbors/staged-recipes,conda-forge/staged-recipes,petrushy/staged-recipes,ceholden/staged-recipes,jjhelmus/staged-recipes,asmeurer/staged-recipes,dschreij/staged-recipes,isuruf/staged-recipes,jochym/staged-recipes,conda-forge/staged-recipes,basnijholt/staged-recipes,scopatz/staged-recipes,ReimarBauer/staged-recipes,goanpeca/staged-recipes,asmeurer/staged-recipes,jochym/staged-recipes,basnijholt/staged-recipes,mcs07/staged-recipes,petrushy/staged-recipes,synapticarbors/staged-recipes,johanneskoester/staged-recipes,cpaulik/staged-recipes,hadim/staged-recipes,hadim/staged-recipes,Juanlu001/staged-recipes,cpaulik/staged-recipes,jakirkham/staged-recipes,ceholden/staged-recipes,SylvainCorlay/staged-recipes,SylvainCorlay/staged-recipes,jakirkham/staged-recipes,stuertz/staged-recipes,patricksnape/staged-recipes,mariusvniekerk/staged-recipes,goanpeca/staged-recipes,stuertz/staged-recipes,jjhelmus/staged-recipes,ocefpaf/staged-recipes,patricksnape/staged-recipes
|
Add script for a simple test
|
import earthpy.io as eio
import rasterio as rio
with rio.open(eio.path_to_example('rmnp-dem.tif')) as src:
dem = src.read()
|
<commit_before><commit_msg>Add script for a simple test<commit_after>
|
import earthpy.io as eio
import rasterio as rio
with rio.open(eio.path_to_example('rmnp-dem.tif')) as src:
dem = src.read()
|
Add script for a simple testimport earthpy.io as eio
import rasterio as rio
with rio.open(eio.path_to_example('rmnp-dem.tif')) as src:
dem = src.read()
|
<commit_before><commit_msg>Add script for a simple test<commit_after>import earthpy.io as eio
import rasterio as rio
with rio.open(eio.path_to_example('rmnp-dem.tif')) as src:
dem = src.read()
|
|
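If a stronger smoke test were wanted, the same pattern extends naturally; the single-band shape check below is an assumption about the rmnp-dem.tif example, not something the recipe guarantees:

import earthpy.io as eio
import rasterio as rio

with rio.open(eio.path_to_example('rmnp-dem.tif')) as src:
    dem = src.read()

# rasterio's read() returns a (bands, rows, cols) array; a DEM is one band.
assert dem.ndim == 3 and dem.shape[0] == 1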
78137a7d276eed39b885754256c1601e5941f62c
|
convert_to_recovery_and_remove_useless.py
|
convert_to_recovery_and_remove_useless.py
|
import sys
import os
import pandas as pd
from utils import get_driving_log_dataframe
from utils import img_folder
from shutil import copyfile
directory = sys.argv[1]
output_directory = sys.argv[2]
driving_log_df = get_driving_log_dataframe(directory)
steering = driving_log_df['steering']
angle_offset = 0.25
# if image_column == 'left':
# delta_steering = -angle_offset
# elif image_column == 'right':
# delta_steering = angle_offset
# else:
# delta_steering = 0
def save_images(offset_sign, image_series, angles_file):
for i in range(len(driving_log_df)):
if pd.isnull(image_series[i]):
continue
delta_steering = offset_sign * angle_offset
image_name = image_series[i].lstrip().rstrip()
steering_angle = steering[i] + delta_steering
print('{0} -> {1}'.format(image_name, steering_angle))
src_path_to_image = '{0}/{1}'.format(directory, image_name)
dest_path_to_image = '{0}/{1}'.format(output_directory, image_name)
copyfile(src_path_to_image, dest_path_to_image)
angles_file.write('{0},{1}\n'.format(image_name, steering_angle))
def copy_if_has_column(column_name, steering_sign, angles_file):
print(driving_log_df.columns)
if column_name in driving_log_df.columns:
image_series = driving_log_df[column_name]
save_images(steering_sign, image_series, angles_file)
if not os.path.exists(output_directory):
os.makedirs(output_directory)
if not os.path.exists(img_folder(output_directory)):
os.makedirs(img_folder(output_directory))
with open('{0}/{1}'.format(output_directory, 'angles.csv'), 'a+') as angles_file:
copy_if_has_column('left', -1, angles_file)
copy_if_has_column('right', 1, angles_file)
copy_if_has_column('center', 0, angles_file)
|
Add useful script for standardizing data in beta simulator and the normal one
|
Add useful script for standardizing data in beta simulator and the normal one
|
Python
|
mit
|
hristo-vrigazov/behavioral-cloning,hristo-vrigazov/behavioral-cloning
|
Add useful script for standardizing data in beta simulator and the normal one
|
import sys
import os
import pandas as pd
from utils import get_driving_log_dataframe
from utils import img_folder
from shutil import copyfile
directory = sys.argv[1]
output_directory = sys.argv[2]
driving_log_df = get_driving_log_dataframe(directory)
steering = driving_log_df['steering']
angle_offset = 0.25
# if image_column == 'left':
# delta_steering = -angle_offset
# elif image_column == 'right':
# delta_steering = angle_offset
# else:
# delta_steering = 0
def save_images(offset_sign, image_series, angles_file):
for i in range(len(driving_log_df)):
if pd.isnull(image_series[i]):
continue
delta_steering = offset_sign * angle_offset
image_name = image_series[i].lstrip().rstrip()
steering_angle = steering[i] + delta_steering
print('{0} -> {1}'.format(image_name, steering_angle))
src_path_to_image = '{0}/{1}'.format(directory, image_name)
dest_path_to_image = '{0}/{1}'.format(output_directory, image_name)
copyfile(src_path_to_image, dest_path_to_image)
angles_file.write('{0},{1}\n'.format(image_name, steering_angle))
def copy_if_has_column(column_name, steering_sign, angles_file):
print(driving_log_df.columns)
if column_name in driving_log_df.columns:
image_series = driving_log_df[column_name]
save_images(steering_sign, image_series, angles_file)
if not os.path.exists(output_directory):
os.makedirs(output_directory)
if not os.path.exists(img_folder(output_directory)):
os.makedirs(img_folder(output_directory))
with open('{0}/{1}'.format(output_directory, 'angles.csv'), 'a+') as angles_file:
copy_if_has_column('left', -1, angles_file)
copy_if_has_column('right', 1, angles_file)
copy_if_has_column('center', 0, angles_file)
|
<commit_before><commit_msg>Add useful script for standardizing data in beta simulator and the normal one<commit_after>
|
import sys
import os
import pandas as pd
from utils import get_driving_log_dataframe
from utils import img_folder
from shutil import copyfile
directory = sys.argv[1]
output_directory = sys.argv[2]
driving_log_df = get_driving_log_dataframe(directory)
steering = driving_log_df['steering']
angle_offset = 0.25
# if image_column == 'left':
# delta_steering = -angle_offset
# elif image_column == 'right':
# delta_steering = angle_offset
# else:
# delta_steering = 0
def save_images(offset_sign, image_series, angles_file):
for i in range(len(driving_log_df)):
if pd.isnull(image_series[i]):
continue
delta_steering = offset_sign * angle_offset
image_name = image_series[i].lstrip().rstrip()
steering_angle = steering[i] + delta_steering
print('{0} -> {1}'.format(image_name, steering_angle))
src_path_to_image = '{0}/{1}'.format(directory, image_name)
dest_path_to_image = '{0}/{1}'.format(output_directory, image_name)
copyfile(src_path_to_image, dest_path_to_image)
angles_file.write('{0},{1}\n'.format(image_name, steering_angle))
def copy_if_has_column(column_name, steering_sign, angles_file):
print(driving_log_df.columns)
if column_name in driving_log_df.columns:
image_series = driving_log_df[column_name]
save_images(steering_sign, image_series, angles_file)
if not os.path.exists(output_directory):
os.makedirs(output_directory)
if not os.path.exists(img_folder(output_directory)):
os.makedirs(img_folder(output_directory))
with open('{0}/{1}'.format(output_directory, 'angles.csv'), 'a+') as angles_file:
copy_if_has_column('left', -1, angles_file)
copy_if_has_column('right', 1, angles_file)
copy_if_has_column('center', 0, angles_file)
|
Add useful script for standardizing data in beta simulator and the normal oneimport sys
import os
import pandas as pd
from utils import get_driving_log_dataframe
from utils import img_folder
from shutil import copyfile
directory = sys.argv[1]
output_directory = sys.argv[2]
driving_log_df = get_driving_log_dataframe(directory)
steering = driving_log_df['steering']
angle_offset = 0.25
# if image_column == 'left':
# delta_steering = -angle_offset
# elif image_column == 'right':
# delta_steering = angle_offset
# else:
# delta_steering = 0
def save_images(offset_sign, image_series, angles_file):
for i in range(len(driving_log_df)):
if pd.isnull(image_series[i]):
continue
delta_steering = offset_sign * angle_offset
image_name = image_series[i].lstrip().rstrip()
steering_angle = steering[i] + delta_steering
print('{0} -> {1}'.format(image_name, steering_angle))
src_path_to_image = '{0}/{1}'.format(directory, image_name)
dest_path_to_image = '{0}/{1}'.format(output_directory, image_name)
copyfile(src_path_to_image, dest_path_to_image)
angles_file.write('{0},{1}\n'.format(image_name, steering_angle))
def copy_if_has_column(column_name, steering_sign, angles_file):
print(driving_log_df.columns)
if column_name in driving_log_df.columns:
image_series = driving_log_df[column_name]
save_images(steering_sign, image_series, angles_file)
if not os.path.exists(output_directory):
os.makedirs(output_directory)
if not os.path.exists(img_folder(output_directory)):
os.makedirs(img_folder(output_directory))
with open('{0}/{1}'.format(output_directory, 'angles.csv'), 'a+') as angles_file:
copy_if_has_column('left', -1, angles_file)
copy_if_has_column('right', 1, angles_file)
copy_if_has_column('center', 0, angles_file)
|
<commit_before><commit_msg>Add useful script for standardizing data in beta simulator and the normal one<commit_after>import sys
import os
import pandas as pd
from utils import get_driving_log_dataframe
from utils import img_folder
from shutil import copyfile
directory = sys.argv[1]
output_directory = sys.argv[2]
driving_log_df = get_driving_log_dataframe(directory)
steering = driving_log_df['steering']
angle_offset = 0.25
# if image_column == 'left':
# delta_steering = -angle_offset
# elif image_column == 'right':
# delta_steering = angle_offset
# else:
# delta_steering = 0
def save_images(offset_sign, image_series, angles_file):
for i in range(len(driving_log_df)):
if pd.isnull(image_series[i]):
continue
delta_steering = offset_sign * angle_offset
image_name = image_series[i].lstrip().rstrip()
steering_angle = steering[i] + delta_steering
print('{0} -> {1}'.format(image_name, steering_angle))
src_path_to_image = '{0}/{1}'.format(directory, image_name)
dest_path_to_image = '{0}/{1}'.format(output_directory, image_name)
copyfile(src_path_to_image, dest_path_to_image)
angles_file.write('{0},{1}\n'.format(image_name, steering_angle))
def copy_if_has_column(column_name, steering_sign, angles_file):
print(driving_log_df.columns)
if column_name in driving_log_df.columns:
image_series = driving_log_df[column_name]
save_images(steering_sign, image_series, angles_file)
if not os.path.exists(output_directory):
os.makedirs(output_directory)
if not os.path.exists(img_folder(output_directory)):
os.makedirs(img_folder(output_directory))
with open('{0}/{1}'.format(output_directory, 'angles.csv'), 'a+') as angles_file:
copy_if_has_column('left', -1, angles_file)
copy_if_has_column('right', 1, angles_file)
copy_if_has_column('center', 0, angles_file)
|
|
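The recovery conversion itself is simple arithmetic on the steering label; a minimal sketch of the computation in save_images, with the hard-coded 0.25 offset and the sign convention used above (left camera -1, right +1, center 0):

angle_offset = 0.25

def adjusted_angle(center_angle, offset_sign):
    # Same computation as save_images: steering + offset_sign * angle_offset.
    return center_angle + offset_sign * angle_offset

print(adjusted_angle(0.1, -1))  # left image:   0.1 - 0.25
print(adjusted_angle(0.1, 0))   # center image: 0.1
print(adjusted_angle(0.1, 1))   # right image:  0.1 + 0.25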
2a54042645a651c9c578517743f7267720e9d6b9
|
data/migrations/deb/1_3_433_to_1_3_434.py
|
data/migrations/deb/1_3_433_to_1_3_434.py
|
#!/usr/bin/python3.5
import os
import sys
import traceback
from indy_common.config_util import getConfig
from indy_common.config_helper import NodeConfigHelper
from ledger.compact_merkle_tree import CompactMerkleTree
from plenum.common.stack_manager import TxnStackManager
from plenum.common.ledger import Ledger
from stp_core.common.log import getlogger
from storage.helper import initHashStore
logger = getlogger()
ENV_FILE_PATH = "/etc/indy/indy.env"
def get_node_name():
node_name = None
node_name_key = 'NODE_NAME'
if os.path.exists(ENV_FILE_PATH):
with open(ENV_FILE_PATH, "r") as fenv:
for line in fenv.readlines():
if line.find(node_name_key) != -1:
node_name = line.split('=')[1].strip()
break
else:
logger.error("Path to env file does not exist")
return node_name
def append_ips_to_env(node_ip):
node_ip_key = 'NODE_IP'
client_ip_key = 'CLIENT_IP'
with open(ENV_FILE_PATH, "a") as fenv:
fenv.write("\n{}={}\n".format(node_ip_key, node_ip))
fenv.write("{}={}\n".format(client_ip_key, "0.0.0.0"))
def migrate_all():
node_name = get_node_name()
if node_name is None:
logger.error("Could not get node name")
return False
config = getConfig()
config_helper = NodeConfigHelper(node_name, config)
hash_store = initHashStore(config_helper.ledger_dir, "pool", config, read_only=True)
ledger = Ledger(CompactMerkleTree(hashStore=hash_store), dataDir=config_helper.ledger_dir,
fileName=config.poolTransactionsFile, read_only=True)
nodeReg, _, _ = TxnStackManager.parseLedgerForHaAndKeys(ledger)
    if nodeReg is None:
        logger.error("Empty node registry returned by stack manager")
        return False
    if node_name not in nodeReg:
        logger.error("Node registry does not contain node {}".format(node_name))
        return False
    ha = nodeReg[node_name]
    if ha is None:
logger.error("Empty HA for node {}".format(node_name))
return False
logger.info("HA for {}: {}".format(node_name, ha))
try:
append_ips_to_env(ha.host)
except Exception:
        logger.error(traceback.format_exc())
logger.error("Could not append node and client IPs to indy env file")
return False
return True
if migrate_all():
logger.info("Migration complete: node and client IPs have been added to indy env file")
else:
logger.error("Migration failed: node and client IPs have not been added to indy env file")
sys.exit(1)
|
Add migration script that appends node and client IPs to indy env file.
|
Add migration script that appends node and client IPs to indy env file.
Signed-off-by: Sergey Shilov <064bbdfaeb89a0bebebfe7b388747a73c8941704@dsr-company.com>
|
Python
|
apache-2.0
|
spivachuk/sovrin-node,spivachuk/sovrin-node,spivachuk/sovrin-node,spivachuk/sovrin-node
|
Add migration script that appends node and client IPs to indy env file.
Signed-off-by: Sergey Shilov <064bbdfaeb89a0bebebfe7b388747a73c8941704@dsr-company.com>
|
#!/usr/bin/python3.5
import os
import sys
import traceback
from indy_common.config_util import getConfig
from indy_common.config_helper import NodeConfigHelper
from ledger.compact_merkle_tree import CompactMerkleTree
from plenum.common.stack_manager import TxnStackManager
from plenum.common.ledger import Ledger
from stp_core.common.log import getlogger
from storage.helper import initHashStore
logger = getlogger()
ENV_FILE_PATH = "/etc/indy/indy.env"
def get_node_name():
node_name = None
node_name_key = 'NODE_NAME'
if os.path.exists(ENV_FILE_PATH):
with open(ENV_FILE_PATH, "r") as fenv:
for line in fenv.readlines():
if line.find(node_name_key) != -1:
node_name = line.split('=')[1].strip()
break
else:
logger.error("Path to env file does not exist")
return node_name
def append_ips_to_env(node_ip):
node_ip_key = 'NODE_IP'
client_ip_key = 'CLIENT_IP'
with open(ENV_FILE_PATH, "a") as fenv:
fenv.write("\n{}={}\n".format(node_ip_key, node_ip))
fenv.write("{}={}\n".format(client_ip_key, "0.0.0.0"))
def migrate_all():
node_name = get_node_name()
if node_name is None:
logger.error("Could not get node name")
return False
config = getConfig()
config_helper = NodeConfigHelper(node_name, config)
hash_store = initHashStore(config_helper.ledger_dir, "pool", config, read_only=True)
ledger = Ledger(CompactMerkleTree(hashStore=hash_store), dataDir=config_helper.ledger_dir,
fileName=config.poolTransactionsFile, read_only=True)
nodeReg, _, _ = TxnStackManager.parseLedgerForHaAndKeys(ledger)
    if nodeReg is None:
        logger.error("Empty node registry returned by stack manager")
        return False
    if node_name not in nodeReg:
        logger.error("Node registry does not contain node {}".format(node_name))
        return False
    ha = nodeReg[node_name]
    if ha is None:
logger.error("Empty HA for node {}".format(node_name))
return False
logger.info("HA for {}: {}".format(node_name, ha))
try:
append_ips_to_env(ha.host)
except Exception:
        logger.error(traceback.format_exc())
logger.error("Could not append node and client IPs to indy env file")
return False
return True
if migrate_all():
logger.info("Migration complete: node and client IPs have been added to indy env file")
else:
logger.error("Migration failed: node and client IPs have not been added to indy env file")
sys.exit(1)
|
<commit_before><commit_msg>Add migration script that appends node and client IPs to indy env file.
Signed-off-by: Sergey Shilov <064bbdfaeb89a0bebebfe7b388747a73c8941704@dsr-company.com><commit_after>
|
#!/usr/bin/python3.5
import os
import sys
import traceback
from indy_common.config_util import getConfig
from indy_common.config_helper import NodeConfigHelper
from ledger.compact_merkle_tree import CompactMerkleTree
from plenum.common.stack_manager import TxnStackManager
from plenum.common.ledger import Ledger
from stp_core.common.log import getlogger
from storage.helper import initHashStore
logger = getlogger()
ENV_FILE_PATH = "/etc/indy/indy.env"
def get_node_name():
node_name = None
node_name_key = 'NODE_NAME'
if os.path.exists(ENV_FILE_PATH):
with open(ENV_FILE_PATH, "r") as fenv:
for line in fenv.readlines():
if line.find(node_name_key) != -1:
node_name = line.split('=')[1].strip()
break
else:
logger.error("Path to env file does not exist")
return node_name
def append_ips_to_env(node_ip):
node_ip_key = 'NODE_IP'
client_ip_key = 'CLIENT_IP'
with open(ENV_FILE_PATH, "a") as fenv:
fenv.write("\n{}={}\n".format(node_ip_key, node_ip))
fenv.write("{}={}\n".format(client_ip_key, "0.0.0.0"))
def migrate_all():
node_name = get_node_name()
if node_name is None:
logger.error("Could not get node name")
return False
config = getConfig()
config_helper = NodeConfigHelper(node_name, config)
hash_store = initHashStore(config_helper.ledger_dir, "pool", config, read_only=True)
ledger = Ledger(CompactMerkleTree(hashStore=hash_store), dataDir=config_helper.ledger_dir,
fileName=config.poolTransactionsFile, read_only=True)
nodeReg, _, _ = TxnStackManager.parseLedgerForHaAndKeys(ledger)
    if nodeReg is None:
        logger.error("Empty node registry returned by stack manager")
        return False
    if node_name not in nodeReg:
        logger.error("Node registry does not contain node {}".format(node_name))
        return False
    ha = nodeReg[node_name]
    if ha is None:
logger.error("Empty HA for node {}".format(node_name))
return False
logger.info("HA for {}: {}".format(node_name, ha))
try:
append_ips_to_env(ha.host)
except Exception:
        logger.error(traceback.format_exc())
logger.error("Could not append node and client IPs to indy env file")
return False
return True
if migrate_all():
logger.info("Migration complete: node and client IPs have been added to indy env file")
else:
logger.error("Migration failed: node and client IPs have not been added to indy env file")
sys.exit(1)
|
Add migration script that appends node and client IPs to indy env file.
Signed-off-by: Sergey Shilov <064bbdfaeb89a0bebebfe7b388747a73c8941704@dsr-company.com>#!/usr/bin/python3.5
import os
import sys
import traceback
from indy_common.config_util import getConfig
from indy_common.config_helper import NodeConfigHelper
from ledger.compact_merkle_tree import CompactMerkleTree
from plenum.common.stack_manager import TxnStackManager
from plenum.common.ledger import Ledger
from stp_core.common.log import getlogger
from storage.helper import initHashStore
logger = getlogger()
ENV_FILE_PATH = "/etc/indy/indy.env"
def get_node_name():
node_name = None
node_name_key = 'NODE_NAME'
if os.path.exists(ENV_FILE_PATH):
with open(ENV_FILE_PATH, "r") as fenv:
for line in fenv.readlines():
if line.find(node_name_key) != -1:
node_name = line.split('=')[1].strip()
break
else:
logger.error("Path to env file does not exist")
return node_name
def append_ips_to_env(node_ip):
node_ip_key = 'NODE_IP'
client_ip_key = 'CLIENT_IP'
with open(ENV_FILE_PATH, "a") as fenv:
fenv.write("\n{}={}\n".format(node_ip_key, node_ip))
fenv.write("{}={}\n".format(client_ip_key, "0.0.0.0"))
def migrate_all():
node_name = get_node_name()
if node_name is None:
logger.error("Could not get node name")
return False
config = getConfig()
config_helper = NodeConfigHelper(node_name, config)
hash_store = initHashStore(config_helper.ledger_dir, "pool", config, read_only=True)
ledger = Ledger(CompactMerkleTree(hashStore=hash_store), dataDir=config_helper.ledger_dir,
fileName=config.poolTransactionsFile, read_only=True)
nodeReg, _, _ = TxnStackManager.parseLedgerForHaAndKeys(ledger)
    if nodeReg is None:
        logger.error("Empty node registry returned by stack manager")
        return False
    if node_name not in nodeReg:
        logger.error("Node registry does not contain node {}".format(node_name))
        return False
    ha = nodeReg[node_name]
    if ha is None:
logger.error("Empty HA for node {}".format(node_name))
return False
logger.info("HA for {}: {}".format(node_name, ha))
try:
append_ips_to_env(ha.host)
except Exception:
        logger.error(traceback.format_exc())
logger.error("Could not append node and client IPs to indy env file")
return False
return True
if migrate_all():
logger.info("Migration complete: node and client IPs have been added to indy env file")
else:
logger.error("Migration failed: node and client IPs have not been added to indy env file")
sys.exit(1)
|
<commit_before><commit_msg>Add migration script that appends node and client IPs to indy env file.
Signed-off-by: Sergey Shilov <064bbdfaeb89a0bebebfe7b388747a73c8941704@dsr-company.com><commit_after>#!/usr/bin/python3.5
import os
import sys
import traceback
from indy_common.config_util import getConfig
from indy_common.config_helper import NodeConfigHelper
from ledger.compact_merkle_tree import CompactMerkleTree
from plenum.common.stack_manager import TxnStackManager
from plenum.common.ledger import Ledger
from stp_core.common.log import getlogger
from storage.helper import initHashStore
logger = getlogger()
ENV_FILE_PATH = "/etc/indy/indy.env"
def get_node_name():
node_name = None
node_name_key = 'NODE_NAME'
if os.path.exists(ENV_FILE_PATH):
with open(ENV_FILE_PATH, "r") as fenv:
for line in fenv.readlines():
if line.find(node_name_key) != -1:
node_name = line.split('=')[1].strip()
break
else:
logger.error("Path to env file does not exist")
return node_name
def append_ips_to_env(node_ip):
node_ip_key = 'NODE_IP'
client_ip_key = 'CLIENT_IP'
with open(ENV_FILE_PATH, "a") as fenv:
fenv.write("\n{}={}\n".format(node_ip_key, node_ip))
fenv.write("{}={}\n".format(client_ip_key, "0.0.0.0"))
def migrate_all():
node_name = get_node_name()
if node_name is None:
logger.error("Could not get node name")
return False
config = getConfig()
config_helper = NodeConfigHelper(node_name, config)
hash_store = initHashStore(config_helper.ledger_dir, "pool", config, read_only=True)
ledger = Ledger(CompactMerkleTree(hashStore=hash_store), dataDir=config_helper.ledger_dir,
fileName=config.poolTransactionsFile, read_only=True)
nodeReg, _, _ = TxnStackManager.parseLedgerForHaAndKeys(ledger)
    if nodeReg is None:
        logger.error("Empty node registry returned by stack manager")
        return False
    if node_name not in nodeReg:
        logger.error("Node registry does not contain node {}".format(node_name))
        return False
    ha = nodeReg[node_name]
    if ha is None:
logger.error("Empty HA for node {}".format(node_name))
return False
logger.info("HA for {}: {}".format(node_name, ha))
try:
append_ips_to_env(ha.host)
except Exception:
        logger.error(traceback.format_exc())
logger.error("Could not append node and client IPs to indy env file")
return False
return True
if migrate_all():
logger.info("Migration complete: node and client IPs have been added to indy env file")
else:
logger.error("Migration failed: node and client IPs have not been added to indy env file")
sys.exit(1)
|
|
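The env-file handling is plain key=value line parsing; here is a self-contained round trip of the same scheme against a temporary file instead of /etc/indy/indy.env, so it can be run anywhere:

import os
import tempfile

def get_value(path, key):
    # Same lookup scheme as get_node_name: first line containing the key,
    # value taken from the right-hand side of '='.
    with open(path) as fenv:
        for line in fenv:
            if line.find(key) != -1:
                return line.split('=')[1].strip()
    return None

path = os.path.join(tempfile.mkdtemp(), 'indy.env')
with open(path, 'w') as fenv:
    fenv.write('NODE_NAME=Node1\n')
with open(path, 'a') as fenv:  # mimics append_ips_to_env
    fenv.write('\nNODE_IP=10.0.0.2\nCLIENT_IP=0.0.0.0\n')

print(get_value(path, 'NODE_NAME'), get_value(path, 'NODE_IP'))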
05e07c6f2dd5b334e917696ff13f57a426272014
|
parsing/command_parsing.py
|
parsing/command_parsing.py
|
'''
In an app map, we sometimes need to describe how to start and close one UI element,
so we create a simple command language to satisfy this.
The language looks like:
'elementX.elementY.operation [parameter1 parameter2 ...]'
where a parameter can be a number, string, bool, list, or tuple.
'''
import ply.lex as lex
import ply.yacc as yacc
import AXUI.logger as AXUI_logger
LOGGER = AXUI_logger.get_logger()
##################################
#lexical analysis
##################################
tokens = ("PERIOD", "TERM", "NUMBER", "STRING", "BOOL", "LIST", "TUPLE")
#ignore characters
t_ignore = ' \t\x0c'
#newline
def t_NEWLINE(t):
r'\n+'
t.lexer.lineno += t.value.count("\n")
t_PERIOD = r"\."
def t_TERM(t):
r'[\w_]'
return t
def t_NUMBER(t):
r'\d+'
return t
def t_STRING(t):
'(\"(.)*?\")|(\'(.)*?\')'
return t
def t_LIST(t):
|
Add parsing for describing UIElement start/stop
|
Add parsing for describing UIElement start/stop
Signed-off-by: xcgspring <8f4f8d15922e4269158d45cde01dc3497961f40d@126.com>
|
Python
|
apache-2.0
|
xcgspring/AXUI,xcgspring/AXUI,xcgspring/AXUI
|
Add parsing for describing UIElement start/stop
Signed-off-by: xcgspring <8f4f8d15922e4269158d45cde01dc3497961f40d@126.com>
|
'''
In an app map, we sometimes need to describe how to start and close one UI element,
so we create a simple command language to satisfy this.
The language looks like:
'elementX.elementY.operation [parameter1 parameter2 ...]'
where a parameter can be a number, string, bool, list, or tuple.
'''
import ply.lex as lex
import ply.yacc as yacc
import AXUI.logger as AXUI_logger
LOGGER = AXUI_logger.get_logger()
##################################
#lexical analysis
##################################
tokens = ("PERIOD", "TERM", "NUMBER", "STRING", "BOOL", "LIST", "TUPLE")
#ignore characters
t_ignore = ' \t\x0c'
#newline
def t_NEWLINE(t):
r'\n+'
t.lexer.lineno += t.value.count("\n")
t_PERIOD = r"\."
def t_TERM(t):
r'[\w_]'
return t
def t_NUMBER(t):
r'\d+'
return t
def t_STRING(t):
'(\"(.)*?\")|(\'(.)*?\')'
return t
def t_LIST(t):
|
<commit_before><commit_msg>Add parsing for describing UIElement start/stop
Signed-off-by: xcgspring <8f4f8d15922e4269158d45cde01dc3497961f40d@126.com><commit_after>
|
'''
In an app map, we sometimes need to describe how to start and close one UI element,
so we create a simple command language to satisfy this.
The language looks like:
'elementX.elementY.operation [parameter1 parameter2 ...]'
where a parameter can be a number, string, bool, list, or tuple.
'''
import ply.lex as lex
import ply.yacc as yacc
import AXUI.logger as AXUI_logger
LOGGER = AXUI_logger.get_logger()
##################################
#lexical analysis
##################################
tokens = ("PERIOD", "TERM", "NUMBER", "STRING", "BOOL", "LIST", "TUPLE")
#ignore characters
t_ignore = ' \t\x0c'
#newline
def t_NEWLINE(t):
r'\n+'
t.lexer.lineno += t.value.count("\n")
t_PERIOD = r"\."
def t_TERM(t):
r'[\w_]'
return t
def t_NUMBER(t):
r'\d+'
return t
def t_STRING(t):
'(\"(.)*?\")|(\'(.)*?\')'
return t
def t_LIST(t):
|
Add parsing for describing UIElement start/stop
Signed-off-by: xcgspring <8f4f8d15922e4269158d45cde01dc3497961f40d@126.com>'''
In an app map, we sometimes need to describe how to start and close one UI element,
so we create a simple command language to satisfy this.
The language looks like:
'elementX.elementY.operation [parameter1 parameter2 ...]'
where a parameter can be a number, string, bool, list, or tuple.
'''
import ply.lex as lex
import ply.yacc as yacc
import AXUI.logger as AXUI_logger
LOGGER = AXUI_logger.get_logger()
##################################
#lexical analysis
##################################
tokens = ("PERIOD", "TERM", "NUMBER", "STRING", "BOOL", "LIST", "TUPLE")
#ignore characters
t_ignore = ' \t\x0c'
#newline
def t_NEWLINE(t):
r'\n+'
t.lexer.lineno += t.value.count("\n")
t_PERIOD = r"\."
def t_TERM(t):
r'[\w_]'
return t
def t_NUMBER(t):
r'\d+'
return t
def t_STRING(t):
'(\"(.)*?\")|(\'(.)*?\')'
return t
def t_LIST(t):
|
<commit_before><commit_msg>Add parsing for describing UIElement start/stop
Signed-off-by: xcgspring <8f4f8d15922e4269158d45cde01dc3497961f40d@126.com><commit_after>'''
In an app map, we sometimes need to describe how to start and close one UI element,
so we create a simple command language to satisfy this.
The language looks like:
'elementX.elementY.operation [parameter1 parameter2 ...]'
where a parameter can be a number, string, bool, list, or tuple.
'''
import ply.lex as lex
import ply.yacc as yacc
import AXUI.logger as AXUI_logger
LOGGER = AXUI_logger.get_logger()
##################################
#lexical analysis
##################################
tokens = ("PERIOD", "TERM", "NUMBER", "STRING", "BOOL", "LIST", "TUPLE")
#ignore characters
t_ignore = ' \t\x0c'
#newline
def t_NEWLINE(t):
r'\n+'
t.lexer.lineno += t.value.count("\n")
t_PERIOD = r"\."
def t_TERM(t):
r'[\w_]'
return t
def t_NUMBER(t):
r'\d+'
return t
def t_STRING(t):
'(\"(.)*?\")|(\'(.)*?\')'
return t
def t_LIST(t):
|
|
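Since the snippet cuts off inside t_LIST, the remaining rules are unknown; the sketch below is a reduced, assumption-laden version that drops BOOL/LIST/TUPLE and widens the one-character TERM rule to a full identifier, just to show the 'element.element.operation arg ...' shape tokenizing end to end:

import ply.lex as lex

tokens = ("PERIOD", "TERM", "NUMBER", "STRING")

t_ignore = ' \t'
t_PERIOD = r'\.'

def t_NUMBER(t):
    r'\d+'
    return t

def t_TERM(t):
    r'[A-Za-z_]\w*'
    return t

def t_STRING(t):
    r'(\"[^\"]*\")|(\'[^\']*\')'
    return t

def t_error(t):
    # Skip unexpected characters instead of aborting.
    t.lexer.skip(1)

lexer = lex.lex()
lexer.input("elementX.elementY.click 3 'some text'")
for tok in lexer:
    print(tok.type, tok.value)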
89612a126b7df58022554df439bef2988e504643
|
py/delete-node-in-a-bst.py
|
py/delete-node-in-a-bst.py
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def deleteNode(self, root, key):
"""
:type root: TreeNode
:type key: int
:rtype: TreeNode
"""
def remove(node, parent):
ret = root
if node.left is None and node.right is None:
if parent is None:
ret = None
else:
if parent.val > node.val:
parent.left = None
else:
parent.right = None
elif node.left is not None and node.right is not None:
p, to_remove = node, node.right
while to_remove.left is not None:
p, to_remove = to_remove, to_remove.left
node.val = to_remove.val
ret = remove(to_remove, p)
else:
if node.left is not None:
up = node.left
else:
up = node.right
if parent is None:
ret = up
else:
if parent.val > node.val:
parent.left = up
else:
parent.right = up
return ret
def dfs(cur, parent):
if cur:
if cur.val == key:
return remove(cur, parent)
elif cur.val < key:
return dfs(cur.right, cur)
else:
return dfs(cur.left, cur)
return None
ret = dfs(root, None)
if root is None or (ret is None and root.val == key):
return None
else:
return ret or root
|
Add py solution for 450. Delete Node in a BST
|
Add py solution for 450. Delete Node in a BST
450. Delete Node in a BST: https://leetcode.com/problems/delete-node-in-a-bst/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 450. Delete Node in a BST
450. Delete Node in a BST: https://leetcode.com/problems/delete-node-in-a-bst/
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def deleteNode(self, root, key):
"""
:type root: TreeNode
:type key: int
:rtype: TreeNode
"""
def remove(node, parent):
ret = root
if node.left is None and node.right is None:
if parent is None:
ret = None
else:
if parent.val > node.val:
parent.left = None
else:
parent.right = None
elif node.left is not None and node.right is not None:
p, to_remove = node, node.right
while to_remove.left is not None:
p, to_remove = to_remove, to_remove.left
node.val = to_remove.val
ret = remove(to_remove, p)
else:
if node.left is not None:
up = node.left
else:
up = node.right
if parent is None:
ret = up
else:
if parent.val > node.val:
parent.left = up
else:
parent.right = up
return ret
def dfs(cur, parent):
if cur:
if cur.val == key:
return remove(cur, parent)
elif cur.val < key:
return dfs(cur.right, cur)
else:
return dfs(cur.left, cur)
return None
ret = dfs(root, None)
if root is None or (ret is None and root.val == key):
return None
else:
return ret or root
|
<commit_before><commit_msg>Add py solution for 450. Delete Node in a BST
450. Delete Node in a BST: https://leetcode.com/problems/delete-node-in-a-bst/<commit_after>
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def deleteNode(self, root, key):
"""
:type root: TreeNode
:type key: int
:rtype: TreeNode
"""
def remove(node, parent):
ret = root
if node.left is None and node.right is None:
if parent is None:
ret = None
else:
if parent.val > node.val:
parent.left = None
else:
parent.right = None
elif node.left is not None and node.right is not None:
p, to_remove = node, node.right
while to_remove.left is not None:
p, to_remove = to_remove, to_remove.left
node.val = to_remove.val
ret = remove(to_remove, p)
else:
if node.left is not None:
up = node.left
else:
up = node.right
if parent is None:
ret = up
else:
if parent.val > node.val:
parent.left = up
else:
parent.right = up
return ret
def dfs(cur, parent):
if cur:
if cur.val == key:
return remove(cur, parent)
elif cur.val < key:
return dfs(cur.right, cur)
else:
return dfs(cur.left, cur)
return None
ret = dfs(root, None)
if root is None or (ret is None and root.val == key):
return None
else:
return ret or root
|
Add py solution for 450. Delete Node in a BST
450. Delete Node in a BST: https://leetcode.com/problems/delete-node-in-a-bst/# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def deleteNode(self, root, key):
"""
:type root: TreeNode
:type key: int
:rtype: TreeNode
"""
def remove(node, parent):
ret = root
if node.left is None and node.right is None:
if parent is None:
ret = None
else:
if parent.val > node.val:
parent.left = None
else:
parent.right = None
elif node.left is not None and node.right is not None:
p, to_remove = node, node.right
while to_remove.left is not None:
p, to_remove = to_remove, to_remove.left
node.val = to_remove.val
ret = remove(to_remove, p)
else:
if node.left is not None:
up = node.left
else:
up = node.right
if parent is None:
ret = up
else:
if parent.val > node.val:
parent.left = up
else:
parent.right = up
return ret
def dfs(cur, parent):
if cur:
if cur.val == key:
return remove(cur, parent)
elif cur.val < key:
return dfs(cur.right, cur)
else:
return dfs(cur.left, cur)
return None
ret = dfs(root, None)
if root is None or (ret is None and root.val == key):
return None
else:
return ret or root
|
<commit_before><commit_msg>Add py solution for 450. Delete Node in a BST
450. Delete Node in a BST: https://leetcode.com/problems/delete-node-in-a-bst/<commit_after># Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def deleteNode(self, root, key):
"""
:type root: TreeNode
:type key: int
:rtype: TreeNode
"""
def remove(node, parent):
ret = root
if node.left is None and node.right is None:
if parent is None:
ret = None
else:
if parent.val > node.val:
parent.left = None
else:
parent.right = None
elif node.left is not None and node.right is not None:
p, to_remove = node, node.right
while to_remove.left is not None:
p, to_remove = to_remove, to_remove.left
node.val = to_remove.val
ret = remove(to_remove, p)
else:
if node.left is not None:
up = node.left
else:
up = node.right
if parent is None:
ret = up
else:
if parent.val > node.val:
parent.left = up
else:
parent.right = up
return ret
def dfs(cur, parent):
if cur:
if cur.val == key:
return remove(cur, parent)
elif cur.val < key:
return dfs(cur.right, cur)
else:
return dfs(cur.left, cur)
return None
ret = dfs(root, None)
if root is None or (ret is None and root.val == key):
return None
else:
return ret or root
|
|
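With the Solution class above in scope, an in-order traversal makes a quick sanity check; TreeNode is reproduced from the commented-out judge definition so the sketch runs standalone, and insert is only a fixture builder:

class TreeNode(object):
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

def insert(node, val):
    # Plain BST insert, used only to build the test tree.
    if node is None:
        return TreeNode(val)
    if val < node.val:
        node.left = insert(node.left, val)
    else:
        node.right = insert(node.right, val)
    return node

def inorder(node):
    return inorder(node.left) + [node.val] + inorder(node.right) if node else []

root = None
for v in [5, 3, 6, 2, 4, 7]:
    root = insert(root, v)
root = Solution().deleteNode(root, 3)
print(inorder(root))  # expected: [2, 4, 5, 6, 7]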
78a32a311d5578cb1bdf8e7d0c7f0ab567b6b986
|
actions/cloudbolt_plugins/prep_ctl_server_for_remote_scripts/prep_ctl_for_remote_scripts.py
|
actions/cloudbolt_plugins/prep_ctl_server_for_remote_scripts/prep_ctl_for_remote_scripts.py
|
#!/usr/bin/env python
"""
Used to configure some prerequisites that a CenturyLink VM needs before we can
successfully run remote scripts on it. Namely, captures its credentials and
stores them on the CB server object and adds a public IP address so it's
accessible.
Meant to be run as an orchestration action during prov on CTL servers only. Must
be run before any remote script. Enables a CIT test for remote scripts on CTL.
"""
if __name__ == '__main__':
import django
django.setup()
import requests
from common.methods import set_progress
def run(job, logger=None, **kwargs):
# Get server (there's only ever one per prov job)
server = job.server_set.first()
if not server:
return "FAILURE", "", "No server to prep!"
rh = server.resource_handler
rh = rh.cast()
wrapper = rh.get_api_wrapper()
# 1st prereq is getting and storing username & password
set_progress("Pulling credentials from CTL for server {}".format(server.hostname))
url = "{}servers/{}/{}/credentials".format(wrapper.BASE_URL,
wrapper.account_alias,
server.ctlserverinfo.ctl_server_id)
response = requests.get(url, headers=wrapper.headers,
proxies=wrapper.proxies, verify=False)
creds = response.json()
server.username = creds.get('userName')
server.password = creds.get('password')
# 2nd prereq is adding a public IP in CTL
set_progress("Adding public IP to server {} associated with private "
"IP {}. This may take a while.".format(
server.hostname, server.ip))
url = "servers/{}/{}/publicIPAddresses".format(wrapper.account_alias,
server.ctlserverinfo.ctl_server_id)
payload = {
"internalIPAddress": server.ip,
"ports": [
{
"protocol": "TCP",
"port": "22"
}
]
}
response = wrapper.request_and_wait(url, method='post', json=payload)
server.refresh_info()
return "SUCCESS", "", ""
if __name__ == '__main__':
from utilities.logger import ThreadLogger
logger = ThreadLogger(__name__)
print run(None, logger)
|
Add example CTL Prep Plug-in
|
Add example CTL Prep Plug-in
Can be referenced or used to save CTL credentials in CB and add a public
IP.
|
Python
|
apache-2.0
|
CloudBoltSoftware/cloudbolt-forge,CloudBoltSoftware/cloudbolt-forge,CloudBoltSoftware/cloudbolt-forge,CloudBoltSoftware/cloudbolt-forge
|
Add example CTL Prep Plug-in
Can be referenced or used to save CTL credentials in CB and add a public
IP.
|
#!/usr/bin/env python
"""
Used to configure some prerequisites that a CenturyLink VM needs before we can
successfully run remote scripts on it. Namely, captures its credentials and
stores them on the CB server object and adds a public IP address so it's
accessible.
Meant to be run as an orchestration action during prov on CTL servers only. Must
be run before any remote script. Enables a CIT test for remote scripts on CTL.
"""
if __name__ == '__main__':
import django
django.setup()
import requests
from common.methods import set_progress
def run(job, logger=None, **kwargs):
# Get server (there's only ever one per prov job)
server = job.server_set.first()
if not server:
return "FAILURE", "", "No server to prep!"
rh = server.resource_handler
rh = rh.cast()
wrapper = rh.get_api_wrapper()
# 1st prereq is getting and storing username & password
set_progress("Pulling credentials from CTL for server {}".format(server.hostname))
url = "{}servers/{}/{}/credentials".format(wrapper.BASE_URL,
wrapper.account_alias,
server.ctlserverinfo.ctl_server_id)
response = requests.get(url, headers=wrapper.headers,
proxies=wrapper.proxies, verify=False)
creds = response.json()
server.username = creds.get('userName')
server.password = creds.get('password')
# 2nd prereq is adding a public IP in CTL
set_progress("Adding public IP to server {} associated with private "
"IP {}. This may take a while.".format(
server.hostname, server.ip))
url = "servers/{}/{}/publicIPAddresses".format(wrapper.account_alias,
server.ctlserverinfo.ctl_server_id)
payload = {
"internalIPAddress": server.ip,
"ports": [
{
"protocol": "TCP",
"port": "22"
}
]
}
response = wrapper.request_and_wait(url, method='post', json=payload)
server.refresh_info()
return "SUCCESS", "", ""
if __name__ == '__main__':
from utilities.logger import ThreadLogger
logger = ThreadLogger(__name__)
print run(None, logger)
|
<commit_before><commit_msg>Add example CTL Prep Plug-in
Can be referenced or used to save CTL credentials in CB and add a public
IP.<commit_after>
|
#!/usr/bin/env python
"""
Used to configure some prerequisites that a CenturyLink VM needs before we can
successfully run remote scripts on it. Namely, captures its credentials and
stores them on the CB server object and adds a public IP address so it's
accessible.
Meant to be run as an orchestration action during prov on CTL servers only. Must
be run before any remote script. Enables a CIT test for remote scripts on CTL.
"""
if __name__ == '__main__':
import django
django.setup()
import requests
from common.methods import set_progress
def run(job, logger=None, **kwargs):
# Get server (there's only ever one per prov job)
server = job.server_set.first()
if not server:
return "FAILURE", "", "No server to prep!"
rh = server.resource_handler
rh = rh.cast()
wrapper = rh.get_api_wrapper()
# 1st prereq is getting and storing username & password
set_progress("Pulling credentials from CTL for server {}".format(server.hostname))
url = "{}servers/{}/{}/credentials".format(wrapper.BASE_URL,
wrapper.account_alias,
server.ctlserverinfo.ctl_server_id)
response = requests.get(url, headers=wrapper.headers,
proxies=wrapper.proxies, verify=False)
creds = response.json()
server.username = creds.get('userName')
server.password = creds.get('password')
# 2nd prereq is adding a public IP in CTL
set_progress("Adding public IP to server {} associated with private "
"IP {}. This may take a while.".format(
server.hostname, server.ip))
url = "servers/{}/{}/publicIPAddresses".format(wrapper.account_alias,
server.ctlserverinfo.ctl_server_id)
payload = {
"internalIPAddress": server.ip,
"ports": [
{
"protocol": "TCP",
"port": "22"
}
]
}
response = wrapper.request_and_wait(url, method='post', json=payload)
server.refresh_info()
return "SUCCESS", "", ""
if __name__ == '__main__':
from utilities.logger import ThreadLogger
logger = ThreadLogger(__name__)
print run(None, logger)
|
Add example CTL Prep Plug-in
Can be referenced or used to save CTL credentials in CB and add a public
IP.#!/usr/bin/env python
"""
Used to configure some prerequisites that a CenturyLink VM needs before we can
successfully run remote scripts on it. Namely, captures its credentials and
stores them on the CB server object and adds a public IP address so it's
accessible.
Meant to be run as an orchestration action during prov on CTL servers only. Must
be run before any remote script. Enables a CIT test for remote scripts on CTL.
"""
if __name__ == '__main__':
import django
django.setup()
import requests
from common.methods import set_progress
def run(job, logger=None, **kwargs):
# Get server (there's only ever one per prov job)
server = job.server_set.first()
if not server:
return "FAILURE", "", "No server to prep!"
rh = server.resource_handler
rh = rh.cast()
wrapper = rh.get_api_wrapper()
# 1st prereq is getting and storing username & password
set_progress("Pulling credentials from CTL for server {}".format(server.hostname))
url = "{}servers/{}/{}/credentials".format(wrapper.BASE_URL,
wrapper.account_alias,
server.ctlserverinfo.ctl_server_id)
response = requests.get(url, headers=wrapper.headers,
proxies=wrapper.proxies, verify=False)
creds = response.json()
server.username = creds.get('userName')
server.password = creds.get('password')
# 2nd prereq is adding a public IP in CTL
set_progress("Adding public IP to server {} associated with private "
"IP {}. This may take a while.".format(
server.hostname, server.ip))
url = "servers/{}/{}/publicIPAddresses".format(wrapper.account_alias,
server.ctlserverinfo.ctl_server_id)
payload = {
"internalIPAddress": server.ip,
"ports": [
{
"protocol": "TCP",
"port": "22"
}
]
}
response = wrapper.request_and_wait(url, method='post', json=payload)
server.refresh_info()
return "SUCCESS", "", ""
if __name__ == '__main__':
from utilities.logger import ThreadLogger
logger = ThreadLogger(__name__)
print run(None, logger)
|
<commit_before><commit_msg>Add example CTL Prep Plug-in
Can be referenced or used to save CTL credentials in CB and add a public
IP.<commit_after>#!/usr/bin/env python
"""
Used to configure some prerequisites that a CenturyLink VM needs before we can
successfully run remote scripts on it. Namely, captures its credentials and
stores them on the CB server object and adds a public IP address so it's
accessible.
Meant to be run as an orchestration action during prov on CTL servers only. Must
be run before any remote script. Enables a CIT test for remote scripts on CTL.
"""
if __name__ == '__main__':
import django
django.setup()
import requests
from common.methods import set_progress
def run(job, logger=None, **kwargs):
# Get server (there's only ever one per prov job)
server = job.server_set.first()
if not server:
return "FAILURE", "", "No server to prep!"
rh = server.resource_handler
rh = rh.cast()
wrapper = rh.get_api_wrapper()
# 1st prereq is getting and storing username & password
set_progress("Pulling credentials from CTL for server {}".format(server.hostname))
url = "{}servers/{}/{}/credentials".format(wrapper.BASE_URL,
wrapper.account_alias,
server.ctlserverinfo.ctl_server_id)
response = requests.get(url, headers=wrapper.headers,
proxies=wrapper.proxies, verify=False)
creds = response.json()
server.username = creds.get('userName')
server.password = creds.get('password')
# 2nd prereq is adding a public IP in CTL
set_progress("Adding public IP to server {} associated with private "
"IP {}. This may take a while.".format(
server.hostname, server.ip))
url = "servers/{}/{}/publicIPAddresses".format(wrapper.account_alias,
server.ctlserverinfo.ctl_server_id)
payload = {
"internalIPAddress": server.ip,
"ports": [
{
"protocol": "TCP",
"port": "22"
}
]
}
response = wrapper.request_and_wait(url, method='post', json=payload)
server.refresh_info()
return "SUCCESS", "", ""
if __name__ == '__main__':
from utilities.logger import ThreadLogger
logger = ThreadLogger(__name__)
print run(None, logger)
|
|
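The publicIPAddresses request body is the only structured piece of the plug-in; this sketch builds and prints it for a hypothetical private IP without touching the CTL API (the helper name and its defaults are illustrative, not part of the CloudBolt or CTL API):

import json

def public_ip_payload(internal_ip, ports=(22,)):
    # Matches the request body shape used in the plug-in above.
    return {
        "internalIPAddress": internal_ip,
        "ports": [{"protocol": "TCP", "port": str(p)} for p in ports],
    }

print(json.dumps(public_ip_payload("10.128.0.5"), indent=2))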
86cede2c228e2e6bccb4adbdfe81d9d4bd34ac6f
|
teams/blog_fetch.py
|
teams/blog_fetch.py
|
"""Fetches the blogs configured on the website to local files.
The local files dumped to are pickled 'BLOGNAME.incoming' files in the
assets/blogs directory.
Should be run as a regular cron-job. Must be run from within the
website virtual environment.
---
Copyright (c) 2013, University Radio York.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import pyramid
import feedparser
import pickle
import lass.common.config
if __name__ == '__main__':
blog_config = lass.common.config.from_yaml('sitewide/blogs')
for name, config in blog_config.items():
asset = 'assets:blogs/{}.incoming'.format(name)
full_path = pyramid.path.AssetResolver().resolve(asset).abspath()
feed = feedparser.parse(config['feed'])
with open(full_path, 'wb+') as stream:
pickle.dump(feed, stream)
|
Move the Python end of blog-fetch to LASS.
|
Move the Python end of blog-fetch to LASS.
|
Python
|
bsd-2-clause
|
UniversityRadioYork/lass-pyramid
|
Move the Python end of blog-fetch to LASS.
|
"""Fetches the blogs configured on the website to local files.
The local files dumped to are 'BLOGNAME.incoming' (pickled feed data) in the
assets/blogs directory.
Should be run as a regular cron-job. Must be run from within the
website virtual environment.
---
Copyright (c) 2013, University Radio York.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import pyramid
import feedparser
import pickle
import lass.common.config
if __name__ == '__main__':
blog_config = lass.common.config.from_yaml('sitewide/blogs')
for name, config in blog_config.items():
asset = 'assets:blogs/{}.incoming'.format(name)
full_path = pyramid.path.AssetResolver().resolve(asset).abspath()
feed = feedparser.parse(config['feed'])
with open(full_path, 'wb+') as stream:
pickle.dump(feed, stream)
|
<commit_before><commit_msg>Move the Python end of blog-fetch to LASS.<commit_after>
|
"""Fetches the blogs configured on the website to local files.
The local files dumped to are 'BLOGNAME.incoming' (pickled feed data) in the
assets/blogs directory.
Should be run as a regular cron-job. Must be run from within the
website virtual environment.
---
Copyright (c) 2013, University Radio York.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import pyramid
import feedparser
import pickle
import lass.common.config
if __name__ == '__main__':
blog_config = lass.common.config.from_yaml('sitewide/blogs')
for name, config in blog_config.items():
asset = 'assets:blogs/{}.incoming'.format(name)
full_path = pyramid.path.AssetResolver().resolve(asset).abspath()
feed = feedparser.parse(config['feed'])
with open(full_path, 'wb+') as stream:
pickle.dump(feed, stream)
|
Move the Python end of blog-fetch to LASS."""Fetches the blogs configured on the website to local files.
The local files dumped to are 'BLOGNAME.incoming' (pickled feed data) in the
assets/blogs directory.
Should be run as a regular cron-job. Must be run from within the
website virtual environment.
---
Copyright (c) 2013, University Radio York.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import pyramid
import feedparser
import pickle
import lass.common.config
if __name__ == '__main__':
blog_config = lass.common.config.from_yaml('sitewide/blogs')
for name, config in blog_config.items():
asset = 'assets:blogs/{}.incoming'.format(name)
full_path = pyramid.path.AssetResolver().resolve(asset).abspath()
feed = feedparser.parse(config['feed'])
with open(full_path, 'wb+') as stream:
pickle.dump(feed, stream)
|
<commit_before><commit_msg>Move the Python end of blog-fetch to LASS.<commit_after>"""Fetches the blogs configured on the website to local files.
The local files dumped to are 'BLOGNAME.incoming' (pickled feed data) in the
assets/blogs directory.
Should be run as a regular cron-job. Must be run from within the
website virtual environment.
---
Copyright (c) 2013, University Radio York.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import pyramid
import feedparser
import pickle
import lass.common.config
if __name__ == '__main__':
blog_config = lass.common.config.from_yaml('sitewide/blogs')
for name, config in blog_config.items():
asset = 'assets:blogs/{}.incoming'.format(name)
full_path = pyramid.path.AssetResolver().resolve(asset).abspath()
feed = feedparser.parse(config['feed'])
with open(full_path, 'wb+') as stream:
pickle.dump(feed, stream)
|
|
4cb674093c95ebbe3f7dc61d0b6a262995337100
|
osf/migrations/0012_auto_20170411_1548.py
|
osf/migrations/0012_auto_20170411_1548.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-04-05 17:30
from __future__ import unicode_literals
from django.db import migrations, models
from django.contrib.auth.models import Permission, Group
from django.contrib.contenttypes.models import ContentType
from osf.models import OSFUser
def fix_osfuser_view_permissions(*args):
view_osfuser_permission = Permission.objects.get(codename='view_osfuser')
wrong_osfuser_permission = Permission.objects.get(codename='view_user')
wrong_osfuser_permission.delete()
read_only = Group.objects.get(name='read_only')
osf_admin = Group.objects.get(name='osf_admin')
read_only.permissions.add(view_osfuser_permission)
osf_admin.permissions.add(view_osfuser_permission)
read_only.save()
osf_admin.save()
def revert_osfuser_view_permissions(*args):
ctype = ContentType.objects.get_for_model(OSFUser)
wrong_osfuser_permission = Permission.objects.create(codename='view_user', name='Can view user details', content_type=ctype)
view_osfuser_permission = Permission.objects.get(codename='view_osfuser')
osf_admin = Group.objects.get(name='osf_admin')
read_only = Group.objects.get(name='read_only')
osf_admin.permissions.add(wrong_osfuser_permission)
read_only.permissions.add(wrong_osfuser_permission)
read_only.permissions.remove(view_osfuser_permission)
osf_admin.permissions.remove(view_osfuser_permission)
osf_admin.save()
read_only.save()
class Migration(migrations.Migration):
dependencies = [
('osf', '0011_auto_20170410_1711'),
]
operations = [
migrations.RunPython(fix_osfuser_view_permissions, revert_osfuser_view_permissions),
]
|
Add proper view_osfuser permission to read_only and admin groups
|
Add proper view_osfuser permission to read_only and admin groups
|
Python
|
apache-2.0
|
chennan47/osf.io,adlius/osf.io,leb2dg/osf.io,binoculars/osf.io,mattclark/osf.io,icereval/osf.io,Johnetordoff/osf.io,chrisseto/osf.io,caseyrollins/osf.io,sloria/osf.io,saradbowman/osf.io,brianjgeiger/osf.io,Johnetordoff/osf.io,chennan47/osf.io,leb2dg/osf.io,Johnetordoff/osf.io,Nesiehr/osf.io,mfraezz/osf.io,HalcyonChimera/osf.io,brianjgeiger/osf.io,leb2dg/osf.io,caneruguz/osf.io,hmoco/osf.io,CenterForOpenScience/osf.io,cwisecarver/osf.io,pattisdr/osf.io,Nesiehr/osf.io,aaxelb/osf.io,crcresearch/osf.io,Johnetordoff/osf.io,pattisdr/osf.io,cslzchen/osf.io,laurenrevere/osf.io,caneruguz/osf.io,baylee-d/osf.io,brianjgeiger/osf.io,CenterForOpenScience/osf.io,TomBaxter/osf.io,laurenrevere/osf.io,aaxelb/osf.io,crcresearch/osf.io,erinspace/osf.io,cslzchen/osf.io,felliott/osf.io,binoculars/osf.io,leb2dg/osf.io,adlius/osf.io,cslzchen/osf.io,aaxelb/osf.io,sloria/osf.io,mfraezz/osf.io,binoculars/osf.io,Nesiehr/osf.io,TomBaxter/osf.io,chrisseto/osf.io,Nesiehr/osf.io,adlius/osf.io,CenterForOpenScience/osf.io,adlius/osf.io,cwisecarver/osf.io,HalcyonChimera/osf.io,caseyrollins/osf.io,brianjgeiger/osf.io,erinspace/osf.io,mfraezz/osf.io,chrisseto/osf.io,sloria/osf.io,chrisseto/osf.io,icereval/osf.io,hmoco/osf.io,baylee-d/osf.io,crcresearch/osf.io,aaxelb/osf.io,mattclark/osf.io,HalcyonChimera/osf.io,TomBaxter/osf.io,erinspace/osf.io,hmoco/osf.io,saradbowman/osf.io,mfraezz/osf.io,CenterForOpenScience/osf.io,hmoco/osf.io,chennan47/osf.io,cwisecarver/osf.io,felliott/osf.io,cwisecarver/osf.io,mattclark/osf.io,HalcyonChimera/osf.io,felliott/osf.io,cslzchen/osf.io,caneruguz/osf.io,pattisdr/osf.io,caneruguz/osf.io,caseyrollins/osf.io,laurenrevere/osf.io,baylee-d/osf.io,icereval/osf.io,felliott/osf.io
|
Add proper view_osfuser permission to read_only and admin groups
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-04-05 17:30
from __future__ import unicode_literals
from django.db import migrations, models
from django.contrib.auth.models import Permission, Group
from django.contrib.contenttypes.models import ContentType
from osf.models import OSFUser
def fix_osfuser_view_permissions(*args):
view_osfuser_permission = Permission.objects.get(codename='view_osfuser')
wrong_osfuser_permission = Permission.objects.get(codename='view_user')
wrong_osfuser_permission.delete()
read_only = Group.objects.get(name='read_only')
osf_admin = Group.objects.get(name='osf_admin')
read_only.permissions.add(view_osfuser_permission)
osf_admin.permissions.add(view_osfuser_permission)
read_only.save()
osf_admin.save()
def revert_osfuser_view_permissions(*args):
ctype = ContentType.objects.get_for_model(OSFUser)
wrong_osfuser_permission = Permission.objects.create(codename='view_user', name='Can view user details', content_type=ctype)
view_osfuser_permission = Permission.objects.get(codename='view_osfuser')
osf_admin = Group.objects.get(name='osf_admin')
read_only = Group.objects.get(name='read_only')
osf_admin.permissions.add(wrong_osfuser_permission)
read_only.permissions.add(wrong_osfuser_permission)
read_only.permissions.remove(view_osfuser_permission)
osf_admin.permissions.remove(view_osfuser_permission)
osf_admin.save()
read_only.save()
class Migration(migrations.Migration):
dependencies = [
('osf', '0011_auto_20170410_1711'),
]
operations = [
migrations.RunPython(fix_osfuser_view_permissions, revert_osfuser_view_permissions),
]
|
<commit_before><commit_msg>Add proper view_osfuser permission to read_only and admin groups<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-04-05 17:30
from __future__ import unicode_literals
from django.db import migrations, models
from django.contrib.auth.models import Permission, Group
from django.contrib.contenttypes.models import ContentType
from osf.models import OSFUser
def fix_osfuser_view_permissions(*args):
view_osfuser_permission = Permission.objects.get(codename='view_osfuser')
wrong_osfuser_permission = Permission.objects.get(codename='view_user')
wrong_osfuser_permission.delete()
read_only = Group.objects.get(name='read_only')
osf_admin = Group.objects.get(name='osf_admin')
read_only.permissions.add(view_osfuser_permission)
osf_admin.permissions.add(view_osfuser_permission)
read_only.save()
osf_admin.save()
def revert_osfuser_view_permissions(*args):
ctype = ContentType.objects.get_for_model(OSFUser)
wrong_osfuser_permission = Permission.objects.create(codename='view_user', name='Can view user details', content_type=ctype)
view_osfuser_permission = Permission.objects.get(codename='view_osfuser')
osf_admin = Group.objects.get(name='osf_admin')
read_only = Group.objects.get(name='read_only')
osf_admin.permissions.add(wrong_osfuser_permission)
read_only.permissions.add(wrong_osfuser_permission)
read_only.permissions.remove(view_osfuser_permission)
osf_admin.permissions.remove(view_osfuser_permission)
osf_admin.save()
read_only.save()
class Migration(migrations.Migration):
dependencies = [
('osf', '0011_auto_20170410_1711'),
]
operations = [
migrations.RunPython(fix_osfuser_view_permissions, revert_osfuser_view_permissions),
]
|
Add proper view_osfuser permission to read_only and admin groups# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-04-05 17:30
from __future__ import unicode_literals
from django.db import migrations, models
from django.contrib.auth.models import Permission, Group
from django.contrib.contenttypes.models import ContentType
from osf.models import OSFUser
def fix_osfuser_view_permissions(*args):
view_osfuser_permission = Permission.objects.get(codename='view_osfuser')
wrong_osfuser_permission = Permission.objects.get(codename='view_user')
wrong_osfuser_permission.delete()
read_only = Group.objects.get(name='read_only')
osf_admin = Group.objects.get(name='osf_admin')
read_only.permissions.add(view_osfuser_permission)
osf_admin.permissions.add(view_osfuser_permission)
read_only.save()
osf_admin.save()
def revert_osfuser_view_permissions(*args):
ctype = ContentType.objects.get_for_model(OSFUser)
wrong_osfuser_permission = Permission.objects.create(codename='view_user', name='Can view user details', content_type=ctype)
view_osfuser_permission = Permission.objects.get(codename='view_osfuser')
osf_admin = Group.objects.get(name='osf_admin')
read_only = Group.objects.get(name='read_only')
osf_admin.permissions.add(wrong_osfuser_permission)
read_only.permissions.add(wrong_osfuser_permission)
read_only.permissions.remove(view_osfuser_permission)
osf_admin.permissions.remove(view_osfuser_permission)
osf_admin.save()
read_only.save()
class Migration(migrations.Migration):
dependencies = [
('osf', '0011_auto_20170410_1711'),
]
operations = [
migrations.RunPython(fix_osfuser_view_permissions, revert_osfuser_view_permissions),
]
|
<commit_before><commit_msg>Add proper view_osfuser permission to read_only and admin groups<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-04-05 17:30
from __future__ import unicode_literals
from django.db import migrations, models
from django.contrib.auth.models import Permission, Group
from django.contrib.contenttypes.models import ContentType
from osf.models import OSFUser
def fix_osfuser_view_permissions(*args):
view_osfuser_permission = Permission.objects.get(codename='view_osfuser')
wrong_osfuser_permission = Permission.objects.get(codename='view_user')
wrong_osfuser_permission.delete()
read_only = Group.objects.get(name='read_only')
osf_admin = Group.objects.get(name='osf_admin')
read_only.permissions.add(view_osfuser_permission)
osf_admin.permissions.add(view_osfuser_permission)
read_only.save()
osf_admin.save()
def revert_osfuser_view_permissions(*args):
ctype = ContentType.objects.get_for_model(OSFUser)
wrong_osfuser_permission = Permission.objects.create(codename='view_user', name='Can view user details', content_type=ctype)
view_osfuser_permission = Permission.objects.get(codename='view_osfuser')
osf_admin = Group.objects.get(name='osf_admin')
read_only = Group.objects.get(name='read_only')
osf_admin.permissions.add(wrong_osfuser_permission)
read_only.permissions.add(wrong_osfuser_permission)
read_only.permissions.remove(view_osfuser_permission)
osf_admin.permissions.remove(view_osfuser_permission)
osf_admin.save()
read_only.save()
class Migration(migrations.Migration):
dependencies = [
('osf', '0011_auto_20170410_1711'),
]
operations = [
migrations.RunPython(fix_osfuser_view_permissions, revert_osfuser_view_permissions),
]
|
|
d821e62e343150ee57daa4e5ac6ad203cb4d9708
|
comp/microsoft/000_missinteger.py
|
comp/microsoft/000_missinteger.py
|
'''
https://app.codility.com/demo/results/demoHZEZJ5-D8X/
This is a demo task.
Write a function:
def solution(A)
that, given an array A of N integers, returns the smallest positive integer (greater than 0) that does not occur in A.
For example, given A = [1, 3, 6, 4, 1, 2], the function should return 5.
Given A = [1, 2, 3], the function should return 4.
Given A = [−1, −3], the function should return 1.
Write an efficient algorithm for the following assumptions:
N is an integer within the range [1..100,000];
each element of array A is an integer within the range [−1,000,000..1,000,000].
'''
# you can write to stdout for debugging purposes, e.g.
# print("this is a debug message")
def solution(A):
# write your code in Python 3.6
pos_set = set()
for a in A:
if a > 0:
pos_set.add(a)
pos = list(pos_set)
pos.sort()
res = 1
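# pos is sorted and duplicate-free: stop at the first gap, otherwise extend the covered range to num + 1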
for num in pos:
if res < num:
break
else:
res = num + 1
return res
|
Implement MissingInteger (microsoft oa demo)
|
Implement MissingInteger (microsoft oa demo)
|
Python
|
mit
|
Chasego/codirit,cc13ny/algo,Chasego/codi,Chasego/cod,Chasego/codi,cc13ny/algo,Chasego/codi,Chasego/cod,Chasego/codi,Chasego/codirit,Chasego/codi,cc13ny/algo,Chasego/codirit,cc13ny/algo,Chasego/cod,Chasego/cod,cc13ny/algo,Chasego/codirit,Chasego/cod,Chasego/codirit
|
Implement MissingInteger (microsoft oa demo)
|
'''
https://app.codility.com/demo/results/demoHZEZJ5-D8X/
This is a demo task.
Write a function:
def solution(A)
that, given an array A of N integers, returns the smallest positive integer (greater than 0) that does not occur in A.
For example, given A = [1, 3, 6, 4, 1, 2], the function should return 5.
Given A = [1, 2, 3], the function should return 4.
Given A = [−1, −3], the function should return 1.
Write an efficient algorithm for the following assumptions:
N is an integer within the range [1..100,000];
each element of array A is an integer within the range [−1,000,000..1,000,000].
'''
# you can write to stdout for debugging purposes, e.g.
# print("this is a debug message")
def solution(A):
# write your code in Python 3.6
pos_set = set()
for a in A:
if a > 0:
pos_set.add(a)
pos = list(pos_set)
pos.sort()
res = 1
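# pos is sorted and duplicate-free: stop at the first gap, otherwise extend the covered range to num + 1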
for num in pos:
if res < num:
break
else:
res = num + 1
return res
|
<commit_before><commit_msg>Implement MissingInteger (microsoft oa demo)<commit_after>
|
'''
https://app.codility.com/demo/results/demoHZEZJ5-D8X/
This is a demo task.
Write a function:
def solution(A)
that, given an array A of N integers, returns the smallest positive integer (greater than 0) that does not occur in A.
For example, given A = [1, 3, 6, 4, 1, 2], the function should return 5.
Given A = [1, 2, 3], the function should return 4.
Given A = [−1, −3], the function should return 1.
Write an efficient algorithm for the following assumptions:
N is an integer within the range [1..100,000];
each element of array A is an integer within the range [−1,000,000..1,000,000].
'''
# you can write to stdout for debugging purposes, e.g.
# print("this is a debug message")
def solution(A):
# write your code in Python 3.6
pos_set = set()
for a in A:
if a > 0:
pos_set.add(a)
pos = list(pos_set)
pos.sort()
res = 1
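# pos is sorted and duplicate-free: stop at the first gap, otherwise extend the covered range to num + 1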
for num in pos:
if res < num:
break
else:
res = num + 1
return res
|
Implement MissingInteger (microsoft oa demo)'''
https://app.codility.com/demo/results/demoHZEZJ5-D8X/
This is a demo task.
Write a function:
def solution(A)
that, given an array A of N integers, returns the smallest positive integer (greater than 0) that does not occur in A.
For example, given A = [1, 3, 6, 4, 1, 2], the function should return 5.
Given A = [1, 2, 3], the function should return 4.
Given A = [−1, −3], the function should return 1.
Write an efficient algorithm for the following assumptions:
N is an integer within the range [1..100,000];
each element of array A is an integer within the range [−1,000,000..1,000,000].
'''
# you can write to stdout for debugging purposes, e.g.
# print("this is a debug message")
def solution(A):
# write your code in Python 3.6
pos_set = set()
for a in A:
if a > 0:
pos_set.add(a)
pos = list(pos_set)
pos.sort()
res = 1
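# pos is sorted and duplicate-free: stop at the first gap, otherwise extend the covered range to num + 1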
for num in pos:
if res < num:
break
else:
res = num + 1
return res
|
<commit_before><commit_msg>Implement MissingInteger (microsoft oa demo)<commit_after>'''
https://app.codility.com/demo/results/demoHZEZJ5-D8X/
This is a demo task.
Write a function:
def solution(A)
that, given an array A of N integers, returns the smallest positive integer (greater than 0) that does not occur in A.
For example, given A = [1, 3, 6, 4, 1, 2], the function should return 5.
Given A = [1, 2, 3], the function should return 4.
Given A = [−1, −3], the function should return 1.
Write an efficient algorithm for the following assumptions:
N is an integer within the range [1..100,000];
each element of array A is an integer within the range [−1,000,000..1,000,000].
'''
# you can write to stdout for debugging purposes, e.g.
# print("this is a debug message")
def solution(A):
# write your code in Python 3.6
pos_set = set()
for a in A:
if a > 0:
pos_set.add(a)
pos = list(pos_set)
pos.sort()
res = 1
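# pos is sorted and duplicate-free: stop at the first gap, otherwise extend the covered range to num + 1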
for num in pos:
if res < num:
break
else:
res = num + 1
return res
|
|
e1d268d1d7ad63c0c9db6ff3b6645b835ef4513f
|
resources/git/hooks/pre_commit.py
|
resources/git/hooks/pre_commit.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
This script provides automatic update and staging of documentation files when
"raw" documentation files have been changed.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import inspect
import os
import sys
import git
GIT_HOOKS_DIRPATH = os.path.abspath(
os.path.dirname(inspect.getfile(inspect.currentframe())))
REPOSITORY_ROOT_DIRPATH = os.path.dirname(os.path.dirname(GIT_HOOKS_DIRPATH))
sys.path.append(REPOSITORY_ROOT_DIRPATH)
from resources.utils import sync_docs
#===============================================================================
def get_synced_files_to_stage(staged_filepaths, filepaths_to_sync):
return [
filepaths_to_sync[staged_filepath]
for staged_filepath in staged_filepaths if staged_filepath in filepaths_to_sync]
def main():
repo = git.Repo(REPOSITORY_ROOT_DIRPATH)
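# index.diff("HEAD") compares the index against HEAD, i.e. the currently staged changes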
staged_filepaths = [
os.path.normpath(os.path.join(REPOSITORY_ROOT_DIRPATH, diff.a_path))
for diff in repo.index.diff("HEAD")]
filepaths_to_sync = sync_docs.get_filepaths(sync_docs.PATHS_TO_PREPROCESS_FILEPATH)
filepaths_to_sync.update(sync_docs.get_filepaths(sync_docs.PATHS_TO_COPY_FILEPATH))
sync_docs.main()
synced_filepaths_to_stage = (
get_synced_files_to_stage(staged_filepaths, filepaths_to_sync))
repo.git.add(synced_filepaths_to_stage)
if __name__ == "__main__":
main()
|
Add git hook to auto-stage docs files
|
resources: Add git hook to auto-stage docs files
|
Python
|
bsd-3-clause
|
khalim19/gimp-plugin-export-layers,khalim19/gimp-plugin-export-layers
|
resources: Add git hook to auto-stage docs files
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
This script provides automatic update and staging of documentation files when
"raw" documentation files have been changed.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import inspect
import os
import sys
import git
GIT_HOOKS_DIRPATH = os.path.abspath(
os.path.dirname(inspect.getfile(inspect.currentframe())))
REPOSITORY_ROOT_DIRPATH = os.path.dirname(os.path.dirname(GIT_HOOKS_DIRPATH))
sys.path.append(REPOSITORY_ROOT_DIRPATH)
from resources.utils import sync_docs
#===============================================================================
def get_synced_files_to_stage(staged_filepaths, filepaths_to_sync):
return [
filepaths_to_sync[staged_filepath]
for staged_filepath in staged_filepaths if staged_filepath in filepaths_to_sync]
def main():
repo = git.Repo(REPOSITORY_ROOT_DIRPATH)
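# index.diff("HEAD") compares the index against HEAD, i.e. the currently staged changes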
staged_filepaths = [
os.path.normpath(os.path.join(REPOSITORY_ROOT_DIRPATH, diff.a_path))
for diff in repo.index.diff("HEAD")]
filepaths_to_sync = sync_docs.get_filepaths(sync_docs.PATHS_TO_PREPROCESS_FILEPATH)
filepaths_to_sync.update(sync_docs.get_filepaths(sync_docs.PATHS_TO_COPY_FILEPATH))
sync_docs.main()
synced_filepaths_to_stage = (
get_synced_files_to_stage(staged_filepaths, filepaths_to_sync))
repo.git.add(synced_filepaths_to_stage)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>resources: Add git hook to auto-stage docs files<commit_after>
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
This script provides automatic update and staging of documentation files when
"raw" documentation files have been changed.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import inspect
import os
import sys
import git
GIT_HOOKS_DIRPATH = os.path.abspath(
os.path.dirname(inspect.getfile(inspect.currentframe())))
REPOSITORY_ROOT_DIRPATH = os.path.dirname(os.path.dirname(GIT_HOOKS_DIRPATH))
sys.path.append(REPOSITORY_ROOT_DIRPATH)
from resources.utils import sync_docs
#===============================================================================
def get_synced_files_to_stage(staged_filepaths, filepaths_to_sync):
return [
filepaths_to_sync[staged_filepath]
for staged_filepath in staged_filepaths if staged_filepath in filepaths_to_sync]
def main():
repo = git.Repo(REPOSITORY_ROOT_DIRPATH)
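# index.diff("HEAD") compares the index against HEAD, i.e. the currently staged changes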
staged_filepaths = [
os.path.normpath(os.path.join(REPOSITORY_ROOT_DIRPATH, diff.a_path))
for diff in repo.index.diff("HEAD")]
filepaths_to_sync = sync_docs.get_filepaths(sync_docs.PATHS_TO_PREPROCESS_FILEPATH)
filepaths_to_sync.update(sync_docs.get_filepaths(sync_docs.PATHS_TO_COPY_FILEPATH))
sync_docs.main()
synced_filepaths_to_stage = (
get_synced_files_to_stage(staged_filepaths, filepaths_to_sync))
repo.git.add(synced_filepaths_to_stage)
if __name__ == "__main__":
main()
|
resources: Add git hook to auto-stage docs files#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
This script provides automatic update and staging of documentation files when
"raw" documentation files have been changed.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import inspect
import os
import sys
import git
GIT_HOOKS_DIRPATH = os.path.abspath(
os.path.dirname(inspect.getfile(inspect.currentframe())))
REPOSITORY_ROOT_DIRPATH = os.path.dirname(os.path.dirname(GIT_HOOKS_DIRPATH))
sys.path.append(REPOSITORY_ROOT_DIRPATH)
from resources.utils import sync_docs
#===============================================================================
def get_synced_files_to_stage(staged_filepaths, filepaths_to_sync):
return [
filepaths_to_sync[staged_filepath]
for staged_filepath in staged_filepaths if staged_filepath in filepaths_to_sync]
def main():
repo = git.Repo(REPOSITORY_ROOT_DIRPATH)
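# index.diff("HEAD") compares the index against HEAD, i.e. the currently staged changes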
staged_filepaths = [
os.path.normpath(os.path.join(REPOSITORY_ROOT_DIRPATH, diff.a_path))
for diff in repo.index.diff("HEAD")]
filepaths_to_sync = sync_docs.get_filepaths(sync_docs.PATHS_TO_PREPROCESS_FILEPATH)
filepaths_to_sync.update(sync_docs.get_filepaths(sync_docs.PATHS_TO_COPY_FILEPATH))
sync_docs.main()
synced_filepaths_to_stage = (
get_synced_files_to_stage(staged_filepaths, filepaths_to_sync))
repo.git.add(synced_filepaths_to_stage)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>resources: Add git hook to auto-stage docs files<commit_after>#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
This script provides automatic update and staging of documentation files when
"raw" documentation files have been changed.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import inspect
import os
import sys
import git
GIT_HOOKS_DIRPATH = os.path.abspath(
os.path.dirname(inspect.getfile(inspect.currentframe())))
REPOSITORY_ROOT_DIRPATH = os.path.dirname(os.path.dirname(GIT_HOOKS_DIRPATH))
sys.path.append(REPOSITORY_ROOT_DIRPATH)
from resources.utils import sync_docs
#===============================================================================
def get_synced_files_to_stage(staged_filepaths, filepaths_to_sync):
return [
filepaths_to_sync[staged_filepath]
for staged_filepath in staged_filepaths if staged_filepath in filepaths_to_sync]
def main():
repo = git.Repo(REPOSITORY_ROOT_DIRPATH)
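# index.diff("HEAD") compares the index against HEAD, i.e. the currently staged changes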
staged_filepaths = [
os.path.normpath(os.path.join(REPOSITORY_ROOT_DIRPATH, diff.a_path))
for diff in repo.index.diff("HEAD")]
filepaths_to_sync = sync_docs.get_filepaths(sync_docs.PATHS_TO_PREPROCESS_FILEPATH)
filepaths_to_sync.update(sync_docs.get_filepaths(sync_docs.PATHS_TO_COPY_FILEPATH))
sync_docs.main()
synced_filepaths_to_stage = (
get_synced_files_to_stage(staged_filepaths, filepaths_to_sync))
repo.git.add(synced_filepaths_to_stage)
if __name__ == "__main__":
main()
|