| column | dtype | lengths / classes |
|---|---|---|
| commit | string | lengths 40–40 |
| old_file | string | lengths 4–118 |
| new_file | string | lengths 4–118 |
| old_contents | string | lengths 0–2.94k |
| new_contents | string | lengths 1–4.43k |
| subject | string | lengths 15–444 |
| message | string | lengths 16–3.45k |
| lang | string | 1 class |
| license | string | 13 classes |
| repos | string | lengths 5–43.2k |
| prompt | string | lengths 17–4.58k |
| response | string | lengths 1–4.43k |
| prompt_tagged | string | lengths 58–4.62k |
| response_tagged | string | lengths 1–4.43k |
| text | string | lengths 132–7.29k |
| text_tagged | string | lengths 173–7.33k |
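Each example row below repeats much of the same content across the derived columns. As a rough sketch inferred only from the preview rows (not from the dataset's own preprocessing code), the derived fields appear to be simple rearrangements of the raw commit fields; the helper name `build_derived` and the toy row are illustrative only:

```python
def build_derived(row):
    """Sketch of how the derived columns appear to relate to the raw ones."""
    # 'prompt' mirrors the commit message and 'response' mirrors new_contents.
    prompt = row["message"]
    response = row["new_contents"]
    # The *_tagged variants wrap old contents, message and new contents in markers.
    prompt_tagged = ("<commit_before>" + row["old_contents"]
                     + "<commit_msg>" + row["message"] + "<commit_after>")
    response_tagged = row["new_contents"]
    # 'text' and 'text_tagged' appear to be plain concatenations.
    return {
        "prompt": prompt,
        "response": response,
        "prompt_tagged": prompt_tagged,
        "response_tagged": response_tagged,
        "text": prompt + response,
        "text_tagged": prompt_tagged + response_tagged,
    }

# Toy row (illustrative values, not taken from the dataset).
example = {"old_contents": "", "new_contents": "print('hi')\n",
           "message": "Add hello script"}
print(build_derived(example)["prompt_tagged"])
# -> <commit_before><commit_msg>Add hello script<commit_after>
```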
526560178f93894f19ed6d99557254eea78653ea
|
services/migrations/0002_add_initial_servicetypes.py
|
services/migrations/0002_add_initial_servicetypes.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    def add_servicetypes(apps, schema_editor):
        ServiceType = apps.get_model('services', 'ServiceType')
        for st in SERVICE_TYPES:
            ServiceType.objects.get_or_create(
                pk=st['pk'],
                servicetype=st['fields']['servicetype']
            )

    def del_servicetypes(apps, schema_editor):
        ServiceType = apps.get_model('services', 'ServiceType')
        for st in SERVICE_TYPES:
            ServiceType.objects.get(pk=st['pk'], servicetype=st['fields']['servicetype']).delete()

    dependencies = [
        ('services', '0001_initial'),
    ]

    operations = [
        migrations.RunPython(add_servicetypes, del_servicetypes)
    ]


SERVICE_TYPES = [
    {
        "pk": 1,
        "model": "services.servicetype",
        "fields": {
            "servicetype": "UNIX account"
        }
    },
    {
        "pk": 2,
        "model": "services.servicetype",
        "fields": {
            "servicetype": "Email alias"
        }
    },
    {
        "pk": 3,
        "model": "services.servicetype",
        "fields": {
            "servicetype": "WWW vhost"
        }
    },
    {
        "pk": 4,
        "model": "services.servicetype",
        "fields": {
            "servicetype": "MySQL database"
        }
    },
    {
        "pk": 5,
        "model": "services.servicetype",
        "fields": {
            "servicetype": "PostgreSQL database"
        }
    },
    {
        "pk": 6,
        "model": "services.servicetype",
        "fields": {
            "servicetype": "DNS domain"
        }
    },
    {
        "pk": 7,
        "model": "services.servicetype",
        "fields": {
            "servicetype": "IRC vhost"
        }
    },
    {
        "pk": 8,
        "model": "services.servicetype",
        "fields": {
            "servicetype": "SVN repository"
        }
    },
    {
        "pk": 9,
        "model": "services.servicetype",
        "fields": {
            "servicetype": "Mailbox account"
        }
    },
    {
        "pk": 10,
        "model": "services.servicetype",
        "fields": {
            "servicetype": "Firewall port"
        }
    }
]
|
Add ServiceTypes as a data migration instead of fixtures
|
Add ServiceTypes as a data migration instead of fixtures
|
Python
|
mit
|
AriMartti/sikteeri,kapsiry/sikteeri,kapsiry/sikteeri,joneskoo/sikteeri,joneskoo/sikteeri,kapsiry/sikteeri,annttu/sikteeri,annttu/sikteeri,annttu/sikteeri,joneskoo/sikteeri,annttu/sikteeri,AriMartti/sikteeri,AriMartti/sikteeri,joneskoo/sikteeri,AriMartti/sikteeri,kapsiry/sikteeri
|
Add ServiceTypes as a data migration instead of fixtures
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
def add_servicetypes(apps, schema_editor):
ServiceType = apps.get_model('services', 'ServiceType')
for st in SERVICE_TYPES:
ServiceType.objects.get_or_create(
pk=st['pk'],
servicetype=st['fields']['servicetype']
)
def del_servicetypes(apps, schema_editor):
ServiceType = apps.get_model('services', 'ServiceType')
for st in SERVICE_TYPES:
ServiceType.objects.get(pk=st['pk'], servicetype=st['fields']['servicetype']).delete()
dependencies = [
('services', '0001_initial'),
]
operations = [
migrations.RunPython(add_servicetypes, del_servicetypes)
]
SERVICE_TYPES = [
{
"pk": 1,
"model": "services.servicetype",
"fields": {
"servicetype": "UNIX account"
}
},
{
"pk": 2,
"model": "services.servicetype",
"fields": {
"servicetype": "Email alias"
}
},
{
"pk": 3,
"model": "services.servicetype",
"fields": {
"servicetype": "WWW vhost"
}
},
{
"pk": 4,
"model": "services.servicetype",
"fields": {
"servicetype": "MySQL database"
}
},
{
"pk": 5,
"model": "services.servicetype",
"fields": {
"servicetype": "PostgreSQL database"
}
},
{
"pk": 6,
"model": "services.servicetype",
"fields": {
"servicetype": "DNS domain"
}
},
{
"pk": 7,
"model": "services.servicetype",
"fields": {
"servicetype": "IRC vhost"
}
},
{
"pk": 8,
"model": "services.servicetype",
"fields": {
"servicetype": "SVN repository"
}
},
{
"pk": 9,
"model": "services.servicetype",
"fields": {
"servicetype": "Mailbox account"
}
},
{
"pk": 10,
"model": "services.servicetype",
"fields": {
"servicetype": "Firewall port"
}
}
]
|
<commit_before><commit_msg>Add ServiceTypes as a data migration instead of fixtures<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
def add_servicetypes(apps, schema_editor):
ServiceType = apps.get_model('services', 'ServiceType')
for st in SERVICE_TYPES:
ServiceType.objects.get_or_create(
pk=st['pk'],
servicetype=st['fields']['servicetype']
)
def del_servicetypes(apps, schema_editor):
ServiceType = apps.get_model('services', 'ServiceType')
for st in SERVICE_TYPES:
ServiceType.objects.get(pk=st['pk'], servicetype=st['fields']['servicetype']).delete()
dependencies = [
('services', '0001_initial'),
]
operations = [
migrations.RunPython(add_servicetypes, del_servicetypes)
]
SERVICE_TYPES = [
{
"pk": 1,
"model": "services.servicetype",
"fields": {
"servicetype": "UNIX account"
}
},
{
"pk": 2,
"model": "services.servicetype",
"fields": {
"servicetype": "Email alias"
}
},
{
"pk": 3,
"model": "services.servicetype",
"fields": {
"servicetype": "WWW vhost"
}
},
{
"pk": 4,
"model": "services.servicetype",
"fields": {
"servicetype": "MySQL database"
}
},
{
"pk": 5,
"model": "services.servicetype",
"fields": {
"servicetype": "PostgreSQL database"
}
},
{
"pk": 6,
"model": "services.servicetype",
"fields": {
"servicetype": "DNS domain"
}
},
{
"pk": 7,
"model": "services.servicetype",
"fields": {
"servicetype": "IRC vhost"
}
},
{
"pk": 8,
"model": "services.servicetype",
"fields": {
"servicetype": "SVN repository"
}
},
{
"pk": 9,
"model": "services.servicetype",
"fields": {
"servicetype": "Mailbox account"
}
},
{
"pk": 10,
"model": "services.servicetype",
"fields": {
"servicetype": "Firewall port"
}
}
]
|
Add ServiceTypes as a data migration instead of fixtures# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
def add_servicetypes(apps, schema_editor):
ServiceType = apps.get_model('services', 'ServiceType')
for st in SERVICE_TYPES:
ServiceType.objects.get_or_create(
pk=st['pk'],
servicetype=st['fields']['servicetype']
)
def del_servicetypes(apps, schema_editor):
ServiceType = apps.get_model('services', 'ServiceType')
for st in SERVICE_TYPES:
ServiceType.objects.get(pk=st['pk'], servicetype=st['fields']['servicetype']).delete()
dependencies = [
('services', '0001_initial'),
]
operations = [
migrations.RunPython(add_servicetypes, del_servicetypes)
]
SERVICE_TYPES = [
{
"pk": 1,
"model": "services.servicetype",
"fields": {
"servicetype": "UNIX account"
}
},
{
"pk": 2,
"model": "services.servicetype",
"fields": {
"servicetype": "Email alias"
}
},
{
"pk": 3,
"model": "services.servicetype",
"fields": {
"servicetype": "WWW vhost"
}
},
{
"pk": 4,
"model": "services.servicetype",
"fields": {
"servicetype": "MySQL database"
}
},
{
"pk": 5,
"model": "services.servicetype",
"fields": {
"servicetype": "PostgreSQL database"
}
},
{
"pk": 6,
"model": "services.servicetype",
"fields": {
"servicetype": "DNS domain"
}
},
{
"pk": 7,
"model": "services.servicetype",
"fields": {
"servicetype": "IRC vhost"
}
},
{
"pk": 8,
"model": "services.servicetype",
"fields": {
"servicetype": "SVN repository"
}
},
{
"pk": 9,
"model": "services.servicetype",
"fields": {
"servicetype": "Mailbox account"
}
},
{
"pk": 10,
"model": "services.servicetype",
"fields": {
"servicetype": "Firewall port"
}
}
]
|
<commit_before><commit_msg>Add ServiceTypes as a data migration instead of fixtures<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
def add_servicetypes(apps, schema_editor):
ServiceType = apps.get_model('services', 'ServiceType')
for st in SERVICE_TYPES:
ServiceType.objects.get_or_create(
pk=st['pk'],
servicetype=st['fields']['servicetype']
)
def del_servicetypes(apps, schema_editor):
ServiceType = apps.get_model('services', 'ServiceType')
for st in SERVICE_TYPES:
ServiceType.objects.get(pk=st['pk'], servicetype=st['fields']['servicetype']).delete()
dependencies = [
('services', '0001_initial'),
]
operations = [
migrations.RunPython(add_servicetypes, del_servicetypes)
]
SERVICE_TYPES = [
{
"pk": 1,
"model": "services.servicetype",
"fields": {
"servicetype": "UNIX account"
}
},
{
"pk": 2,
"model": "services.servicetype",
"fields": {
"servicetype": "Email alias"
}
},
{
"pk": 3,
"model": "services.servicetype",
"fields": {
"servicetype": "WWW vhost"
}
},
{
"pk": 4,
"model": "services.servicetype",
"fields": {
"servicetype": "MySQL database"
}
},
{
"pk": 5,
"model": "services.servicetype",
"fields": {
"servicetype": "PostgreSQL database"
}
},
{
"pk": 6,
"model": "services.servicetype",
"fields": {
"servicetype": "DNS domain"
}
},
{
"pk": 7,
"model": "services.servicetype",
"fields": {
"servicetype": "IRC vhost"
}
},
{
"pk": 8,
"model": "services.servicetype",
"fields": {
"servicetype": "SVN repository"
}
},
{
"pk": 9,
"model": "services.servicetype",
"fields": {
"servicetype": "Mailbox account"
}
},
{
"pk": 10,
"model": "services.servicetype",
"fields": {
"servicetype": "Firewall port"
}
}
]
|
|
690dc62ae000d7608da72b2372828b1d91659e4f
|
test/test_functions.py
|
test/test_functions.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
                        print_function, unicode_literals)

__author__ = "Yuji Ikeda"

import unittest
import numpy as np
from ph_unfolder.analysis.functions import lorentzian_unnormalized


class TestFunctions(unittest.TestCase):
    def test_lorentzian_unnnormalized(self):
        # norms = np.linspace(0.0, 10.0, 101)
        norms = np.linspace(1.0, 1.0, 1)
        widths = np.linspace(0.01, 5.0, 500)
        # peak_positions = np.linspace(-2.0, 2.0, 401)
        peak_positions = np.linspace(0.0, 0.0, 1)
        xs = np.linspace(-500.0, 500.0, 100001)
        dx = xs[1] - xs[0]
        prec = 1e-2
        for peak_position in peak_positions:
            for width in widths:
                for norm in norms:
                    ys = lorentzian_unnormalized(xs, peak_position, width, norm)
                    norm_integration = np.sum(ys) * dx
                    ratio = norm_integration / norm
                    print('{:12.3f}'.format(peak_position   ), end='')
                    print('{:12.3f}'.format(width           ), end='')
                    print('{:12.6f}'.format(norm            ), end='')
                    print('{:12.6f}'.format(norm_integration), end='')
                    print('{:12.6f}'.format(ratio           ), end='')
                    print()
                    if not np.isnan(ratio):
                        self.assertTrue(np.abs(ratio - 1.0) < prec)


if __name__ == "__main__":
    unittest.main()
|
Add the test for functions.py
|
Add the test for functions.py
|
Python
|
mit
|
yuzie007/upho,yuzie007/ph_unfolder
|
Add the test for functions.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
__author__ = "Yuji Ikeda"
import unittest
import numpy as np
from ph_unfolder.analysis.functions import lorentzian_unnormalized
class TestFunctions(unittest.TestCase):
def test_lorentzian_unnnormalized(self):
# norms = np.linspace(0.0, 10.0, 101)
norms = np.linspace(1.0, 1.0, 1)
widths = np.linspace(0.01, 5.0, 500)
# peak_positions = np.linspace(-2.0, 2.0, 401)
peak_positions = np.linspace(0.0, 0.0, 1)
xs = np.linspace(-500.0, 500.0, 100001)
dx = xs[1] - xs[0]
prec = 1e-2
for peak_position in peak_positions:
for width in widths:
for norm in norms:
ys = lorentzian_unnormalized(xs, peak_position, width, norm)
norm_integration = np.sum(ys) * dx
ratio = norm_integration / norm
print('{:12.3f}'.format(peak_position ), end='')
print('{:12.3f}'.format(width ), end='')
print('{:12.6f}'.format(norm ), end='')
print('{:12.6f}'.format(norm_integration), end='')
print('{:12.6f}'.format(ratio ), end='')
print()
if not np.isnan(ratio):
self.assertTrue(np.abs(ratio - 1.0) < prec)
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add the test for functions.py<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
__author__ = "Yuji Ikeda"
import unittest
import numpy as np
from ph_unfolder.analysis.functions import lorentzian_unnormalized
class TestFunctions(unittest.TestCase):
def test_lorentzian_unnnormalized(self):
# norms = np.linspace(0.0, 10.0, 101)
norms = np.linspace(1.0, 1.0, 1)
widths = np.linspace(0.01, 5.0, 500)
# peak_positions = np.linspace(-2.0, 2.0, 401)
peak_positions = np.linspace(0.0, 0.0, 1)
xs = np.linspace(-500.0, 500.0, 100001)
dx = xs[1] - xs[0]
prec = 1e-2
for peak_position in peak_positions:
for width in widths:
for norm in norms:
ys = lorentzian_unnormalized(xs, peak_position, width, norm)
norm_integration = np.sum(ys) * dx
ratio = norm_integration / norm
print('{:12.3f}'.format(peak_position ), end='')
print('{:12.3f}'.format(width ), end='')
print('{:12.6f}'.format(norm ), end='')
print('{:12.6f}'.format(norm_integration), end='')
print('{:12.6f}'.format(ratio ), end='')
print()
if not np.isnan(ratio):
self.assertTrue(np.abs(ratio - 1.0) < prec)
if __name__ == "__main__":
unittest.main()
|
Add the test for functions.py#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
__author__ = "Yuji Ikeda"
import unittest
import numpy as np
from ph_unfolder.analysis.functions import lorentzian_unnormalized
class TestFunctions(unittest.TestCase):
def test_lorentzian_unnnormalized(self):
# norms = np.linspace(0.0, 10.0, 101)
norms = np.linspace(1.0, 1.0, 1)
widths = np.linspace(0.01, 5.0, 500)
# peak_positions = np.linspace(-2.0, 2.0, 401)
peak_positions = np.linspace(0.0, 0.0, 1)
xs = np.linspace(-500.0, 500.0, 100001)
dx = xs[1] - xs[0]
prec = 1e-2
for peak_position in peak_positions:
for width in widths:
for norm in norms:
ys = lorentzian_unnormalized(xs, peak_position, width, norm)
norm_integration = np.sum(ys) * dx
ratio = norm_integration / norm
print('{:12.3f}'.format(peak_position ), end='')
print('{:12.3f}'.format(width ), end='')
print('{:12.6f}'.format(norm ), end='')
print('{:12.6f}'.format(norm_integration), end='')
print('{:12.6f}'.format(ratio ), end='')
print()
if not np.isnan(ratio):
self.assertTrue(np.abs(ratio - 1.0) < prec)
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add the test for functions.py<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
__author__ = "Yuji Ikeda"
import unittest
import numpy as np
from ph_unfolder.analysis.functions import lorentzian_unnormalized
class TestFunctions(unittest.TestCase):
def test_lorentzian_unnnormalized(self):
# norms = np.linspace(0.0, 10.0, 101)
norms = np.linspace(1.0, 1.0, 1)
widths = np.linspace(0.01, 5.0, 500)
# peak_positions = np.linspace(-2.0, 2.0, 401)
peak_positions = np.linspace(0.0, 0.0, 1)
xs = np.linspace(-500.0, 500.0, 100001)
dx = xs[1] - xs[0]
prec = 1e-2
for peak_position in peak_positions:
for width in widths:
for norm in norms:
ys = lorentzian_unnormalized(xs, peak_position, width, norm)
norm_integration = np.sum(ys) * dx
ratio = norm_integration / norm
print('{:12.3f}'.format(peak_position ), end='')
print('{:12.3f}'.format(width ), end='')
print('{:12.6f}'.format(norm ), end='')
print('{:12.6f}'.format(norm_integration), end='')
print('{:12.6f}'.format(ratio ), end='')
print()
if not np.isnan(ratio):
self.assertTrue(np.abs(ratio - 1.0) < prec)
if __name__ == "__main__":
unittest.main()
|
|
0ac153e6e3b9432aac2bd5bbbe119480e5c1677c
|
tests/calculate_test.py
|
tests/calculate_test.py
|
from calculate import calculate


def test_calculate():
    assert calculate('5+5') == 10
    assert calculate('5*5') == 25
    assert calculate('10/2') == 5
    assert calculate('10-4') == 6
    assert calculate('7/3') == 2
    assert calculate('5-10') == -5
    assert calculate('2+7*2') == 16
|
Add test cases for calculate
|
Add test cases for calculate
|
Python
|
mit
|
MichaelAquilina/Simple-Calculator
|
Add test cases for calculate
|
from calculate import calculate
def test_calculate():
assert calculate('5+5') == 10
assert calculate('5*5') == 25
assert calculate('10/2') == 5
assert calculate('10-4') == 6
assert calculate('7/3') == 2
assert calculate('5-10') == -5
assert calculate('2+7*2') == 16
|
<commit_before><commit_msg>Add test cases for calculate<commit_after>
|
from calculate import calculate
def test_calculate():
assert calculate('5+5') == 10
assert calculate('5*5') == 25
assert calculate('10/2') == 5
assert calculate('10-4') == 6
assert calculate('7/3') == 2
assert calculate('5-10') == -5
assert calculate('2+7*2') == 16
|
Add test cases for calculatefrom calculate import calculate
def test_calculate():
assert calculate('5+5') == 10
assert calculate('5*5') == 25
assert calculate('10/2') == 5
assert calculate('10-4') == 6
assert calculate('7/3') == 2
assert calculate('5-10') == -5
assert calculate('2+7*2') == 16
|
<commit_before><commit_msg>Add test cases for calculate<commit_after>from calculate import calculate
def test_calculate():
assert calculate('5+5') == 10
assert calculate('5*5') == 25
assert calculate('10/2') == 5
assert calculate('10-4') == 6
assert calculate('7/3') == 2
assert calculate('5-10') == -5
assert calculate('2+7*2') == 16
|
|
3e75626ee72dee59d2aec01b4dd4278cd7fed3fe
|
tests/test_alternate.py
|
tests/test_alternate.py
|
from dtest import *
from dtest.util import *


class TestAlternate(DTestCase):
    alternate = None

    def setUp(self):
        assert_is_none(self.alternate)
        self.alternate = False

    def tearDown(self):
        assert_false(self.alternate)

    def test1(self):
        assert_false(self.alternate)

    @istest
    def test2(self):
        assert_true(self.alternate)

    @test2.setUp
    def alternateSetUp(self):
        assert_is_none(self.alternate)
        self.alternate = True

    @test2.tearDown
    def alternateTearDown(self):
        assert_true(self.alternate)
|
Add tests to ensure the setUp and tearDown decorators work
|
Add tests to ensure the setUp and tearDown decorators work
|
Python
|
apache-2.0
|
klmitch/dtest,klmitch/dtest
|
Add tests to ensure the setUp and tearDown decorators work
|
from dtest import *
from dtest.util import *
class TestAlternate(DTestCase):
alternate = None
def setUp(self):
assert_is_none(self.alternate)
self.alternate = False
def tearDown(self):
assert_false(self.alternate)
def test1(self):
assert_false(self.alternate)
@istest
def test2(self):
assert_true(self.alternate)
@test2.setUp
def alternateSetUp(self):
assert_is_none(self.alternate)
self.alternate = True
@test2.tearDown
def alternateTearDown(self):
assert_true(self.alternate)
|
<commit_before><commit_msg>Add tests to ensure the setUp and tearDown decorators work<commit_after>
|
from dtest import *
from dtest.util import *
class TestAlternate(DTestCase):
alternate = None
def setUp(self):
assert_is_none(self.alternate)
self.alternate = False
def tearDown(self):
assert_false(self.alternate)
def test1(self):
assert_false(self.alternate)
@istest
def test2(self):
assert_true(self.alternate)
@test2.setUp
def alternateSetUp(self):
assert_is_none(self.alternate)
self.alternate = True
@test2.tearDown
def alternateTearDown(self):
assert_true(self.alternate)
|
Add tests to ensure the setUp and tearDown decorators workfrom dtest import *
from dtest.util import *
class TestAlternate(DTestCase):
alternate = None
def setUp(self):
assert_is_none(self.alternate)
self.alternate = False
def tearDown(self):
assert_false(self.alternate)
def test1(self):
assert_false(self.alternate)
@istest
def test2(self):
assert_true(self.alternate)
@test2.setUp
def alternateSetUp(self):
assert_is_none(self.alternate)
self.alternate = True
@test2.tearDown
def alternateTearDown(self):
assert_true(self.alternate)
|
<commit_before><commit_msg>Add tests to ensure the setUp and tearDown decorators work<commit_after>from dtest import *
from dtest.util import *
class TestAlternate(DTestCase):
alternate = None
def setUp(self):
assert_is_none(self.alternate)
self.alternate = False
def tearDown(self):
assert_false(self.alternate)
def test1(self):
assert_false(self.alternate)
@istest
def test2(self):
assert_true(self.alternate)
@test2.setUp
def alternateSetUp(self):
assert_is_none(self.alternate)
self.alternate = True
@test2.tearDown
def alternateTearDown(self):
assert_true(self.alternate)
|
|
60cfcca427337881eb611fb85456ed4dee104992
|
TP1/Sources/instance_info_script.py
|
TP1/Sources/instance_info_script.py
|
#!/usr/bin/env python
import subprocess
import argparse

# inspired by https://www.cyberciti.biz/faq/linux-ram-info-command/ and https://en.wikipedia.org/wiki/Hdparm


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--output', dest='path', help='Path for the output file')
    parser.add_argument('--interface', dest='interfaceName', help='Interface name for the network information')
    options = parser.parse_args()

    command_HD = "sudo hdparm -I /dev/sda"  # get hard drive info
    command_mem = "free -h"  # get memory size
    command_mem2 = "dmidecode --type 17"  # get memory speed -- 17 is SMIBIOS code for 'Memory Device' type
    command_CPU = "cat /proc/cpuinfo"  # get CPU info
    command_net = "ethtool " + options.interfaceName  # get network info -- get interface name from 'ifconfig'

    file = open(options.path, 'a')

    subprocess.call("echo '#### CPU info #### \n'", shell=True, stdout=file)
    subprocess.call(command_CPU, shell=True, stdout=file)
    subprocess.call("echo '\n \n#### memory size #### \n'", shell=True, stdout=file)
    subprocess.call(command_mem, shell=True, stdout=file)
    subprocess.call("echo '\n \n#### memory info #### \n'", shell=True, stdout=file)
    subprocess.call(command_mem2, shell=True, stdout=file)
    subprocess.call("echo '\n \n#### disk info #### \n'", shell=True, stdout=file)
    subprocess.call(command_HD, shell=True, stdout=file)
    subprocess.call("echo '\n \n#### network info #### \n'", shell=True, stdout=file)
    subprocess.call(command_net, shell=True, stdout=file)

    file.close()


if __name__ == '__main__':
    main()
|
Add script to get machine characteristics
|
Add script to get machine characteristics
|
Python
|
mit
|
PrincessMadMath/LOG8415-Advanced_Cloud,PrincessMadMath/LOG8415-Advanced_Cloud,PrincessMadMath/LOG8415-Advanced_Cloud
|
Add script to get machine characteristics
|
#!/usr/bin/env python
import subprocess
import argparse
# inspired by https://www.cyberciti.biz/faq/linux-ram-info-command/ and https://en.wikipedia.org/wiki/Hdparm
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--output', dest='path', help='Path for the output file')
parser.add_argument('--interface', dest='interfaceName', help='Interface name for the network information')
options = parser.parse_args()
command_HD = "sudo hdparm -I /dev/sda" # get hard drive info
command_mem = "free -h" # get memory size
command_mem2 = "dmidecode --type 17" # get memory speed -- 17 is SMIBIOS code for 'Memory Device' type
command_CPU = "cat /proc/cpuinfo" # get CPU info
command_net = "ethtool " + options.interfaceName # get network info -- get interface name from 'ifconfig'
file = open(options.path, 'a')
subprocess.call("echo '#### CPU info #### \n'", shell=True, stdout=file)
subprocess.call(command_CPU, shell=True, stdout=file)
subprocess.call("echo '\n \n#### memory size #### \n'", shell=True, stdout=file)
subprocess.call(command_mem, shell=True, stdout=file)
subprocess.call("echo '\n \n#### memory info #### \n'", shell=True, stdout=file)
subprocess.call(command_mem2, shell=True, stdout=file)
subprocess.call("echo '\n \n#### disk info #### \n'", shell=True, stdout=file)
subprocess.call(command_HD, shell=True, stdout=file)
subprocess.call("echo '\n \n#### network info #### \n'", shell=True, stdout=file)
subprocess.call(command_net, shell=True, stdout=file)
file.close()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to get machine characteristics<commit_after>
|
#!/usr/bin/env python
import subprocess
import argparse
# inspired by https://www.cyberciti.biz/faq/linux-ram-info-command/ and https://en.wikipedia.org/wiki/Hdparm
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--output', dest='path', help='Path for the output file')
parser.add_argument('--interface', dest='interfaceName', help='Interface name for the network information')
options = parser.parse_args()
command_HD = "sudo hdparm -I /dev/sda" # get hard drive info
command_mem = "free -h" # get memory size
command_mem2 = "dmidecode --type 17" # get memory speed -- 17 is SMIBIOS code for 'Memory Device' type
command_CPU = "cat /proc/cpuinfo" # get CPU info
command_net = "ethtool " + options.interfaceName # get network info -- get interface name from 'ifconfig'
file = open(options.path, 'a')
subprocess.call("echo '#### CPU info #### \n'", shell=True, stdout=file)
subprocess.call(command_CPU, shell=True, stdout=file)
subprocess.call("echo '\n \n#### memory size #### \n'", shell=True, stdout=file)
subprocess.call(command_mem, shell=True, stdout=file)
subprocess.call("echo '\n \n#### memory info #### \n'", shell=True, stdout=file)
subprocess.call(command_mem2, shell=True, stdout=file)
subprocess.call("echo '\n \n#### disk info #### \n'", shell=True, stdout=file)
subprocess.call(command_HD, shell=True, stdout=file)
subprocess.call("echo '\n \n#### network info #### \n'", shell=True, stdout=file)
subprocess.call(command_net, shell=True, stdout=file)
file.close()
if __name__ == '__main__':
main()
|
Add script to get machine characteristics#!/usr/bin/env python
import subprocess
import argparse
# inspired by https://www.cyberciti.biz/faq/linux-ram-info-command/ and https://en.wikipedia.org/wiki/Hdparm
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--output', dest='path', help='Path for the output file')
parser.add_argument('--interface', dest='interfaceName', help='Interface name for the network information')
options = parser.parse_args()
command_HD = "sudo hdparm -I /dev/sda" # get hard drive info
command_mem = "free -h" # get memory size
command_mem2 = "dmidecode --type 17" # get memory speed -- 17 is SMIBIOS code for 'Memory Device' type
command_CPU = "cat /proc/cpuinfo" # get CPU info
command_net = "ethtool " + options.interfaceName # get network info -- get interface name from 'ifconfig'
file = open(options.path, 'a')
subprocess.call("echo '#### CPU info #### \n'", shell=True, stdout=file)
subprocess.call(command_CPU, shell=True, stdout=file)
subprocess.call("echo '\n \n#### memory size #### \n'", shell=True, stdout=file)
subprocess.call(command_mem, shell=True, stdout=file)
subprocess.call("echo '\n \n#### memory info #### \n'", shell=True, stdout=file)
subprocess.call(command_mem2, shell=True, stdout=file)
subprocess.call("echo '\n \n#### disk info #### \n'", shell=True, stdout=file)
subprocess.call(command_HD, shell=True, stdout=file)
subprocess.call("echo '\n \n#### network info #### \n'", shell=True, stdout=file)
subprocess.call(command_net, shell=True, stdout=file)
file.close()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to get machine characteristics<commit_after>#!/usr/bin/env python
import subprocess
import argparse
# inspired by https://www.cyberciti.biz/faq/linux-ram-info-command/ and https://en.wikipedia.org/wiki/Hdparm
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--output', dest='path', help='Path for the output file')
parser.add_argument('--interface', dest='interfaceName', help='Interface name for the network information')
options = parser.parse_args()
command_HD = "sudo hdparm -I /dev/sda" # get hard drive info
command_mem = "free -h" # get memory size
command_mem2 = "dmidecode --type 17" # get memory speed -- 17 is SMIBIOS code for 'Memory Device' type
command_CPU = "cat /proc/cpuinfo" # get CPU info
command_net = "ethtool " + options.interfaceName # get network info -- get interface name from 'ifconfig'
file = open(options.path, 'a')
subprocess.call("echo '#### CPU info #### \n'", shell=True, stdout=file)
subprocess.call(command_CPU, shell=True, stdout=file)
subprocess.call("echo '\n \n#### memory size #### \n'", shell=True, stdout=file)
subprocess.call(command_mem, shell=True, stdout=file)
subprocess.call("echo '\n \n#### memory info #### \n'", shell=True, stdout=file)
subprocess.call(command_mem2, shell=True, stdout=file)
subprocess.call("echo '\n \n#### disk info #### \n'", shell=True, stdout=file)
subprocess.call(command_HD, shell=True, stdout=file)
subprocess.call("echo '\n \n#### network info #### \n'", shell=True, stdout=file)
subprocess.call(command_net, shell=True, stdout=file)
file.close()
if __name__ == '__main__':
main()
|
|
9dfe9061f6294918c0c52594617db595bd0baf17
|
ancillary_data/IRAM30m_CO21/14B-088/make_14B-088_HI_smoothed_output.py
|
ancillary_data/IRAM30m_CO21/14B-088/make_14B-088_HI_smoothed_output.py
|
'''
Make a version of the CO(2-1) data at the resolution of the 14B-088 HI data
'''

from spectral_cube import SpectralCube, Projection
from spectral_cube.cube_utils import largest_beam
from astropy.io import fits
import os

from cube_analysis import run_pipeline

from paths import fourteenB_HI_file_dict, iram_co21_data_path

vla_cube = SpectralCube.read(fourteenB_HI_file_dict["Cube"])
co21_cube = SpectralCube.read(iram_co21_data_path("m33.co21_iram.fits"))

# Smooth to the largest beam (they differ by tiny fractions anyway)
large_beam = largest_beam(vla_cube.beams)
# smoothed_cube = co21_cube.convolve_to(large_beam)

# Save to its own folder
if not os.path.exists(iram_co21_data_path("14B-088", no_check=True)):
    os.mkdir(iram_co21_data_path("14B-088", no_check=True))

smooth_name = iram_co21_data_path("14B-088/m33.co21_iram.14B-088_HI_smoothed.fits", no_check=True)
# smoothed_cube.write(smooth_name)

# Now smooth the noise map
co21_noise = Projection.from_hdu(fits.open(iram_co21_data_path("m33.rms.fits"))[0])
# co21_noise_smoothed = co21_noise.convolve_to(large_beam)

smooth_noise_name = iram_co21_data_path("14B-088/m33.rms.14B-088_HI_smoothed.fits", no_check=True)
# co21_noise_smoothed.write(smooth_noise_name)

# Find a signal mask and derive moment arrays
run_pipeline(smooth_name, iram_co21_data_path("14B-088"),
             masking_kwargs={"method": "ppv_dilation",
                             "save_cube": True,
                             "noise_map": smooth_noise_name,
                             "min_sig": 3,
                             "max_sig": 5,
                             "min_pix": 27,
                             },
             moment_kwargs={"num_cores": 6,
                            "verbose": True})
|
Add making a signal mask and moment arrays for the CO smoothed cube
|
Add making a signal mask and moment arrays for the CO smoothed cube
|
Python
|
mit
|
e-koch/VLA_Lband,e-koch/VLA_Lband
|
Add making a signal mask and moment arrays for the CO smoothed cube
|
'''
Make a version of the CO(2-1) data at the resolution of the 14B-088 HI data
'''
from spectral_cube import SpectralCube, Projection
from spectral_cube.cube_utils import largest_beam
from astropy.io import fits
import os
from cube_analysis import run_pipeline
from paths import fourteenB_HI_file_dict, iram_co21_data_path
vla_cube = SpectralCube.read(fourteenB_HI_file_dict["Cube"])
co21_cube = SpectralCube.read(iram_co21_data_path("m33.co21_iram.fits"))
# Smooth to the largest beam (they differ by tiny fractions anyway)
large_beam = largest_beam(vla_cube.beams)
# smoothed_cube = co21_cube.convolve_to(large_beam)
# Save to its own folder
if not os.path.exists(iram_co21_data_path("14B-088", no_check=True)):
os.mkdir(iram_co21_data_path("14B-088", no_check=True))
smooth_name = iram_co21_data_path("14B-088/m33.co21_iram.14B-088_HI_smoothed.fits", no_check=True)
# smoothed_cube.write(smooth_name)
# Now smooth the noise map
co21_noise = Projection.from_hdu(fits.open(iram_co21_data_path("m33.rms.fits"))[0])
# co21_noise_smoothed = co21_noise.convolve_to(large_beam)
smooth_noise_name = iram_co21_data_path("14B-088/m33.rms.14B-088_HI_smoothed.fits", no_check=True)
# co21_noise_smoothed.write(smooth_noise_name)
# Find a signal mask and derive moment arrays
run_pipeline(smooth_name, iram_co21_data_path("14B-088"),
masking_kwargs={"method": "ppv_dilation",
"save_cube": True,
"noise_map": smooth_noise_name,
"min_sig": 3,
"max_sig": 5,
"min_pix": 27,
},
moment_kwargs={"num_cores": 6,
"verbose": True})
|
<commit_before><commit_msg>Add making a signal mask and moment arrays for the CO smoothed cube<commit_after>
|
'''
Make a version of the CO(2-1) data at the resolution of the 14B-088 HI data
'''
from spectral_cube import SpectralCube, Projection
from spectral_cube.cube_utils import largest_beam
from astropy.io import fits
import os
from cube_analysis import run_pipeline
from paths import fourteenB_HI_file_dict, iram_co21_data_path
vla_cube = SpectralCube.read(fourteenB_HI_file_dict["Cube"])
co21_cube = SpectralCube.read(iram_co21_data_path("m33.co21_iram.fits"))
# Smooth to the largest beam (they differ by tiny fractions anyway)
large_beam = largest_beam(vla_cube.beams)
# smoothed_cube = co21_cube.convolve_to(large_beam)
# Save to its own folder
if not os.path.exists(iram_co21_data_path("14B-088", no_check=True)):
os.mkdir(iram_co21_data_path("14B-088", no_check=True))
smooth_name = iram_co21_data_path("14B-088/m33.co21_iram.14B-088_HI_smoothed.fits", no_check=True)
# smoothed_cube.write(smooth_name)
# Now smooth the noise map
co21_noise = Projection.from_hdu(fits.open(iram_co21_data_path("m33.rms.fits"))[0])
# co21_noise_smoothed = co21_noise.convolve_to(large_beam)
smooth_noise_name = iram_co21_data_path("14B-088/m33.rms.14B-088_HI_smoothed.fits", no_check=True)
# co21_noise_smoothed.write(smooth_noise_name)
# Find a signal mask and derive moment arrays
run_pipeline(smooth_name, iram_co21_data_path("14B-088"),
masking_kwargs={"method": "ppv_dilation",
"save_cube": True,
"noise_map": smooth_noise_name,
"min_sig": 3,
"max_sig": 5,
"min_pix": 27,
},
moment_kwargs={"num_cores": 6,
"verbose": True})
|
Add making a signal mask and moment arrays for the CO smoothed cube
'''
Make a version of the CO(2-1) data at the resolution of the 14B-088 HI data
'''
from spectral_cube import SpectralCube, Projection
from spectral_cube.cube_utils import largest_beam
from astropy.io import fits
import os
from cube_analysis import run_pipeline
from paths import fourteenB_HI_file_dict, iram_co21_data_path
vla_cube = SpectralCube.read(fourteenB_HI_file_dict["Cube"])
co21_cube = SpectralCube.read(iram_co21_data_path("m33.co21_iram.fits"))
# Smooth to the largest beam (they differ by tiny fractions anyway)
large_beam = largest_beam(vla_cube.beams)
# smoothed_cube = co21_cube.convolve_to(large_beam)
# Save to its own folder
if not os.path.exists(iram_co21_data_path("14B-088", no_check=True)):
os.mkdir(iram_co21_data_path("14B-088", no_check=True))
smooth_name = iram_co21_data_path("14B-088/m33.co21_iram.14B-088_HI_smoothed.fits", no_check=True)
# smoothed_cube.write(smooth_name)
# Now smooth the noise map
co21_noise = Projection.from_hdu(fits.open(iram_co21_data_path("m33.rms.fits"))[0])
# co21_noise_smoothed = co21_noise.convolve_to(large_beam)
smooth_noise_name = iram_co21_data_path("14B-088/m33.rms.14B-088_HI_smoothed.fits", no_check=True)
# co21_noise_smoothed.write(smooth_noise_name)
# Find a signal mask and derive moment arrays
run_pipeline(smooth_name, iram_co21_data_path("14B-088"),
masking_kwargs={"method": "ppv_dilation",
"save_cube": True,
"noise_map": smooth_noise_name,
"min_sig": 3,
"max_sig": 5,
"min_pix": 27,
},
moment_kwargs={"num_cores": 6,
"verbose": True})
|
<commit_before><commit_msg>Add making a signal mask and moment arrays for the CO smoothed cube<commit_after>
'''
Make a version of the CO(2-1) data at the resolution of the 14B-088 HI data
'''
from spectral_cube import SpectralCube, Projection
from spectral_cube.cube_utils import largest_beam
from astropy.io import fits
import os
from cube_analysis import run_pipeline
from paths import fourteenB_HI_file_dict, iram_co21_data_path
vla_cube = SpectralCube.read(fourteenB_HI_file_dict["Cube"])
co21_cube = SpectralCube.read(iram_co21_data_path("m33.co21_iram.fits"))
# Smooth to the largest beam (they differ by tiny fractions anyway)
large_beam = largest_beam(vla_cube.beams)
# smoothed_cube = co21_cube.convolve_to(large_beam)
# Save to its own folder
if not os.path.exists(iram_co21_data_path("14B-088", no_check=True)):
os.mkdir(iram_co21_data_path("14B-088", no_check=True))
smooth_name = iram_co21_data_path("14B-088/m33.co21_iram.14B-088_HI_smoothed.fits", no_check=True)
# smoothed_cube.write(smooth_name)
# Now smooth the noise map
co21_noise = Projection.from_hdu(fits.open(iram_co21_data_path("m33.rms.fits"))[0])
# co21_noise_smoothed = co21_noise.convolve_to(large_beam)
smooth_noise_name = iram_co21_data_path("14B-088/m33.rms.14B-088_HI_smoothed.fits", no_check=True)
# co21_noise_smoothed.write(smooth_noise_name)
# Find a signal mask and derive moment arrays
run_pipeline(smooth_name, iram_co21_data_path("14B-088"),
masking_kwargs={"method": "ppv_dilation",
"save_cube": True,
"noise_map": smooth_noise_name,
"min_sig": 3,
"max_sig": 5,
"min_pix": 27,
},
moment_kwargs={"num_cores": 6,
"verbose": True})
|
|
e9607bbacb1e2775bbda6010b51a076a57b5ad09
|
make_body_part_mapping.py
|
make_body_part_mapping.py
|
"""Make a mapping from body part words to categories.
Make mapping <body part word> -> [historic words] based on Inger Leemans'
clustering.
Usage: python make_body_part_mapping.py
Requires files body_part_clusters_renaissance.csv,
body_part_clusters_classisism.csv, and body_part_clusters_enlightenment.csv to
be in the current directory.
Writes body_part_mapping.json to the current directory.
"""
import codecs
import json
def csv2mapping(file_name):
mapping = {}
with codecs.open(file_name, 'rb', 'utf-8') as f:
for line in f.readlines():
parts = line.split(';')
label = parts[0].lower()
if parts[2] != '':
if not mapping.get(label):
mapping[label] = []
for entry in parts[2:]:
if entry and entry != '\n':
words = entry.split('\t')
mapping[label].append(words[0])
return mapping
def merge_mappings(m1, m2):
for k, v in m2.iteritems():
if not m1.get(k):
m1[k] = v
else:
m1[k] = m1[k] + v
return m1
mapping_r = csv2mapping('body_part_clusters_renaissance.csv')
mapping_c = csv2mapping('body_part_clusters_classisism.csv')
mapping_e = csv2mapping('body_part_clusters_enlightenment.csv')
mapping = merge_mappings(mapping_r, mapping_c)
mapping = merge_mappings(mapping, mapping_e)
for k, v in mapping.iteritems():
mapping[k] = list(set(mapping[k]))
with codecs.open('body_part_mapping.json', 'wb', 'utf-8') as f:
json.dump(mapping, f, indent=2)
|
Add script to save a mapping for body parts to categories
|
Add script to save a mapping for body parts to categories
The script creates a json file containing categories->(Dutch) words for
body parts. The mapping is based on Inger Leemans' division.
|
Python
|
apache-2.0
|
NLeSC/embodied-emotions-scripts,NLeSC/embodied-emotions-scripts
|
Add script to save a mapping for body parts to categories
The script creates a json file containing categories->(Dutch) words for
body parts. The mapping is based on Inger Leemans' division.
|
"""Make a mapping from body part words to categories.
Make mapping <body part word> -> [historic words] based on Inger Leemans'
clustering.
Usage: python make_body_part_mapping.py
Requires files body_part_clusters_renaissance.csv,
body_part_clusters_classisism.csv, and body_part_clusters_enlightenment.csv to
be in the current directory.
Writes body_part_mapping.json to the current directory.
"""
import codecs
import json
def csv2mapping(file_name):
mapping = {}
with codecs.open(file_name, 'rb', 'utf-8') as f:
for line in f.readlines():
parts = line.split(';')
label = parts[0].lower()
if parts[2] != '':
if not mapping.get(label):
mapping[label] = []
for entry in parts[2:]:
if entry and entry != '\n':
words = entry.split('\t')
mapping[label].append(words[0])
return mapping
def merge_mappings(m1, m2):
for k, v in m2.iteritems():
if not m1.get(k):
m1[k] = v
else:
m1[k] = m1[k] + v
return m1
mapping_r = csv2mapping('body_part_clusters_renaissance.csv')
mapping_c = csv2mapping('body_part_clusters_classisism.csv')
mapping_e = csv2mapping('body_part_clusters_enlightenment.csv')
mapping = merge_mappings(mapping_r, mapping_c)
mapping = merge_mappings(mapping, mapping_e)
for k, v in mapping.iteritems():
mapping[k] = list(set(mapping[k]))
with codecs.open('body_part_mapping.json', 'wb', 'utf-8') as f:
json.dump(mapping, f, indent=2)
|
<commit_before><commit_msg>Add script to save a mapping for body parts to categories
The script creates a json file containing categories->(Dutch) words for
body parts. The mapping is based on Inger Leemans' division.<commit_after>
|
"""Make a mapping from body part words to categories.
Make mapping <body part word> -> [historic words] based on Inger Leemans'
clustering.
Usage: python make_body_part_mapping.py
Requires files body_part_clusters_renaissance.csv,
body_part_clusters_classisism.csv, and body_part_clusters_enlightenment.csv to
be in the current directory.
Writes body_part_mapping.json to the current directory.
"""
import codecs
import json
def csv2mapping(file_name):
mapping = {}
with codecs.open(file_name, 'rb', 'utf-8') as f:
for line in f.readlines():
parts = line.split(';')
label = parts[0].lower()
if parts[2] != '':
if not mapping.get(label):
mapping[label] = []
for entry in parts[2:]:
if entry and entry != '\n':
words = entry.split('\t')
mapping[label].append(words[0])
return mapping
def merge_mappings(m1, m2):
for k, v in m2.iteritems():
if not m1.get(k):
m1[k] = v
else:
m1[k] = m1[k] + v
return m1
mapping_r = csv2mapping('body_part_clusters_renaissance.csv')
mapping_c = csv2mapping('body_part_clusters_classisism.csv')
mapping_e = csv2mapping('body_part_clusters_enlightenment.csv')
mapping = merge_mappings(mapping_r, mapping_c)
mapping = merge_mappings(mapping, mapping_e)
for k, v in mapping.iteritems():
mapping[k] = list(set(mapping[k]))
with codecs.open('body_part_mapping.json', 'wb', 'utf-8') as f:
json.dump(mapping, f, indent=2)
|
Add script to save a mapping for body parts to categories
The script creates a json file containing categories->(Dutch) words for
body parts. The mapping is based on Inger Leemans' division."""Make a mapping from body part words to categories.
Make mapping <body part word> -> [historic words] based on Inger Leemans'
clustering.
Usage: python make_body_part_mapping.py
Requires files body_part_clusters_renaissance.csv,
body_part_clusters_classisism.csv, and body_part_clusters_enlightenment.csv to
be in the current directory.
Writes body_part_mapping.json to the current directory.
"""
import codecs
import json
def csv2mapping(file_name):
mapping = {}
with codecs.open(file_name, 'rb', 'utf-8') as f:
for line in f.readlines():
parts = line.split(';')
label = parts[0].lower()
if parts[2] != '':
if not mapping.get(label):
mapping[label] = []
for entry in parts[2:]:
if entry and entry != '\n':
words = entry.split('\t')
mapping[label].append(words[0])
return mapping
def merge_mappings(m1, m2):
for k, v in m2.iteritems():
if not m1.get(k):
m1[k] = v
else:
m1[k] = m1[k] + v
return m1
mapping_r = csv2mapping('body_part_clusters_renaissance.csv')
mapping_c = csv2mapping('body_part_clusters_classisism.csv')
mapping_e = csv2mapping('body_part_clusters_enlightenment.csv')
mapping = merge_mappings(mapping_r, mapping_c)
mapping = merge_mappings(mapping, mapping_e)
for k, v in mapping.iteritems():
mapping[k] = list(set(mapping[k]))
with codecs.open('body_part_mapping.json', 'wb', 'utf-8') as f:
json.dump(mapping, f, indent=2)
|
<commit_before><commit_msg>Add script to save a mapping for body parts to categories
The script creates a json file containing categories->(Dutch) words for
body parts. The mapping is based on Inger Leemans' division.<commit_after>"""Make a mapping from body part words to categories.
Make mapping <body part word> -> [historic words] based on Inger Leemans'
clustering.
Usage: python make_body_part_mapping.py
Requires files body_part_clusters_renaissance.csv,
body_part_clusters_classisism.csv, and body_part_clusters_enlightenment.csv to
be in the current directory.
Writes body_part_mapping.json to the current directory.
"""
import codecs
import json
def csv2mapping(file_name):
mapping = {}
with codecs.open(file_name, 'rb', 'utf-8') as f:
for line in f.readlines():
parts = line.split(';')
label = parts[0].lower()
if parts[2] != '':
if not mapping.get(label):
mapping[label] = []
for entry in parts[2:]:
if entry and entry != '\n':
words = entry.split('\t')
mapping[label].append(words[0])
return mapping
def merge_mappings(m1, m2):
for k, v in m2.iteritems():
if not m1.get(k):
m1[k] = v
else:
m1[k] = m1[k] + v
return m1
mapping_r = csv2mapping('body_part_clusters_renaissance.csv')
mapping_c = csv2mapping('body_part_clusters_classisism.csv')
mapping_e = csv2mapping('body_part_clusters_enlightenment.csv')
mapping = merge_mappings(mapping_r, mapping_c)
mapping = merge_mappings(mapping, mapping_e)
for k, v in mapping.iteritems():
mapping[k] = list(set(mapping[k]))
with codecs.open('body_part_mapping.json', 'wb', 'utf-8') as f:
json.dump(mapping, f, indent=2)
|
|
b03805dbbae36743f57797026beeb6def96436a4
|
apps/explorer/tests/test_apps.py
|
apps/explorer/tests/test_apps.py
|
from django.apps import apps
from django.test import TestCase

from ..apps import ExplorerConfig


class ExplorerConfigTestCase(TestCase):

    def test_config(self):
        expected = 'explorer'
        self.assertEqual(ExplorerConfig.name, expected)

        expected = 'apps.explorer'
        self.assertEqual(apps.get_app_config('explorer').name, expected)
|
Add tests for the explorer app
|
Add tests for the explorer app
|
Python
|
bsd-3-clause
|
Candihub/pixel,Candihub/pixel,Candihub/pixel,Candihub/pixel,Candihub/pixel
|
Add tests for the explorer app
|
from django.apps import apps
from django.test import TestCase
from ..apps import ExplorerConfig
class ExplorerConfigTestCase(TestCase):
def test_config(self):
expected = 'explorer'
self.assertEqual(ExplorerConfig.name, expected)
expected = 'apps.explorer'
self.assertEqual(apps.get_app_config('explorer').name, expected)
|
<commit_before><commit_msg>Add tests for the explorer app<commit_after>
|
from django.apps import apps
from django.test import TestCase
from ..apps import ExplorerConfig
class ExplorerConfigTestCase(TestCase):
def test_config(self):
expected = 'explorer'
self.assertEqual(ExplorerConfig.name, expected)
expected = 'apps.explorer'
self.assertEqual(apps.get_app_config('explorer').name, expected)
|
Add tests for the explorer appfrom django.apps import apps
from django.test import TestCase
from ..apps import ExplorerConfig
class ExplorerConfigTestCase(TestCase):
def test_config(self):
expected = 'explorer'
self.assertEqual(ExplorerConfig.name, expected)
expected = 'apps.explorer'
self.assertEqual(apps.get_app_config('explorer').name, expected)
|
<commit_before><commit_msg>Add tests for the explorer app<commit_after>from django.apps import apps
from django.test import TestCase
from ..apps import ExplorerConfig
class ExplorerConfigTestCase(TestCase):
def test_config(self):
expected = 'explorer'
self.assertEqual(ExplorerConfig.name, expected)
expected = 'apps.explorer'
self.assertEqual(apps.get_app_config('explorer').name, expected)
|
|
45bdb3cce8e8417362a19ac9427b06c68910aa1f
|
profile_xf28id1/startup/80-areadetector.py
|
profile_xf28id1/startup/80-areadetector.py
|
from ophyd.controls.area_detector import (AreaDetectorFileStoreHDF5,
                                          AreaDetectorFileStoreTIFF)

shctl1 = EpicsSignal('XF:28IDC-ES:1{Det:PE1}cam1:ShutterMode', name='shctl1')
shctl2 = EpicsSignal('XF:28IDC-ES:1{Det:PE2}cam1:ShutterMode', name='shctl2')

pe1 = AreaDetectorFileStoreTIFF('XF:28IDC-ES:1{Det:PE1}', name='pe1',
                                stats=[],
                                ioc_file_path = 'X:/pe1_data',
                                file_path = '/home/xf28id1/pe1_data',
                                shutter=shctl1,
                                shutter_val=(1,0))

pe2 = AreaDetectorFileStoreTIFF('XF:28IDC-ES:1{Det:PE2}', name='pe2',
                                stats=[],
                                ioc_file_path = 'Z:/pe2_data',
                                file_path = '/home/xf28id1/pe2_data',
                                shutter=shctl2,
                                shutter_val=(1,0))
|
Add PE detectors pe1, pe2
|
Add PE detectors pe1, pe2
|
Python
|
bsd-2-clause
|
NSLS-II-XPD/ipython_ophyd,NSLS-II-XPD/ipython_ophyd,pavoljuhas/ipython_ophyd,pavoljuhas/ipython_ophyd
|
Add PE detectors pe1, pe2
|
from ophyd.controls.area_detector import (AreaDetectorFileStoreHDF5,
AreaDetectorFileStoreTIFF)
shctl1 = EpicsSignal('XF:28IDC-ES:1{Det:PE1}cam1:ShutterMode', name='shctl1')
shctl2 = EpicsSignal('XF:28IDC-ES:1{Det:PE2}cam1:ShutterMode', name='shctl2')
pe1 = AreaDetectorFileStoreTIFF('XF:28IDC-ES:1{Det:PE1}', name='pe1',
stats=[],
ioc_file_path = 'X:/pe1_data',
file_path = '/home/xf28id1/pe1_data',
shutter=shctl1,
shutter_val=(1,0))
pe2 = AreaDetectorFileStoreTIFF('XF:28IDC-ES:1{Det:PE2}', name='pe2',
stats=[],
ioc_file_path = 'Z:/pe2_data',
file_path = '/home/xf28id1/pe2_data',
shutter=shctl2,
shutter_val=(1,0))
|
<commit_before><commit_msg>Add PE detectors pe1, pe2<commit_after>
|
from ophyd.controls.area_detector import (AreaDetectorFileStoreHDF5,
AreaDetectorFileStoreTIFF)
shctl1 = EpicsSignal('XF:28IDC-ES:1{Det:PE1}cam1:ShutterMode', name='shctl1')
shctl2 = EpicsSignal('XF:28IDC-ES:1{Det:PE2}cam1:ShutterMode', name='shctl2')
pe1 = AreaDetectorFileStoreTIFF('XF:28IDC-ES:1{Det:PE1}', name='pe1',
stats=[],
ioc_file_path = 'X:/pe1_data',
file_path = '/home/xf28id1/pe1_data',
shutter=shctl1,
shutter_val=(1,0))
pe2 = AreaDetectorFileStoreTIFF('XF:28IDC-ES:1{Det:PE2}', name='pe2',
stats=[],
ioc_file_path = 'Z:/pe2_data',
file_path = '/home/xf28id1/pe2_data',
shutter=shctl2,
shutter_val=(1,0))
|
Add PE detectors pe1, pe2from ophyd.controls.area_detector import (AreaDetectorFileStoreHDF5,
AreaDetectorFileStoreTIFF)
shctl1 = EpicsSignal('XF:28IDC-ES:1{Det:PE1}cam1:ShutterMode', name='shctl1')
shctl2 = EpicsSignal('XF:28IDC-ES:1{Det:PE2}cam1:ShutterMode', name='shctl2')
pe1 = AreaDetectorFileStoreTIFF('XF:28IDC-ES:1{Det:PE1}', name='pe1',
stats=[],
ioc_file_path = 'X:/pe1_data',
file_path = '/home/xf28id1/pe1_data',
shutter=shctl1,
shutter_val=(1,0))
pe2 = AreaDetectorFileStoreTIFF('XF:28IDC-ES:1{Det:PE2}', name='pe2',
stats=[],
ioc_file_path = 'Z:/pe2_data',
file_path = '/home/xf28id1/pe2_data',
shutter=shctl2,
shutter_val=(1,0))
|
<commit_before><commit_msg>Add PE detectors pe1, pe2<commit_after>from ophyd.controls.area_detector import (AreaDetectorFileStoreHDF5,
AreaDetectorFileStoreTIFF)
shctl1 = EpicsSignal('XF:28IDC-ES:1{Det:PE1}cam1:ShutterMode', name='shctl1')
shctl2 = EpicsSignal('XF:28IDC-ES:1{Det:PE2}cam1:ShutterMode', name='shctl2')
pe1 = AreaDetectorFileStoreTIFF('XF:28IDC-ES:1{Det:PE1}', name='pe1',
stats=[],
ioc_file_path = 'X:/pe1_data',
file_path = '/home/xf28id1/pe1_data',
shutter=shctl1,
shutter_val=(1,0))
pe2 = AreaDetectorFileStoreTIFF('XF:28IDC-ES:1{Det:PE2}', name='pe2',
stats=[],
ioc_file_path = 'Z:/pe2_data',
file_path = '/home/xf28id1/pe2_data',
shutter=shctl2,
shutter_val=(1,0))
|
|
03791f4768866b199f0ef840a0ac64849def85e3
|
scripts/ingestors/awos/parse_monthly_maint.py
|
scripts/ingestors/awos/parse_monthly_maint.py
|
"""
Parse the monthly maint file I get from the DOT
id | integer | not null default nextval('iem_calibrati
on_id_seq'::regclass)
station | character varying(10) |
portfolio | character varying(10) |
valid | timestamp with time zone |
parameter | character varying(10) |
adjustment | real |
final | real |
comments | text |
"""
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
import sys
import re
import mx.DateTime
import psycopg2
PORTFOLIO = psycopg2.connect("dbname=portfolio host=meteor.geol.iastate.edu user=nobody")
pcursor = PORTFOLIO.cursor()
CALINFO = re.compile(".*Calibrated? T/DP: AWOS ([0-9\-\.]+)/([0-9\-\.]+) Std ([0-9\-\.]+)/([0-9\-\.]+)")
data = sys.stdin.read()
for line in data.split("\n"):
tokens = line.split(",")
if len(tokens) != 6:
continue
faa = tokens[0]
if len(faa) != 3:
continue
date = mx.DateTime.strptime(tokens[1], '%d-%b-%y')
parts = re.findall(CALINFO, tokens[3])
if len(parts) == 0:
print bcolors.OKGREEN + tokens[3] + bcolors.ENDC
continue
sql = """INSERT into iem_calibration(station, portfolio, valid, parameter,
adjustment, final, comments) values (%s, 'iaawos', %s, %s, %s, %s, %s)"""
tempadj = float(parts[0][2]) - float(parts[0][0])
args = (faa, date.strftime("%Y-%m-%d"), 'tmpf', tempadj,
parts[0][2], tokens[3].replace('"', ''))
pcursor.execute(sql, args)
dewpadj = float(parts[0][3]) - float(parts[0][1])
args = (faa, date.strftime("%Y-%m-%d"), 'dwpf', float(parts[0][3]) - float(parts[0][1]),
parts[0][3], tokens[3].replace('"', ''))
pcursor.execute(sql, args)
print '--> %s [%s] TMPF: %s (%s) DWPF: %s (%s)' % (faa, tokens[1],
parts[0][2], tempadj,
parts[0][3], dewpadj)
if len(sys.argv) == 1:
print 'WARNING: Disabled, call with arbitrary argument to enable'
else:
pcursor.close()
PORTFOLIO.commit()
PORTFOLIO.close()
|
Add tool to parse AWOS maint records provided monthly
|
Add tool to parse AWOS maint records provided monthly
|
Python
|
mit
|
akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem
|
Add tool to parse AWOS maint records provided monthly
|
"""
Parse the monthly maint file I get from the DOT
id | integer | not null default nextval('iem_calibrati
on_id_seq'::regclass)
station | character varying(10) |
portfolio | character varying(10) |
valid | timestamp with time zone |
parameter | character varying(10) |
adjustment | real |
final | real |
comments | text |
"""
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
import sys
import re
import mx.DateTime
import psycopg2
PORTFOLIO = psycopg2.connect("dbname=portfolio host=meteor.geol.iastate.edu user=nobody")
pcursor = PORTFOLIO.cursor()
CALINFO = re.compile(".*Calibrated? T/DP: AWOS ([0-9\-\.]+)/([0-9\-\.]+) Std ([0-9\-\.]+)/([0-9\-\.]+)")
data = sys.stdin.read()
for line in data.split("\n"):
tokens = line.split(",")
if len(tokens) != 6:
continue
faa = tokens[0]
if len(faa) != 3:
continue
date = mx.DateTime.strptime(tokens[1], '%d-%b-%y')
parts = re.findall(CALINFO, tokens[3])
if len(parts) == 0:
print bcolors.OKGREEN + tokens[3] + bcolors.ENDC
continue
sql = """INSERT into iem_calibration(station, portfolio, valid, parameter,
adjustment, final, comments) values (%s, 'iaawos', %s, %s, %s, %s, %s)"""
tempadj = float(parts[0][2]) - float(parts[0][0])
args = (faa, date.strftime("%Y-%m-%d"), 'tmpf', tempadj,
parts[0][2], tokens[3].replace('"', ''))
pcursor.execute(sql, args)
dewpadj = float(parts[0][3]) - float(parts[0][1])
args = (faa, date.strftime("%Y-%m-%d"), 'dwpf', float(parts[0][3]) - float(parts[0][1]),
parts[0][3], tokens[3].replace('"', ''))
pcursor.execute(sql, args)
print '--> %s [%s] TMPF: %s (%s) DWPF: %s (%s)' % (faa, tokens[1],
parts[0][2], tempadj,
parts[0][3], dewpadj)
if len(sys.argv) == 1:
print 'WARNING: Disabled, call with arbitrary argument to enable'
else:
pcursor.close()
PORTFOLIO.commit()
PORTFOLIO.close()
|
<commit_before><commit_msg>Add tool to parse AWOS maint records provided monthly<commit_after>
|
"""
Parse the monthly maint file I get from the DOT
id | integer | not null default nextval('iem_calibrati
on_id_seq'::regclass)
station | character varying(10) |
portfolio | character varying(10) |
valid | timestamp with time zone |
parameter | character varying(10) |
adjustment | real |
final | real |
comments | text |
"""
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
import sys
import re
import mx.DateTime
import psycopg2
PORTFOLIO = psycopg2.connect("dbname=portfolio host=meteor.geol.iastate.edu user=nobody")
pcursor = PORTFOLIO.cursor()
CALINFO = re.compile(".*Calibrated? T/DP: AWOS ([0-9\-\.]+)/([0-9\-\.]+) Std ([0-9\-\.]+)/([0-9\-\.]+)")
data = sys.stdin.read()
for line in data.split("\n"):
tokens = line.split(",")
if len(tokens) != 6:
continue
faa = tokens[0]
if len(faa) != 3:
continue
date = mx.DateTime.strptime(tokens[1], '%d-%b-%y')
parts = re.findall(CALINFO, tokens[3])
if len(parts) == 0:
print bcolors.OKGREEN + tokens[3] + bcolors.ENDC
continue
sql = """INSERT into iem_calibration(station, portfolio, valid, parameter,
adjustment, final, comments) values (%s, 'iaawos', %s, %s, %s, %s, %s)"""
tempadj = float(parts[0][2]) - float(parts[0][0])
args = (faa, date.strftime("%Y-%m-%d"), 'tmpf', tempadj,
parts[0][2], tokens[3].replace('"', ''))
pcursor.execute(sql, args)
dewpadj = float(parts[0][3]) - float(parts[0][1])
args = (faa, date.strftime("%Y-%m-%d"), 'dwpf', float(parts[0][3]) - float(parts[0][1]),
parts[0][3], tokens[3].replace('"', ''))
pcursor.execute(sql, args)
print '--> %s [%s] TMPF: %s (%s) DWPF: %s (%s)' % (faa, tokens[1],
parts[0][2], tempadj,
parts[0][3], dewpadj)
if len(sys.argv) == 1:
print 'WARNING: Disabled, call with arbitrary argument to enable'
else:
pcursor.close()
PORTFOLIO.commit()
PORTFOLIO.close()
|
Add tool to parse AWOS maint records provided monthly
"""
Parse the monthly maint file I get from the DOT
id | integer | not null default nextval('iem_calibrati
on_id_seq'::regclass)
station | character varying(10) |
portfolio | character varying(10) |
valid | timestamp with time zone |
parameter | character varying(10) |
adjustment | real |
final | real |
comments | text |
"""
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
import sys
import re
import mx.DateTime
import psycopg2
PORTFOLIO = psycopg2.connect("dbname=portfolio host=meteor.geol.iastate.edu user=nobody")
pcursor = PORTFOLIO.cursor()
CALINFO = re.compile(".*Calibrated? T/DP: AWOS ([0-9\-\.]+)/([0-9\-\.]+) Std ([0-9\-\.]+)/([0-9\-\.]+)")
data = sys.stdin.read()
for line in data.split("\n"):
tokens = line.split(",")
if len(tokens) != 6:
continue
faa = tokens[0]
if len(faa) != 3:
continue
date = mx.DateTime.strptime(tokens[1], '%d-%b-%y')
parts = re.findall(CALINFO, tokens[3])
if len(parts) == 0:
print bcolors.OKGREEN + tokens[3] + bcolors.ENDC
continue
sql = """INSERT into iem_calibration(station, portfolio, valid, parameter,
adjustment, final, comments) values (%s, 'iaawos', %s, %s, %s, %s, %s)"""
tempadj = float(parts[0][2]) - float(parts[0][0])
args = (faa, date.strftime("%Y-%m-%d"), 'tmpf', tempadj,
parts[0][2], tokens[3].replace('"', ''))
pcursor.execute(sql, args)
dewpadj = float(parts[0][3]) - float(parts[0][1])
args = (faa, date.strftime("%Y-%m-%d"), 'dwpf', float(parts[0][3]) - float(parts[0][1]),
parts[0][3], tokens[3].replace('"', ''))
pcursor.execute(sql, args)
print '--> %s [%s] TMPF: %s (%s) DWPF: %s (%s)' % (faa, tokens[1],
parts[0][2], tempadj,
parts[0][3], dewpadj)
if len(sys.argv) == 1:
print 'WARNING: Disabled, call with arbitrary argument to enable'
else:
pcursor.close()
PORTFOLIO.commit()
PORTFOLIO.close()
|
<commit_before><commit_msg>Add tool to parse AWOS maint records provided monthly<commit_after>"""
Parse the monthly maint file I get from the DOT
id | integer | not null default nextval('iem_calibrati
on_id_seq'::regclass)
station | character varying(10) |
portfolio | character varying(10) |
valid | timestamp with time zone |
parameter | character varying(10) |
adjustment | real |
final | real |
comments | text |
"""
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
import sys
import re
import mx.DateTime
import psycopg2
PORTFOLIO = psycopg2.connect("dbname=portfolio host=meteor.geol.iastate.edu user=nobody")
pcursor = PORTFOLIO.cursor()
CALINFO = re.compile(".*Calibrated? T/DP: AWOS ([0-9\-\.]+)/([0-9\-\.]+) Std ([0-9\-\.]+)/([0-9\-\.]+)")
data = sys.stdin.read()
for line in data.split("\n"):
tokens = line.split(",")
if len(tokens) != 6:
continue
faa = tokens[0]
if len(faa) != 3:
continue
date = mx.DateTime.strptime(tokens[1], '%d-%b-%y')
parts = re.findall(CALINFO, tokens[3])
if len(parts) == 0:
print bcolors.OKGREEN + tokens[3] + bcolors.ENDC
continue
sql = """INSERT into iem_calibration(station, portfolio, valid, parameter,
adjustment, final, comments) values (%s, 'iaawos', %s, %s, %s, %s, %s)"""
tempadj = float(parts[0][2]) - float(parts[0][0])
args = (faa, date.strftime("%Y-%m-%d"), 'tmpf', tempadj,
parts[0][2], tokens[3].replace('"', ''))
pcursor.execute(sql, args)
dewpadj = float(parts[0][3]) - float(parts[0][1])
args = (faa, date.strftime("%Y-%m-%d"), 'dwpf', float(parts[0][3]) - float(parts[0][1]),
parts[0][3], tokens[3].replace('"', ''))
pcursor.execute(sql, args)
print '--> %s [%s] TMPF: %s (%s) DWPF: %s (%s)' % (faa, tokens[1],
parts[0][2], tempadj,
parts[0][3], dewpadj)
if len(sys.argv) == 1:
print 'WARNING: Disabled, call with arbitrary argument to enable'
else:
pcursor.close()
PORTFOLIO.commit()
PORTFOLIO.close()
|
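For reference, a minimal sketch (not part of the commit above) of how the CALINFO pattern extracts the calibration values from a maintenance comment; the sample string below is hypothetical and only illustrates the regex groups, and it uses print() even though the original script is Python 2.

import re

# Same pattern as in the script above; the four groups are AWOS temperature,
# AWOS dew point, standard (reference) temperature and standard dew point.
CALINFO = re.compile(r".*Calibrated? T/DP: AWOS ([0-9\-\.]+)/([0-9\-\.]+) Std ([0-9\-\.]+)/([0-9\-\.]+)")

# Hypothetical maintenance comment, for illustration only.
comment = "Calibrated T/DP: AWOS 71.2/55.0 Std 72.0/54.5"
parts = re.findall(CALINFO, comment)
if parts:
    awos_t, awos_dp, std_t, std_dp = [float(v) for v in parts[0]]
    # Adjustment is the standard reading minus the AWOS reading, as in the script.
    print("tmpf adjustment: %.1f" % (std_t - awos_t))   # 0.8
    print("dwpf adjustment: %.1f" % (std_dp - awos_dp))  # -0.5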
|
b5468138da2947d582e37da8bd5e3d4aa5838eac
|
taggert/imagemarker.py
|
taggert/imagemarker.py
|
from gi.repository import Champlain
class ImageMarker(Champlain.Point):
def __init__(self, treeiter, filename, lat, lon, clicked):
Champlain.Point.__init__(self)
self.filename = filename
self.treeiter = treeiter
self.set_location(lat, lon)
self.set_selectable(True)
#self.set_draggable(True)
self.set_property('reactive', True)
self.connect('button-press', clicked)
|
Add ImageMarker class, overriding Champlain.Point
|
Add ImageMarker class, overriding Champlain.Point
|
Python
|
apache-2.0
|
tinuzz/taggert
|
Add ImageMarker class, overriding Champlain.Point
|
from gi.repository import Champlain
class ImageMarker(Champlain.Point):
def __init__(self, treeiter, filename, lat, lon, clicked):
Champlain.Point.__init__(self)
self.filename = filename
self.treeiter = treeiter
self.set_location(lat, lon)
self.set_selectable(True)
#self.set_draggable(True)
self.set_property('reactive', True)
self.connect('button-press', clicked)
|
<commit_before><commit_msg>Add ImageMarker class, overriding Champlain.Point<commit_after>
|
from gi.repository import Champlain
class ImageMarker(Champlain.Point):
def __init__(self, treeiter, filename, lat, lon, clicked):
Champlain.Point.__init__(self)
self.filename = filename
self.treeiter = treeiter
self.set_location(lat, lon)
self.set_selectable(True)
#self.set_draggable(True)
self.set_property('reactive', True)
self.connect('button-press', clicked)
|
Add ImageMarker class, overriding Champlain.Point
from gi.repository import Champlain
class ImageMarker(Champlain.Point):
def __init__(self, treeiter, filename, lat, lon, clicked):
Champlain.Point.__init__(self)
self.filename = filename
self.treeiter = treeiter
self.set_location(lat, lon)
self.set_selectable(True)
#self.set_draggable(True)
self.set_property('reactive', True)
self.connect('button-press', clicked)
|
<commit_before><commit_msg>Add ImageMarker class, overriding Champlain.Point<commit_after>from gi.repository import Champlain
class ImageMarker(Champlain.Point):
def __init__(self, treeiter, filename, lat, lon, clicked):
Champlain.Point.__init__(self)
self.filename = filename
self.treeiter = treeiter
self.set_location(lat, lon)
self.set_selectable(True)
#self.set_draggable(True)
self.set_property('reactive', True)
self.connect('button-press', clicked)
|
|
3c707036b8c7d0f85e13f7d8294051cf6f04819f
|
dump_data.py
|
dump_data.py
|
from pmg_backend import db
from pmg_backend.models import *
import json
import os
bills = Bill.query.filter_by(is_deleted=False).all()
print len(bills)
out = []
for bill in bills:
tmp_bill = {
'name': bill.name,
'code': bill.code,
'bill_type': bill.bill_type,
'year': bill.year,
'number': bill.number,
'status': bill.status,
'entries': [],
}
for entry in bill.entries:
if not entry.is_deleted:
tmp_bill['entries'].append({
'date': str(entry.date),
'type': entry.type,
'title': entry.title,
'description': entry.description,
'location': entry.location,
'agent': {
'type': entry.agent.type if entry.agent else None,
'name': entry.agent.name.strip() if entry.agent else None,
},
'url': entry.url,
})
out.append(tmp_bill)
# print json.dumps(tmp_entry, indent=4)
# print ""
with open("billtracker_dump.txt", "w") as text_file:
text_file.write(json.dumps(out, indent=4))
|
Add script for dumping database to txt.
|
Add script for dumping database to txt.
|
Python
|
apache-2.0
|
Code4SA/pmgbilltracker,Code4SA/pmgbilltracker
|
Add script for dumping database to txt.
|
from pmg_backend import db
from pmg_backend.models import *
import json
import os
bills = Bill.query.filter_by(is_deleted=False).all()
print len(bills)
out = []
for bill in bills:
tmp_bill = {
'name': bill.name,
'code': bill.code,
'bill_type': bill.bill_type,
'year': bill.year,
'number': bill.number,
'status': bill.status,
'entries': [],
}
for entry in bill.entries:
if not entry.is_deleted:
tmp_bill['entries'].append({
'date': str(entry.date),
'type': entry.type,
'title': entry.title,
'description': entry.description,
'location': entry.location,
'agent': {
'type': entry.agent.type if entry.agent else None,
'name': entry.agent.name.strip() if entry.agent else None,
},
'url': entry.url,
})
out.append(tmp_bill)
# print json.dumps(tmp_entry, indent=4)
# print ""
with open("billtracker_dump.txt", "w") as text_file:
text_file.write(json.dumps(out, indent=4))
|
<commit_before><commit_msg>Add script for dumping database to txt.<commit_after>
|
from pmg_backend import db
from pmg_backend.models import *
import json
import os
bills = Bill.query.filter_by(is_deleted=False).all()
print len(bills)
out = []
for bill in bills:
tmp_bill = {
'name': bill.name,
'code': bill.code,
'bill_type': bill.bill_type,
'year': bill.year,
'number': bill.number,
'status': bill.status,
'entries': [],
}
for entry in bill.entries:
if not entry.is_deleted:
tmp_bill['entries'].append({
'date': str(entry.date),
'type': entry.type,
'title': entry.title,
'description': entry.description,
'location': entry.location,
'agent': {
'type': entry.agent.type if entry.agent else None,
'name': entry.agent.name.strip() if entry.agent else None,
},
'url': entry.url,
})
out.append(tmp_bill)
# print json.dumps(tmp_entry, indent=4)
# print ""
with open("billtracker_dump.txt", "w") as text_file:
text_file.write(json.dumps(out, indent=4))
|
Add script for dumping database to txt.
from pmg_backend import db
from pmg_backend.models import *
import json
import os
bills = Bill.query.filter_by(is_deleted=False).all()
print len(bills)
out = []
for bill in bills:
tmp_bill = {
'name': bill.name,
'code': bill.code,
'bill_type': bill.bill_type,
'year': bill.year,
'number': bill.number,
'status': bill.status,
'entries': [],
}
for entry in bill.entries:
if not entry.is_deleted:
tmp_bill['entries'].append({
'date': str(entry.date),
'type': entry.type,
'title': entry.title,
'description': entry.description,
'location': entry.location,
'agent': {
'type': entry.agent.type if entry.agent else None,
'name': entry.agent.name.strip() if entry.agent else None,
},
'url': entry.url,
})
out.append(tmp_bill)
# print json.dumps(tmp_entry, indent=4)
# print ""
with open("billtracker_dump.txt", "w") as text_file:
text_file.write(json.dumps(out, indent=4))
|
<commit_before><commit_msg>Add script for dumping database to txt.<commit_after>from pmg_backend import db
from pmg_backend.models import *
import json
import os
bills = Bill.query.filter_by(is_deleted=False).all()
print len(bills)
out = []
for bill in bills:
tmp_bill = {
'name': bill.name,
'code': bill.code,
'bill_type': bill.bill_type,
'year': bill.year,
'number': bill.number,
'status': bill.status,
'entries': [],
}
for entry in bill.entries:
if not entry.is_deleted:
tmp_bill['entries'].append({
'date': str(entry.date),
'type': entry.type,
'title': entry.title,
'description': entry.description,
'location': entry.location,
'agent': {
'type': entry.agent.type if entry.agent else None,
'name': entry.agent.name.strip() if entry.agent else None,
},
'url': entry.url,
})
out.append(tmp_bill)
# print json.dumps(tmp_entry, indent=4)
# print ""
with open("billtracker_dump.txt", "w") as text_file:
text_file.write(json.dumps(out, indent=4))
|
|
dbd1708c562c698dd6ea53fb8276ceaf9820045b
|
test_project/test_app/tests/test_models.py
|
test_project/test_app/tests/test_models.py
|
from django.contrib.gis.geos import Point
from django.test import TestCase
from cities import models
class SlugModelTest(object):
"""
Common tests for SlugModel subclasses.
"""
def instantiate(self):
"""
Implement this to return a valid instance of the model under test.
"""
raise NotImplementedError
def test_save(self):
instance = self.instantiate()
instance.save()
def test_save_force_insert(self):
"""
Regression test: save() with force_insert=True should work.
"""
instance = self.instantiate()
instance.save(force_insert=True)
class ContinentTestCase(SlugModelTest, TestCase):
def instantiate(self):
return models.Continent()
class CountryTestCase(SlugModelTest, TestCase):
def instantiate(self):
return models.Country(
population=0,
)
class RegionTestCase(SlugModelTest, TestCase):
def instantiate(self):
country = models.Country(
population=0
)
country.save()
return models.Region(
country=country,
)
class SubregionTestCase(SlugModelTest, TestCase):
def instantiate(self):
country = models.Country(
population=0
)
country.save()
region = models.Region(
country=country,
)
region.save()
return models.Subregion(
region=region,
)
class CityTestCase(SlugModelTest, TestCase):
def instantiate(self):
country = models.Country(
population=0
)
country.save()
return models.City(
country=country,
location=Point(0, 0),
population=0,
)
class DistrictTestCase(SlugModelTest, TestCase):
def instantiate(self):
country = models.Country(
population=0
)
country.save()
city = models.City(
country=country,
location=Point(0, 0),
population=0,
)
city.save()
return models.District(
location=Point(0, 0),
population=0,
city=city,
)
class AlternativeNameTestCase(SlugModelTest, TestCase):
def instantiate(self):
return models.AlternativeName()
class PostalCodeTestCase(SlugModelTest, TestCase):
def instantiate(self):
country = models.Country(
population=0
)
country.save()
return models.PostalCode(
location=Point(0, 0),
country=country,
)
|
Add regression tests for saving with force_insert
|
Add regression tests for saving with force_insert
|
Python
|
mit
|
coderholic/django-cities,coderholic/django-cities,coderholic/django-cities
|
Add regression tests for saving with force_insert
|
from django.contrib.gis.geos import Point
from django.test import TestCase
from cities import models
class SlugModelTest(object):
"""
Common tests for SlugModel subclasses.
"""
def instantiate(self):
"""
Implement this to return a valid instance of the model under test.
"""
raise NotImplementedError
def test_save(self):
instance = self.instantiate()
instance.save()
def test_save_force_insert(self):
"""
Regression test: save() with force_insert=True should work.
"""
instance = self.instantiate()
instance.save(force_insert=True)
class ContinentTestCase(SlugModelTest, TestCase):
def instantiate(self):
return models.Continent()
class CountryTestCase(SlugModelTest, TestCase):
def instantiate(self):
return models.Country(
population=0,
)
class RegionTestCase(SlugModelTest, TestCase):
def instantiate(self):
country = models.Country(
population=0
)
country.save()
return models.Region(
country=country,
)
class SubregionTestCase(SlugModelTest, TestCase):
def instantiate(self):
country = models.Country(
population=0
)
country.save()
region = models.Region(
country=country,
)
region.save()
return models.Subregion(
region=region,
)
class CityTestCase(SlugModelTest, TestCase):
def instantiate(self):
country = models.Country(
population=0
)
country.save()
return models.City(
country=country,
location=Point(0, 0),
population=0,
)
class DistrictTestCase(SlugModelTest, TestCase):
def instantiate(self):
country = models.Country(
population=0
)
country.save()
city = models.City(
country=country,
location=Point(0, 0),
population=0,
)
city.save()
return models.District(
location=Point(0, 0),
population=0,
city=city,
)
class AlternativeNameTestCase(SlugModelTest, TestCase):
def instantiate(self):
return models.AlternativeName()
class PostalCodeTestCase(SlugModelTest, TestCase):
def instantiate(self):
country = models.Country(
population=0
)
country.save()
return models.PostalCode(
location=Point(0, 0),
country=country,
)
|
<commit_before><commit_msg>Add regression tests for saving with force_insert<commit_after>
|
from django.contrib.gis.geos import Point
from django.test import TestCase
from cities import models
class SlugModelTest(object):
"""
Common tests for SlugModel subclasses.
"""
def instantiate(self):
"""
Implement this to return a valid instance of the model under test.
"""
raise NotImplementedError
def test_save(self):
instance = self.instantiate()
instance.save()
def test_save_force_insert(self):
"""
Regression test: save() with force_insert=True should work.
"""
instance = self.instantiate()
instance.save(force_insert=True)
class ContinentTestCase(SlugModelTest, TestCase):
def instantiate(self):
return models.Continent()
class CountryTestCase(SlugModelTest, TestCase):
def instantiate(self):
return models.Country(
population=0,
)
class RegionTestCase(SlugModelTest, TestCase):
def instantiate(self):
country = models.Country(
population=0
)
country.save()
return models.Region(
country=country,
)
class SubregionTestCase(SlugModelTest, TestCase):
def instantiate(self):
country = models.Country(
population=0
)
country.save()
region = models.Region(
country=country,
)
region.save()
return models.Subregion(
region=region,
)
class CityTestCase(SlugModelTest, TestCase):
def instantiate(self):
country = models.Country(
population=0
)
country.save()
return models.City(
country=country,
location=Point(0, 0),
population=0,
)
class DistrictTestCase(SlugModelTest, TestCase):
def instantiate(self):
country = models.Country(
population=0
)
country.save()
city = models.City(
country=country,
location=Point(0, 0),
population=0,
)
city.save()
return models.District(
location=Point(0, 0),
population=0,
city=city,
)
class AlternativeNameTestCase(SlugModelTest, TestCase):
def instantiate(self):
return models.AlternativeName()
class PostalCodeTestCase(SlugModelTest, TestCase):
def instantiate(self):
country = models.Country(
population=0
)
country.save()
return models.PostalCode(
location=Point(0, 0),
country=country,
)
|
Add regression tests for saving with force_insert
from django.contrib.gis.geos import Point
from django.test import TestCase
from cities import models
class SlugModelTest(object):
"""
Common tests for SlugModel subclasses.
"""
def instantiate(self):
"""
Implement this to return a valid instance of the model under test.
"""
raise NotImplementedError
def test_save(self):
instance = self.instantiate()
instance.save()
def test_save_force_insert(self):
"""
Regression test: save() with force_insert=True should work.
"""
instance = self.instantiate()
instance.save(force_insert=True)
class ContinentTestCase(SlugModelTest, TestCase):
def instantiate(self):
return models.Continent()
class CountryTestCase(SlugModelTest, TestCase):
def instantiate(self):
return models.Country(
population=0,
)
class RegionTestCase(SlugModelTest, TestCase):
def instantiate(self):
country = models.Country(
population=0
)
country.save()
return models.Region(
country=country,
)
class SubregionTestCase(SlugModelTest, TestCase):
def instantiate(self):
country = models.Country(
population=0
)
country.save()
region = models.Region(
country=country,
)
region.save()
return models.Subregion(
region=region,
)
class CityTestCase(SlugModelTest, TestCase):
def instantiate(self):
country = models.Country(
population=0
)
country.save()
return models.City(
country=country,
location=Point(0, 0),
population=0,
)
class DistrictTestCase(SlugModelTest, TestCase):
def instantiate(self):
country = models.Country(
population=0
)
country.save()
city = models.City(
country=country,
location=Point(0, 0),
population=0,
)
city.save()
return models.District(
location=Point(0, 0),
population=0,
city=city,
)
class AlternativeNameTestCase(SlugModelTest, TestCase):
def instantiate(self):
return models.AlternativeName()
class PostalCodeTestCase(SlugModelTest, TestCase):
def instantiate(self):
country = models.Country(
population=0
)
country.save()
return models.PostalCode(
location=Point(0, 0),
country=country,
)
|
<commit_before><commit_msg>Add regression tests for saving with force_insert<commit_after>from django.contrib.gis.geos import Point
from django.test import TestCase
from cities import models
class SlugModelTest(object):
"""
Common tests for SlugModel subclasses.
"""
def instantiate(self):
"""
Implement this to return a valid instance of the model under test.
"""
raise NotImplementedError
def test_save(self):
instance = self.instantiate()
instance.save()
def test_save_force_insert(self):
"""
Regression test: save() with force_insert=True should work.
"""
instance = self.instantiate()
instance.save(force_insert=True)
class ContinentTestCase(SlugModelTest, TestCase):
def instantiate(self):
return models.Continent()
class CountryTestCase(SlugModelTest, TestCase):
def instantiate(self):
return models.Country(
population=0,
)
class RegionTestCase(SlugModelTest, TestCase):
def instantiate(self):
country = models.Country(
population=0
)
country.save()
return models.Region(
country=country,
)
class SubregionTestCase(SlugModelTest, TestCase):
def instantiate(self):
country = models.Country(
population=0
)
country.save()
region = models.Region(
country=country,
)
region.save()
return models.Subregion(
region=region,
)
class CityTestCase(SlugModelTest, TestCase):
def instantiate(self):
country = models.Country(
population=0
)
country.save()
return models.City(
country=country,
location=Point(0, 0),
population=0,
)
class DistrictTestCase(SlugModelTest, TestCase):
def instantiate(self):
country = models.Country(
population=0
)
country.save()
city = models.City(
country=country,
location=Point(0, 0),
population=0,
)
city.save()
return models.District(
location=Point(0, 0),
population=0,
city=city,
)
class AlternativeNameTestCase(SlugModelTest, TestCase):
def instantiate(self):
return models.AlternativeName()
class PostalCodeTestCase(SlugModelTest, TestCase):
def instantiate(self):
country = models.Country(
population=0
)
country.save()
return models.PostalCode(
location=Point(0, 0),
country=country,
)
|
|
3906f5465ac8420a86c9dc918abc61b20886718b
|
test.py
|
test.py
|
__author__ = 'adam'
import pv
import serial
import sys
pv.debug()
pv.debug_color()
port = serial.Serial('/dev/tty.usbserial')
#port.open()
from pv import cms
inv = cms.Inverter(port)
inv.reset()
sn = inv.discover()
if sn is None:
print "Inverter is not connected."
sys.exit(1)
ok = inv.register(sn) # Associates the inverter and assigns default address
if not ok:
print "Inverter registration failed."
sys.exit(1)
print inv.version()
param_layout = inv.param_layout()
parameters = inv.parameters(param_layout)
for field in parameters:
print "%-10s: %s" % field
status_layout = inv.status_layout()
status = inv.status(status_layout)
for field in status:
print "%-10s: %s" % field
port.close()
|
Test script based on README file
|
Test script based on README file
|
Python
|
mit
|
blebo/pv
|
Test script based on README file
|
__author__ = 'adam'
import pv
import serial
import sys
pv.debug()
pv.debug_color()
port = serial.Serial('/dev/tty.usbserial')
#port.open()
from pv import cms
inv = cms.Inverter(port)
inv.reset()
sn = inv.discover()
if sn is None:
print "Inverter is not connected."
sys.exit(1)
ok = inv.register(sn) # Associates the inverter and assigns default address
if not ok:
print "Inverter registration failed."
sys.exit(1)
print inv.version()
param_layout = inv.param_layout()
parameters = inv.parameters(param_layout)
for field in parameters:
print "%-10s: %s" % field
status_layout = inv.status_layout()
status = inv.status(status_layout)
for field in status:
print "%-10s: %s" % field
port.close()
|
<commit_before><commit_msg>Test script based on README file<commit_after>
|
__author__ = 'adam'
import pv
import serial
import sys
pv.debug()
pv.debug_color()
port = serial.Serial('/dev/tty.usbserial')
#port.open()
from pv import cms
inv = cms.Inverter(port)
inv.reset()
sn = inv.discover()
if sn is None:
print "Inverter is not connected."
sys.exit(1)
ok = inv.register(sn) # Associates the inverter and assigns default address
if not ok:
print "Inverter registration failed."
sys.exit(1)
print inv.version()
param_layout = inv.param_layout()
parameters = inv.parameters(param_layout)
for field in parameters:
print "%-10s: %s" % field
status_layout = inv.status_layout()
status = inv.status(status_layout)
for field in status:
print "%-10s: %s" % field
port.close()
|
Test script based on README file
__author__ = 'adam'
import pv
import serial
import sys
pv.debug()
pv.debug_color()
port = serial.Serial('/dev/tty.usbserial')
#port.open()
from pv import cms
inv = cms.Inverter(port)
inv.reset()
sn = inv.discover()
if sn is None:
print "Inverter is not connected."
sys.exit(1)
ok = inv.register(sn) # Associates the inverter and assigns default address
if not ok:
print "Inverter registration failed."
sys.exit(1)
print inv.version()
param_layout = inv.param_layout()
parameters = inv.parameters(param_layout)
for field in parameters:
print "%-10s: %s" % field
status_layout = inv.status_layout()
status = inv.status(status_layout)
for field in status:
print "%-10s: %s" % field
port.close()
|
<commit_before><commit_msg>Test script based on README file<commit_after>__author__ = 'adam'
import pv
import serial
import sys
pv.debug()
pv.debug_color()
port = serial.Serial('/dev/tty.usbserial')
#port.open()
from pv import cms
inv = cms.Inverter(port)
inv.reset()
sn = inv.discover()
if sn is None:
print "Inverter is not connected."
sys.exit(1)
ok = inv.register(sn) # Associates the inverter and assigns default address
if not ok:
print "Inverter registration failed."
sys.exit(1)
print inv.version()
param_layout = inv.param_layout()
parameters = inv.parameters(param_layout)
for field in parameters:
print "%-10s: %s" % field
status_layout = inv.status_layout()
status = inv.status(status_layout)
for field in status:
print "%-10s: %s" % field
port.close()
|
|
9616e572537fd469a5fd448287fc58e558217f3c
|
Lib/test/test_getargs.py
|
Lib/test/test_getargs.py
|
"""Test the internal getargs.c implementation
PyArg_ParseTuple() is defined here.
The test here is not intended to test all of the module, just the
single case that failed between 2.1 and 2.2a2.
"""
# marshal.loads() uses PyArg_ParseTuple(args, "s#:loads")
# The s code will cause a Unicode conversion to occur. This test
# verify that the error is propagated properly from the C code back to
# Python.
# XXX If the encoding succeeds using the current default encoding,
# this test will fail because it does not test the right part of the
# PyArg_ParseTuple() implementation.
import marshal
try:
marshal.loads(u"\222")
except UnicodeError:
pass
|
Test the failed-unicode-decoding bug in PyArg_ParseTuple().
|
Test the failed-unicode-decoding bug in PyArg_ParseTuple().
|
Python
|
mit
|
sk-/python2.7-type-annotator,sk-/python2.7-type-annotator,sk-/python2.7-type-annotator
|
Test the failed-unicode-decoding bug in PyArg_ParseTuple().
|
"""Test the internal getargs.c implementation
PyArg_ParseTuple() is defined here.
The test here is not intended to test all of the module, just the
single case that failed between 2.1 and 2.2a2.
"""
# marshal.loads() uses PyArg_ParseTuple(args, "s#:loads")
# The s code will cause a Unicode conversion to occur. This test
# verify that the error is propagated properly from the C code back to
# Python.
# XXX If the encoding succeeds using the current default encoding,
# this test will fail because it does not test the right part of the
# PyArg_ParseTuple() implementation.
import marshal
try:
marshal.loads(u"\222")
except UnicodeError:
pass
|
<commit_before><commit_msg>Test the failed-unicode-decoding bug in PyArg_ParseTuple().<commit_after>
|
"""Test the internal getargs.c implementation
PyArg_ParseTuple() is defined here.
The test here is not intended to test all of the module, just the
single case that failed between 2.1 and 2.2a2.
"""
# marshal.loads() uses PyArg_ParseTuple(args, "s#:loads")
# The s code will cause a Unicode conversion to occur. This test
# verify that the error is propagated properly from the C code back to
# Python.
# XXX If the encoding succeeds using the current default encoding,
# this test will fail because it does not test the right part of the
# PyArg_ParseTuple() implementation.
import marshal
try:
marshal.loads(u"\222")
except UnicodeError:
pass
|
Test the failed-unicode-decoding bug in PyArg_ParseTuple().
"""Test the internal getargs.c implementation
PyArg_ParseTuple() is defined here.
The test here is not intended to test all of the module, just the
single case that failed between 2.1 and 2.2a2.
"""
# marshal.loads() uses PyArg_ParseTuple(args, "s#:loads")
# The s code will cause a Unicode conversion to occur. This test
# verify that the error is propagated properly from the C code back to
# Python.
# XXX If the encoding succeeds using the current default encoding,
# this test will fail because it does not test the right part of the
# PyArg_ParseTuple() implementation.
import marshal
try:
marshal.loads(u"\222")
except UnicodeError:
pass
|
<commit_before><commit_msg>Test the failed-unicode-decoding bug in PyArg_ParseTuple().<commit_after>"""Test the internal getargs.c implementation
PyArg_ParseTuple() is defined here.
The test here is not intended to test all of the module, just the
single case that failed between 2.1 and 2.2a2.
"""
# marshal.loads() uses PyArg_ParseTuple(args, "s#:loads")
# The s code will cause a Unicode conversion to occur. This test
# verify that the error is propagated properly from the C code back to
# Python.
# XXX If the encoding succeeds using the current default encoding,
# this test will fail because it does not test the right part of the
# PyArg_ParseTuple() implementation.
import marshal
try:
marshal.loads(u"\222")
except UnicodeError:
pass
|
|
97de5c8c4ff9516bfa0b9daedd74aee708470433
|
numba/cuda/tests/cudapy/test_multithreads.py
|
numba/cuda/tests/cudapy/test_multithreads.py
|
from numba import cuda
import numpy as np
from numba import unittest_support as unittest
from numba.cuda.testing import skip_on_cudasim
try:
from concurrent.futures import ThreadPoolExecutor, as_completed
except ImportError:
has_thread_pool = False
else:
has_thread_pool = True
@skip_on_cudasim('disabled for cudasim')
class TestMultiGPUContext(unittest.TestCase):
@unittest.skipIf(not has_thread_pool, "no concurrent.futures")
def test_concurrent_compiling(self):
@cuda.jit
def foo(x):
x[0] += 1
def use_foo(x):
foo(x)
return x
arrays = [np.arange(10) for i in range(10)]
expected = np.arange(10)
expected[0] += 1
with ThreadPoolExecutor(max_workers=4) as e:
for ary in e.map(use_foo, arrays):
np.testing.assert_equal(ary, expected)
if __name__ == '__main__':
unittest.main()
|
Add test for concurrent compilation of cuda.jit kernels
|
Add test for concurrent compilation of cuda.jit kernels
|
Python
|
bsd-2-clause
|
stonebig/numba,stefanseefeld/numba,jriehl/numba,IntelLabs/numba,cpcloud/numba,stefanseefeld/numba,stuartarchibald/numba,jriehl/numba,gmarkall/numba,sklam/numba,stonebig/numba,cpcloud/numba,numba/numba,IntelLabs/numba,cpcloud/numba,seibert/numba,sklam/numba,gmarkall/numba,numba/numba,IntelLabs/numba,IntelLabs/numba,sklam/numba,stonebig/numba,stonebig/numba,gmarkall/numba,jriehl/numba,IntelLabs/numba,stefanseefeld/numba,numba/numba,jriehl/numba,stuartarchibald/numba,stuartarchibald/numba,sklam/numba,jriehl/numba,seibert/numba,gmarkall/numba,sklam/numba,stefanseefeld/numba,numba/numba,stuartarchibald/numba,seibert/numba,stuartarchibald/numba,seibert/numba,gmarkall/numba,stefanseefeld/numba,seibert/numba,cpcloud/numba,numba/numba,stonebig/numba,cpcloud/numba
|
Add test for concurrent compilation of cuda.jit kernels
|
from numba import cuda
import numpy as np
from numba import unittest_support as unittest
from numba.cuda.testing import skip_on_cudasim
try:
from concurrent.futures import ThreadPoolExecutor, as_completed
except ImportError:
has_thread_pool = False
else:
has_thread_pool = True
@skip_on_cudasim('disabled for cudasim')
class TestMultiGPUContext(unittest.TestCase):
@unittest.skipIf(not has_thread_pool, "no concurrent.futures")
def test_concurrent_compiling(self):
@cuda.jit
def foo(x):
x[0] += 1
def use_foo(x):
foo(x)
return x
arrays = [np.arange(10) for i in range(10)]
expected = np.arange(10)
expected[0] += 1
with ThreadPoolExecutor(max_workers=4) as e:
for ary in e.map(use_foo, arrays):
np.testing.assert_equal(ary, expected)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test for concurrent compilation of cuda.jit kernels<commit_after>
|
from numba import cuda
import numpy as np
from numba import unittest_support as unittest
from numba.cuda.testing import skip_on_cudasim
try:
from concurrent.futures import ThreadPoolExecutor, as_completed
except ImportError:
has_thread_pool = False
else:
has_thread_pool = True
@skip_on_cudasim('disabled for cudasim')
class TestMultiGPUContext(unittest.TestCase):
@unittest.skipIf(not has_thread_pool, "no concurrent.futures")
def test_concurrent_compiling(self):
@cuda.jit
def foo(x):
x[0] += 1
def use_foo(x):
foo(x)
return x
arrays = [np.arange(10) for i in range(10)]
expected = np.arange(10)
expected[0] += 1
with ThreadPoolExecutor(max_workers=4) as e:
for ary in e.map(use_foo, arrays):
np.testing.assert_equal(ary, expected)
if __name__ == '__main__':
unittest.main()
|
Add test for concurrent compilation of cuda.jit kernels
from numba import cuda
import numpy as np
from numba import unittest_support as unittest
from numba.cuda.testing import skip_on_cudasim
try:
from concurrent.futures import ThreadPoolExecutor, as_completed
except ImportError:
has_thread_pool = False
else:
has_thread_pool = True
@skip_on_cudasim('disabled for cudasim')
class TestMultiGPUContext(unittest.TestCase):
@unittest.skipIf(not has_thread_pool, "no concurrent.futures")
def test_concurrent_compiling(self):
@cuda.jit
def foo(x):
x[0] += 1
def use_foo(x):
foo(x)
return x
arrays = [np.arange(10) for i in range(10)]
expected = np.arange(10)
expected[0] += 1
with ThreadPoolExecutor(max_workers=4) as e:
for ary in e.map(use_foo, arrays):
np.testing.assert_equal(ary, expected)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test for concurrent compilation of cuda.jit kernels<commit_after>from numba import cuda
import numpy as np
from numba import unittest_support as unittest
from numba.cuda.testing import skip_on_cudasim
try:
from concurrent.futures import ThreadPoolExecutor, as_completed
except ImportError:
has_thread_pool = False
else:
has_thread_pool = True
@skip_on_cudasim('disabled for cudasim')
class TestMultiGPUContext(unittest.TestCase):
@unittest.skipIf(not has_thread_pool, "no concurrent.futures")
def test_concurrent_compiling(self):
@cuda.jit
def foo(x):
x[0] += 1
def use_foo(x):
foo(x)
return x
arrays = [np.arange(10) for i in range(10)]
expected = np.arange(10)
expected[0] += 1
with ThreadPoolExecutor(max_workers=4) as e:
for ary in e.map(use_foo, arrays):
np.testing.assert_equal(ary, expected)
if __name__ == '__main__':
unittest.main()
|
|
0e675a64a47df075aa0c334e3a85a45d9581401b
|
tests/test_shot.py
|
tests/test_shot.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from db.shot import Shot
def test_find_by_event_id():
shot = Shot.find_by_event_id(20160207550053)
assert shot.team_id == 15
assert shot.player_id == 8471702
assert shot.zone == "Off"
assert shot.goalie_team_id == 6
assert shot.goalie_id == 8471695
assert shot.shot_type == "Wrist"
assert shot.distance == 31
assert not shot.scored
assert not shot.penalty_shot
|
Add test script for shot items
|
Add test script for shot items
|
Python
|
mit
|
leaffan/pynhldb
|
Add test script for shot items
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from db.shot import Shot
def test_find_by_event_id():
shot = Shot.find_by_event_id(20160207550053)
assert shot.team_id == 15
assert shot.player_id == 8471702
assert shot.zone == "Off"
assert shot.goalie_team_id == 6
assert shot.goalie_id == 8471695
assert shot.shot_type == "Wrist"
assert shot.distance == 31
assert not shot.scored
assert not shot.penalty_shot
|
<commit_before><commit_msg>Add test script for shot items<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from db.shot import Shot
def test_find_by_event_id():
shot = Shot.find_by_event_id(20160207550053)
assert shot.team_id == 15
assert shot.player_id == 8471702
assert shot.zone == "Off"
assert shot.goalie_team_id == 6
assert shot.goalie_id == 8471695
assert shot.shot_type == "Wrist"
assert shot.distance == 31
assert not shot.scored
assert not shot.penalty_shot
|
Add test script for shot items
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from db.shot import Shot
def test_find_by_event_id():
shot = Shot.find_by_event_id(20160207550053)
assert shot.team_id == 15
assert shot.player_id == 8471702
assert shot.zone == "Off"
assert shot.goalie_team_id == 6
assert shot.goalie_id == 8471695
assert shot.shot_type == "Wrist"
assert shot.distance == 31
assert not shot.scored
assert not shot.penalty_shot
|
<commit_before><commit_msg>Add test script for shot items<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
from db.shot import Shot
def test_find_by_event_id():
shot = Shot.find_by_event_id(20160207550053)
assert shot.team_id == 15
assert shot.player_id == 8471702
assert shot.zone == "Off"
assert shot.goalie_team_id == 6
assert shot.goalie_id == 8471695
assert shot.shot_type == "Wrist"
assert shot.distance == 31
assert not shot.scored
assert not shot.penalty_shot
|
|
c3fdfe911444071c4e6c0f12252c1b3322bf99f0
|
toxiproxy/utils.py
|
toxiproxy/utils.py
|
import socket
from contextlib import closing
def test_connection(host, port):
""" Test a connection to a host/port """
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
return bool(sock.connect_ex((host, port)) == 0)
|
Add a function to test socket connections
|
Add a function to test socket connections
|
Python
|
mit
|
douglas/toxiproxy-python,douglas/toxiproxy-python
|
Add a function to test socket connections
|
import socket
from contextlib import closing
def test_connection(host, port):
""" Test a connection to a host/port """
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
return bool(sock.connect_ex((host, port)) == 0)
|
<commit_before><commit_msg>Add a function to test socket connections<commit_after>
|
import socket
from contextlib import closing
def test_connection(host, port):
""" Test a connection to a host/port """
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
return bool(sock.connect_ex((host, port)) == 0)
|
Add a function to test socket connections
import socket
from contextlib import closing
def test_connection(host, port):
""" Test a connection to a host/port """
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
return bool(sock.connect_ex((host, port)) == 0)
|
<commit_before><commit_msg>Add a function to test socket connections<commit_after>import socket
from contextlib import closing
def test_connection(host, port):
""" Test a connection to a host/port """
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
return bool(sock.connect_ex((host, port)) == 0)
|
|
5a975c3c4f48609a57e3eb69c5bd25116097f0ee
|
htmltocsv.py
|
htmltocsv.py
|
#!/usr/bin/env python
import sys
import re
import csv
from bs4 import BeautifulSoup
def row_to_dict(row):
date_str = row.find(id = re.compile(r'transactionView\.output\.transactionDate\d+')).get_text()
date = " ".join(date_str.split()[0:3])
ref_str = row.find(id = re.compile(r'transactionView\.output\.reference\d+')).get_text()
ref = " ".join(ref_str.split())
desc_str = row.find(id = re.compile(r'transactionView\.output\.transactionDescription\d+')).get_text()
desc = " ".join(desc_str.split())
maybe_money_in = row.find(id = re.compile(r'transactionView\.output\.moneyIn\d+'))
if (maybe_money_in and re.search("\d+\.\d\d", maybe_money_in.get_text())):
delta = '+' + maybe_money_in.get_text().strip()
else:
money_out = row.find(id = re.compile(r'transactionView\.output\.moneyOut\d+'))
delta = '-' + money_out.get_text().strip()
balance_str = row.find(id = re.compile(r'transactionView\.output\.total\d+')).get_text()
balance = " ".join(balance_str.split())
return {'date':date, 'ref':ref, 'desc':desc, 'delta':delta, 'balance':balance}
html = sys.stdin.read()
soup = BeautifulSoup(html)
transactions = soup.find_all('div', 'transactionJournalRow')
dicts = [row_to_dict(r) for r in transactions]
output = [d for d in dicts if not re.search('R', d['ref'])]
with open('out.csv', 'w') as f:
writer = csv.writer(f)
writer.writerow(['date', 'reference', 'description', 'amount', 'balance'])
for r in output:
writer.writerow([r['date'], r['ref'], r['desc'], r['delta'], r['balance']])
|
Add simple HTML scraper that generates CSVs
|
Add simple HTML scraper that generates CSVs
|
Python
|
mit
|
mdmoss/load-and-go
|
Add simple HTML scraper that generates CSVs
|
#!/usr/bin/env python
import sys
import re
import csv
from bs4 import BeautifulSoup
def row_to_dict(row):
date_str = row.find(id = re.compile(r'transactionView\.output\.transactionDate\d+')).get_text()
date = " ".join(date_str.split()[0:3])
ref_str = row.find(id = re.compile(r'transactionView\.output\.reference\d+')).get_text()
ref = " ".join(ref_str.split())
desc_str = row.find(id = re.compile(r'transactionView\.output\.transactionDescription\d+')).get_text()
desc = " ".join(desc_str.split())
maybe_money_in = row.find(id = re.compile(r'transactionView\.output\.moneyIn\d+'))
if (maybe_money_in and re.search("\d+\.\d\d", maybe_money_in.get_text())):
delta = '+' + maybe_money_in.get_text().strip()
else:
money_out = row.find(id = re.compile(r'transactionView\.output\.moneyOut\d+'))
delta = '-' + money_out.get_text().strip()
balance_str = row.find(id = re.compile(r'transactionView\.output\.total\d+')).get_text()
balance = " ".join(balance_str.split())
return {'date':date, 'ref':ref, 'desc':desc, 'delta':delta, 'balance':balance}
html = sys.stdin.read()
soup = BeautifulSoup(html)
transactions = soup.find_all('div', 'transactionJournalRow')
dicts = [row_to_dict(r) for r in transactions]
output = [d for d in dicts if not re.search('R', d['ref'])]
with open('out.csv', 'w') as f:
writer = csv.writer(f)
writer.writerow(['date', 'reference', 'description', 'amount', 'balance'])
for r in output:
writer.writerow([r['date'], r['ref'], r['desc'], r['delta'], r['balance']])
|
<commit_before><commit_msg>Add simple HTML scraper that generates CSVs<commit_after>
|
#!/usr/bin/env python
import sys
import re
import csv
from bs4 import BeautifulSoup
def row_to_dict(row):
date_str = row.find(id = re.compile(r'transactionView\.output\.transactionDate\d+')).get_text()
date = " ".join(date_str.split()[0:3])
ref_str = row.find(id = re.compile(r'transactionView\.output\.reference\d+')).get_text()
ref = " ".join(ref_str.split())
desc_str = row.find(id = re.compile(r'transactionView\.output\.transactionDescription\d+')).get_text()
desc = " ".join(desc_str.split())
maybe_money_in = row.find(id = re.compile(r'transactionView\.output\.moneyIn\d+'))
if (maybe_money_in and re.search("\d+\.\d\d", maybe_money_in.get_text())):
delta = '+' + maybe_money_in.get_text().strip()
else:
money_out = row.find(id = re.compile(r'transactionView\.output\.moneyOut\d+'))
delta = '-' + money_out.get_text().strip()
balance_str = row.find(id = re.compile(r'transactionView\.output\.total\d+')).get_text()
balance = " ".join(balance_str.split())
return {'date':date, 'ref':ref, 'desc':desc, 'delta':delta, 'balance':balance}
html = sys.stdin.read()
soup = BeautifulSoup(html)
transactions = soup.find_all('div', 'transactionJournalRow')
dicts = [row_to_dict(r) for r in transactions]
output = [d for d in dicts if not re.search('R', d['ref'])]
with open('out.csv', 'w') as f:
writer = csv.writer(f)
writer.writerow(['date', 'reference', 'description', 'amount', 'balance'])
for r in output:
writer.writerow([r['date'], r['ref'], r['desc'], r['delta'], r['balance']])
|
Add simple HTML scraper that generates CSVs
#!/usr/bin/env python
import sys
import re
import csv
from bs4 import BeautifulSoup
def row_to_dict(row):
date_str = row.find(id = re.compile(r'transactionView\.output\.transactionDate\d+')).get_text()
date = " ".join(date_str.split()[0:3])
ref_str = row.find(id = re.compile(r'transactionView\.output\.reference\d+')).get_text()
ref = " ".join(ref_str.split())
desc_str = row.find(id = re.compile(r'transactionView\.output\.transactionDescription\d+')).get_text()
desc = " ".join(desc_str.split())
maybe_money_in = row.find(id = re.compile(r'transactionView\.output\.moneyIn\d+'))
if (maybe_money_in and re.search("\d+\.\d\d", maybe_money_in.get_text())):
delta = '+' + maybe_money_in.get_text().strip()
else:
money_out = row.find(id = re.compile(r'transactionView\.output\.moneyOut\d+'))
delta = '-' + money_out.get_text().strip()
balance_str = row.find(id = re.compile(r'transactionView\.output\.total\d+')).get_text()
balance = " ".join(balance_str.split())
return {'date':date, 'ref':ref, 'desc':desc, 'delta':delta, 'balance':balance}
html = sys.stdin.read()
soup = BeautifulSoup(html)
transactions = soup.find_all('div', 'transactionJournalRow')
dicts = [row_to_dict(r) for r in transactions]
output = [d for d in dicts if not re.search('R', d['ref'])]
with open('out.csv', 'w') as f:
writer = csv.writer(f)
writer.writerow(['date', 'reference', 'description', 'amount', 'balance'])
for r in output:
writer.writerow([r['date'], r['ref'], r['desc'], r['delta'], r['balance']])
|
<commit_before><commit_msg>Add simple HTML scraper that generates CSVs<commit_after>#!/usr/bin/env python
import sys
import re
import csv
from bs4 import BeautifulSoup
def row_to_dict(row):
date_str = row.find(id = re.compile(r'transactionView\.output\.transactionDate\d+')).get_text()
date = " ".join(date_str.split()[0:3])
ref_str = row.find(id = re.compile(r'transactionView\.output\.reference\d+')).get_text()
ref = " ".join(ref_str.split())
desc_str = row.find(id = re.compile(r'transactionView\.output\.transactionDescription\d+')).get_text()
desc = " ".join(desc_str.split())
maybe_money_in = row.find(id = re.compile(r'transactionView\.output\.moneyIn\d+'))
if (maybe_money_in and re.search("\d+\.\d\d", maybe_money_in.get_text())):
delta = '+' + maybe_money_in.get_text().strip()
else:
money_out = row.find(id = re.compile(r'transactionView\.output\.moneyOut\d+'))
delta = '-' + money_out.get_text().strip()
balance_str = row.find(id = re.compile(r'transactionView\.output\.total\d+')).get_text()
balance = " ".join(balance_str.split())
return {'date':date, 'ref':ref, 'desc':desc, 'delta':delta, 'balance':balance}
html = sys.stdin.read()
soup = BeautifulSoup(html)
transactions = soup.find_all('div', 'transactionJournalRow')
dicts = [row_to_dict(r) for r in transactions]
output = [d for d in dicts if not re.search('R', d['ref'])]
with open('out.csv', 'w') as f:
writer = csv.writer(f)
writer.writerow(['date', 'reference', 'description', 'amount', 'balance'])
for r in output:
writer.writerow([r['date'], r['ref'], r['desc'], r['delta'], r['balance']])
|
|
5c7400a2e70e5d9ee7f8a73e43abbf0f7b992152
|
ibei/main.py
|
ibei/main.py
|
# -*- coding: utf-8 -*-
import numpy as np
from astropy import constants
from astropy import units
from sympy.mpmath import polylog
def uibei(order, energy_lo, temp, chem_potential):
"""
Upper incomplete Bose-Einstein integral.
"""
kT = temp * constants.k_B
reduced_energy_lo = energy_lo / kT
reduced_chem_potential = chem_potential / kT
prefactor = (2 * np.pi * np.math.factorial(order) * kT**(order + 1)) / \
(constants.h**3 * constants.c**2)
summand = 0
for indx in range(1, order + 2):
expt = (reduced_chem_potential - reduced_energy_lo).decompose()
term = reduced_energy_lo**(order - indx + 1) * polylog(indx, np.exp(expt)) / np.math.factorial(order - indx + 1)
summand += term
return summand
|
Add first draft of upper incomplete Bose-Einstein integral
|
Add first draft of upper incomplete Bose-Einstein integral
|
Python
|
mit
|
jrsmith3/tec,jrsmith3/tec,jrsmith3/ibei
|
Add first draft of upper incomplete Bose-Einstein integral
|
# -*- coding: utf-8 -*-
import numpy as np
from astropy import constants
from astropy import units
from sympy.mpmath import polylog
def uibei(order, energy_lo, temp, chem_potential):
"""
Upper incomplete Bose-Einstein integral.
"""
kT = temp * constants.k_B
reduced_energy_lo = energy_lo / kT
reduced_chem_potential = chem_potential / kT
prefactor = (2 * np.pi * np.math.factorial(order) * kT**(order + 1)) / \
(constants.h**3 * constants.c**2)
summand = 0
for indx in range(1, order + 2):
expt = (reduced_chem_potential - reduced_energy_lo).decompose()
term = reduced_energy_lo**(order - indx + 1) * polylog(indx, np.exp(expt)) / np.math.factorial(order - indx + 1)
summand += term
return summand
|
<commit_before><commit_msg>Add first draft of upper incomplete Bose-Einstein integral<commit_after>
|
# -*- coding: utf-8 -*-
import numpy as np
from astropy import constants
from astropy import units
from sympy.mpmath import polylog
def uibei(order, energy_lo, temp, chem_potential):
"""
Upper incomplete Bose-Einstein integral.
"""
kT = temp * constants.k_B
reduced_energy_lo = energy_lo / kT
reduced_chem_potential = chem_potential / kT
prefactor = (2 * np.pi * np.math.factorial(order) * kT**(order + 1)) / \
(constants.h**3 * constants.c**2)
summand = 0
for indx in range(1, order + 2):
expt = (reduced_chem_potential - reduced_energy_lo).decompose()
term = reduced_energy_lo**(order - indx + 1) * polylog(indx, np.exp(expt)) / np.math.factorial(order - indx + 1)
summand += term
return summand
|
Add first draft of upper incomplete Bose-Einstein integral
# -*- coding: utf-8 -*-
import numpy as np
from astropy import constants
from astropy import units
from sympy.mpmath import polylog
def uibei(order, energy_lo, temp, chem_potential):
"""
Upper incomplete Bose-Einstein integral.
"""
kT = temp * constants.k_B
reduced_energy_lo = energy_lo / kT
reduced_chem_potential = chem_potential / kT
prefactor = (2 * np.pi * np.math.factorial(order) * kT**(order + 1)) / \
(constants.h**3 * constants.c**2)
summand = 0
for indx in range(1, order + 2):
expt = (reduced_chem_potential - reduced_energy_lo).decompose()
term = reduced_energy_lo**(order - indx + 1) * polylog(indx, np.exp(expt)) / np.math.factorial(order - indx + 1)
summand += term
return summand
|
<commit_before><commit_msg>Add first draft of upper incomplete Bose-Einstein integral<commit_after># -*- coding: utf-8 -*-
import numpy as np
from astropy import constants
from astropy import units
from sympy.mpmath import polylog
def uibei(order, energy_lo, temp, chem_potential):
"""
Upper incomplete Bose-Einstein integral.
"""
kT = temp * constants.k_B
reduced_energy_lo = energy_lo / kT
reduced_chem_potential = chem_potential / kT
prefactor = (2 * np.pi * np.math.factorial(order) * kT**(order + 1)) / \
(constants.h**3 * constants.c**2)
summand = 0
for indx in range(1, order + 2):
expt = (reduced_chem_potential - reduced_energy_lo).decompose()
term = reduced_energy_lo**(order - indx + 1) * polylog(indx, np.exp(expt)) / np.math.factorial(order - indx + 1)
summand += term
return summand
|
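For context, a hedged sketch (not part of the commit above) of the closed form that uibei() appears to evaluate. With x_0 = E_0/(kT) and \mu' = \mu/(kT), and for \mu < E_0, the standard series reduction of the upper incomplete Bose-Einstein integral of integer order m gives

\int_{E_0}^{\infty} \frac{E^{m}}{e^{(E-\mu)/kT} - 1}\,dE
  = m!\,(kT)^{m+1} \sum_{i=1}^{m+1} \frac{x_0^{\,m-i+1}}{(m-i+1)!}\,\mathrm{Li}_{i}\!\left(e^{\mu' - x_0}\right),

where \mathrm{Li}_{i} is the polylogarithm. The sum over i is exactly the summand accumulated in the loop above, while the m!\,(kT)^{m+1} factor, together with 2\pi/(h^{3}c^{2}), sits in the prefactor that this first draft computes but does not yet multiply into the returned value.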
|
39310b46e9f2c963572001d8a4d0e110540584bf
|
app/soc/modules/gsoc/models/slot_transfer.py
|
app/soc/modules/gsoc/models/slot_transfer.py
|
#!/usr/bin/env python2.5
#
# Copyright 2009 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains the GSoC Slot Transfer model.
"""
__authors__ = [
'"Madhusudan.C.S" <madhusudancs@gmail.com>',
]
from google.appengine.ext import db
from django.utils.translation import ugettext
import soc.models.base
class GSoCSlotTransfer(soc.models.base.ModelWithFieldAttributes):
"""Model that stores the organization has decided to give up.
"""
#: The number of slots the organization has decided to give up
nr_slots = db.IntegerProperty(
required=True, verbose_name=ugettext('Slots to transfer'))
nr_slots.help_text = ugettext('Number of slots you would like to transfer '
'to the pool.')
#: The remarks text explaining why the slots were given
remarks = db.StringProperty(
required=True, verbose_name=ugettext('Remarks'))
remarks.help_text = ugettext(
'A brief explanation mentioning the reason for transferring the '
'slots back to the pool.')
#: The status of slot transfer
#: pending: requested by the org, but the program admin has not taken action
#: accepted: program admin accepted the slot transfer
#: rejected: program admin rejected the request to transfer the slots
status = db.StringProperty(required=True, default='pending',
choices=['pending', 'accepted', 'rejected'])
#: date when the proposal was created
created_on = db.DateTimeProperty(required=True, auto_now_add=True)
#: date when the proposal was last modified, should be set manually on edit
last_modified_on = db.DateTimeProperty(required=True, auto_now=True)
|
Define the slot transfer data model.
|
Define the slot transfer data model.
|
Python
|
apache-2.0
|
rhyolight/nupic.son,rhyolight/nupic.son,rhyolight/nupic.son
|
Define the slot transfer data model.
|
#!/usr/bin/env python2.5
#
# Copyright 2009 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains the GSoC Slot Transfer model.
"""
__authors__ = [
'"Madhusudan.C.S" <madhusudancs@gmail.com>',
]
from google.appengine.ext import db
from django.utils.translation import ugettext
import soc.models.base
class GSoCSlotTransfer(soc.models.base.ModelWithFieldAttributes):
"""Model that stores the organization has decided to give up.
"""
#: The number of slots the organization has decided to give up
nr_slots = db.IntegerProperty(
required=True, verbose_name=ugettext('Slots to transfer'))
nr_slots.help_text = ugettext('Number of slots you would like to transfer '
'to the pool.')
#: The remarks text explaining why the slots were given
remarks = db.StringProperty(
required=True, verbose_name=ugettext('Remarks'))
remarks.help_text = ugettext(
'A brief explanation mentioning the reason for transferring the '
'slots back to the pool.')
#: The status of slot transfer
#: pending: requested by the org, but the program admin has not taken action
#: accepted: program admin accepted the slot transfer
#: rejected: program admin rejected the request to transfer the slots
status = db.StringProperty(required=True, default='pending',
choices=['pending', 'accepted', 'rejected'])
#: date when the proposal was created
created_on = db.DateTimeProperty(required=True, auto_now_add=True)
#: date when the proposal was last modified, should be set manually on edit
last_modified_on = db.DateTimeProperty(required=True, auto_now=True)
|
<commit_before><commit_msg>Define the slot transfer data model.<commit_after>
|
#!/usr/bin/env python2.5
#
# Copyright 2009 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains the GSoC Slot Transfer model.
"""
__authors__ = [
'"Madhusudan.C.S" <madhusudancs@gmail.com>',
]
from google.appengine.ext import db
from django.utils.translation import ugettext
import soc.models.base
class GSoCSlotTransfer(soc.models.base.ModelWithFieldAttributes):
"""Model that stores the organization has decided to give up.
"""
#: The number of slots the organization has decided to give up
nr_slots = db.IntegerProperty(
required=True, verbose_name=ugettext('Slots to transfer'))
nr_slots.help_text = ugettext('Number of slots you would like to transfer '
'to the pool.')
#: The remarks text explaining why the slots were given
remarks = db.StringProperty(
required=True, verbose_name=ugettext('Remarks'))
remarks.help_text = ugettext(
'A brief explanation mentioning the reason for transferring the '
'slots back to the pool.')
#: The status of slot transfer
#: pending: requested by the org, but the program admin has not taken action
#: accepted: program admin accepted the slot transfer
#: rejected: program admin rejected the request to transfer the slots
status = db.StringProperty(required=True, default='pending',
choices=['pending', 'accepted', 'rejected'])
#: date when the proposal was created
created_on = db.DateTimeProperty(required=True, auto_now_add=True)
#: date when the proposal was last modified, should be set manually on edit
last_modified_on = db.DateTimeProperty(required=True, auto_now=True)
|
Define the slot transfer data model.#!/usr/bin/env python2.5
#
# Copyright 2009 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains the GSoC Slot Transfer model.
"""
__authors__ = [
'"Madhusudan.C.S" <madhusudancs@gmail.com>',
]
from google.appengine.ext import db
from django.utils.translation import ugettext
import soc.models.base
class GSoCSlotTransfer(soc.models.base.ModelWithFieldAttributes):
"""Model that stores the organization has decided to give up.
"""
#: The number of slots the organization has decided to give up
nr_slots = db.IntegerProperty(
required=True, verbose_name=ugettext('Slots to transfer'))
nr_slots.help_text = ugettext('Number of slots you would like to transfer '
'to the pool.')
#: The remarks text explaining why the slots were given
remarks = db.StringProperty(
required=True, verbose_name=ugettext('Remarks'))
remarks.help_text = ugettext(
'A brief explanation mentioning the reason for transferring the '
'slots back to the pool.')
#: The status of slot transfer
#: pending: requested by the org, but the program admin has not taken action
#: accepted: program admin accepted the slot transfer
#: rejected: program admin rejected the request to transfer the slots
status = db.StringProperty(required=True, default='pending',
choices=['pending', 'accepted', 'rejected'])
#: date when the proposal was created
created_on = db.DateTimeProperty(required=True, auto_now_add=True)
#: date when the proposal was last modified, should be set manually on edit
last_modified_on = db.DateTimeProperty(required=True, auto_now=True)
|
<commit_before><commit_msg>Define the slot transfer data model.<commit_after>#!/usr/bin/env python2.5
#
# Copyright 2009 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains the GSoC Slot Transfer model.
"""
__authors__ = [
'"Madhusudan.C.S" <madhusudancs@gmail.com>',
]
from google.appengine.ext import db
from django.utils.translation import ugettext
import soc.models.base
class GSoCSlotTransfer(soc.models.base.ModelWithFieldAttributes):
"""Model that stores the organization has decided to give up.
"""
#: The number of slots the organization has decided to give up
nr_slots = db.IntegerProperty(
required=True, verbose_name=ugettext('Slots to transfer'))
nr_slots.help_text = ugettext('Number of slots you would like to transfer '
'to the pool.')
#: The remarks text explaining why the slots were given
remarks = db.StringProperty(
required=True, verbose_name=ugettext('Remarks'))
remarks.help_text = ugettext(
'A brief explanation mentioning the reason for transferring the '
'slots back to the pool.')
#: The status of slot transfer
#: pending: requested by the org, but the program admin has not taken action
#: accepted: program admin accepted the slot transfer
#: rejected: program admin rejected the request to transfer the slots
status = db.StringProperty(required=True, default='pending',
choices=['pending', 'accepted', 'rejected'])
#: date when the proposal was created
created_on = db.DateTimeProperty(required=True, auto_now_add=True)
#: date when the proposal was last modified, should be set manually on edit
last_modified_on = db.DateTimeProperty(required=True, auto_now=True)
|
|
25fff03be8cfef6534b4876e81f7c9fd036d2248
|
tools/generator/raw-data-extractor/extract-nrf.py
|
tools/generator/raw-data-extractor/extract-nrf.py
|
from pathlib import Path
import urllib.request
import zipfile
import shutil
import io
import os
packurl = "https://www.nordicsemi.com/-/media/Software-and-other-downloads/Desktop-software/nRF-MDK/sw/8-33-0/nRF_MDK_8_33_0_GCC_BSDLicense.zip"
shutil.rmtree("../raw-device-data/nrf-devices", ignore_errors=True)
Path("../raw-device-data/nrf-devices/nrf").mkdir(exist_ok=True, parents=True)
if __name__ == "__main__":
dest = "../raw-device-data/nrf-devices/nrf"
print("Downloading...")
with urllib.request.urlopen(packurl) as content:
z = zipfile.ZipFile(io.BytesIO(content.read()))
print("Extracting...")
# remove subfolders, some packs have several chips per pack
for zi in z.infolist():
if zi.filename.endswith(".svd"):
zi.filename = os.path.basename(zi.filename)
print(zi.filename)
z.extract(zi, dest)
# dirty hack because of inconsistent part names in .svd files
os.rename(dest + '/nrf51.svd', dest + '/nrf51822.svd')
os.rename(dest + '/nrf52.svd', dest + '/nrf52832.svd')
|
Add NRF device data extractor
|
[dfg] Add NRF device data extractor
|
Python
|
mpl-2.0
|
modm-io/modm-devices
|
[dfg] Add NRF device data extractor
|
from pathlib import Path
import urllib.request
import zipfile
import shutil
import io
import os
packurl = "https://www.nordicsemi.com/-/media/Software-and-other-downloads/Desktop-software/nRF-MDK/sw/8-33-0/nRF_MDK_8_33_0_GCC_BSDLicense.zip"
shutil.rmtree("../raw-device-data/nrf-devices", ignore_errors=True)
Path("../raw-device-data/nrf-devices/nrf").mkdir(exist_ok=True, parents=True)
if __name__ == "__main__":
dest = "../raw-device-data/nrf-devices/nrf"
print("Downloading...")
with urllib.request.urlopen(packurl) as content:
z = zipfile.ZipFile(io.BytesIO(content.read()))
print("Extracting...")
# remove subfolders, some packs have several chips per pack
for zi in z.infolist():
if zi.filename.endswith(".svd"):
zi.filename = os.path.basename(zi.filename)
print(zi.filename)
z.extract(zi, dest)
# dirty hack because of inconsistent part names in .svd files
os.rename(dest + '/nrf51.svd', dest + '/nrf51822.svd')
os.rename(dest + '/nrf52.svd', dest + '/nrf52832.svd')
|
<commit_before><commit_msg>[dfg] Add NRF device data extractor<commit_after>
|
from pathlib import Path
import urllib.request
import zipfile
import shutil
import io
import os
packurl = "https://www.nordicsemi.com/-/media/Software-and-other-downloads/Desktop-software/nRF-MDK/sw/8-33-0/nRF_MDK_8_33_0_GCC_BSDLicense.zip"
shutil.rmtree("../raw-device-data/nrf-devices", ignore_errors=True)
Path("../raw-device-data/nrf-devices/nrf").mkdir(exist_ok=True, parents=True)
if __name__ == "__main__":
dest = "../raw-device-data/nrf-devices/nrf"
print("Downloading...")
with urllib.request.urlopen(packurl) as content:
z = zipfile.ZipFile(io.BytesIO(content.read()))
print("Extracting...")
# remove subfolders, some packs have several chips per pack
for zi in z.infolist():
if zi.filename.endswith(".svd"):
zi.filename = os.path.basename(zi.filename)
print(zi.filename)
z.extract(zi, dest)
# dirty hack because of inconsistent part names in .svd files
os.rename(dest + '/nrf51.svd', dest + '/nrf51822.svd')
os.rename(dest + '/nrf52.svd', dest + '/nrf52832.svd')
|
[dfg] Add NRF device data extractorfrom pathlib import Path
import urllib.request
import zipfile
import shutil
import io
import os
packurl = "https://www.nordicsemi.com/-/media/Software-and-other-downloads/Desktop-software/nRF-MDK/sw/8-33-0/nRF_MDK_8_33_0_GCC_BSDLicense.zip"
shutil.rmtree("../raw-device-data/nrf-devices", ignore_errors=True)
Path("../raw-device-data/nrf-devices/nrf").mkdir(exist_ok=True, parents=True)
if __name__ == "__main__":
dest = "../raw-device-data/nrf-devices/nrf"
print("Downloading...")
with urllib.request.urlopen(packurl) as content:
z = zipfile.ZipFile(io.BytesIO(content.read()))
print("Extracting...")
# remove subfolders, some packs have several chips per pack
for zi in z.infolist():
if zi.filename.endswith(".svd"):
zi.filename = os.path.basename(zi.filename)
print(zi.filename)
z.extract(zi, dest)
# dirty hack because of inconsistent part names in .svd files
os.rename(dest + '/nrf51.svd', dest + '/nrf51822.svd')
os.rename(dest + '/nrf52.svd', dest + '/nrf52832.svd')
|
<commit_before><commit_msg>[dfg] Add NRF device data extractor<commit_after>from pathlib import Path
import urllib.request
import zipfile
import shutil
import io
import os
packurl = "https://www.nordicsemi.com/-/media/Software-and-other-downloads/Desktop-software/nRF-MDK/sw/8-33-0/nRF_MDK_8_33_0_GCC_BSDLicense.zip"
shutil.rmtree("../raw-device-data/nrf-devices", ignore_errors=True)
Path("../raw-device-data/nrf-devices/nrf").mkdir(exist_ok=True, parents=True)
if __name__ == "__main__":
dest = "../raw-device-data/nrf-devices/nrf"
print("Downloading...")
with urllib.request.urlopen(packurl) as content:
z = zipfile.ZipFile(io.BytesIO(content.read()))
print("Extracting...")
# remove subfolders, some packs have several chips per pack
for zi in z.infolist():
if zi.filename.endswith(".svd"):
zi.filename = os.path.basename(zi.filename)
print(zi.filename)
z.extract(zi, dest)
# dirty hack because of inconsistent part names in .svd files
os.rename(dest + '/nrf51.svd', dest + '/nrf51822.svd')
os.rename(dest + '/nrf52.svd', dest + '/nrf52832.svd')
|
|
71ad5c15602d6aeeea0b1ab72b244b47d634618d
|
DjangoApplication/complaint_system/forms.py
|
DjangoApplication/complaint_system/forms.py
|
from django import forms
from django.forms.widgets import CheckboxSelectMultiple
from complaint_system.models import Complaint
class AddComplaint(forms.ModelForm):
address = forms.CharField(max_length=250, required = True)
city = forms.CharField(max_length=250, required = True)
province = forms.CharField(max_length=250, required = True)
categories = forms.MultipleChoiceField(choices=Complaint.CATEGORIES, widget=forms.CheckboxSelectMultiple(), required = True)
class Meta:
model = Complaint
#class ContactInfo(forms.Form):
name = forms.CharField(max_length=250)
number = forms.CharField(max_length=250)
email = forms.CharField(max_length=250)
#def clean(self):
#num = filter(lambda e:e,[Complaint.number, Complaint.email])
#if not bool(number) or bool(email):
#raise ValidationError("at least one piece of contact information must be set")
|
Add more fields to Form and Database and have complaint_system page reflect that. Change 'categories' from dropdown to checkboxes.
|
Add more fields to Form and Database and have complaint_system page reflect that. Change 'categories' from dropdown to checkboxes.
|
Python
|
mit
|
CSC301H-Fall2013/healthyhome,CSC301H-Fall2013/healthyhome
|
Add more fields to Form and Database and have complaint_system page reflect that. Change 'categories' from dropdown to checkboxes.
|
from django import forms
from django.forms.widgets import CheckboxSelectMultiple
from complaint_system.models import Complaint
class AddComplaint(forms.ModelForm):
address = forms.CharField(max_length=250, required = True)
city = forms.CharField(max_length=250, required = True)
province = forms.CharField(max_length=250, required = True)
categories = forms.MultipleChoiceField(choices=Complaint.CATEGORIES, widget=forms.CheckboxSelectMultiple(), required = True)
class Meta:
model = Complaint
#class ContactInfo(forms.Form):
name = forms.CharField(max_length=250)
number = forms.CharField(max_length=250)
email = forms.CharField(max_length=250)
#def clean(self):
#num = filter(lambda e:e,[Complaint.number, Complaint.email])
#if not bool(number) or bool(email):
#raise ValidationError("at least one piece of contact information must be set")
|
<commit_before><commit_msg>Add more fields to Form and Database and have complaint_system page reflect that. Change 'categories' from dropdown to checkboxes.<commit_after>
|
from django import forms
from django.forms.widgets import CheckboxSelectMultiple
from complaint_system.models import Complaint
class AddComplaint(forms.ModelForm):
address = forms.CharField(max_length=250, required = True)
city = forms.CharField(max_length=250, required = True)
province = forms.CharField(max_length=250, required = True)
categories = forms.MultipleChoiceField(choices=Complaint.CATEGORIES, widget=forms.CheckboxSelectMultiple(), required = True)
class Meta:
model = Complaint
#class ContactInfo(forms.Form):
name = forms.CharField(max_length=250)
number = forms.CharField(max_length=250)
email = forms.CharField(max_length=250)
#def clean(self):
#num = filter(lambda e:e,[Complaint.number, Complaint.email])
#if not bool(number) or bool(email):
#raise ValidationError("at least one piece of contact information must be set")
|
Add more fields to Form and Database and have complaint_system page reflect that. Change 'categories' from dropdown to checkboxes.from django import forms
from django.forms.widgets import CheckboxSelectMultiple
from complaint_system.models import Complaint
class AddComplaint(forms.ModelForm):
address = forms.CharField(max_length=250, required = True)
city = forms.CharField(max_length=250, required = True)
province = forms.CharField(max_length=250, required = True)
categories = forms.MultipleChoiceField(choices=Complaint.CATEGORIES, widget=forms.CheckboxSelectMultiple(), required = True)
class Meta:
model = Complaint
#class ContactInfo(forms.Form):
name = forms.CharField(max_length=250)
number = forms.CharField(max_length=250)
email = forms.CharField(max_length=250)
#def clean(self):
#num = filter(lambda e:e,[Complaint.number, Complaint.email])
#if not bool(number) or bool(email):
#raise ValidationError("at least one piece of contact information must be set")
|
<commit_before><commit_msg>Add more fields to Form and Database and have complaint_system page reflect that. Change 'categories' from dropdown to checkboxes.<commit_after>from django import forms
from django.forms.widgets import CheckboxSelectMultiple
from complaint_system.models import Complaint
class AddComplaint(forms.ModelForm):
address = forms.CharField(max_length=250, required = True)
city = forms.CharField(max_length=250, required = True)
province = forms.CharField(max_length=250, required = True)
categories = forms.MultipleChoiceField(choices=Complaint.CATEGORIES, widget=forms.CheckboxSelectMultiple(), required = True)
class Meta:
model = Complaint
#class ContactInfo(forms.Form):
name = forms.CharField(max_length=250)
number = forms.CharField(max_length=250)
email = forms.CharField(max_length=250)
#def clean(self):
#num = filter(lambda e:e,[Complaint.number, Complaint.email])
#if not bool(number) or bool(email):
#raise ValidationError("at least one piece of contact information must be set")
|
|
e80941a4bb0a3eea4bbbde883128d586d3a13946
|
tests/cli/test_quick.py
|
tests/cli/test_quick.py
|
import os
import subprocess as sp
import base64
def test_crash():
CLI_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../cli.py")
# Test that commands don't crash
assert 0 == sp.call([CLI_FILE, "help"], stdout=sp.DEVNULL, stderr=sp.DEVNULL)
assert 0 == sp.call([CLI_FILE, "list"], stdout=sp.DEVNULL, stderr=sp.DEVNULL)
assert 0 == sp.call([CLI_FILE, "contents", "vanilla"], stdout=sp.DEVNULL, stderr=sp.DEVNULL)
assert 0 == sp.call([CLI_FILE, "contents", "vanilla+"], stdout=sp.DEVNULL, stderr=sp.DEVNULL)
data = base64.b64encode("test\n# test\nrso-mod".encode()).decode()
assert 0 == sp.call([CLI_FILE, "decompress", data], stdout=sp.DEVNULL, stderr=sp.DEVNULL)
# TODO: test match somehow (are there popular public servers?)
# TODO: test install when we have credentials
assert 0 == sp.call([CLI_FILE, "enabled"], stdout=sp.DEVNULL, stderr=sp.DEVNULL)
assert 0 == sp.call([CLI_FILE, "search", "farl"], stdout=sp.DEVNULL, stderr=sp.DEVNULL)
assert 0 == sp.call([CLI_FILE, "cache", "list"], stdout=sp.DEVNULL, stderr=sp.DEVNULL)
assert 0 == sp.call([CLI_FILE, "cache", "clear"], stdout=sp.DEVNULL, stderr=sp.DEVNULL)
# test that invalid commands crash
assert 0 != sp.call([CLI_FILE, "<<INVALID_COMMAND>>"], stdout=sp.DEVNULL, stderr=sp.DEVNULL)
|
Add quick test for finding crashes and errors
|
Add quick test for finding crashes and errors
|
Python
|
mit
|
haihala/modman
|
Add quick test for finding crashes and errors
|
import os
import subprocess as sp
import base64
def test_crash():
CLI_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../cli.py")
# Test that commands don't crash
assert 0 == sp.call([CLI_FILE, "help"], stdout=sp.DEVNULL, stderr=sp.DEVNULL)
assert 0 == sp.call([CLI_FILE, "list"], stdout=sp.DEVNULL, stderr=sp.DEVNULL)
assert 0 == sp.call([CLI_FILE, "contents", "vanilla"], stdout=sp.DEVNULL, stderr=sp.DEVNULL)
assert 0 == sp.call([CLI_FILE, "contents", "vanilla+"], stdout=sp.DEVNULL, stderr=sp.DEVNULL)
data = base64.b64encode("test\n# test\nrso-mod".encode()).decode()
assert 0 == sp.call([CLI_FILE, "decompress", data], stdout=sp.DEVNULL, stderr=sp.DEVNULL)
# TODO: test match somehow (are there popular public servers?)
# TODO: test install when we have credentials
assert 0 == sp.call([CLI_FILE, "enabled"], stdout=sp.DEVNULL, stderr=sp.DEVNULL)
assert 0 == sp.call([CLI_FILE, "search", "farl"], stdout=sp.DEVNULL, stderr=sp.DEVNULL)
assert 0 == sp.call([CLI_FILE, "cache", "list"], stdout=sp.DEVNULL, stderr=sp.DEVNULL)
assert 0 == sp.call([CLI_FILE, "cache", "clear"], stdout=sp.DEVNULL, stderr=sp.DEVNULL)
# test that invalid commands crash
assert 0 != sp.call([CLI_FILE, "<<INVALID_COMMAND>>"], stdout=sp.DEVNULL, stderr=sp.DEVNULL)
|
<commit_before><commit_msg>Add quick test for finding crashes and errors<commit_after>
|
import os
import subprocess as sp
import base64
def test_crash():
CLI_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../cli.py")
# Test that commands don't crash
assert 0 == sp.call([CLI_FILE, "help"], stdout=sp.DEVNULL, stderr=sp.DEVNULL)
assert 0 == sp.call([CLI_FILE, "list"], stdout=sp.DEVNULL, stderr=sp.DEVNULL)
assert 0 == sp.call([CLI_FILE, "contents", "vanilla"], stdout=sp.DEVNULL, stderr=sp.DEVNULL)
assert 0 == sp.call([CLI_FILE, "contents", "vanilla+"], stdout=sp.DEVNULL, stderr=sp.DEVNULL)
data = base64.b64encode("test\n# test\nrso-mod".encode()).decode()
assert 0 == sp.call([CLI_FILE, "decompress", data], stdout=sp.DEVNULL, stderr=sp.DEVNULL)
# TODO: test match somehow (are there popular public servers?)
# TODO: test install when we have credentials
assert 0 == sp.call([CLI_FILE, "enabled"], stdout=sp.DEVNULL, stderr=sp.DEVNULL)
assert 0 == sp.call([CLI_FILE, "search", "farl"], stdout=sp.DEVNULL, stderr=sp.DEVNULL)
assert 0 == sp.call([CLI_FILE, "cache", "list"], stdout=sp.DEVNULL, stderr=sp.DEVNULL)
assert 0 == sp.call([CLI_FILE, "cache", "clear"], stdout=sp.DEVNULL, stderr=sp.DEVNULL)
# test that invalid commands crash
assert 0 != sp.call([CLI_FILE, "<<INVALID_COMMAND>>"], stdout=sp.DEVNULL, stderr=sp.DEVNULL)
|
Add quick test for finding crashes and errorsimport os
import subprocess as sp
import base64
def test_crash():
CLI_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../cli.py")
# Test that commands don't crash
assert 0 == sp.call([CLI_FILE, "help"], stdout=sp.DEVNULL, stderr=sp.DEVNULL)
assert 0 == sp.call([CLI_FILE, "list"], stdout=sp.DEVNULL, stderr=sp.DEVNULL)
assert 0 == sp.call([CLI_FILE, "contents", "vanilla"], stdout=sp.DEVNULL, stderr=sp.DEVNULL)
assert 0 == sp.call([CLI_FILE, "contents", "vanilla+"], stdout=sp.DEVNULL, stderr=sp.DEVNULL)
data = base64.b64encode("test\n# test\nrso-mod".encode()).decode()
assert 0 == sp.call([CLI_FILE, "decompress", data], stdout=sp.DEVNULL, stderr=sp.DEVNULL)
# TODO: test match somehow (are there popular public servers?)
# TODO: test install when we have credentials
assert 0 == sp.call([CLI_FILE, "enabled"], stdout=sp.DEVNULL, stderr=sp.DEVNULL)
assert 0 == sp.call([CLI_FILE, "search", "farl"], stdout=sp.DEVNULL, stderr=sp.DEVNULL)
assert 0 == sp.call([CLI_FILE, "cache", "list"], stdout=sp.DEVNULL, stderr=sp.DEVNULL)
assert 0 == sp.call([CLI_FILE, "cache", "clear"], stdout=sp.DEVNULL, stderr=sp.DEVNULL)
# test that invalid commands crash
assert 0 != sp.call([CLI_FILE, "<<INVALID_COMMAND>>"], stdout=sp.DEVNULL, stderr=sp.DEVNULL)
|
<commit_before><commit_msg>Add quick test for finding crashes and errors<commit_after>import os
import subprocess as sp
import base64
def test_crash():
CLI_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../cli.py")
# Test that commands don't crash
assert 0 == sp.call([CLI_FILE, "help"], stdout=sp.DEVNULL, stderr=sp.DEVNULL)
assert 0 == sp.call([CLI_FILE, "list"], stdout=sp.DEVNULL, stderr=sp.DEVNULL)
assert 0 == sp.call([CLI_FILE, "contents", "vanilla"], stdout=sp.DEVNULL, stderr=sp.DEVNULL)
assert 0 == sp.call([CLI_FILE, "contents", "vanilla+"], stdout=sp.DEVNULL, stderr=sp.DEVNULL)
data = base64.b64encode("test\n# test\nrso-mod".encode()).decode()
assert 0 == sp.call([CLI_FILE, "decompress", data], stdout=sp.DEVNULL, stderr=sp.DEVNULL)
# TODO: test match somehow (are there popular public servers?)
# TODO: test install when we have credentials
assert 0 == sp.call([CLI_FILE, "enabled"], stdout=sp.DEVNULL, stderr=sp.DEVNULL)
assert 0 == sp.call([CLI_FILE, "search", "farl"], stdout=sp.DEVNULL, stderr=sp.DEVNULL)
assert 0 == sp.call([CLI_FILE, "cache", "list"], stdout=sp.DEVNULL, stderr=sp.DEVNULL)
assert 0 == sp.call([CLI_FILE, "cache", "clear"], stdout=sp.DEVNULL, stderr=sp.DEVNULL)
# test that invalid commands crash
assert 0 != sp.call([CLI_FILE, "<<INVALID_COMMAND>>"], stdout=sp.DEVNULL, stderr=sp.DEVNULL)
|
|
9c52955c7e18987be6131afeb83093060afb4f98
|
q_learning/main.py
|
q_learning/main.py
|
# coding: utf-8
import random
import gym
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns # noqa
env = gym.make('FrozenLake-v0')
class Agent(object):
def __init__(self, action_space, eps=0.01, alpha=0.1, gamma=0.9):
self.action_space = action_space
self.eps = eps
self.alpha = alpha
self.gamma = gamma
self.q_table = {}
def q_function(self, obs, act):
key = (obs, act)
if key not in self.q_table:
self.q_table[key] = 100
return self.q_table[key]
def get_action_list(self):
return range(self.action_space.n)
def _get_best_action(self, obs):
best = (-1e9, None)
for act in self.get_action_list():
score = self.q_function(obs, act)
if best[0] < score:
best = (score, act)
return best[1]
def action(self, obs):
if random.random() < self.eps:
return self.action_space.sample()
return self._get_best_action(obs)
def update(self, obs, act, reward, next_obs):
next_act = self._get_best_action(next_obs)
q = 0
q += (1.0 - self.alpha) * self.q_function(obs, act)
q += self.alpha * (
reward + self.gamma * self.q_function(next_obs, next_act))
self.q_table[(obs, act)] = q
def run_episode(agent):
obs = env.reset()
cnt = 0
while True:
cnt += 1
action = agent.action(obs)
next_obs, reward, done, info = env.step(action)
if done:
break
agent.update(obs, action, reward, next_obs)
obs = next_obs
return cnt
trial = 50
n_episode = 2000
result = []
for i in range(trial):
print('trial %i start.' % (i))
agent = Agent(env.action_space)
for j in range(n_episode):
cnt = run_episode(agent)
result.append([j, cnt])
df = pd.DataFrame(result, columns=['episode', 'n_move'])
df.plot.scatter(x='episode', y='n_move')
plt.savefig('result.png')
|
Add an agent of q-learning
|
Add an agent of q-learning
|
Python
|
apache-2.0
|
nel215/reinforcement-learning
|
Add an agent of q-learning
|
# coding: utf-8
import random
import gym
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns # noqa
env = gym.make('FrozenLake-v0')
class Agent(object):
def __init__(self, action_space, eps=0.01, alpha=0.1, gamma=0.9):
self.action_space = action_space
self.eps = eps
self.alpha = alpha
self.gamma = gamma
self.q_table = {}
def q_function(self, obs, act):
key = (obs, act)
if key not in self.q_table:
self.q_table[key] = 100
return self.q_table[key]
def get_action_list(self):
return range(self.action_space.n)
def _get_best_action(self, obs):
best = (-1e9, None)
for act in self.get_action_list():
score = self.q_function(obs, act)
if best[0] < score:
best = (score, act)
return best[1]
def action(self, obs):
if random.random() < self.eps:
return self.action_space.sample()
return self._get_best_action(obs)
def update(self, obs, act, reward, next_obs):
next_act = self._get_best_action(next_obs)
q = 0
q += (1.0 - self.alpha) * self.q_function(obs, act)
q += self.alpha * (
reward + self.gamma * self.q_function(next_obs, next_act))
self.q_table[(obs, act)] = q
def run_episode(agent):
obs = env.reset()
cnt = 0
while True:
cnt += 1
action = agent.action(obs)
next_obs, reward, done, info = env.step(action)
if done:
break
agent.update(obs, action, reward, next_obs)
obs = next_obs
return cnt
trial = 50
n_episode = 2000
result = []
for i in range(trial):
print('trial %i start.' % (i))
agent = Agent(env.action_space)
for j in range(n_episode):
cnt = run_episode(agent)
result.append([j, cnt])
df = pd.DataFrame(result, columns=['episode', 'n_move'])
df.plot.scatter(x='episode', y='n_move')
plt.savefig('result.png')
|
<commit_before><commit_msg>Add an agent of q-learning<commit_after>
|
# coding: utf-8
import random
import gym
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns # noqa
env = gym.make('FrozenLake-v0')
class Agent(object):
def __init__(self, action_space, eps=0.01, alpha=0.1, gamma=0.9):
self.action_space = action_space
self.eps = eps
self.alpha = alpha
self.gamma = gamma
self.q_table = {}
def q_function(self, obs, act):
key = (obs, act)
if key not in self.q_table:
self.q_table[key] = 100
return self.q_table[key]
def get_action_list(self):
return range(self.action_space.n)
def _get_best_action(self, obs):
best = (-1e9, None)
for act in self.get_action_list():
score = self.q_function(obs, act)
if best[0] < score:
best = (score, act)
return best[1]
def action(self, obs):
if random.random() < self.eps:
return self.action_space.sample()
return self._get_best_action(obs)
def update(self, obs, act, reward, next_obs):
next_act = self._get_best_action(next_obs)
q = 0
q += (1.0 - self.alpha) * self.q_function(obs, act)
q += self.alpha * (
reward + self.gamma * self.q_function(next_obs, next_act))
self.q_table[(obs, act)] = q
def run_episode(agent):
obs = env.reset()
cnt = 0
while True:
cnt += 1
action = agent.action(obs)
next_obs, reward, done, info = env.step(action)
if done:
break
agent.update(obs, action, reward, next_obs)
obs = next_obs
return cnt
trial = 50
n_episode = 2000
result = []
for i in range(trial):
print('trial %i start.' % (i))
agent = Agent(env.action_space)
for j in range(n_episode):
cnt = run_episode(agent)
result.append([j, cnt])
df = pd.DataFrame(result, columns=['episode', 'n_move'])
df.plot.scatter(x='episode', y='n_move')
plt.savefig('result.png')
|
Add an agent of q-learning# coding: utf-8
import random
import gym
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns # noqa
env = gym.make('FrozenLake-v0')
class Agent(object):
def __init__(self, action_space, eps=0.01, alpha=0.1, gamma=0.9):
self.action_space = action_space
self.eps = eps
self.alpha = alpha
self.gamma = gamma
self.q_table = {}
def q_function(self, obs, act):
key = (obs, act)
if key not in self.q_table:
self.q_table[key] = 100
return self.q_table[key]
def get_action_list(self):
return range(self.action_space.n)
def _get_best_action(self, obs):
best = (-1e9, None)
for act in self.get_action_list():
score = self.q_function(obs, act)
if best[0] < score:
best = (score, act)
return best[1]
def action(self, obs):
if random.random() < self.eps:
return self.action_space.sample()
return self._get_best_action(obs)
def update(self, obs, act, reward, next_obs):
next_act = self._get_best_action(next_obs)
q = 0
q += (1.0 - self.alpha) * self.q_function(obs, act)
q += self.alpha * (
reward + self.gamma * self.q_function(next_obs, next_act))
self.q_table[(obs, act)] = q
def run_episode(agent):
obs = env.reset()
cnt = 0
while True:
cnt += 1
action = agent.action(obs)
next_obs, reward, done, info = env.step(action)
if done:
break
agent.update(obs, action, reward, next_obs)
obs = next_obs
return cnt
trial = 50
n_episode = 2000
result = []
for i in range(trial):
print('trial %i start.' % (i))
agent = Agent(env.action_space)
for j in range(n_episode):
cnt = run_episode(agent)
result.append([j, cnt])
df = pd.DataFrame(result, columns=['episode', 'n_move'])
df.plot.scatter(x='episode', y='n_move')
plt.savefig('result.png')
|
<commit_before><commit_msg>Add an agent of q-learning<commit_after># coding: utf-8
import random
import gym
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns # noqa
env = gym.make('FrozenLake-v0')
class Agent(object):
def __init__(self, action_space, eps=0.01, alpha=0.1, gamma=0.9):
self.action_space = action_space
self.eps = eps
self.alpha = alpha
self.gamma = gamma
self.q_table = {}
def q_function(self, obs, act):
key = (obs, act)
if key not in self.q_table:
self.q_table[key] = 100
return self.q_table[key]
def get_action_list(self):
return range(self.action_space.n)
def _get_best_action(self, obs):
best = (-1e9, None)
for act in self.get_action_list():
score = self.q_function(obs, act)
if best[0] < score:
best = (score, act)
return best[1]
def action(self, obs):
if random.random() < self.eps:
return self.action_space.sample()
return self._get_best_action(obs)
def update(self, obs, act, reward, next_obs):
next_act = self._get_best_action(next_obs)
q = 0
q += (1.0 - self.alpha) * self.q_function(obs, act)
q += self.alpha * (
reward + self.gamma * self.q_function(next_obs, next_act))
self.q_table[(obs, act)] = q
def run_episode(agent):
obs = env.reset()
cnt = 0
while True:
cnt += 1
action = agent.action(obs)
next_obs, reward, done, info = env.step(action)
if done:
break
agent.update(obs, action, reward, next_obs)
obs = next_obs
return cnt
trial = 50
n_episode = 2000
result = []
for i in range(trial):
print('trial %i start.' % (i))
agent = Agent(env.action_space)
for j in range(n_episode):
cnt = run_episode(agent)
result.append([j, cnt])
df = pd.DataFrame(result, columns=['episode', 'n_move'])
df.plot.scatter(x='episode', y='n_move')
plt.savefig('result.png')
|
|
694a62f94391e0ce89a5ba52aa4616d41dac59e2
|
scripts/file_counts.py
|
scripts/file_counts.py
|
#!/usr/bin/env python
import hashlib
import os
import os.path
import stat
import sys
"""
A script for counting file popularity information in a given directory.
Usage:
./file_counts.py DIRECTORY
The popularity data will be written to stdout. Each line contains information
about a single file in the following format:
<SHA-1 HASH> <COUNT> <SIZE>
Here:
* SHA-1 HASH = the hash of the file contents
* COUNT = the number of copies this file had in the given directory
* SIZE = the size of the file in bytes
"""
data = {}
if len(sys.argv) < 2:
print "Usage: %s directory" % sys.argv[0]
sys.exit(1)
directory = sys.argv[1];
if not os.path.isdir(directory):
print "%s is not a directory." % directory
sys.exit(1)
def fhash(f):
hasher = hashlib.sha1()
try:
with open(f, "rb") as fd:
for chunk in iter(lambda: fd.read(2048), ''):
hasher.update(chunk)
except IOError as e:
return None
return hasher.hexdigest()
for root, dirs, files in os.walk(directory):
if root.startswith("/proc") or root.startswith("/sys") or root.startswith("/dev"):
continue
for f in files:
path = os.path.join(root, f)
try:
st = os.stat(path)
except OSError:
continue
if not stat.S_ISREG(st.st_mode):
continue
sha1 = fhash(path)
if sha1 is None:
continue
size = st.st_size
identifier = "%s|%i" % (sha1, size)
if identifier in data:
data[identifier] += 1
else:
data[identifier] = 1
for identifier, count in data.iteritems():
sha1, size = identifier.split("|")
print "%s %s %s" % (sha1, count, size)
|
Add the script used to collect file popularity and size information
|
Add the script used to collect file popularity and size information
|
Python
|
apache-2.0
|
sjakthol/dedup-simulator,sjakthol/dedup-simulator
|
Add the script used to collect file popularity and size information
|
#!/usr/bin/env python
import hashlib
import os
import os.path
import stat
import sys
"""
A script for counting file popularity information in a given directory.
Usage:
./file_counts.py DIRECTORY
The popularity data will be written to stdout. Each line contains information
about a single file in the following format:
<SHA-1 HASH> <COUNT> <SIZE>
Here:
* SHA-1 HASH = the hash of the file contents
* COUNT = the number of copies this file had in the given directory
* SIZE = the size of the file in bytes
"""
data = {}
if len(sys.argv) < 2:
print "Usage: %s directory" % sys.argv[0]
sys.exit(1)
directory = sys.argv[1];
if not os.path.isdir(directory):
print "%s is not a directory." % directory
sys.exit(1)
def fhash(f):
hasher = hashlib.sha1()
try:
with open(f, "rb") as fd:
for chunk in iter(lambda: fd.read(2048), ''):
hasher.update(chunk)
except IOError as e:
return None
return hasher.hexdigest()
for root, dirs, files in os.walk(directory):
if root.startswith("/proc") or root.startswith("/sys") or root.startswith("/dev"):
continue
for f in files:
path = os.path.join(root, f)
try:
st = os.stat(path)
except OSError:
continue
if not stat.S_ISREG(st.st_mode):
continue
sha1 = fhash(path)
if sha1 is None:
continue
size = st.st_size
identifier = "%s|%i" % (sha1, size)
if identifier in data:
data[identifier] += 1
else:
data[identifier] = 1
for identifier, count in data.iteritems():
sha1, size = identifier.split("|")
print "%s %s %s" % (sha1, count, size)
|
<commit_before><commit_msg>Add the script used to collect file popularity and size information<commit_after>
|
#!/usr/bin/env python
import hashlib
import os
import os.path
import stat
import sys
"""
A script for counting file popularity information in a given directory.
Usage:
./file_counts.py DIRECTORY
The popularity data will be written to stdout. Each line contains information
about a single file in the following format:
<SHA-1 HASH> <COUNT> <SIZE>
Here:
* SHA-1 HASH = the hash of the file contents
* COUNT = the number of copies this file had in the given directory
* SIZE = the size of the file in bytes
"""
data = {}
if len(sys.argv) < 2:
print "Usage: %s directory" % sys.argv[0]
sys.exit(1)
directory = sys.argv[1];
if not os.path.isdir(directory):
print "%s is not a directory." % directory
sys.exit(1)
def fhash(f):
hasher = hashlib.sha1()
try:
with open(f, "rb") as fd:
for chunk in iter(lambda: fd.read(2048), ''):
hasher.update(chunk)
except IOError as e:
return None
return hasher.hexdigest()
for root, dirs, files in os.walk(directory):
if root.startswith("/proc") or root.startswith("/sys") or root.startswith("/dev"):
continue
for f in files:
path = os.path.join(root, f)
try:
st = os.stat(path)
except OSError:
continue
if not stat.S_ISREG(st.st_mode):
continue
sha1 = fhash(path)
if sha1 is None:
continue
size = st.st_size
identifier = "%s|%i" % (sha1, size)
if identifier in data:
data[identifier] += 1
else:
data[identifier] = 1
for identifier, count in data.iteritems():
sha1, size = identifier.split("|")
print "%s %s %s" % (sha1, count, size)
|
Add the script used to collect file popularity and size information#!/usr/bin/env python
import hashlib
import os
import os.path
import stat
import sys
"""
A script for counting file popularity information in a given directory.
Usage:
./file_counts.py DIRECTORY
The popularity data will be written to stdout. Each line contains information
about a single file in the following format:
<SHA-1 HASH> <COUNT> <SIZE>
Here:
* SHA-1 HASH = the hash of the file contents
* COUNT = the number of copies this file had in the given directory
* SIZE = the size of the file in bytes
"""
data = {}
if len(sys.argv) < 2:
print "Usage: %s directory" % sys.argv[0]
sys.exit(1)
directory = sys.argv[1];
if not os.path.isdir(directory):
print "%s is not a directory." % directory
sys.exit(1)
def fhash(f):
hasher = hashlib.sha1()
try:
with open(f, "rb") as fd:
for chunk in iter(lambda: fd.read(2048), ''):
hasher.update(chunk)
except IOError as e:
return None
return hasher.hexdigest()
for root, dirs, files in os.walk(directory):
if root.startswith("/proc") or root.startswith("/sys") or root.startswith("/dev"):
continue
for f in files:
path = os.path.join(root, f)
try:
st = os.stat(path)
except OSError:
continue
if not stat.S_ISREG(st.st_mode):
continue
sha1 = fhash(path)
if sha1 is None:
continue
size = st.st_size
identifier = "%s|%i" % (sha1, size)
if identifier in data:
data[identifier] += 1
else:
data[identifier] = 1
for identifier, count in data.iteritems():
sha1, size = identifier.split("|")
print "%s %s %s" % (sha1, count, size)
|
<commit_before><commit_msg>Add the script used to collect file popularity and size information<commit_after>#!/usr/bin/env python
import hashlib
import os
import os.path
import stat
import sys
"""
A script for counting file popularity information in a given directory.
Usage:
./file_counts.py DIRECTORY
The popularity data will be written to stdout. Each line contains information
about a single file in the following format:
<SHA-1 HASH> <COUNT> <SIZE>
Here:
* SHA-1 HASH = the hash of the file contents
* COUNT = the number of copies this file had in the given directory
* SIZE = the size of the file in bytes
"""
data = {}
if len(sys.argv) < 2:
print "Usage: %s directory" % sys.argv[0]
sys.exit(1)
directory = sys.argv[1];
if not os.path.isdir(directory):
print "%s is not a directory." % directory
sys.exit(1)
def fhash(f):
hasher = hashlib.sha1()
try:
with open(f, "rb") as fd:
for chunk in iter(lambda: fd.read(2048), ''):
hasher.update(chunk)
except IOError as e:
return None
return hasher.hexdigest()
for root, dirs, files in os.walk(directory):
if root.startswith("/proc") or root.startswith("/sys") or root.startswith("/dev"):
continue
for f in files:
path = os.path.join(root, f)
try:
st = os.stat(path)
except OSError:
continue
if not stat.S_ISREG(st.st_mode):
continue
sha1 = fhash(path)
if sha1 is None:
continue
size = st.st_size
identifier = "%s|%i" % (sha1, size)
if identifier in data:
data[identifier] += 1
else:
data[identifier] = 1
for identifier, count in data.iteritems():
sha1, size = identifier.split("|")
print "%s %s %s" % (sha1, count, size)
|
|
79c7720fcc7302d498bd81106361bae126218648
|
lab/07/template_07_c.py
|
lab/07/template_07_c.py
|
def main():
matkul = #buat sebuah dictionary
while #buat agar meminta input terus :
masukkan = input(">>> ")
######
#buat agar program berhenti saat masukkan adalah "selesai"
######
masukkan_split = masukkan.split(" ")
if (masukkan_split[0] == "tambah"):
nama_matkul = masukkan_split[1]
npm_semua = masukkan_split[2:]
# buat sebuah set untuk menampung NPM-NPM diatas
# masukkan ke dictionary yang telah dibuat diatas
print(masukkan_split[1],"Berhasil Ditambahkan !")
# Gunakan method yang dimiliki set untuk melakukan 3 operasi di bawah ini
elif (masukkan_split[0] == "gabungan"):
elif (masukkan_split[0] == "pengambil"):
elif(masukkan_split[0] == "hanya"):
# ------------------------------------------------------------
elif (masukkan_split[0] == "cetak"):
# Lakukan selection, apakah cetak sebuah matkul, atau semuanya
# dan lakukan operasi print sesuai format
else:
print("Perintah salah !")
main()
|
Add lab 07 template for class C
|
Add lab 07 template for class C
|
Python
|
mit
|
laymonage/TarungLab,giovanism/TarungLab
|
Add lab 07 template for class C
|
def main():
matkul = #buat sebuah dictionary
while #buat agar meminta input terus :
masukkan = input(">>> ")
######
#buat agar program berhenti saat masukkan adalah "selesai"
######
masukkan_split = masukkan.split(" ")
if (masukkan_split[0] == "tambah"):
nama_matkul = masukkan_split[1]
npm_semua = masukkan_split[2:]
# buat sebuah set untuk menampung NPM-NPM diatas
# masukkan ke dictionary yang telah dibuat diatas
print(masukkan_split[1],"Berhasil Ditambahkan !")
# Gunakan method yang dimiliki set untuk melakukan 3 operasi di bawah ini
elif (masukkan_split[0] == "gabungan"):
elif (masukkan_split[0] == "pengambil"):
elif(masukkan_split[0] == "hanya"):
# ------------------------------------------------------------
elif (masukkan_split[0] == "cetak"):
# Lakukan selection, apakah cetak sebuah matkul, atau semuanya
# dan lakukan operasi print sesuai format
else:
print("Perintah salah !")
main()
|
<commit_before><commit_msg>Add lab 07 template for class C<commit_after>
|
def main():
matkul = #buat sebuah dictionary
while #buat agar meminta input terus :
masukkan = input(">>> ")
######
#buat agar program berhenti saat masukkan adalah "selesai"
######
masukkan_split = masukkan.split(" ")
if (masukkan_split[0] == "tambah"):
nama_matkul = masukkan_split[1]
npm_semua = masukkan_split[2:]
# buat sebuah set untuk menampung NPM-NPM diatas
# masukkan ke dictionary yang telah dibuat diatas
print(masukkan_split[1],"Berhasil Ditambahkan !")
# Gunakan method yang dimiliki set untuk melakukan 3 operasi di bawah ini
elif (masukkan_split[0] == "gabungan"):
elif (masukkan_split[0] == "pengambil"):
elif(masukkan_split[0] == "hanya"):
# ------------------------------------------------------------
elif (masukkan_split[0] == "cetak"):
# Lakukan selection, apakah cetak sebuah matkul, atau semuanya
# dan lakukan operasi print sesuai format
else:
print("Perintah salah !")
main()
|
Add lab 07 template for class Cdef main():
matkul = #buat sebuah dictionary
while #buat agar meminta input terus :
masukkan = input(">>> ")
######
#buat agar program berhenti saat masukkan adalah "selesai"
######
masukkan_split = masukkan.split(" ")
if (masukkan_split[0] == "tambah"):
nama_matkul = masukkan_split[1]
npm_semua = masukkan_split[2:]
# buat sebuah set untuk menampung NPM-NPM diatas
# masukkan ke dictionary yang telah dibuat diatas
print(masukkan_split[1],"Berhasil Ditambahkan !")
# Gunakan method yang dimiliki set untuk melakukan 3 operasi di bawah ini
elif (masukkan_split[0] == "gabungan"):
elif (masukkan_split[0] == "pengambil"):
elif(masukkan_split[0] == "hanya"):
# ------------------------------------------------------------
elif (masukkan_split[0] == "cetak"):
# Lakukan selection, apakah cetak sebuah matkul, atau semuanya
# dan lakukan operasi print sesuai format
else:
print("Perintah salah !")
main()
|
<commit_before><commit_msg>Add lab 07 template for class C<commit_after>def main():
matkul = #buat sebuah dictionary
while #buat agar meminta input terus :
masukkan = input(">>> ")
######
#buat agar program berhenti saat masukkan adalah "selesai"
######
masukkan_split = masukkan.split(" ")
if (masukkan_split[0] == "tambah"):
nama_matkul = masukkan_split[1]
npm_semua = masukkan_split[2:]
# buat sebuah set untuk menampung NPM-NPM diatas
# masukkan ke dictionary yang telah dibuat diatas
print(masukkan_split[1],"Berhasil Ditambahkan !")
# Gunakan method yang dimiliki set untuk melakukan 3 operasi di bawah ini
elif (masukkan_split[0] == "gabungan"):
elif (masukkan_split[0] == "pengambil"):
elif(masukkan_split[0] == "hanya"):
# ------------------------------------------------------------
elif (masukkan_split[0] == "cetak"):
# Lakukan selection, apakah cetak sebuah matkul, atau semuanya
# dan lakukan operasi print sesuai format
else:
print("Perintah salah !")
main()
|
|
45091c5bd93c3e6c40da9e1987eb22be61d33956
|
aospy/test/test_timedate.py
|
aospy/test/test_timedate.py
|
#!/usr/bin/env python
"""Test suite for aospy.utils module."""
import sys
import unittest
import numpy as np
import xarray as xr
from datetime import datetime
from aospy.timedate import TimeManager
class AospyTimeManagerTestCase(unittest.TestCase):
def setUp(self):
# self.tm = TimeManager()
pass
def tearDown(self):
pass
class TestTimeManager(AospyTimeManagerTestCase):
def test_to_datetime_bool(self):
self.assertEqual(TimeManager.to_datetime(True), True)
self.assertEqual(TimeManager.to_datetime(False), False)
def test_to_datetime_datetime(self):
self.assertEqual(TimeManager.to_datetime(datetime(2000, 1, 1)),
datetime(2000, 1, 1))
def test_to_datetime_year_only(self):
self.assertEqual(TimeManager.to_datetime(2000), datetime(2000, 1, 1))
def test_to_datetime_str(self):
self.assertEqual(TimeManager.to_datetime('2000-01-01'),
datetime(2000, 1, 1))
def test_str_to_datetime(self):
self.assertEqual(TimeManager.str_to_datetime('2000-01-01'),
datetime(2000, 1, 1))
def test_apply_year_offset(self):
self.assertEqual(TimeManager.apply_year_offset(datetime(1678, 1, 1)),
datetime(1678, 1, 1))
self.assertEqual(TimeManager.apply_year_offset(datetime(1, 1, 1)),
datetime(1900, 1, 1))
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestTimeManager)
unittest.TextTestRunner(verbosity=2).run(suite)
|
TEST Added tests of TimeManager
|
TEST Added tests of TimeManager
|
Python
|
apache-2.0
|
spencerkclark/aospy,spencerahill/aospy
|
TEST Added tests of TimeManager
|
#!/usr/bin/env python
"""Test suite for aospy.utils module."""
import sys
import unittest
import numpy as np
import xarray as xr
from datetime import datetime
from aospy.timedate import TimeManager
class AospyTimeManagerTestCase(unittest.TestCase):
def setUp(self):
# self.tm = TimeManager()
pass
def tearDown(self):
pass
class TestTimeManager(AospyTimeManagerTestCase):
def test_to_datetime_bool(self):
self.assertEqual(TimeManager.to_datetime(True), True)
self.assertEqual(TimeManager.to_datetime(False), False)
def test_to_datetime_datetime(self):
self.assertEqual(TimeManager.to_datetime(datetime(2000, 1, 1)),
datetime(2000, 1, 1))
def test_to_datetime_year_only(self):
self.assertEqual(TimeManager.to_datetime(2000), datetime(2000, 1, 1))
def test_to_datetime_str(self):
self.assertEqual(TimeManager.to_datetime('2000-01-01'),
datetime(2000, 1, 1))
def test_str_to_datetime(self):
self.assertEqual(TimeManager.str_to_datetime('2000-01-01'),
datetime(2000, 1, 1))
def test_apply_year_offset(self):
self.assertEqual(TimeManager.apply_year_offset(datetime(1678, 1, 1)),
datetime(1678, 1, 1))
self.assertEqual(TimeManager.apply_year_offset(datetime(1, 1, 1)),
datetime(1900, 1, 1))
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestTimeManager)
unittest.TextTestRunner(verbosity=2).run(suite)
|
<commit_before><commit_msg>TEST Added tests of TimeManager<commit_after>
|
#!/usr/bin/env python
"""Test suite for aospy.utils module."""
import sys
import unittest
import numpy as np
import xarray as xr
from datetime import datetime
from aospy.timedate import TimeManager
class AospyTimeManagerTestCase(unittest.TestCase):
def setUp(self):
# self.tm = TimeManager()
pass
def tearDown(self):
pass
class TestTimeManager(AospyTimeManagerTestCase):
def test_to_datetime_bool(self):
self.assertEqual(TimeManager.to_datetime(True), True)
self.assertEqual(TimeManager.to_datetime(False), False)
def test_to_datetime_datetime(self):
self.assertEqual(TimeManager.to_datetime(datetime(2000, 1, 1)),
datetime(2000, 1, 1))
def test_to_datetime_year_only(self):
self.assertEqual(TimeManager.to_datetime(2000), datetime(2000, 1, 1))
def test_to_datetime_str(self):
self.assertEqual(TimeManager.to_datetime('2000-01-01'),
datetime(2000, 1, 1))
def test_str_to_datetime(self):
self.assertEqual(TimeManager.str_to_datetime('2000-01-01'),
datetime(2000, 1, 1))
def test_apply_year_offset(self):
self.assertEqual(TimeManager.apply_year_offset(datetime(1678, 1, 1)),
datetime(1678, 1, 1))
self.assertEqual(TimeManager.apply_year_offset(datetime(1, 1, 1)),
datetime(1900, 1, 1))
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestTimeManager)
unittest.TextTestRunner(verbosity=2).run(suite)
|
TEST Added tests of TimeManager#!/usr/bin/env python
"""Test suite for aospy.utils module."""
import sys
import unittest
import numpy as np
import xarray as xr
from datetime import datetime
from aospy.timedate import TimeManager
class AospyTimeManagerTestCase(unittest.TestCase):
def setUp(self):
# self.tm = TimeManager()
pass
def tearDown(self):
pass
class TestTimeManager(AospyTimeManagerTestCase):
def test_to_datetime_bool(self):
self.assertEqual(TimeManager.to_datetime(True), True)
self.assertEqual(TimeManager.to_datetime(False), False)
def test_to_datetime_datetime(self):
self.assertEqual(TimeManager.to_datetime(datetime(2000, 1, 1)),
datetime(2000, 1, 1))
def test_to_datetime_year_only(self):
self.assertEqual(TimeManager.to_datetime(2000), datetime(2000, 1, 1))
def test_to_datetime_str(self):
self.assertEqual(TimeManager.to_datetime('2000-01-01'),
datetime(2000, 1, 1))
def test_str_to_datetime(self):
self.assertEqual(TimeManager.str_to_datetime('2000-01-01'),
datetime(2000, 1, 1))
def test_apply_year_offset(self):
self.assertEqual(TimeManager.apply_year_offset(datetime(1678, 1, 1)),
datetime(1678, 1, 1))
self.assertEqual(TimeManager.apply_year_offset(datetime(1, 1, 1)),
datetime(1900, 1, 1))
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestTimeManager)
unittest.TextTestRunner(verbosity=2).run(suite)
|
<commit_before><commit_msg>TEST Added tests of TimeManager<commit_after>#!/usr/bin/env python
"""Test suite for aospy.utils module."""
import sys
import unittest
import numpy as np
import xarray as xr
from datetime import datetime
from aospy.timedate import TimeManager
class AospyTimeManagerTestCase(unittest.TestCase):
def setUp(self):
# self.tm = TimeManager()
pass
def tearDown(self):
pass
class TestTimeManager(AospyTimeManagerTestCase):
def test_to_datetime_bool(self):
self.assertEqual(TimeManager.to_datetime(True), True)
self.assertEqual(TimeManager.to_datetime(False), False)
def test_to_datetime_datetime(self):
self.assertEqual(TimeManager.to_datetime(datetime(2000, 1, 1)),
datetime(2000, 1, 1))
def test_to_datetime_year_only(self):
self.assertEqual(TimeManager.to_datetime(2000), datetime(2000, 1, 1))
def test_to_datetime_str(self):
self.assertEqual(TimeManager.to_datetime('2000-01-01'),
datetime(2000, 1, 1))
def test_str_to_datetime(self):
self.assertEqual(TimeManager.str_to_datetime('2000-01-01'),
datetime(2000, 1, 1))
def test_apply_year_offset(self):
self.assertEqual(TimeManager.apply_year_offset(datetime(1678, 1, 1)),
datetime(1678, 1, 1))
self.assertEqual(TimeManager.apply_year_offset(datetime(1, 1, 1)),
datetime(1900, 1, 1))
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestTimeManager)
unittest.TextTestRunner(verbosity=2).run(suite)
|
|
4cd919e5301880895d1c05ac1bb19cee2c2443ec
|
py/string-compression.py
|
py/string-compression.py
|
class Solution(object):
def compress(self, chars):
"""
:type chars: List[str]
:rtype: int
"""
idx = 0
prev = None
cnt = 0
for c in chars:
if c == prev:
cnt += 1
else:
if prev is not None:
chars[idx] = prev
idx += 1
if cnt > 1:
for cnt_c in str(cnt):
chars[idx] = cnt_c
idx += 1
cnt = 1
prev = c
if prev is not None:
chars[idx] = prev
idx += 1
if cnt > 1:
for cnt_c in str(cnt):
chars[idx] = cnt_c
idx += 1
return idx
|
Add py solution for 443. String Compression
|
Add py solution for 443. String Compression
443. String Compression: https://leetcode.com/problems/string-compression/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 443. String Compression
443. String Compression: https://leetcode.com/problems/string-compression/
|
class Solution(object):
def compress(self, chars):
"""
:type chars: List[str]
:rtype: int
"""
idx = 0
prev = None
cnt = 0
for c in chars:
if c == prev:
cnt += 1
else:
if prev is not None:
chars[idx] = prev
idx += 1
if cnt > 1:
for cnt_c in str(cnt):
chars[idx] = cnt_c
idx += 1
cnt = 1
prev = c
if prev is not None:
chars[idx] = prev
idx += 1
if cnt > 1:
for cnt_c in str(cnt):
chars[idx] = cnt_c
idx += 1
return idx
|
<commit_before><commit_msg>Add py solution for 443. String Compression
443. String Compression: https://leetcode.com/problems/string-compression/<commit_after>
|
class Solution(object):
def compress(self, chars):
"""
:type chars: List[str]
:rtype: int
"""
idx = 0
prev = None
cnt = 0
for c in chars:
if c == prev:
cnt += 1
else:
if prev is not None:
chars[idx] = prev
idx += 1
if cnt > 1:
for cnt_c in str(cnt):
chars[idx] = cnt_c
idx += 1
cnt = 1
prev = c
if prev is not None:
chars[idx] = prev
idx += 1
if cnt > 1:
for cnt_c in str(cnt):
chars[idx] = cnt_c
idx += 1
return idx
|
Add py solution for 443. String Compression
443. String Compression: https://leetcode.com/problems/string-compression/class Solution(object):
def compress(self, chars):
"""
:type chars: List[str]
:rtype: int
"""
idx = 0
prev = None
cnt = 0
for c in chars:
if c == prev:
cnt += 1
else:
if prev is not None:
chars[idx] = prev
idx += 1
if cnt > 1:
for cnt_c in str(cnt):
chars[idx] = cnt_c
idx += 1
cnt = 1
prev = c
if prev is not None:
chars[idx] = prev
idx += 1
if cnt > 1:
for cnt_c in str(cnt):
chars[idx] = cnt_c
idx += 1
return idx
|
<commit_before><commit_msg>Add py solution for 443. String Compression
443. String Compression: https://leetcode.com/problems/string-compression/<commit_after>class Solution(object):
def compress(self, chars):
"""
:type chars: List[str]
:rtype: int
"""
idx = 0
prev = None
cnt = 0
for c in chars:
if c == prev:
cnt += 1
else:
if prev is not None:
chars[idx] = prev
idx += 1
if cnt > 1:
for cnt_c in str(cnt):
chars[idx] = cnt_c
idx += 1
cnt = 1
prev = c
if prev is not None:
chars[idx] = prev
idx += 1
if cnt > 1:
for cnt_c in str(cnt):
chars[idx] = cnt_c
idx += 1
return idx
|
|
c67cf282f4b74e44cb0df11e4ab72ef8214f62d6
|
lightning/types/utils.py
|
lightning/types/utils.py
|
from numpy import asarray, vstack, newaxis, zeros, nonzero, concatenate, transpose, atleast_2d
def check_colors(clrs):
clrs = asarray(clrs)
if clrs.ndim == 2 and clrs.shape[1] == 1:
clrs = clrs.flatten()
if clrs.ndim == 2 and clrs.shape[0] == 1:
clrs = clrs.flatten()
if clrs.ndim == 1:
clrs = clrs[:,newaxis]
elif clrs.shape[1] != 3:
raise Exception("Color array must have three values per point")
return clrs
def array_to_lines(data):
data = asarray(data)
return data
def vecs_to_points(x, y):
x = asarray(x)
y = asarray(y)
points = vstack([x, y, range(0,len(x))]).T
return points
def mat_to_links(mat, labels=None):
# get nonzero entries as list with the source, target, and value as columns
mat = asarray(mat)
if mat.ndim < 2:
raise Exception('Matrix input must be two-dimensional')
inds = nonzero(mat)
links = concatenate((transpose(nonzero(mat)), atleast_2d(mat[inds]).T), axis=1)
# pick group assignments (default is all 1s)
n = mat.shape[0]
if labels is None:
nodes = zeros((1, n)).T
else:
if labels.size != n:
raise Exception("Must provide label for each row")
nodes = labels.astype(int).reshape(labels.size, 1)
return links, nodes
def array_to_im(im):
from matplotlib.pyplot import imsave
from matplotlib.pyplot import cm
import io
imfile = io.BytesIO()
if im.ndim == 3:
# if 3D, show as RGB
imsave(imfile, im, format="png")
else:
# if 2D, show as grayscale
imsave(imfile, im, format="png", cmap=cm.gray)
if im.ndim > 3:
raise Exception("Images must be 2 or 3 dimensions")
return imfile.getvalue()
|
Move parsing utilities to separate module
|
Move parsing utilities to separate module
|
Python
|
mit
|
peterkshultz/lightning-python,peterkshultz/lightning-python,lightning-viz/lightning-python,peterkshultz/lightning-python,garretstuber/lightning-python,garretstuber/lightning-python,lightning-viz/lightning-python,garretstuber/lightning-python
|
Move parsing utilities to separate module
|
from numpy import asarray, vstack, newaxis, zeros, nonzero, concatenate, transpose, atleast_2d
def check_colors(clrs):
clrs = asarray(clrs)
if clrs.ndim == 2 and clrs.shape[1] == 1:
clrs = clrs.flatten()
if clrs.ndim == 2 and clrs.shape[0] == 1:
clrs = clrs.flatten()
if clrs.ndim == 1:
clrs = clrs[:,newaxis]
elif clrs.shape[1] != 3:
raise Exception("Color array must have three values per point")
return clrs
def array_to_lines(data):
data = asarray(data)
return data
def vecs_to_points(x, y):
x = asarray(x)
y = asarray(y)
points = vstack([x, y, range(0,len(x))]).T
return points
def mat_to_links(mat, labels=None):
# get nonzero entries as list with the source, target, and value as columns
mat = asarray(mat)
if mat.ndim < 2:
raise Exception('Matrix input must be two-dimensional')
inds = nonzero(mat)
links = concatenate((transpose(nonzero(mat)), atleast_2d(mat[inds]).T), axis=1)
# pick group assignments (default is all 1s)
n = mat.shape[0]
if labels is None:
nodes = zeros((1, n)).T
else:
if labels.size != n:
raise Exception("Must provide label for each row")
nodes = labels.astype(int).reshape(labels.size, 1)
return links, nodes
def array_to_im(im):
from matplotlib.pyplot import imsave
from matplotlib.pyplot import cm
import io
imfile = io.BytesIO()
if im.ndim == 3:
# if 3D, show as RGB
imsave(imfile, im, format="png")
else:
# if 2D, show as grayscale
imsave(imfile, im, format="png", cmap=cm.gray)
if im.ndim > 3:
raise Exception("Images must be 2 or 3 dimensions")
return imfile.getvalue()
|
<commit_before><commit_msg>Move parsing utilities to separate module<commit_after>
|
from numpy import asarray, vstack, newaxis, zeros, nonzero, concatenate, transpose, atleast_2d
def check_colors(clrs):
clrs = asarray(clrs)
if clrs.ndim == 2 and clrs.shape[1] == 1:
clrs = clrs.flatten()
if clrs.ndim == 2 and clrs.shape[0] == 1:
clrs = clrs.flatten()
if clrs.ndim == 1:
clrs = clrs[:,newaxis]
elif clrs.shape[1] != 3:
raise Exception("Color array must have three values per point")
return clrs
def array_to_lines(data):
data = asarray(data)
return data
def vecs_to_points(x, y):
x = asarray(x)
y = asarray(y)
points = vstack([x, y, range(0,len(x))]).T
return points
def mat_to_links(mat, labels=None):
# get nonzero entries as list with the source, target, and value as columns
mat = asarray(mat)
if mat.ndim < 2:
raise Exception('Matrix input must be two-dimensional')
inds = nonzero(mat)
links = concatenate((transpose(nonzero(mat)), atleast_2d(mat[inds]).T), axis=1)
    # pick group assignments (default: every node in group 0)
n = mat.shape[0]
if labels is None:
nodes = zeros((1, n)).T
else:
if labels.size != n:
raise Exception("Must provide label for each row")
nodes = labels.astype(int).reshape(labels.size, 1)
return links, nodes
def array_to_im(im):
from matplotlib.pyplot import imsave
from matplotlib.pyplot import cm
import io
imfile = io.BytesIO()
if im.ndim == 3:
# if 3D, show as RGB
imsave(imfile, im, format="png")
else:
# if 2D, show as grayscale
imsave(imfile, im, format="png", cmap=cm.gray)
if im.ndim > 3:
raise Exception("Images must be 2 or 3 dimensions")
return imfile.getvalue()
|
Move parsing utilities to separate module
from numpy import asarray, vstack, newaxis, zeros, nonzero, concatenate, transpose, atleast_2d
def check_colors(clrs):
clrs = asarray(clrs)
if clrs.ndim == 2 and clrs.shape[1] == 1:
clrs = clrs.flatten()
if clrs.ndim == 2 and clrs.shape[0] == 1:
clrs = clrs.flatten()
if clrs.ndim == 1:
clrs = clrs[:,newaxis]
elif clrs.shape[1] != 3:
raise Exception("Color array must have three values per point")
return clrs
def array_to_lines(data):
data = asarray(data)
return data
def vecs_to_points(x, y):
x = asarray(x)
y = asarray(y)
points = vstack([x, y, range(0,len(x))]).T
return points
def mat_to_links(mat, labels=None):
# get nonzero entries as list with the source, target, and value as columns
mat = asarray(mat)
if mat.ndim < 2:
raise Exception('Matrix input must be two-dimensional')
inds = nonzero(mat)
links = concatenate((transpose(nonzero(mat)), atleast_2d(mat[inds]).T), axis=1)
    # pick group assignments (default: every node in group 0)
n = mat.shape[0]
if labels is None:
nodes = zeros((1, n)).T
else:
if labels.size != n:
raise Exception("Must provide label for each row")
nodes = labels.astype(int).reshape(labels.size, 1)
return links, nodes
def array_to_im(im):
from matplotlib.pyplot import imsave
from matplotlib.pyplot import cm
import io
imfile = io.BytesIO()
if im.ndim == 3:
# if 3D, show as RGB
imsave(imfile, im, format="png")
else:
# if 2D, show as grayscale
imsave(imfile, im, format="png", cmap=cm.gray)
if im.ndim > 3:
raise Exception("Images must be 2 or 3 dimensions")
return imfile.getvalue()
|
<commit_before><commit_msg>Move parsing utilities to separate module<commit_after>from numpy import asarray, vstack, newaxis, zeros, nonzero, concatenate, transpose, atleast_2d
def check_colors(clrs):
clrs = asarray(clrs)
if clrs.ndim == 2 and clrs.shape[1] == 1:
clrs = clrs.flatten()
if clrs.ndim == 2 and clrs.shape[0] == 1:
clrs = clrs.flatten()
if clrs.ndim == 1:
clrs = clrs[:,newaxis]
elif clrs.shape[1] != 3:
raise Exception("Color array must have three values per point")
return clrs
def array_to_lines(data):
data = asarray(data)
return data
def vecs_to_points(x, y):
x = asarray(x)
y = asarray(y)
points = vstack([x, y, range(0,len(x))]).T
return points
def mat_to_links(mat, labels=None):
# get nonzero entries as list with the source, target, and value as columns
mat = asarray(mat)
if mat.ndim < 2:
raise Exception('Matrix input must be two-dimensional')
inds = nonzero(mat)
links = concatenate((transpose(nonzero(mat)), atleast_2d(mat[inds]).T), axis=1)
    # pick group assignments (default: every node in group 0)
n = mat.shape[0]
if labels is None:
nodes = zeros((1, n)).T
else:
if labels.size != n:
raise Exception("Must provide label for each row")
nodes = labels.astype(int).reshape(labels.size, 1)
return links, nodes
def array_to_im(im):
from matplotlib.pyplot import imsave
from matplotlib.pyplot import cm
import io
imfile = io.BytesIO()
if im.ndim == 3:
# if 3D, show as RGB
imsave(imfile, im, format="png")
else:
# if 2D, show as grayscale
imsave(imfile, im, format="png", cmap=cm.gray)
if im.ndim > 3:
raise Exception("Images must be 2 or 3 dimensions")
return imfile.getvalue()
|
|
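The helpers above are small array-normalisation routines; a hedged usage sketch follows, assuming the module is importable as lightning.types.utils (the inputs below are invented, not from the commit):
from numpy import array

from lightning.types.utils import check_colors, vecs_to_points, mat_to_links

# a flat list of per-point colour values becomes a column vector
print(check_colors(array([0.1, 0.5, 0.9])).shape)          # (3, 1)

# x/y vectors are stacked together with an index column
print(vecs_to_points(array([0, 1, 2]), array([3, 4, 5])))  # rows of [x, y, index]

# nonzero entries of an adjacency matrix become (source, target, value) rows
adj = array([[0, 2], [0, 0]])
links, nodes = mat_to_links(adj, labels=array([1, 2]))
print(links)   # one link: node 0 -> node 1 with weight 2
print(nodes)   # per-node group labels as a column vector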
62daab10b3c0edfc10367e4f08f0501c487915d0
|
recipe_engine/unittests/test_env.py
|
recipe_engine/unittests/test_env.py
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script to setup the environment to run unit tests.
Modifies PYTHONPATH to automatically include parent, common and pylibs
directories.
"""
import os
import sys
RUNTESTS_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_PATH = os.path.join(RUNTESTS_DIR, 'data')
BASE_DIR = os.path.abspath(os.path.join(RUNTESTS_DIR, '..', '..', '..'))
sys.path.insert(0, os.path.join(BASE_DIR, 'scripts'))
sys.path.insert(0, os.path.join(BASE_DIR, 'site_config'))
sys.path.insert(0, os.path.join(BASE_DIR, 'third_party'))
sys.path.insert(0, os.path.join(BASE_DIR, 'third_party', 'buildbot_8_4p1'))
sys.path.insert(0, os.path.join(BASE_DIR, 'third_party', 'twisted_10_2'))
sys.path.insert(0, os.path.join(BASE_DIR, 'third_party', 'mock-1.0.1'))
sys.path.insert(0, os.path.join(BASE_DIR, 'third_party', 'coverage-3.6'))
from common import find_depot_tools # pylint: disable=W0611
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script to setup the environment to run unit tests.
Modifies PYTHONPATH to automatically include parent, common and pylibs
directories.
"""
import os
import sys
RUNTESTS_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_PATH = os.path.join(RUNTESTS_DIR, 'data')
BASE_DIR = os.path.abspath(os.path.join(RUNTESTS_DIR, '..', '..', '..'))
sys.path.insert(0, os.path.join(BASE_DIR, 'scripts'))
sys.path.insert(0, os.path.join(BASE_DIR, 'site_config'))
sys.path.insert(0, os.path.join(BASE_DIR, 'third_party'))
sys.path.insert(0, os.path.join(BASE_DIR, 'third_party', 'buildbot_slave_8_4'))
sys.path.insert(0, os.path.join(BASE_DIR, 'third_party', 'twisted_10_2'))
sys.path.insert(0, os.path.join(BASE_DIR, 'third_party', 'mock-1.0.1'))
sys.path.insert(0, os.path.join(BASE_DIR, 'third_party', 'coverage-3.6'))
from common import find_depot_tools # pylint: disable=W0611
|
Remove forgotten bits of buildbot 0.7.12 compatibility code.
|
Remove forgotten bits of buildbot 0.7.12 compatibility code.
A second take of https://chromiumcodereview.appspot.com/13560017 but should work now.
R=iannucci@chromium.org
BUG=
Review URL: https://chromiumcodereview.appspot.com/20481003
git-svn-id: 239fca9b83025a0b6f823aeeca02ba5be3d9fd76@217592 0039d316-1c4b-4281-b951-d872f2087c98
|
Python
|
apache-2.0
|
luci/recipes-py,shishkander/recipes-py,luci/recipes-py,shishkander/recipes-py
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script to setup the environment to run unit tests.
Modifies PYTHONPATH to automatically include parent, common and pylibs
directories.
"""
import os
import sys
RUNTESTS_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_PATH = os.path.join(RUNTESTS_DIR, 'data')
BASE_DIR = os.path.abspath(os.path.join(RUNTESTS_DIR, '..', '..', '..'))
sys.path.insert(0, os.path.join(BASE_DIR, 'scripts'))
sys.path.insert(0, os.path.join(BASE_DIR, 'site_config'))
sys.path.insert(0, os.path.join(BASE_DIR, 'third_party'))
sys.path.insert(0, os.path.join(BASE_DIR, 'third_party', 'buildbot_8_4p1'))
sys.path.insert(0, os.path.join(BASE_DIR, 'third_party', 'twisted_10_2'))
sys.path.insert(0, os.path.join(BASE_DIR, 'third_party', 'mock-1.0.1'))
sys.path.insert(0, os.path.join(BASE_DIR, 'third_party', 'coverage-3.6'))
from common import find_depot_tools # pylint: disable=W0611
Remove forgotten bits of buildbot 0.7.12 compatibility code.
A second take of https://chromiumcodereview.appspot.com/13560017 but should work now.
R=iannucci@chromium.org
BUG=
Review URL: https://chromiumcodereview.appspot.com/20481003
git-svn-id: 239fca9b83025a0b6f823aeeca02ba5be3d9fd76@217592 0039d316-1c4b-4281-b951-d872f2087c98
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script to setup the environment to run unit tests.
Modifies PYTHONPATH to automatically include parent, common and pylibs
directories.
"""
import os
import sys
RUNTESTS_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_PATH = os.path.join(RUNTESTS_DIR, 'data')
BASE_DIR = os.path.abspath(os.path.join(RUNTESTS_DIR, '..', '..', '..'))
sys.path.insert(0, os.path.join(BASE_DIR, 'scripts'))
sys.path.insert(0, os.path.join(BASE_DIR, 'site_config'))
sys.path.insert(0, os.path.join(BASE_DIR, 'third_party'))
sys.path.insert(0, os.path.join(BASE_DIR, 'third_party', 'buildbot_slave_8_4'))
sys.path.insert(0, os.path.join(BASE_DIR, 'third_party', 'twisted_10_2'))
sys.path.insert(0, os.path.join(BASE_DIR, 'third_party', 'mock-1.0.1'))
sys.path.insert(0, os.path.join(BASE_DIR, 'third_party', 'coverage-3.6'))
from common import find_depot_tools # pylint: disable=W0611
|
<commit_before># Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script to setup the environment to run unit tests.
Modifies PYTHONPATH to automatically include parent, common and pylibs
directories.
"""
import os
import sys
RUNTESTS_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_PATH = os.path.join(RUNTESTS_DIR, 'data')
BASE_DIR = os.path.abspath(os.path.join(RUNTESTS_DIR, '..', '..', '..'))
sys.path.insert(0, os.path.join(BASE_DIR, 'scripts'))
sys.path.insert(0, os.path.join(BASE_DIR, 'site_config'))
sys.path.insert(0, os.path.join(BASE_DIR, 'third_party'))
sys.path.insert(0, os.path.join(BASE_DIR, 'third_party', 'buildbot_8_4p1'))
sys.path.insert(0, os.path.join(BASE_DIR, 'third_party', 'twisted_10_2'))
sys.path.insert(0, os.path.join(BASE_DIR, 'third_party', 'mock-1.0.1'))
sys.path.insert(0, os.path.join(BASE_DIR, 'third_party', 'coverage-3.6'))
from common import find_depot_tools # pylint: disable=W0611
<commit_msg>Remove forgotten bits of buildbot 0.7.12 compatibility code.
A second take of https://chromiumcodereview.appspot.com/13560017 but should work now.
R=iannucci@chromium.org
BUG=
Review URL: https://chromiumcodereview.appspot.com/20481003
git-svn-id: 239fca9b83025a0b6f823aeeca02ba5be3d9fd76@217592 0039d316-1c4b-4281-b951-d872f2087c98<commit_after>
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script to setup the environment to run unit tests.
Modifies PYTHONPATH to automatically include parent, common and pylibs
directories.
"""
import os
import sys
RUNTESTS_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_PATH = os.path.join(RUNTESTS_DIR, 'data')
BASE_DIR = os.path.abspath(os.path.join(RUNTESTS_DIR, '..', '..', '..'))
sys.path.insert(0, os.path.join(BASE_DIR, 'scripts'))
sys.path.insert(0, os.path.join(BASE_DIR, 'site_config'))
sys.path.insert(0, os.path.join(BASE_DIR, 'third_party'))
sys.path.insert(0, os.path.join(BASE_DIR, 'third_party', 'buildbot_slave_8_4'))
sys.path.insert(0, os.path.join(BASE_DIR, 'third_party', 'twisted_10_2'))
sys.path.insert(0, os.path.join(BASE_DIR, 'third_party', 'mock-1.0.1'))
sys.path.insert(0, os.path.join(BASE_DIR, 'third_party', 'coverage-3.6'))
from common import find_depot_tools # pylint: disable=W0611
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script to setup the environment to run unit tests.
Modifies PYTHONPATH to automatically include parent, common and pylibs
directories.
"""
import os
import sys
RUNTESTS_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_PATH = os.path.join(RUNTESTS_DIR, 'data')
BASE_DIR = os.path.abspath(os.path.join(RUNTESTS_DIR, '..', '..', '..'))
sys.path.insert(0, os.path.join(BASE_DIR, 'scripts'))
sys.path.insert(0, os.path.join(BASE_DIR, 'site_config'))
sys.path.insert(0, os.path.join(BASE_DIR, 'third_party'))
sys.path.insert(0, os.path.join(BASE_DIR, 'third_party', 'buildbot_8_4p1'))
sys.path.insert(0, os.path.join(BASE_DIR, 'third_party', 'twisted_10_2'))
sys.path.insert(0, os.path.join(BASE_DIR, 'third_party', 'mock-1.0.1'))
sys.path.insert(0, os.path.join(BASE_DIR, 'third_party', 'coverage-3.6'))
from common import find_depot_tools # pylint: disable=W0611
Remove forgotten bits of buildbot 0.7.12 compatibility code.
A second take of https://chromiumcodereview.appspot.com/13560017 but should work now.
R=iannucci@chromium.org
BUG=
Review URL: https://chromiumcodereview.appspot.com/20481003
git-svn-id: 239fca9b83025a0b6f823aeeca02ba5be3d9fd76@217592 0039d316-1c4b-4281-b951-d872f2087c98
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script to setup the environment to run unit tests.
Modifies PYTHONPATH to automatically include parent, common and pylibs
directories.
"""
import os
import sys
RUNTESTS_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_PATH = os.path.join(RUNTESTS_DIR, 'data')
BASE_DIR = os.path.abspath(os.path.join(RUNTESTS_DIR, '..', '..', '..'))
sys.path.insert(0, os.path.join(BASE_DIR, 'scripts'))
sys.path.insert(0, os.path.join(BASE_DIR, 'site_config'))
sys.path.insert(0, os.path.join(BASE_DIR, 'third_party'))
sys.path.insert(0, os.path.join(BASE_DIR, 'third_party', 'buildbot_slave_8_4'))
sys.path.insert(0, os.path.join(BASE_DIR, 'third_party', 'twisted_10_2'))
sys.path.insert(0, os.path.join(BASE_DIR, 'third_party', 'mock-1.0.1'))
sys.path.insert(0, os.path.join(BASE_DIR, 'third_party', 'coverage-3.6'))
from common import find_depot_tools # pylint: disable=W0611
|
<commit_before># Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script to setup the environment to run unit tests.
Modifies PYTHONPATH to automatically include parent, common and pylibs
directories.
"""
import os
import sys
RUNTESTS_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_PATH = os.path.join(RUNTESTS_DIR, 'data')
BASE_DIR = os.path.abspath(os.path.join(RUNTESTS_DIR, '..', '..', '..'))
sys.path.insert(0, os.path.join(BASE_DIR, 'scripts'))
sys.path.insert(0, os.path.join(BASE_DIR, 'site_config'))
sys.path.insert(0, os.path.join(BASE_DIR, 'third_party'))
sys.path.insert(0, os.path.join(BASE_DIR, 'third_party', 'buildbot_8_4p1'))
sys.path.insert(0, os.path.join(BASE_DIR, 'third_party', 'twisted_10_2'))
sys.path.insert(0, os.path.join(BASE_DIR, 'third_party', 'mock-1.0.1'))
sys.path.insert(0, os.path.join(BASE_DIR, 'third_party', 'coverage-3.6'))
from common import find_depot_tools # pylint: disable=W0611
<commit_msg>Remove forgotten bits of buildbot 0.7.12 compatibility code.
A second take of https://chromiumcodereview.appspot.com/13560017 but should work now.
R=iannucci@chromium.org
BUG=
Review URL: https://chromiumcodereview.appspot.com/20481003
git-svn-id: 239fca9b83025a0b6f823aeeca02ba5be3d9fd76@217592 0039d316-1c4b-4281-b951-d872f2087c98<commit_after># Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script to setup the environment to run unit tests.
Modifies PYTHONPATH to automatically include parent, common and pylibs
directories.
"""
import os
import sys
RUNTESTS_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_PATH = os.path.join(RUNTESTS_DIR, 'data')
BASE_DIR = os.path.abspath(os.path.join(RUNTESTS_DIR, '..', '..', '..'))
sys.path.insert(0, os.path.join(BASE_DIR, 'scripts'))
sys.path.insert(0, os.path.join(BASE_DIR, 'site_config'))
sys.path.insert(0, os.path.join(BASE_DIR, 'third_party'))
sys.path.insert(0, os.path.join(BASE_DIR, 'third_party', 'buildbot_slave_8_4'))
sys.path.insert(0, os.path.join(BASE_DIR, 'third_party', 'twisted_10_2'))
sys.path.insert(0, os.path.join(BASE_DIR, 'third_party', 'mock-1.0.1'))
sys.path.insert(0, os.path.join(BASE_DIR, 'third_party', 'coverage-3.6'))
from common import find_depot_tools # pylint: disable=W0611
|
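test_env.py works purely through import side effects; below is a hedged sketch of a sibling test module that relies on it. Only the test_env import and the third_party layout come from the commit above, the test itself is illustrative:
import test_env  # noqa: F401  -- imported only for its sys.path side effects

import unittest


class PathSetupTest(unittest.TestCase):
    def test_mock_is_importable(self):
        # third_party/mock-1.0.1 was prepended to sys.path by test_env
        import mock
        self.assertTrue(hasattr(mock, 'patch'))


if __name__ == '__main__':
    unittest.main()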
d4927b00ec95029b0995f5a6c64ecd0cb2398079
|
examples/tic_ql_mlp_selfplay_all.py
|
examples/tic_ql_mlp_selfplay_all.py
|
'''
The Q-learning algorithm is used to learn the state-action values for all
Tic-Tac-Toe positions by playing games against itself (self-play).
'''
from capstone.game.games import TicTacToe
from capstone.game.players import RandPlayer
from capstone.rl import Environment, GameMDP
from capstone.rl.learners import ApproxQLearningSelfPlay
from capstone.rl.policies import RandomPolicy
from capstone.rl.utils import EpisodicWLDPlotter
from capstone.rl.value_functions import MLP
seed = 23
game = TicTacToe()
mdp = GameMDP(game)
env = Environment(mdp)
mlp = MLP()
qlearning = ApproxQLearningSelfPlay(
env=env,
qfunction=MLP(),
policy=RandomPolicy(env.actions, random_state=seed),
discount_factor=0.99,
n_episodes=100000,
callbacks=[
EpisodicWLDPlotter(
game=game,
opp_player=RandPlayer(random_state=seed),
n_matches=100,
period=1000,
filepath='figures/tic_ql_mlp_selfplay_all.pdf'
)
]
)
qlearning.train()
|
Add example for Tic-Tac-Toe with a Q-Network via Self-play
|
Add example for Tic-Tac-Toe with a Q-Network via Self-play
|
Python
|
mit
|
davidrobles/mlnd-capstone-code
|
Add example for Tic-Tac-Toe with a Q-Network via Self-play
|
'''
The Q-learning algorithm is used to learn the state-action values for all
Tic-Tac-Toe positions by playing games against itself (self-play).
'''
from capstone.game.games import TicTacToe
from capstone.game.players import RandPlayer
from capstone.rl import Environment, GameMDP
from capstone.rl.learners import ApproxQLearningSelfPlay
from capstone.rl.policies import RandomPolicy
from capstone.rl.utils import EpisodicWLDPlotter
from capstone.rl.value_functions import MLP
seed = 23
game = TicTacToe()
mdp = GameMDP(game)
env = Environment(mdp)
mlp = MLP()
qlearning = ApproxQLearningSelfPlay(
env=env,
qfunction=MLP(),
policy=RandomPolicy(env.actions, random_state=seed),
discount_factor=0.99,
n_episodes=100000,
callbacks=[
EpisodicWLDPlotter(
game=game,
opp_player=RandPlayer(random_state=seed),
n_matches=100,
period=1000,
filepath='figures/tic_ql_mlp_selfplay_all.pdf'
)
]
)
qlearning.train()
|
<commit_before><commit_msg>Add example for Tic-Tac-Toe with a Q-Network via Self-play<commit_after>
|
'''
The Q-learning algorithm is used to learn the state-action values for all
Tic-Tac-Toe positions by playing games against itself (self-play).
'''
from capstone.game.games import TicTacToe
from capstone.game.players import RandPlayer
from capstone.rl import Environment, GameMDP
from capstone.rl.learners import ApproxQLearningSelfPlay
from capstone.rl.policies import RandomPolicy
from capstone.rl.utils import EpisodicWLDPlotter
from capstone.rl.value_functions import MLP
seed = 23
game = TicTacToe()
mdp = GameMDP(game)
env = Environment(mdp)
mlp = MLP()
qlearning = ApproxQLearningSelfPlay(
env=env,
qfunction=MLP(),
policy=RandomPolicy(env.actions, random_state=seed),
discount_factor=0.99,
n_episodes=100000,
callbacks=[
EpisodicWLDPlotter(
game=game,
opp_player=RandPlayer(random_state=seed),
n_matches=100,
period=1000,
filepath='figures/tic_ql_mlp_selfplay_all.pdf'
)
]
)
qlearning.train()
|
Add example for Tic-Tac-Toe with a Q-Network via Self-play
'''
The Q-learning algorithm is used to learn the state-action values for all
Tic-Tac-Toe positions by playing games against itself (self-play).
'''
from capstone.game.games import TicTacToe
from capstone.game.players import RandPlayer
from capstone.rl import Environment, GameMDP
from capstone.rl.learners import ApproxQLearningSelfPlay
from capstone.rl.policies import RandomPolicy
from capstone.rl.utils import EpisodicWLDPlotter
from capstone.rl.value_functions import MLP
seed = 23
game = TicTacToe()
mdp = GameMDP(game)
env = Environment(mdp)
mlp = MLP()
qlearning = ApproxQLearningSelfPlay(
env=env,
qfunction=MLP(),
policy=RandomPolicy(env.actions, random_state=seed),
discount_factor=0.99,
n_episodes=100000,
callbacks=[
EpisodicWLDPlotter(
game=game,
opp_player=RandPlayer(random_state=seed),
n_matches=100,
period=1000,
filepath='figures/tic_ql_mlp_selfplay_all.pdf'
)
]
)
qlearning.train()
|
<commit_before><commit_msg>Add example for Tic-Tac-Toe with a Q-Network via Self-play<commit_after>'''
The Q-learning algorithm is used to learn the state-action values for all
Tic-Tac-Toe positions by playing games against itself (self-play).
'''
from capstone.game.games import TicTacToe
from capstone.game.players import RandPlayer
from capstone.rl import Environment, GameMDP
from capstone.rl.learners import ApproxQLearningSelfPlay
from capstone.rl.policies import RandomPolicy
from capstone.rl.utils import EpisodicWLDPlotter
from capstone.rl.value_functions import MLP
seed = 23
game = TicTacToe()
mdp = GameMDP(game)
env = Environment(mdp)
mlp = MLP()
qlearning = ApproxQLearningSelfPlay(
env=env,
qfunction=MLP(),
policy=RandomPolicy(env.actions, random_state=seed),
discount_factor=0.99,
n_episodes=100000,
callbacks=[
EpisodicWLDPlotter(
game=game,
opp_player=RandPlayer(random_state=seed),
n_matches=100,
period=1000,
filepath='figures/tic_ql_mlp_selfplay_all.pdf'
)
]
)
qlearning.train()
|
|
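The example above plugs an MLP into ApproxQLearningSelfPlay from the capstone package; for orientation, the underlying Q-learning update is sketched below in its plain tabular form on a made-up two-state task (nothing here comes from the capstone code):
import random
from collections import defaultdict

# toy deterministic MDP: in state 0, action 'a' reaches terminal state 1 with reward 1,
# action 'b' stays in state 0 with reward 0
def step(state, action):
    if state == 0 and action == 'a':
        return 1, 1.0, True
    return 0, 0.0, False

alpha, gamma, epsilon = 0.5, 0.99, 0.1
Q = defaultdict(float)                      # Q[(state, action)]
actions = ['a', 'b']

for episode in range(200):
    state, done = 0, False
    while not done:
        # epsilon-greedy action selection
        if random.random() < epsilon:
            action = random.choice(actions)
        else:
            action = max(actions, key=lambda a: Q[(state, a)])
        next_state, reward, done = step(state, action)
        # Q-learning update: move Q(s,a) toward r + gamma * max_a' Q(s',a')
        target = reward if done else reward + gamma * max(Q[(next_state, a)] for a in actions)
        Q[(state, action)] += alpha * (target - Q[(state, action)])
        state = next_state

print(Q[(0, 'a')], Q[(0, 'b')])   # Q(0,'a') should approach 1.0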
491ddec4c429993b9149eb61139fe71d691f697f
|
mysite/search/models.py
|
mysite/search/models.py
|
from django.db import models
# Create your models here.
class Project(models.Model):
name = models.CharField(max_length=200)
language = models.CharField(max_length=200)
icon_url = models.URLField(max_length=200)
class Bug(models.Model):
project = models.ForeignKey(Project)
title = models.CharField(max_length=200)
description = models.TextField()
status = models.CharField(max_length=200)
importance = models.CharField(max_length=200)
people_involved = models.IntegerField()
date_reported = models.DateTimeField()
last_touched = models.DateTimeField()
last_polled = models.DateTimeField()
submitter_username = models.CharField(max_length=200)
submitter_realname = models.CharField(max_length=200)
|
from django.db import models
# Create your models here.
class Project(models.Model):
name = models.CharField(max_length=200)
language = models.CharField(max_length=200)
icon_url = models.URLField(max_length=200)
class Bug(models.Model):
project = models.ForeignKey(Project)
title = models.CharField(max_length=200)
description = models.TextField()
status = models.CharField(max_length=200)
importance = models.CharField(max_length=200)
people_involved = models.IntegerField()
date_reported = models.DateTimeField()
last_touched = models.DateTimeField()
last_polled = models.DateTimeField()
submitter_username = models.CharField(max_length=200)
submitter_realname = models.CharField(max_length=200)
canonical_bug_link = models.URLField(max_length=200)
|
Add a bug link field
|
Add a bug link field
|
Python
|
agpl-3.0
|
onceuponatimeforever/oh-mainline,nirmeshk/oh-mainline,moijes12/oh-mainline,SnappleCap/oh-mainline,openhatch/oh-mainline,nirmeshk/oh-mainline,SnappleCap/oh-mainline,Changaco/oh-mainline,ehashman/oh-mainline,mzdaniel/oh-mainline,Changaco/oh-mainline,vipul-sharma20/oh-mainline,nirmeshk/oh-mainline,ojengwa/oh-mainline,Changaco/oh-mainline,moijes12/oh-mainline,mzdaniel/oh-mainline,eeshangarg/oh-mainline,willingc/oh-mainline,heeraj123/oh-mainline,mzdaniel/oh-mainline,mzdaniel/oh-mainline,openhatch/oh-mainline,ojengwa/oh-mainline,waseem18/oh-mainline,waseem18/oh-mainline,waseem18/oh-mainline,vipul-sharma20/oh-mainline,mzdaniel/oh-mainline,SnappleCap/oh-mainline,sudheesh001/oh-mainline,sudheesh001/oh-mainline,heeraj123/oh-mainline,eeshangarg/oh-mainline,onceuponatimeforever/oh-mainline,onceuponatimeforever/oh-mainline,campbe13/openhatch,sudheesh001/oh-mainline,willingc/oh-mainline,campbe13/openhatch,vipul-sharma20/oh-mainline,Changaco/oh-mainline,sudheesh001/oh-mainline,onceuponatimeforever/oh-mainline,openhatch/oh-mainline,ehashman/oh-mainline,willingc/oh-mainline,ojengwa/oh-mainline,jledbetter/openhatch,jledbetter/openhatch,Changaco/oh-mainline,vipul-sharma20/oh-mainline,ehashman/oh-mainline,openhatch/oh-mainline,sudheesh001/oh-mainline,mzdaniel/oh-mainline,jledbetter/openhatch,waseem18/oh-mainline,onceuponatimeforever/oh-mainline,jledbetter/openhatch,waseem18/oh-mainline,mzdaniel/oh-mainline,eeshangarg/oh-mainline,ojengwa/oh-mainline,moijes12/oh-mainline,eeshangarg/oh-mainline,willingc/oh-mainline,openhatch/oh-mainline,eeshangarg/oh-mainline,heeraj123/oh-mainline,nirmeshk/oh-mainline,jledbetter/openhatch,heeraj123/oh-mainline,moijes12/oh-mainline,SnappleCap/oh-mainline,willingc/oh-mainline,ojengwa/oh-mainline,ehashman/oh-mainline,campbe13/openhatch,moijes12/oh-mainline,SnappleCap/oh-mainline,ehashman/oh-mainline,campbe13/openhatch,heeraj123/oh-mainline,vipul-sharma20/oh-mainline,nirmeshk/oh-mainline,campbe13/openhatch
|
from django.db import models
# Create your models here.
class Project(models.Model):
name = models.CharField(max_length=200)
language = models.CharField(max_length=200)
icon_url = models.URLField(max_length=200)
class Bug(models.Model):
project = models.ForeignKey(Project)
title = models.CharField(max_length=200)
description = models.TextField()
status = models.CharField(max_length=200)
importance = models.CharField(max_length=200)
people_involved = models.IntegerField()
date_reported = models.DateTimeField()
last_touched = models.DateTimeField()
last_polled = models.DateTimeField()
submitter_username = models.CharField(max_length=200)
submitter_realname = models.CharField(max_length=200)
Add a bug link field
|
from django.db import models
# Create your models here.
class Project(models.Model):
name = models.CharField(max_length=200)
language = models.CharField(max_length=200)
icon_url = models.URLField(max_length=200)
class Bug(models.Model):
project = models.ForeignKey(Project)
title = models.CharField(max_length=200)
description = models.TextField()
status = models.CharField(max_length=200)
importance = models.CharField(max_length=200)
people_involved = models.IntegerField()
date_reported = models.DateTimeField()
last_touched = models.DateTimeField()
last_polled = models.DateTimeField()
submitter_username = models.CharField(max_length=200)
submitter_realname = models.CharField(max_length=200)
canonical_bug_link = models.URLField(max_length=200)
|
<commit_before>from django.db import models
# Create your models here.
class Project(models.Model):
name = models.CharField(max_length=200)
language = models.CharField(max_length=200)
icon_url = models.URLField(max_length=200)
class Bug(models.Model):
project = models.ForeignKey(Project)
title = models.CharField(max_length=200)
description = models.TextField()
status = models.CharField(max_length=200)
importance = models.CharField(max_length=200)
people_involved = models.IntegerField()
date_reported = models.DateTimeField()
last_touched = models.DateTimeField()
last_polled = models.DateTimeField()
submitter_username = models.CharField(max_length=200)
submitter_realname = models.CharField(max_length=200)
<commit_msg>Add a bug link field<commit_after>
|
from django.db import models
# Create your models here.
class Project(models.Model):
name = models.CharField(max_length=200)
language = models.CharField(max_length=200)
icon_url = models.URLField(max_length=200)
class Bug(models.Model):
project = models.ForeignKey(Project)
title = models.CharField(max_length=200)
description = models.TextField()
status = models.CharField(max_length=200)
importance = models.CharField(max_length=200)
people_involved = models.IntegerField()
date_reported = models.DateTimeField()
last_touched = models.DateTimeField()
last_polled = models.DateTimeField()
submitter_username = models.CharField(max_length=200)
submitter_realname = models.CharField(max_length=200)
canonical_bug_link = models.URLField(max_length=200)
|
from django.db import models
# Create your models here.
class Project(models.Model):
name = models.CharField(max_length=200)
language = models.CharField(max_length=200)
icon_url = models.URLField(max_length=200)
class Bug(models.Model):
project = models.ForeignKey(Project)
title = models.CharField(max_length=200)
description = models.TextField()
status = models.CharField(max_length=200)
importance = models.CharField(max_length=200)
people_involved = models.IntegerField()
date_reported = models.DateTimeField()
last_touched = models.DateTimeField()
last_polled = models.DateTimeField()
submitter_username = models.CharField(max_length=200)
submitter_realname = models.CharField(max_length=200)
Add a bug link field
from django.db import models
# Create your models here.
class Project(models.Model):
name = models.CharField(max_length=200)
language = models.CharField(max_length=200)
icon_url = models.URLField(max_length=200)
class Bug(models.Model):
project = models.ForeignKey(Project)
title = models.CharField(max_length=200)
description = models.TextField()
status = models.CharField(max_length=200)
importance = models.CharField(max_length=200)
people_involved = models.IntegerField()
date_reported = models.DateTimeField()
last_touched = models.DateTimeField()
last_polled = models.DateTimeField()
submitter_username = models.CharField(max_length=200)
submitter_realname = models.CharField(max_length=200)
canonical_bug_link = models.URLField(max_length=200)
|
<commit_before>from django.db import models
# Create your models here.
class Project(models.Model):
name = models.CharField(max_length=200)
language = models.CharField(max_length=200)
icon_url = models.URLField(max_length=200)
class Bug(models.Model):
project = models.ForeignKey(Project)
title = models.CharField(max_length=200)
description = models.TextField()
status = models.CharField(max_length=200)
importance = models.CharField(max_length=200)
people_involved = models.IntegerField()
date_reported = models.DateTimeField()
last_touched = models.DateTimeField()
last_polled = models.DateTimeField()
submitter_username = models.CharField(max_length=200)
submitter_realname = models.CharField(max_length=200)
<commit_msg>Add a bug link field<commit_after>from django.db import models
# Create your models here.
class Project(models.Model):
name = models.CharField(max_length=200)
language = models.CharField(max_length=200)
icon_url = models.URLField(max_length=200)
class Bug(models.Model):
project = models.ForeignKey(Project)
title = models.CharField(max_length=200)
description = models.TextField()
status = models.CharField(max_length=200)
importance = models.CharField(max_length=200)
people_involved = models.IntegerField()
date_reported = models.DateTimeField()
last_touched = models.DateTimeField()
last_polled = models.DateTimeField()
submitter_username = models.CharField(max_length=200)
submitter_realname = models.CharField(max_length=200)
canonical_bug_link = models.URLField(max_length=200)
|
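A hedged sketch of populating and querying the new canonical_bug_link column once the corresponding schema change has been applied; it assumes the app is importable as mysite.search.models, and the project, bug values, and naive datetimes are invented for illustration:
from datetime import datetime

from mysite.search.models import Project, Bug

project = Project.objects.create(
    name='GNOME Do', language='C#', icon_url='http://example.com/icon.png')

Bug.objects.create(
    project=project,
    title='Crash on startup',
    description='Example bug used to illustrate the new field.',
    status='NEW',
    importance='High',
    people_involved=1,
    date_reported=datetime(2009, 1, 1),
    last_touched=datetime(2009, 1, 2),
    last_polled=datetime(2009, 1, 2),
    submitter_username='example',
    submitter_realname='Example User',
    canonical_bug_link='http://bugzilla.gnome.org/show_bug.cgi?id=12345',
)

# the canonical link can now be filtered on like any other column
stale = Bug.objects.filter(canonical_bug_link__contains='bugzilla.gnome.org')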
815f7feb1d0dfae944f43e12086ade31d62afcdc
|
handover_api/migrations/0014_auto_20160616_1500.py
|
handover_api/migrations/0014_auto_20160616_1500.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-06-16 15:00
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('handover_api', '0013_auto_20160614_2010'),
]
operations = [
migrations.AlterField(
model_name='draft',
name='from_user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='drafts_from', to='handover_api.DukeDSUser'),
),
migrations.AlterField(
model_name='draft',
name='project',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='handover_api.DukeDSProject'),
),
migrations.AlterField(
model_name='draft',
name='to_user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='drafts_to', to='handover_api.DukeDSUser'),
),
migrations.AlterField(
model_name='handover',
name='from_user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='handovers_from', to='handover_api.DukeDSUser'),
),
migrations.AlterField(
model_name='handover',
name='project',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='handover_api.DukeDSProject'),
),
migrations.AlterField(
model_name='handover',
name='to_user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='handovers_to', to='handover_api.DukeDSUser'),
),
]
|
Add migration that cascades deletion of handover/draft on delete
|
Add migration that cascades deletion of handover/draft on delete
|
Python
|
mit
|
Duke-GCB/DukeDSHandoverService,Duke-GCB/DukeDSHandoverService,Duke-GCB/DukeDSHandoverService
|
Add migration that cascades deletion of handover/draft on delete
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-06-16 15:00
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('handover_api', '0013_auto_20160614_2010'),
]
operations = [
migrations.AlterField(
model_name='draft',
name='from_user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='drafts_from', to='handover_api.DukeDSUser'),
),
migrations.AlterField(
model_name='draft',
name='project',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='handover_api.DukeDSProject'),
),
migrations.AlterField(
model_name='draft',
name='to_user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='drafts_to', to='handover_api.DukeDSUser'),
),
migrations.AlterField(
model_name='handover',
name='from_user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='handovers_from', to='handover_api.DukeDSUser'),
),
migrations.AlterField(
model_name='handover',
name='project',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='handover_api.DukeDSProject'),
),
migrations.AlterField(
model_name='handover',
name='to_user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='handovers_to', to='handover_api.DukeDSUser'),
),
]
|
<commit_before><commit_msg>Add migration that cascades deletion of handover/draft on delete<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-06-16 15:00
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('handover_api', '0013_auto_20160614_2010'),
]
operations = [
migrations.AlterField(
model_name='draft',
name='from_user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='drafts_from', to='handover_api.DukeDSUser'),
),
migrations.AlterField(
model_name='draft',
name='project',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='handover_api.DukeDSProject'),
),
migrations.AlterField(
model_name='draft',
name='to_user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='drafts_to', to='handover_api.DukeDSUser'),
),
migrations.AlterField(
model_name='handover',
name='from_user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='handovers_from', to='handover_api.DukeDSUser'),
),
migrations.AlterField(
model_name='handover',
name='project',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='handover_api.DukeDSProject'),
),
migrations.AlterField(
model_name='handover',
name='to_user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='handovers_to', to='handover_api.DukeDSUser'),
),
]
|
Add migration that cascades deletion of handover/draft on delete
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-06-16 15:00
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('handover_api', '0013_auto_20160614_2010'),
]
operations = [
migrations.AlterField(
model_name='draft',
name='from_user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='drafts_from', to='handover_api.DukeDSUser'),
),
migrations.AlterField(
model_name='draft',
name='project',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='handover_api.DukeDSProject'),
),
migrations.AlterField(
model_name='draft',
name='to_user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='drafts_to', to='handover_api.DukeDSUser'),
),
migrations.AlterField(
model_name='handover',
name='from_user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='handovers_from', to='handover_api.DukeDSUser'),
),
migrations.AlterField(
model_name='handover',
name='project',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='handover_api.DukeDSProject'),
),
migrations.AlterField(
model_name='handover',
name='to_user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='handovers_to', to='handover_api.DukeDSUser'),
),
]
|
<commit_before><commit_msg>Add migration that cascades deletion of handover/draft on delete<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-06-16 15:00
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('handover_api', '0013_auto_20160614_2010'),
]
operations = [
migrations.AlterField(
model_name='draft',
name='from_user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='drafts_from', to='handover_api.DukeDSUser'),
),
migrations.AlterField(
model_name='draft',
name='project',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='handover_api.DukeDSProject'),
),
migrations.AlterField(
model_name='draft',
name='to_user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='drafts_to', to='handover_api.DukeDSUser'),
),
migrations.AlterField(
model_name='handover',
name='from_user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='handovers_from', to='handover_api.DukeDSUser'),
),
migrations.AlterField(
model_name='handover',
name='project',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='handover_api.DukeDSProject'),
),
migrations.AlterField(
model_name='handover',
name='to_user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='handovers_to', to='handover_api.DukeDSUser'),
),
]
|
|
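For reference, a hedged sketch of what on_delete=CASCADE means for the relations altered above: removing a DukeDSUser (or DukeDSProject) also removes the Draft and Handover rows that point at it. The constructor field names for users and projects below are assumptions, only the foreign-key names come from the migration:
from handover_api.models import DukeDSUser, DukeDSProject, Draft

sender = DukeDSUser.objects.create(dds_id='user-1')        # field name is an assumption
receiver = DukeDSUser.objects.create(dds_id='user-2')
project = DukeDSProject.objects.create(project_id='project-1')

Draft.objects.create(project=project, from_user=sender, to_user=receiver)

# CASCADE: deleting the sender also deletes the draft that references it
sender.delete()
print(Draft.objects.count())   # 0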
ff3779f3c482f57182f4855ae09cff3d95ca1304
|
talempd/zest/skype/ValidateBinarySearchTree.py
|
talempd/zest/skype/ValidateBinarySearchTree.py
|
"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
"""
@param root: The root of binary tree.
@return: True if the binary tree is BST, or false
"""
def isValidBST(self, root):
# write your code here
stack, pre = [root], None
while stack != [] and stack[0] is not None:
node = stack.pop()
while node is not None:
stack.append(node)
node = node.left
node = stack.pop()
if pre is not None and pre.val >= node.val:
return False
pre = node
stack.append(node.right)
return True
|
Add ValBST for Zest Skype
|
Add ValBST for Zest Skype
|
Python
|
mit
|
cc13ny/Allin,cc13ny/algo,cc13ny/algo,cc13ny/Allin,Chasego/codirit,cc13ny/Allin,cc13ny/algo,Chasego/cod,Chasego/cod,cc13ny/algo,cc13ny/Allin,Chasego/codi,Chasego/cod,Chasego/codirit,cc13ny/Allin,Chasego/codi,Chasego/cod,Chasego/codirit,Chasego/codi,Chasego/codi,cc13ny/algo,Chasego/cod,Chasego/codirit,Chasego/codi,Chasego/codirit
|
Add ValBST for Zest Skype
|
"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
"""
@param root: The root of binary tree.
@return: True if the binary tree is BST, or false
"""
def isValidBST(self, root):
# write your code here
stack, pre = [root], None
while stack != [] and stack[0] is not None:
node = stack.pop()
while node is not None:
stack.append(node)
node = node.left
node = stack.pop()
if pre is not None and pre.val >= node.val:
return False
pre = node
stack.append(node.right)
return True
|
<commit_before><commit_msg>Add ValBST for Zest Skype<commit_after>
|
"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
"""
@param root: The root of binary tree.
@return: True if the binary tree is BST, or false
"""
def isValidBST(self, root):
# write your code here
stack, pre = [root], None
while stack != [] and stack[0] is not None:
node = stack.pop()
while node is not None:
stack.append(node)
node = node.left
node = stack.pop()
if pre is not None and pre.val >= node.val:
return False
pre = node
stack.append(node.right)
return True
|
Add ValBST for Zest Skype
"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
"""
@param root: The root of binary tree.
@return: True if the binary tree is BST, or false
"""
def isValidBST(self, root):
# write your code here
stack, pre = [root], None
while stack != [] and stack[0] is not None:
node = stack.pop()
while node is not None:
stack.append(node)
node = node.left
node = stack.pop()
if pre is not None and pre.val >= node.val:
return False
pre = node
stack.append(node.right)
return True
|
<commit_before><commit_msg>Add ValBST for Zest Skype<commit_after>"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
"""
@param root: The root of binary tree.
@return: True if the binary tree is BST, or false
"""
def isValidBST(self, root):
# write your code here
stack, pre = [root], None
while stack != [] and stack[0] is not None:
node = stack.pop()
while node is not None:
stack.append(node)
node = node.left
node = stack.pop()
if pre is not None and pre.val >= node.val:
return False
pre = node
stack.append(node.right)
return True
|
|
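The solution above validates the BST with an iterative in-order traversal and assumes strictly increasing values; an equivalent, commonly used recursive formulation with explicit bounds is sketched below for comparison (self-contained, not taken from the repositories listed):
class TreeNode:
    def __init__(self, val):
        self.val = val
        self.left, self.right = None, None


def is_valid_bst(root, lo=float('-inf'), hi=float('inf')):
    # every node must fall strictly between the bounds inherited from its ancestors
    if root is None:
        return True
    if not (lo < root.val < hi):
        return False
    return (is_valid_bst(root.left, lo, root.val)
            and is_valid_bst(root.right, root.val, hi))


# tiny usage example:      2
#                         / \
#                        1   3
root = TreeNode(2)
root.left, root.right = TreeNode(1), TreeNode(3)
print(is_valid_bst(root))   # True
root.right.val = 2          # a duplicate violates the strict ordering
print(is_valid_bst(root))   # False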
08392cf690ba8f8025ddee7eb494810de5c94bcc
|
pattsgui/aboutdialog.py
|
pattsgui/aboutdialog.py
|
##
## patts-qt - Qt GUI client for PATTS
## Copyright (C) 2015 Delwink, LLC
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU Affero General Public License as published by
## the Free Software Foundation, version 3 only.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Affero General Public License for more details.
##
## You should have received a copy of the GNU Affero General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
##
|
Add file for info dialog
|
Add file for info dialog
|
Python
|
agpl-3.0
|
delwink/patts-qt
|
Add file for info dialog
|
##
## patts-qt - Qt GUI client for PATTS
## Copyright (C) 2015 Delwink, LLC
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU Affero General Public License as published by
## the Free Software Foundation, version 3 only.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Affero General Public License for more details.
##
## You should have received a copy of the GNU Affero General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
##
|
<commit_before><commit_msg>Add file for info dialog<commit_after>
|
##
## patts-qt - Qt GUI client for PATTS
## Copyright (C) 2015 Delwink, LLC
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU Affero General Public License as published by
## the Free Software Foundation, version 3 only.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Affero General Public License for more details.
##
## You should have received a copy of the GNU Affero General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
##
|
Add file for info dialog
##
## patts-qt - Qt GUI client for PATTS
## Copyright (C) 2015 Delwink, LLC
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU Affero General Public License as published by
## the Free Software Foundation, version 3 only.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Affero General Public License for more details.
##
## You should have received a copy of the GNU Affero General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
##
|
<commit_before><commit_msg>Add file for info dialog<commit_after>##
## patts-qt - Qt GUI client for PATTS
## Copyright (C) 2015 Delwink, LLC
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU Affero General Public License as published by
## the Free Software Foundation, version 3 only.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Affero General Public License for more details.
##
## You should have received a copy of the GNU Affero General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
##
|
|
70b1313fe9528a72fab966ab69e7d784e40653d5
|
functest/tests/unit/features/test_netready.py
|
functest/tests/unit/features/test_netready.py
|
#!/usr/bin/env python
# Copyright (c) 2017 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
# pylint: disable=missing-docstring
import logging
import unittest
from functest.opnfv_tests.features import netready
from functest.utils import constants
class NetreadyTesting(unittest.TestCase):
logging.disable(logging.CRITICAL)
def setUp(self):
self.netready = netready.GluonVping()
def test_init(self):
self.assertEqual(self.netready.project_name, "netready")
self.assertEqual(self.netready.case_name, "gluon_vping")
self.assertEqual(
self.netready.repo,
constants.CONST.__getattribute__("dir_repo_netready"))
self.assertEqual(
self.netready.cmd,
'cd {}/test/functest && python ./gluon-test-suite.py'.format(
self.netready.repo))
if __name__ == "__main__":
unittest.main(verbosity=2)
|
Add unit tests for netready
|
Add unit tests for netready
Change-Id: I45f9209c55bd65c9538fc3b1181ccbcfbdd23a40
Signed-off-by: Cédric Ollivier <d48310251a4a484d041bc5d09a9ac4d86d20f793@orange.com>
|
Python
|
apache-2.0
|
mywulin/functest,mywulin/functest,opnfv/functest,opnfv/functest
|
Add unit tests for netready
Change-Id: I45f9209c55bd65c9538fc3b1181ccbcfbdd23a40
Signed-off-by: Cédric Ollivier <d48310251a4a484d041bc5d09a9ac4d86d20f793@orange.com>
|
#!/usr/bin/env python
# Copyright (c) 2017 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
# pylint: disable=missing-docstring
import logging
import unittest
from functest.opnfv_tests.features import netready
from functest.utils import constants
class NetreadyTesting(unittest.TestCase):
logging.disable(logging.CRITICAL)
def setUp(self):
self.netready = netready.GluonVping()
def test_init(self):
self.assertEqual(self.netready.project_name, "netready")
self.assertEqual(self.netready.case_name, "gluon_vping")
self.assertEqual(
self.netready.repo,
constants.CONST.__getattribute__("dir_repo_netready"))
self.assertEqual(
self.netready.cmd,
'cd {}/test/functest && python ./gluon-test-suite.py'.format(
self.netready.repo))
if __name__ == "__main__":
unittest.main(verbosity=2)
|
<commit_before><commit_msg>Add unit tests for netready
Change-Id: I45f9209c55bd65c9538fc3b1181ccbcfbdd23a40
Signed-off-by: Cédric Ollivier <d48310251a4a484d041bc5d09a9ac4d86d20f793@orange.com><commit_after>
|
#!/usr/bin/env python
# Copyright (c) 2017 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
# pylint: disable=missing-docstring
import logging
import unittest
from functest.opnfv_tests.features import netready
from functest.utils import constants
class NetreadyTesting(unittest.TestCase):
logging.disable(logging.CRITICAL)
def setUp(self):
self.netready = netready.GluonVping()
def test_init(self):
self.assertEqual(self.netready.project_name, "netready")
self.assertEqual(self.netready.case_name, "gluon_vping")
self.assertEqual(
self.netready.repo,
constants.CONST.__getattribute__("dir_repo_netready"))
self.assertEqual(
self.netready.cmd,
'cd {}/test/functest && python ./gluon-test-suite.py'.format(
self.netready.repo))
if __name__ == "__main__":
unittest.main(verbosity=2)
|
Add unit tests for netready
Change-Id: I45f9209c55bd65c9538fc3b1181ccbcfbdd23a40
Signed-off-by: Cédric Ollivier <d48310251a4a484d041bc5d09a9ac4d86d20f793@orange.com>
#!/usr/bin/env python
# Copyright (c) 2017 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
# pylint: disable=missing-docstring
import logging
import unittest
from functest.opnfv_tests.features import netready
from functest.utils import constants
class NetreadyTesting(unittest.TestCase):
logging.disable(logging.CRITICAL)
def setUp(self):
self.netready = netready.GluonVping()
def test_init(self):
self.assertEqual(self.netready.project_name, "netready")
self.assertEqual(self.netready.case_name, "gluon_vping")
self.assertEqual(
self.netready.repo,
constants.CONST.__getattribute__("dir_repo_netready"))
self.assertEqual(
self.netready.cmd,
'cd {}/test/functest && python ./gluon-test-suite.py'.format(
self.netready.repo))
if __name__ == "__main__":
unittest.main(verbosity=2)
|
<commit_before><commit_msg>Add unit tests for netready
Change-Id: I45f9209c55bd65c9538fc3b1181ccbcfbdd23a40
Signed-off-by: Cédric Ollivier <d48310251a4a484d041bc5d09a9ac4d86d20f793@orange.com><commit_after>#!/usr/bin/env python
# Copyright (c) 2017 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
# pylint: disable=missing-docstring
import logging
import unittest
from functest.opnfv_tests.features import netready
from functest.utils import constants
class NetreadyTesting(unittest.TestCase):
logging.disable(logging.CRITICAL)
def setUp(self):
self.netready = netready.GluonVping()
def test_init(self):
self.assertEqual(self.netready.project_name, "netready")
self.assertEqual(self.netready.case_name, "gluon_vping")
self.assertEqual(
self.netready.repo,
constants.CONST.__getattribute__("dir_repo_netready"))
self.assertEqual(
self.netready.cmd,
'cd {}/test/functest && python ./gluon-test-suite.py'.format(
self.netready.repo))
if __name__ == "__main__":
unittest.main(verbosity=2)
|
|
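To run the new test module on its own, the standard unittest loader is enough; a hedged sketch follows, assuming a checkout where the functest package is importable:
# equivalent shell command:  python -m unittest functest.tests.unit.features.test_netready -v
import unittest

from functest.tests.unit.features import test_netready

suite = unittest.defaultTestLoader.loadTestsFromModule(test_netready)
unittest.TextTestRunner(verbosity=2).run(suite)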
956e363d41b21ab85d2c64bbc81db1d9becab43f
|
java/graveyard/support/scripts/copy-string.py
|
java/graveyard/support/scripts/copy-string.py
|
#!/usr/bin/env python
import os
import os.path
import sys
import lxml.etree
source_path = os.path.expanduser('~/workspace/git/android/platform/packages/apps/Mms')
dest_path = os.path.expanduser('~/workspace/git/android-sms-merge/android_sms_merge')
def main():
if len(sys.argv) < 2:
sys.exit('Error: STRING is required')
string_to_copy = sys.argv[1]
source_res_path = os.path.join(source_path, 'res')
dest_res_path = os.path.join(dest_path, 'res')
# This allows lxml to output much nicer looking output
parser = lxml.etree.XMLParser(remove_blank_text=True)
for values_folder in os.listdir(source_res_path):
source_values_path = os.path.join(source_res_path, values_folder)
if (os.path.isdir(source_values_path)
and values_folder.startswith('values')):
source_strings_path = os.path.join(source_values_path, 'strings.xml')
if (os.path.isfile(source_strings_path)):
source_root = lxml.etree.parse(source_strings_path, parser)
for source_element in source_root.iter('string'):
if source_element.get('name') == string_to_copy:
dest_values_path = os.path.join(dest_res_path, values_folder)
# Create the destination values folder if necessary
if not os.path.exists(dest_values_path):
os.mkdir(dest_values_path)
dest_strings_path = os.path.join(dest_values_path, 'strings.xml')
if not os.path.exists(dest_strings_path):
root = lxml.etree.Element('resources')
root.append(source_element)
dest_root = lxml.etree.ElementTree(root)
else:
dest_root = lxml.etree.parse(dest_strings_path, parser)
# Iterate over the elements in the destination file
it = dest_root.iter('string')
while True:
try:
dest_element = it.next()
# Don't insert duplicate elements
if dest_element.get('name') == source_element.get('name'):
break
# Insert the new string alphabetically
if string_to_copy < dest_element.get('name'):
dest_element.addprevious(source_element)
# Don't process any more destination elements
break
except StopIteration:
# If we made it this far, add it to the end
dest_element.addnext(source_element)
break
# Write the updated XML file
dest_root.write(
dest_strings_path,
encoding='utf-8',
pretty_print=True,
xml_declaration=True,
)
# Don't process any more source elements
break
if __name__ == '__main__':
main()
|
Add script for copying strings from one project/app to another
|
Add script for copying strings from one project/app to another
|
Python
|
mit
|
bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile
|
Add script for copying strings from one project/app to another
|
#!/usr/bin/env python
import os
import os.path
import sys
import lxml.etree
source_path = os.path.expanduser('~/workspace/git/android/platform/packages/apps/Mms')
dest_path = os.path.expanduser('~/workspace/git/android-sms-merge/android_sms_merge')
def main():
if len(sys.argv) < 2:
sys.exit('Error: STRING is required')
string_to_copy = sys.argv[1]
source_res_path = os.path.join(source_path, 'res')
dest_res_path = os.path.join(dest_path, 'res')
# This allows lxml to output much nicer looking output
parser = lxml.etree.XMLParser(remove_blank_text=True)
for values_folder in os.listdir(source_res_path):
source_values_path = os.path.join(source_res_path, values_folder)
if (os.path.isdir(source_values_path)
and values_folder.startswith('values')):
source_strings_path = os.path.join(source_values_path, 'strings.xml')
if (os.path.isfile(source_strings_path)):
source_root = lxml.etree.parse(source_strings_path, parser)
for source_element in source_root.iter('string'):
if source_element.get('name') == string_to_copy:
dest_values_path = os.path.join(dest_res_path, values_folder)
# Create the destination values folder if necessary
if not os.path.exists(dest_values_path):
os.mkdir(dest_values_path)
dest_strings_path = os.path.join(dest_values_path, 'strings.xml')
if not os.path.exists(dest_strings_path):
root = lxml.etree.Element('resources')
root.append(source_element)
dest_root = lxml.etree.ElementTree(root)
else:
dest_root = lxml.etree.parse(dest_strings_path, parser)
# Iterate over the elements in the destination file
it = dest_root.iter('string')
while True:
try:
dest_element = it.next()
# Don't insert duplicate elements
if dest_element.get('name') == source_element.get('name'):
break
# Insert the new string alphabetically
if string_to_copy < dest_element.get('name'):
dest_element.addprevious(source_element)
# Don't process any more destination elements
break
except StopIteration:
# If we made it this far, add it to the end
dest_element.addnext(source_element)
break
# Write the updated XML file
dest_root.write(
dest_strings_path,
encoding='utf-8',
pretty_print=True,
xml_declaration=True,
)
# Don't process any more source elements
break
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script for copying strings from one project/app to another<commit_after>
|
#!/usr/bin/env python
import os
import os.path
import sys
import lxml.etree
source_path = os.path.expanduser('~/workspace/git/android/platform/packages/apps/Mms')
dest_path = os.path.expanduser('~/workspace/git/android-sms-merge/android_sms_merge')
def main():
if len(sys.argv) < 2:
sys.exit('Error: STRING is required')
string_to_copy = sys.argv[1]
source_res_path = os.path.join(source_path, 'res')
dest_res_path = os.path.join(dest_path, 'res')
# This allows lxml to output much nicer looking output
parser = lxml.etree.XMLParser(remove_blank_text=True)
for values_folder in os.listdir(source_res_path):
source_values_path = os.path.join(source_res_path, values_folder)
if (os.path.isdir(source_values_path)
and values_folder.startswith('values')):
source_strings_path = os.path.join(source_values_path, 'strings.xml')
if (os.path.isfile(source_strings_path)):
source_root = lxml.etree.parse(source_strings_path, parser)
for source_element in source_root.iter('string'):
if source_element.get('name') == string_to_copy:
dest_values_path = os.path.join(dest_res_path, values_folder)
# Create the destination values folder if necessary
if not os.path.exists(dest_values_path):
os.mkdir(dest_values_path)
dest_strings_path = os.path.join(dest_values_path, 'strings.xml')
if not os.path.exists(dest_strings_path):
root = lxml.etree.Element('resources')
root.append(source_element)
dest_root = lxml.etree.ElementTree(root)
else:
dest_root = lxml.etree.parse(dest_strings_path, parser)
# Iterate over the elements in the destination file
it = dest_root.iter('string')
while True:
try:
dest_element = it.next()
# Don't insert duplicate elements
if dest_element.get('name') == source_element.get('name'):
break
# Insert the new string alphabetically
if string_to_copy < dest_element.get('name'):
dest_element.addprevious(source_element)
# Don't process any more destination elements
break
except StopIteration:
# If we made it this far, add it to the end
dest_element.addnext(source_element)
break
# Write the updated XML file
dest_root.write(
dest_strings_path,
encoding='utf-8',
pretty_print=True,
xml_declaration=True,
)
# Don't process any more source elements
break
if __name__ == '__main__':
main()
|
Add script for copying strings from one project/app to another#!/usr/bin/env python
import os
import os.path
import sys
import lxml.etree
source_path = os.path.expanduser('~/workspace/git/android/platform/packages/apps/Mms')
dest_path = os.path.expanduser('~/workspace/git/android-sms-merge/android_sms_merge')
def main():
if len(sys.argv) < 2:
sys.exit('Error: STRING is required')
string_to_copy = sys.argv[1]
source_res_path = os.path.join(source_path, 'res')
dest_res_path = os.path.join(dest_path, 'res')
# This allows lxml to output much nicer looking output
parser = lxml.etree.XMLParser(remove_blank_text=True)
for values_folder in os.listdir(source_res_path):
source_values_path = os.path.join(source_res_path, values_folder)
if (os.path.isdir(source_values_path)
and values_folder.startswith('values')):
source_strings_path = os.path.join(source_values_path, 'strings.xml')
if (os.path.isfile(source_strings_path)):
source_root = lxml.etree.parse(source_strings_path, parser)
for source_element in source_root.iter('string'):
if source_element.get('name') == string_to_copy:
dest_values_path = os.path.join(dest_res_path, values_folder)
# Create the destination values folder if necessary
if not os.path.exists(dest_values_path):
os.mkdir(dest_values_path)
dest_strings_path = os.path.join(dest_values_path, 'strings.xml')
if not os.path.exists(dest_strings_path):
root = lxml.etree.Element('resources')
root.append(source_element)
dest_root = lxml.etree.ElementTree(root)
else:
dest_root = lxml.etree.parse(dest_strings_path, parser)
# Iterate over the elements in the destination file
it = dest_root.iter('string')
while True:
try:
dest_element = it.next()
# Don't insert duplicate elements
if dest_element.get('name') == source_element.get('name'):
break
# Insert the new string alphabetically
if string_to_copy < dest_element.get('name'):
dest_element.addprevious(source_element)
# Don't process any more destination elements
break
except StopIteration:
# If we made it this far, add it to the end
dest_element.addnext(source_element)
break
# Write the updated XML file
dest_root.write(
dest_strings_path,
encoding='utf-8',
pretty_print=True,
xml_declaration=True,
)
# Don't process any more source elements
break
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script for copying strings from one project/app to another<commit_after>#!/usr/bin/env python
import os
import os.path
import sys
import lxml.etree
source_path = os.path.expanduser('~/workspace/git/android/platform/packages/apps/Mms')
dest_path = os.path.expanduser('~/workspace/git/android-sms-merge/android_sms_merge')
def main():
if len(sys.argv) < 2:
sys.exit('Error: STRING is required')
string_to_copy = sys.argv[1]
source_res_path = os.path.join(source_path, 'res')
dest_res_path = os.path.join(dest_path, 'res')
# This allows lxml to output much nicer looking output
parser = lxml.etree.XMLParser(remove_blank_text=True)
for values_folder in os.listdir(source_res_path):
source_values_path = os.path.join(source_res_path, values_folder)
if (os.path.isdir(source_values_path)
and values_folder.startswith('values')):
source_strings_path = os.path.join(source_values_path, 'strings.xml')
if (os.path.isfile(source_strings_path)):
source_root = lxml.etree.parse(source_strings_path, parser)
for source_element in source_root.iter('string'):
if source_element.get('name') == string_to_copy:
dest_values_path = os.path.join(dest_res_path, values_folder)
# Create the destination values folder if necessary
if not os.path.exists(dest_values_path):
os.mkdir(dest_values_path)
dest_strings_path = os.path.join(dest_values_path, 'strings.xml')
if not os.path.exists(dest_strings_path):
root = lxml.etree.Element('resources')
root.append(source_element)
dest_root = lxml.etree.ElementTree(root)
else:
dest_root = lxml.etree.parse(dest_strings_path, parser)
# Iterate over the elements in the destination file
it = dest_root.iter('string')
while True:
try:
dest_element = it.next()
# Don't insert duplicate elements
if dest_element.get('name') == source_element.get('name'):
break
# Insert the new string alphabetically
if string_to_copy < dest_element.get('name'):
dest_element.addprevious(source_element)
# Don't process any more destination elements
break
except StopIteration:
# If we made it this far, add it to the end
dest_element.addnext(source_element)
break
# Write the updated XML file
dest_root.write(
dest_strings_path,
encoding='utf-8',
pretty_print=True,
xml_declaration=True,
)
# Don't process any more source elements
break
if __name__ == '__main__':
main()
|
|
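A short usage note on the string-copying script above: it takes the resource name as its single argument and walks every values* folder of the source project. The snippet below is a minimal, self-contained sketch of the same alphabetical-insertion idea with lxml; the file path, resource name and text are illustrative assumptions, not values taken from either project.
# Hedged sketch of inserting a <string> element alphabetically with lxml.
# The strings.xml path, resource name and text below are examples only.
import lxml.etree

def insert_string_alphabetically(strings_path, name, text):
    parser = lxml.etree.XMLParser(remove_blank_text=True)
    tree = lxml.etree.parse(strings_path, parser)
    new_element = lxml.etree.Element('string', name=name)
    new_element.text = text
    for existing in tree.iter('string'):
        if existing.get('name') == name:
            return  # already present; skip the duplicate
        if name < existing.get('name'):
            existing.addprevious(new_element)  # insert before the first larger name
            break
    else:
        tree.getroot().append(new_element)  # larger than every existing name
    tree.write(strings_path, encoding='utf-8', pretty_print=True, xml_declaration=True)

insert_string_alphabetically('res/values/strings.xml', 'example_string', 'Example text')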
cbb4a2c3b23571cbbbe4e7316e4ce651e8422856
|
migrations/versions/0241_another_letter_org.py
|
migrations/versions/0241_another_letter_org.py
|
"""empty message
Revision ID: 0241_another_letter_org
Revises: 0240_dvla_org_non_nullable
"""
# revision identifiers, used by Alembic.
revision = '0241_another_letter_org'
down_revision = '0240_dvla_org_non_nullable'
from alembic import op
NEW_ORGANISATIONS = [
('515', 'ACAS', 'acas'),
]
def upgrade():
for numeric_id, name, filename in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}', '{}')
""".format(numeric_id, name, filename))
def downgrade():
for numeric_id, _, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
Add ACAS to dvla_org_id table
|
Add ACAS to dvla_org_id table
|
Python
|
mit
|
alphagov/notifications-api,alphagov/notifications-api
|
Add ACAS to dvla_org_id table
|
"""empty message
Revision ID: 0241_another_letter_org
Revises: 0240_dvla_org_non_nullable
"""
# revision identifiers, used by Alembic.
revision = '0241_another_letter_org'
down_revision = '0240_dvla_org_non_nullable'
from alembic import op
NEW_ORGANISATIONS = [
('515', 'ACAS', 'acas'),
]
def upgrade():
for numeric_id, name, filename in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}', '{}')
""".format(numeric_id, name, filename))
def downgrade():
for numeric_id, _, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
<commit_before><commit_msg>Add ACAS to dvla_org_id table<commit_after>
|
"""empty message
Revision ID: 0241_another_letter_org
Revises: 0240_dvla_org_non_nullable
"""
# revision identifiers, used by Alembic.
revision = '0241_another_letter_org'
down_revision = '0240_dvla_org_non_nullable'
from alembic import op
NEW_ORGANISATIONS = [
('515', 'ACAS', 'acas'),
]
def upgrade():
for numeric_id, name, filename in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}', '{}')
""".format(numeric_id, name, filename))
def downgrade():
for numeric_id, _, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
Add ACAS to dvla_org_id table"""empty message
Revision ID: 0241_another_letter_org
Revises: 0240_dvla_org_non_nullable
"""
# revision identifiers, used by Alembic.
revision = '0241_another_letter_org'
down_revision = '0240_dvla_org_non_nullable'
from alembic import op
NEW_ORGANISATIONS = [
('515', 'ACAS', 'acas'),
]
def upgrade():
for numeric_id, name, filename in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}', '{}')
""".format(numeric_id, name, filename))
def downgrade():
for numeric_id, _, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
<commit_before><commit_msg>Add ACAS to dvla_org_id table<commit_after>"""empty message
Revision ID: 0241_another_letter_org
Revises: 0240_dvla_org_non_nullable
"""
# revision identifiers, used by Alembic.
revision = '0241_another_letter_org'
down_revision = '0240_dvla_org_non_nullable'
from alembic import op
NEW_ORGANISATIONS = [
('515', 'ACAS', 'acas'),
]
def upgrade():
for numeric_id, name, filename in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}', '{}')
""".format(numeric_id, name, filename))
def downgrade():
for numeric_id, _, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
|
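The upgrade and downgrade above interpolate the values straight into the SQL string, which is harmless here because they are hard-coded constants. A hedged alternative sketch using bound parameters is shown below; the explicit column names (id, name, filename) are an assumption about the dvla_organisation table, not something stated in the migration.
# Sketch only: the same insert expressed with bound parameters.
# Column names are assumed; adjust them to the real table definition.
import sqlalchemy as sa
from alembic import op

NEW_ORGANISATIONS = [
    ('515', 'ACAS', 'acas'),
]

def upgrade():
    statement = sa.text(
        "INSERT INTO dvla_organisation (id, name, filename) "
        "VALUES (:id, :name, :filename)"
    )
    for numeric_id, name, filename in NEW_ORGANISATIONS:
        op.execute(statement.bindparams(id=numeric_id, name=name, filename=filename))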
add75a5c4222485a1b8a5266f8a1c26ec9600fb0
|
mygpo/podcasts/migrations/0036_related_podcasts.py
|
mygpo/podcasts/migrations/0036_related_podcasts.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-07 17:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('podcasts', '0035_django_uuidfield'),
]
operations = [
migrations.AlterField(
model_name='podcast',
name='related_podcasts',
field=models.ManyToManyField(related_name='_podcast_related_podcasts_+', to='podcasts.Podcast'),
),
]
|
Add migration for related podcasts
|
Add migration for related podcasts
Change was discovered by makemigrations, even though no change was made in the
source.
|
Python
|
agpl-3.0
|
gpodder/mygpo,gpodder/mygpo,gpodder/mygpo,gpodder/mygpo
|
Add migration for related podcasts
Change was discovered by makemigrations, even though no change was made in the
source.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-07 17:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('podcasts', '0035_django_uuidfield'),
]
operations = [
migrations.AlterField(
model_name='podcast',
name='related_podcasts',
field=models.ManyToManyField(related_name='_podcast_related_podcasts_+', to='podcasts.Podcast'),
),
]
|
<commit_before><commit_msg>Add migration for related podcasts
Change was discovered by makemigrations, even though no change was made in the
source.<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-07 17:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('podcasts', '0035_django_uuidfield'),
]
operations = [
migrations.AlterField(
model_name='podcast',
name='related_podcasts',
field=models.ManyToManyField(related_name='_podcast_related_podcasts_+', to='podcasts.Podcast'),
),
]
|
Add migration for related podcasts
Change was discovered by makemigrations, even though no change was made in the
source.# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-07 17:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('podcasts', '0035_django_uuidfield'),
]
operations = [
migrations.AlterField(
model_name='podcast',
name='related_podcasts',
field=models.ManyToManyField(related_name='_podcast_related_podcasts_+', to='podcasts.Podcast'),
),
]
|
<commit_before><commit_msg>Add migration for related podcasts
Change was discovered by makemigrations, even though no change was made in the
source.<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-07 17:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('podcasts', '0035_django_uuidfield'),
]
operations = [
migrations.AlterField(
model_name='podcast',
name='related_podcasts',
field=models.ManyToManyField(related_name='_podcast_related_podcasts_+', to='podcasts.Podcast'),
),
]
|
|
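For context, a related_name ending in '+' is what Django generates for a self-referential ManyToManyField with no reverse accessor, so a model along the lines of the sketch below would emit this AlterField when re-run through makemigrations under a newer Django. The exact field options in mygpo are an assumption here.
# Hedged sketch of a model shape that yields a hidden '..._+' related_name.
# Field options are illustrative; the real mygpo model may differ.
import uuid
from django.db import models

class Podcast(models.Model):
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    # A symmetrical self-referential M2M gets an auto-generated related_name
    # that ends in '+', meaning no reverse accessor is created for it.
    related_podcasts = models.ManyToManyField('self')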
23343f0040f319f59991192636cadfd74af187af
|
oslo_concurrency/_i18n.py
|
oslo_concurrency/_i18n.py
|
# Copyright 2014 Mirantis Inc.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo import i18n
_translators = i18n.TranslatorFactory(domain='oslo.concurrency')
# The primary translation function using the well-known name "_"
_ = _translators.primary
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
|
# Copyright 2014 Mirantis Inc.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import oslo_i18n
_translators = oslo_i18n.TranslatorFactory(domain='oslo.concurrency')
# The primary translation function using the well-known name "_"
_ = _translators.primary
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
|
Drop use of namespaced oslo.i18n
|
Drop use of namespaced oslo.i18n
Related-blueprint: drop-namespace-packages
Change-Id: Ic8247cb896ba6337932d7a74618debd698584fa0
|
Python
|
apache-2.0
|
varunarya10/oslo.concurrency,JioCloud/oslo.concurrency
|
# Copyright 2014 Mirantis Inc.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo import i18n
_translators = i18n.TranslatorFactory(domain='oslo.concurrency')
# The primary translation function using the well-known name "_"
_ = _translators.primary
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
Drop use of namespaced oslo.i18n
Related-blueprint: drop-namespace-packages
Change-Id: Ic8247cb896ba6337932d7a74618debd698584fa0
|
# Copyright 2014 Mirantis Inc.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import oslo_i18n
_translators = oslo_i18n.TranslatorFactory(domain='oslo.concurrency')
# The primary translation function using the well-known name "_"
_ = _translators.primary
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
|
<commit_before># Copyright 2014 Mirantis Inc.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo import i18n
_translators = i18n.TranslatorFactory(domain='oslo.concurrency')
# The primary translation function using the well-known name "_"
_ = _translators.primary
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
<commit_msg>Drop use of namespaced oslo.i18n
Related-blueprint: drop-namespace-packages
Change-Id: Ic8247cb896ba6337932d7a74618debd698584fa0<commit_after>
|
# Copyright 2014 Mirantis Inc.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import oslo_i18n
_translators = oslo_i18n.TranslatorFactory(domain='oslo.concurrency')
# The primary translation function using the well-known name "_"
_ = _translators.primary
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
|
# Copyright 2014 Mirantis Inc.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo import i18n
_translators = i18n.TranslatorFactory(domain='oslo.concurrency')
# The primary translation function using the well-known name "_"
_ = _translators.primary
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
Drop use of namespaced oslo.i18n
Related-blueprint: drop-namespace-packages
Change-Id: Ic8247cb896ba6337932d7a74618debd698584fa0# Copyright 2014 Mirantis Inc.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import oslo_i18n
_translators = oslo_i18n.TranslatorFactory(domain='oslo.concurrency')
# The primary translation function using the well-known name "_"
_ = _translators.primary
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
|
<commit_before># Copyright 2014 Mirantis Inc.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo import i18n
_translators = i18n.TranslatorFactory(domain='oslo.concurrency')
# The primary translation function using the well-known name "_"
_ = _translators.primary
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
<commit_msg>Drop use of namespaced oslo.i18n
Related-blueprint: drop-namespace-packages
Change-Id: Ic8247cb896ba6337932d7a74618debd698584fa0<commit_after># Copyright 2014 Mirantis Inc.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import oslo_i18n
_translators = oslo_i18n.TranslatorFactory(domain='oslo.concurrency')
# The primary translation function using the well-known name "_"
_ = _translators.primary
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
|
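As a usage note, the markers defined above are what the rest of the library imports; a typical call site looks roughly like the sketch below. The function, logger name and message text are illustrative assumptions rather than code from oslo.concurrency.
# Hedged sketch of how _ and _LW are normally used at a call site.
import logging

from oslo_concurrency._i18n import _, _LW

LOG = logging.getLogger(__name__)

def acquire_lock(name):
    if not name:
        raise ValueError(_('A lock name is required'))
    # Log-level translators wrap the message; format arguments stay outside the call.
    LOG.warning(_LW('Timed out waiting for lock "%s"'), name)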
68b8725500ae0d8d9260f36dc4478b146cacdcc0
|
tests/unit/modules/test_nilrt_ip.py
|
tests/unit/modules/test_nilrt_ip.py
|
# -*- coding: utf-8 -*-
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt Libs
import salt.modules.nilrt_ip as nilrt_ip
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase
from tests.support.mock import patch
class NilrtIPTestCase(TestCase, LoaderModuleMockMixin):
'''
TestCase for salt.modules.nilrt_ip module
'''
def setup_loader_modules(self):
return {nilrt_ip: {'__grains__':
{'lsb_distrib_id': 'not_nilrt'}}
}
def test_change_state_down_state(self):
'''
Tests _change_state when not connected
and new state is down
'''
with patch('salt.modules.nilrt_ip._interface_to_service', return_value=True):
with patch('salt.modules.nilrt_ip._connected', return_value=False):
assert nilrt_ip._change_state('test_interface', 'down')
def test_change_state_up_state(self):
'''
Tests _change_state when connected
and new state is up
'''
with patch('salt.modules.nilrt_ip._interface_to_service', return_value=True):
with patch('salt.modules.nilrt_ip._connected', return_value=True):
assert nilrt_ip._change_state('test_interface', 'up')
|
Add nilrt_ip module unit tests
|
Add nilrt_ip module unit tests
|
Python
|
apache-2.0
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
Add nilrt_ip module unit tests
|
# -*- coding: utf-8 -*-
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt Libs
import salt.modules.nilrt_ip as nilrt_ip
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase
from tests.support.mock import patch
class NilrtIPTestCase(TestCase, LoaderModuleMockMixin):
'''
TestCase for salt.modules.nilrt_ip module
'''
def setup_loader_modules(self):
return {nilrt_ip: {'__grains__':
{'lsb_distrib_id': 'not_nilrt'}}
}
def test_change_state_down_state(self):
'''
Tests _change_state when not connected
and new state is down
'''
with patch('salt.modules.nilrt_ip._interface_to_service', return_value=True):
with patch('salt.modules.nilrt_ip._connected', return_value=False):
assert nilrt_ip._change_state('test_interface', 'down')
def test_change_state_up_state(self):
'''
Tests _change_state when connected
and new state is up
'''
with patch('salt.modules.nilrt_ip._interface_to_service', return_value=True):
with patch('salt.modules.nilrt_ip._connected', return_value=True):
assert nilrt_ip._change_state('test_interface', 'up')
|
<commit_before><commit_msg>Add nilrt_ip module unit tests<commit_after>
|
# -*- coding: utf-8 -*-
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt Libs
import salt.modules.nilrt_ip as nilrt_ip
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase
from tests.support.mock import patch
class NilrtIPTestCase(TestCase, LoaderModuleMockMixin):
'''
TestCase for salt.modules.nilrt_ip module
'''
def setup_loader_modules(self):
return {nilrt_ip: {'__grains__':
{'lsb_distrib_id': 'not_nilrt'}}
}
def test_change_state_down_state(self):
'''
Tests _change_state when not connected
and new state is down
'''
with patch('salt.modules.nilrt_ip._interface_to_service', return_value=True):
with patch('salt.modules.nilrt_ip._connected', return_value=False):
assert nilrt_ip._change_state('test_interface', 'down')
def test_change_state_up_state(self):
'''
Tests _change_state when connected
and new state is up
'''
with patch('salt.modules.nilrt_ip._interface_to_service', return_value=True):
with patch('salt.modules.nilrt_ip._connected', return_value=True):
assert nilrt_ip._change_state('test_interface', 'up')
|
Add nilrt_ip module unit tests# -*- coding: utf-8 -*-
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt Libs
import salt.modules.nilrt_ip as nilrt_ip
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase
from tests.support.mock import patch
class NilrtIPTestCase(TestCase, LoaderModuleMockMixin):
'''
TestCase for salt.modules.nilrt_ip module
'''
def setup_loader_modules(self):
return {nilrt_ip: {'__grains__':
{'lsb_distrib_id': 'not_nilrt'}}
}
def test_change_state_down_state(self):
'''
Tests _change_state when not connected
and new state is down
'''
with patch('salt.modules.nilrt_ip._interface_to_service', return_value=True):
with patch('salt.modules.nilrt_ip._connected', return_value=False):
assert nilrt_ip._change_state('test_interface', 'down')
def test_change_state_up_state(self):
'''
Tests _change_state when connected
and new state is up
'''
with patch('salt.modules.nilrt_ip._interface_to_service', return_value=True):
with patch('salt.modules.nilrt_ip._connected', return_value=True):
assert nilrt_ip._change_state('test_interface', 'up')
|
<commit_before><commit_msg>Add nilrt_ip module unit tests<commit_after># -*- coding: utf-8 -*-
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt Libs
import salt.modules.nilrt_ip as nilrt_ip
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase
from tests.support.mock import patch
class NilrtIPTestCase(TestCase, LoaderModuleMockMixin):
'''
TestCase for salt.modules.nilrt_ip module
'''
def setup_loader_modules(self):
return {nilrt_ip: {'__grains__':
{'lsb_distrib_id': 'not_nilrt'}}
}
def test_change_state_down_state(self):
'''
Tests _change_state when not connected
and new state is down
'''
with patch('salt.modules.nilrt_ip._interface_to_service', return_value=True):
with patch('salt.modules.nilrt_ip._connected', return_value=False):
assert nilrt_ip._change_state('test_interface', 'down')
def test_change_state_up_state(self):
'''
Tests _change_state when connected
and new state is up
'''
with patch('salt.modules.nilrt_ip._interface_to_service', return_value=True):
with patch('salt.modules.nilrt_ip._connected', return_value=True):
assert nilrt_ip._change_state('test_interface', 'up')
|
|
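A brief note on the pattern above: each private helper is patched by its dotted path so _change_state can run without a real NI Linux RT system. The sketch below simply loads and runs this test case with the stock unittest runner; Salt normally drives its suite through its own test entry points, so treating this shortcut as supported is an assumption.
# Hedged sketch: running just this TestCase with the standard unittest runner.
import unittest

from tests.unit.modules.test_nilrt_ip import NilrtIPTestCase

suite = unittest.TestLoader().loadTestsFromTestCase(NilrtIPTestCase)
unittest.TextTestRunner(verbosity=2).run(suite)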
a161197020a120a81ba8c59f58be77cfcfb8b426
|
rest_test.py
|
rest_test.py
|
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import urllib3
from mock import patch
from kubernetes.client import Configuration
from kubernetes.client.rest import RESTClientObject
class RestTest(unittest.TestCase):
def test_poolmanager(self):
'Test that a poolmanager is created for rest client'
with patch.object(urllib3, 'PoolManager') as pool:
RESTClientObject(config=Configuration())
pool.assert_called_once()
def test_proxy(self):
'Test that proxy is created when the config specifies it'

config = Configuration()
config.http_proxy_url = 'http://proxy.example.com'
with patch.object(urllib3, 'proxy_from_url') as proxy:
RESTClientObject(config=config)
proxy.assert_called_once()
if __name__ == '__main__':
unittest.main()
|
Add unit test for restclient PoolManager and http proxy
|
Add unit test for restclient PoolManager and http proxy
|
Python
|
apache-2.0
|
kubernetes-client/python,mbohlool/python-base,kubernetes-client/python,mbohlool/python-base
|
Add unit test for restclient PoolManager and http proxy
|
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import urllib3
from mock import patch
from kubernetes.client import Configuration
from kubernetes.client.rest import RESTClientObject
class RestTest(unittest.TestCase):
def test_poolmanager(self):
'Test that a poolmanager is created for rest client'
with patch.object(urllib3, 'PoolManager') as pool:
RESTClientObject(config=Configuration())
pool.assert_called_once()
def test_proxy(self):
'Test that proxy is created when the config specifies it'
config = Configuration()
config.http_proxy_url = 'http://proxy.example.com'
with patch.object(urllib3, 'proxy_from_url') as proxy:
RESTClientObject(config=config)
proxy.assert_called_once()
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add unit test for restclient PoolManager and http proxy<commit_after>
|
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import urllib3
from mock import patch
from kubernetes.client import Configuration
from kubernetes.client.rest import RESTClientObject
class RestTest(unittest.TestCase):
def test_poolmanager(self):
'Test that a poolmanager is created for rest client'
with patch.object(urllib3, 'PoolManager') as pool:
RESTClientObject(config=Configuration())
pool.assert_called_once()
def test_proxy(self):
'Test that proxy is created when the config specifies it'
config = Configuration()
config.http_proxy_url = 'http://proxy.example.com'
with patch.object(urllib3, 'proxy_from_url') as proxy:
RESTClientObject(config=config)
proxy.assert_called_once()
if __name__ == '__main__':
unittest.main()
|
Add unit test for restclient PoolManager and http proxy# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import urllib3
from mock import patch
from kubernetes.client import Configuration
from kubernetes.client.rest import RESTClientObject
class RestTest(unittest.TestCase):
def test_poolmanager(self):
'Test that a poolmanager is created for rest client'
with patch.object(urllib3, 'PoolManager') as pool:
RESTClientObject(config=Configuration())
pool.assert_called_once()
def test_proxy(self):
'Test that proxy is created when the config specifies it'
config = Configuration()
config.http_proxy_url = 'http://proxy.example.com'
with patch.object(urllib3, 'proxy_from_url') as proxy:
RESTClientObject(config=config)
proxy.assert_called_once()
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add unit test for restclient PoolManager and http proxy<commit_after># Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import urllib3
from mock import patch
from kubernetes.client import Configuration
from kubernetes.client.rest import RESTClientObject
class RestTest(unittest.TestCase):
def test_poolmanager(self):
'Test that a poolmanager is created for rest client'
with patch.object(urllib3, 'PoolManager') as pool:
RESTClientObject(config=Configuration())
pool.assert_called_once()
def test_proxy(self):
'Test that proxy is created when the config specifies it'
config = Configuration()
config.http_proxy_url = 'http://proxy.example.com'
with patch.object(urllib3, 'proxy_from_url') as proxy:
RESTClientObject(config=config)
proxy.assert_called_once()
if __name__ == '__main__':
unittest.main()
|
|
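The two tests above only assert that urllib3 is asked for the right manager. The corresponding runtime behaviour, using the same Configuration attribute the tests set, is sketched below; the proxy address is a placeholder and no request is sent.
# Hedged sketch: building the REST client with and without an HTTP proxy.
from kubernetes.client import Configuration
from kubernetes.client.rest import RESTClientObject

plain_config = Configuration()
plain_client = RESTClientObject(config=plain_config)    # urllib3.PoolManager underneath

proxy_config = Configuration()
proxy_config.http_proxy_url = 'http://proxy.example.com:3128'
proxy_client = RESTClientObject(config=proxy_config)    # urllib3.proxy_from_url underneath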
733ff0f3bd9f36cf0a50e4319b613da2726960f8
|
python/query_spatial.py
|
python/query_spatial.py
|
from pprint import pprint
from amigocloud import AmigoCloud
# Use amigocloud version 1.0.5 or higher to login with tokens
# This will raise an AmigoCloudError if the API token is invalid or has expired
ac = AmigoCloud(token='<your token>')
# For examples of how to get these values, see simple_example2.py
PROJECT_OWNER = 1
PROJECT_ID = 14098
DATASET_ID = 84746
#API endpoint
sql_url = '/users/{user_id}/projects/{project_id}/sql'.format(
user_id=PROJECT_OWNER, project_id=PROJECT_ID
)
# find all rows that intersect the hard coded point
query = """
SELECT *
FROM dataset_{dataset_id}
WHERE ST_Intersects(wkb_geometry, ST_PointFromText('POINT(-117.150727812638 32.7068451387017)', 4326))
""".format(dataset_id = DATASET_ID)
response = ac.get(sql_url, {'query': query,
'dataset_id': DATASET_ID})
# print schema of response
pprint(response['columns'])
# print row contents
pprint(response['data'])
|
Add example of spatial query
|
Add example of spatial query
|
Python
|
mit
|
amigocloud/amigocloud_samples,amigocloud/amigocloud_samples,amigocloud/amigocloud_samples,amigocloud/amigocloud_samples,amigocloud/amigocloud_samples
|
Add example of spatial query
|
from pprint import pprint
from amigocloud import AmigoCloud
# Use amigocloud version 1.0.5 or higher to login with tokens
# This will raise an AmigoCloudError if the API token is invalid or has expired
ac = AmigoCloud(token='<your token>')
# For examples of how to get these values, see simple_example2.py
PROJECT_OWNER = 1
PROJECT_ID = 14098
DATASET_ID = 84746
#API endpoint
sql_url = '/users/{user_id}/projects/{project_id}/sql'.format(
user_id=PROJECT_OWNER, project_id=PROJECT_ID
)
# find all rows that intersect the hard coded point
query = """
SELECT *
FROM dataset_{dataset_id}
WHERE ST_Intersects(wkb_geometry, ST_PointFromText('POINT(-117.150727812638 32.7068451387017)', 4326))
""".format(dataset_id = DATASET_ID)
response = ac.get(sql_url, {'query': query,
'dataset_id': DATASET_ID})
# print schema of response
pprint(response['columns'])
# print row contents
pprint(response['data'])
|
<commit_before><commit_msg>Add example of spatial query<commit_after>
|
from pprint import pprint
from amigocloud import AmigoCloud
# Use amigocloud version 1.0.5 or higher to login with tokens
# This will raise an AmigoCloudError if the API token is invalid or has expired
ac = AmigoCloud(token='<your token>')
# For examples of how to get these values, see simple_example2.py
PROJECT_OWNER = 1
PROJECT_ID = 14098
DATASET_ID = 84746
#API endpoint
sql_url = '/users/{user_id}/projects/{project_id}/sql'.format(
user_id=PROJECT_OWNER, project_id=PROJECT_ID
)
# find all rows that intersect the hard coded point
query = """
SELECT *
FROM dataset_{dataset_id}
WHERE ST_Intersects(wkb_geometry, ST_PointFromText('POINT(-117.150727812638 32.7068451387017)', 4326))
""".format(dataset_id = DATASET_ID)
response = ac.get(sql_url, {'query': query,
'dataset_id': DATASET_ID})
# print schema of response
pprint(response['columns'])
# print row contents
pprint(response['data'])
|
Add example of spatial queryfrom pprint import pprint
from amigocloud import AmigoCloud
# Use amigocloud version 1.0.5 or higher to login with tokens
# This will raise an AmigoCloudError if the API token is invalid or has expired
ac = AmigoCloud(token='<your token>')
# For examples of how to get these values, see simple_example2.py
PROJECT_OWNER = 1
PROJECT_ID = 14098
DATASET_ID = 84746
#API endpoint
sql_url = '/users/{user_id}/projects/{project_id}/sql'.format(
user_id=PROJECT_OWNER, project_id=PROJECT_ID
)
# find all rows that intersect the hard coded point
query = """
SELECT *
FROM dataset_{dataset_id}
WHERE ST_Intersects(wkb_geometry, ST_PointFromText('POINT(-117.150727812638 32.7068451387017)', 4326))
""".format(dataset_id = DATASET_ID)
response = ac.get(sql_url, {'query': query,
'dataset_id': DATASET_ID})
# print schema of response
pprint(response['columns'])
# print row contents
pprint(response['data'])
|
<commit_before><commit_msg>Add example of spatial query<commit_after>from pprint import pprint
from amigocloud import AmigoCloud
# Use amigocloud version 1.0.5 or higher to login with tokens
# This will raise an AmigoCloudError if the API token is invalid or has expired
ac = AmigoCloud(token='<your token>')
# For examples of how to get these values, see simple_example2.py
PROJECT_OWNER = 1
PROJECT_ID = 14098
DATASET_ID = 84746
#API endpoint
sql_url = '/users/{user_id}/projects/{project_id}/sql'.format(
user_id=PROJECT_OWNER, project_id=PROJECT_ID
)
# find all rows that intersect the hard coded point
query = """
SELECT *
FROM dataset_{dataset_id}
WHERE ST_Intersects(wkb_geometry, ST_PointFromText('POINT(-117.150727812638 32.7068451387017)', 4326))
""".format(dataset_id = DATASET_ID)
response = ac.get(sql_url, {'query': query,
'dataset_id': DATASET_ID})
# print schema of response
pprint(response['columns'])
# print row contents
pprint(response['data'])
|
|
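Building on the example above, the intersection point can be passed in rather than hard-coded. The sketch below keeps the same endpoint and dataset layout but wraps the query in a small helper; the coordinates, token and ids are placeholders, and ST_SetSRID/ST_MakePoint is simply one equivalent way to build the point.
# Hedged sketch: the same spatial query with the point supplied as arguments.
from pprint import pprint
from amigocloud import AmigoCloud

def rows_at_point(ac, owner_id, project_id, dataset_id, lon, lat):
    sql_url = '/users/{user_id}/projects/{project_id}/sql'.format(
        user_id=owner_id, project_id=project_id
    )
    query = (
        "SELECT * FROM dataset_{dataset_id} "
        "WHERE ST_Intersects(wkb_geometry, "
        "ST_SetSRID(ST_MakePoint({lon}, {lat}), 4326))"
    ).format(dataset_id=dataset_id, lon=lon, lat=lat)
    return ac.get(sql_url, {'query': query, 'dataset_id': dataset_id})

ac = AmigoCloud(token='<your token>')
response = rows_at_point(ac, 1, 14098, 84746, -117.1507, 32.7068)
pprint(response['data'])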
fdf7daf8abc4f8e1bfb8b729fd9ffc4d0c95c509
|
apps/xformmanager/management/commands/generate_xforms.py
|
apps/xformmanager/management/commands/generate_xforms.py
|
""" This script generates all the necessary data to
synchronize with a remote CommCareHQ server on that server.
This is only really useful if you intend to manually
scp/rsync data to your local server, which requires a
login to the remote server. So this is not the standard
synchronization workflow (but is necessary for low-connectivity
settings)
"""
import sys
import urllib
from optparse import make_option
from django.core.management.base import LabelCommand, CommandError
from xformmanager.management.commands import util
from xformmanager.models import FormDefModel
from receiver.models import Submission
class Command(LabelCommand):
option_list = LabelCommand.option_list + (
make_option('-a','--all', action='store_true', dest='download_all', \
default=False, help='Download all files'),
)
help = "Generate synchronization files on a CommCareHQ remote server."
args = "<remote_url username password>"
label = 'IP address of the remote server (including port), username, and password'
def handle(self, *args, **options):
if len(args) != 3:
raise CommandError('Please specify %s.' % self.label)
remote_url = args[0]
username = args[1]
password = args[2]
print "Generating synchronization data from %s" % remote_url
download_all = options.get('download_all', False)
generate_xforms(remote_url, username, password, not download_all)
def __del__(self):
pass
def generate_xforms(remote_url, username, password, latest=True):
""" Generate sync data from remote server
remote_url: url of remote server (ip:port)
username, password: credentials for logging in
"""
status = util.login(remote_url, username, password)
if not status:
print "Sorry. Your credentials were not accepted."
sys.exit()
def _generate_latest(url, django_model):
# for now, we assume schemas and submissions appear with monotonically
# increasing id's. I doubt this is always the case.
# TODO: fix
start_id = -1
received_count = django_model.objects.count()
if url.find("?") == -1: url = url + "?"
else: url = url + "&"
url = url + ("received_count=%s" % received_count)
print "Hitting %s" % url
# TODO - update this to use content-disposition instead of FILE_NAME
urllib.urlopen(url)
print "Generated tar from %s" % url
url = 'http://%s/api/xforms/?format=sync' % remote_url
if latest: _generate_latest(url, FormDefModel)
else:
urllib.urlopen(url)
print "Generated remote schemata archive"
# TODO - move this to receiver/management?
url = 'http://%s/api/submissions/' % remote_url
if latest: _generate_latest(url, Submission)
else:
urllib.urlopen(url)
print "Generated remote submissions archive"
return
|
Add a command to generate xform archives on the remote server (without downloading)
|
Add a command to generate xform archives on the remote server
(without downloading)
|
Python
|
bsd-3-clause
|
SEL-Columbia/commcare-hq,SEL-Columbia/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,gmimano/commcaretest,puttarajubr/commcare-hq,dimagi/commcare-hq,gmimano/commcaretest,gmimano/commcaretest,qedsoftware/commcare-hq,SEL-Columbia/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq,puttarajubr/commcare-hq
|
Add a command to generate xform archives on the remote server
(without downloading)
|
""" This script generates all the necessary data to
synchronize with a remote CommCareHQ server on that server.
This is only really useful if you intend to manually
scp/rsync data to your local server, which requires a
login to the remote server. So this is not the standard
synchronization workflow (but is necessary for low-connectivity
settings)
"""
import sys
import urllib
from optparse import make_option
from django.core.management.base import LabelCommand, CommandError
from xformmanager.management.commands import util
from xformmanager.models import FormDefModel
from receiver.models import Submission
class Command(LabelCommand):
option_list = LabelCommand.option_list + (
make_option('-a','--all', action='store_true', dest='download_all', \
default=False, help='Download all files'),
)
help = "Generate synchronization files on a CommCareHQ remote server."
args = "<remote_url username password>"
label = 'IP address of the remote server (including port), username, and password'
def handle(self, *args, **options):
if len(args) != 3:
raise CommandError('Please specify %s.' % self.label)
remote_url = args[0]
username = args[1]
password = args[2]
print "Generating synchronization data from %s" % remote_url
download_all = options.get('download_all', False)
generate_xforms(remote_url, username, password, not download_all)
def __del__(self):
pass
def generate_xforms(remote_url, username, password, latest=True):
""" Generate sync data from remote server
remote_url: url of remote server (ip:port)
username, password: credentials for logging in
"""
status = util.login(remote_url, username, password)
if not status:
print "Sorry. Your credentials were not accepted."
sys.exit()
def _generate_latest(url, django_model):
# for now, we assume schemas and submissions appear with monotonically
# increasing id's. I doubt this is always the case.
# TODO: fix
start_id = -1
received_count = django_model.objects.count()
if url.find("?") == -1: url = url + "?"
else: url = url + "&"
url = url + ("received_count=%s" % received_count)
print "Hitting %s" % url
# TODO - update this to use content-disposition instead of FILE_NAME
urllib.urlopen(url)
print "Generated tar from %s" % url
url = 'http://%s/api/xforms/?format=sync' % remote_url
if latest: _generate_latest(url, FormDefModel)
else:
urllib.urlopen(url)
print "Generated remote schemata archive"
# TODO - move this to receiver/management?
url = 'http://%s/api/submissions/' % remote_url
if latest: _generate_latest(url, Submission)
else:
urllib.urlopen(url)
print "Generated remote submissions archive"
return
|
<commit_before><commit_msg>Add a command to generate xform archives on the remote server
(without downloading)<commit_after>
|
""" This script generates all the necessary data to
synchronize with a remote CommCareHQ server on that server.
This is only really useful if you intend to manually
scp/rsync data to your local server, which requires a
login to the remote server. So this is not the standard
synchronization workflow (but is necessary for low-connectivity
settings)
"""
import sys
import urllib
from optparse import make_option
from django.core.management.base import LabelCommand, CommandError
from xformmanager.management.commands import util
from xformmanager.models import FormDefModel
from receiver.models import Submission
class Command(LabelCommand):
option_list = LabelCommand.option_list + (
make_option('-a','--all', action='store_true', dest='download_all', \
default=False, help='Download all files'),
)
help = "Generate synchronization files on a CommCareHQ remote server."
args = "<remote_url username password>"
label = 'IP address of the remote server (including port), username, and password'
def handle(self, *args, **options):
if len(args) != 3:
raise CommandError('Please specify %s.' % self.label)
remote_url = args[0]
username = args[1]
password = args[2]
print "Generating synchronization data from %s" % remote_url
download_all = options.get('download_all', False)
generate_xforms(remote_url, username, password, not download_all)
def __del__(self):
pass
def generate_xforms(remote_url, username, password, latest=True):
""" Generate sync data from remote server
remote_url: url of remote server (ip:port)
username, password: credentials for logging in
"""
status = util.login(remote_url, username, password)
if not status:
print "Sorry. Your credentials were not accepted."
sys.exit()
def _generate_latest(url, django_model):
# for now, we assume schemas and submissions appear with monotonically
# increasing id's. I doubt this is always the case.
# TODO: fix
start_id = -1
received_count = django_model.objects.count()
if url.find("?") == -1: url = url + "?"
else: url = url + "&"
url = url + ("received_count=%s" % received_count)
print "Hitting %s" % url
# TODO - update this to use content-disposition instead of FILE_NAME
urllib.urlopen(url)
print "Generated tar from %s" % url
url = 'http://%s/api/xforms/?format=sync' % remote_url
if latest: _generate_latest(url, FormDefModel)
else:
urllib.urlopen(url)
print "Generated remote schemata archive"
# TODO - move this to receiver/management?
url = 'http://%s/api/submissions/' % remote_url
if latest: _generate_latest(url, Submission)
else:
urllib.urlopen(url)
print "Generated remote submissions archive"
return
|
Add a command to generate xform archives on the remote server
(without downloading)""" This script generates all the necessary data to
synchronize with a remote CommCareHQ server on that server.
This is only really useful if you intend to manually
scp/rsync data to your local server, which requires a
login to the remote server. So this is not the standard
synchronization workflow (but is necessary for low-connectivity
settings)
"""
import sys
import urllib
from optparse import make_option
from django.core.management.base import LabelCommand, CommandError
from xformmanager.management.commands import util
from xformmanager.models import FormDefModel
from receiver.models import Submission
class Command(LabelCommand):
option_list = LabelCommand.option_list + (
make_option('-a','--all', action='store_true', dest='download_all', \
default=False, help='Download all files'),
)
help = "Generate synchronization files on a CommCareHQ remote server."
args = "<remote_url username password>"
label = 'IP address of the remote server (including port), username, and password'
def handle(self, *args, **options):
if len(args) != 3:
raise CommandError('Please specify %s.' % self.label)
remote_url = args[0]
username = args[1]
password = args[2]
print "Generating synchronization data from %s" % remote_url
download_all = options.get('download_all', False)
generate_xforms(remote_url, username, password, not download_all)
def __del__(self):
pass
def generate_xforms(remote_url, username, password, latest=True):
""" Generate sync data from remote server
remote_url: url of remote server (ip:port)
username, password: credentials for logging in
"""
status = util.login(remote_url, username, password)
if not status:
print "Sorry. Your credentials were not accepted."
sys.exit()
def _generate_latest(url, django_model):
# for now, we assume schemas and submissions appear with monotonically
# increasing id's. I doubt this is always the case.
# TODO: fix
start_id = -1
received_count = django_model.objects.count()
if url.find("?") == -1: url = url + "?"
else: url = url + "&"
url = url + ("received_count=%s" % received_count)
print "Hitting %s" % url
# TODO - update this to use content-disposition instead of FILE_NAME
urllib.urlopen(url)
print "Generated tar from %s" % url
url = 'http://%s/api/xforms/?format=sync' % remote_url
if latest: _generate_latest(url, FormDefModel)
else:
urllib.urlopen(url)
print "Generated remote schemata archive"
# TODO - move this to receiver/management?
url = 'http://%s/api/submissions/' % remote_url
if latest: _generate_latest(url, Submission)
else:
urllib.urlopen(url)
print "Generated remote submissions archive"
return
|
<commit_before><commit_msg>Add a command to generate xform archives on the remote server
(without downloading)<commit_after>""" This script generates all the necessary data to
synchronize with a remote CommCareHQ server on that server.
This is only really useful if you intend to manually
scp/rsync data to your local server, which requires a
login to the remote server. So this is not the standard
synchronization workflow (but is necessary for low-connectivity
settings)
"""
import sys
import urllib
from optparse import make_option
from django.core.management.base import LabelCommand, CommandError
from xformmanager.management.commands import util
from xformmanager.models import FormDefModel
from receiver.models import Submission
class Command(LabelCommand):
option_list = LabelCommand.option_list + (
make_option('-a','--all', action='store_true', dest='download_all', \
default=False, help='Download all files'),
)
help = "Generate synchronization files on a CommCareHQ remote server."
args = "<remote_url username password>"
label = 'IP address of the remote server (including port), username, and password'
def handle(self, *args, **options):
if len(args) != 3:
raise CommandError('Please specify %s.' % self.label)
remote_url = args[0]
username = args[1]
password = args[2]
print "Generating synchronization data from %s" % remote_url
download_all = options.get('download_all', False)
generate_xforms(remote_url, username, password, not download_all)
def __del__(self):
pass
def generate_xforms(remote_url, username, password, latest=True):
""" Generate sync data from remote server
remote_url: url of remote server (ip:port)
username, password: credentials for logging in
"""
status = util.login(remote_url, username, password)
if not status:
print "Sorry. Your credentials were not accepted."
sys.exit()
def _generate_latest(url, django_model):
# for now, we assume schemas and submissions appear with monotonically
# increasing id's. I doubt this is always the case.
# TODO: fix
start_id = -1
received_count = django_model.objects.count()
if url.find("?") == -1: url = url + "?"
else: url = url + "&"
url = url + ("received_count=%s" % received_count)
print "Hitting %s" % url
# TODO - update this to use content-disposition instead of FILE_NAME
urllib.urlopen(url)
print "Generated tar from %s" % url
url = 'http://%s/api/xforms/?format=sync' % remote_url
if latest: _generate_latest(url, FormDefModel)
else:
urllib.urlopen(url)
print "Generated remote schemata archive"
# TODO - move this to receiver/management?
url = 'http://%s/api/submissions/' % remote_url
if latest: _generate_latest(url, Submission)
else:
urllib.urlopen(url)
print "Generated remote submissions archive"
return
|
|
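A minimal usage sketch for the management command in the record above, assuming a configured Django project for this CommCareHQ deployment; the command name generate_remote_xforms is a placeholder assumption, since the record does not show the command module's file name.
from django.core.management import call_command
# Command name is an assumed placeholder; the positional arguments follow the
# "<remote_url username password>" signature declared above.
call_command('generate_remote_xforms', '10.0.0.1:8000', 'admin', 'secret')
# Shell equivalent (add --all to archive everything instead of only the latest data):
#   python manage.py generate_remote_xforms 10.0.0.1:8000 admin secret --all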
165548d4f7a9e7af634c6e2f3eb7bfe70dbd0b53
|
custom/icds/management/commands/rebuild_for_migration.py
|
custom/icds/management/commands/rebuild_for_migration.py
|
from django.core.management.base import BaseCommand
from collections import namedtuple
from corehq.apps.userreports.models import AsyncIndicator, get_datasource_config
from corehq.apps.userreports.util import get_indicator_adapter
DOMAIN = 'icds-cas'
DATA_SOURCES = (
'static-icds-cas-static-child_cases_monthly_tableau_v2',
'static-icds-cas-static-ccs_record_cases_monthly_tableau_v2',
)
FakeChange = namedtuple('FakeChange', ['id', 'document'])
CASE_DOC_TYPE = 'CommCareCase'
STATE_IDS = [
'f98e91aa003accb7b849a0f18ebd7039',
'f9b47ea2ee2d8a02acddeeb491d3e175',
'a2fcb186e9be8464e167bb1c56ce8fd9',
'f1cd643f0df908421abd915298ba57bc',
'd982a6fb4cca0824fbde59db18d3800f',
'9cd4fd88d9f047088a377b7e7d144830',
'ea4d587fa93a2ed8300853d51db661ef',
]
class Command(BaseCommand):
help = ""
def handle(self, *args, **options):
fake_change_doc = {'doc_type': CASE_DOC_TYPE, 'domain': DOMAIN}
for data_source_id in DATA_SOURCES:
print("processing data source %s" % data_source_id)
data_source, is_static = get_datasource_config(data_source_id, DOMAIN)
assert is_static
adapter = get_indicator_adapter(data_source)
table = adapter.get_table()
for case_id in self._get_case_ids_to_process(adapter, table):
change = FakeChange(case_id, fake_change_doc)
AsyncIndicator.update_indicators(change, [data_source])
def _get_case_ids_to_process(self, adapter, table):
for state_id in STATE_IDS:
print("processing state %s" % state_id)
query = adapter.session_helper.Session.query(table.columns.doc_id).distinct(table.columns.doc_id)
case_ids = query.filter(
table.columns.state_id == state_id,
table.columns.resident == 'no'
).all()
num_case_ids = len(case_ids)
print("processing %d cases" % (num_case_ids))
for i, case_id in enumerate(case_ids):
yield case_id
if i % 1000 == 0:
print("processed %d / %d docs" % (i, num_case_ids))
|
Add one-off to recalculate migrated cases
|
Add one-off to recalculate migrated cases
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
|
Add one-off to recalculate migrated cases
|
from django.core.management.base import BaseCommand
from collections import namedtuple
from corehq.apps.userreports.models import AsyncIndicator, get_datasource_config
from corehq.apps.userreports.util import get_indicator_adapter
DOMAIN = 'icds-cas'
DATA_SOURCES = (
'static-icds-cas-static-child_cases_monthly_tableau_v2',
'static-icds-cas-static-ccs_record_cases_monthly_tableau_v2',
)
FakeChange = namedtuple('FakeChange', ['id', 'document'])
CASE_DOC_TYPE = 'CommCareCase'
STATE_IDS = [
'f98e91aa003accb7b849a0f18ebd7039',
'f9b47ea2ee2d8a02acddeeb491d3e175',
'a2fcb186e9be8464e167bb1c56ce8fd9',
'f1cd643f0df908421abd915298ba57bc',
'd982a6fb4cca0824fbde59db18d3800f',
'9cd4fd88d9f047088a377b7e7d144830',
'ea4d587fa93a2ed8300853d51db661ef',
]
class Command(BaseCommand):
help = ""
def handle(self, *args, **options):
fake_change_doc = {'doc_type': CASE_DOC_TYPE, 'domain': DOMAIN}
for data_source_id in DATA_SOURCES:
print("processing data source %s" % data_source_id)
data_source, is_static = get_datasource_config(data_source_id, DOMAIN)
assert is_static
adapter = get_indicator_adapter(data_source)
table = adapter.get_table()
for case_id in self._get_case_ids_to_process(adapter, table):
change = FakeChange(case_id, fake_change_doc)
AsyncIndicator.update_indicators(change, [data_source])
def _get_case_ids_to_process(self, adapter, table):
for state_id in STATE_IDS:
print("processing state %s" % state_id)
query = adapter.session_helper.Session.query(table.columns.doc_id).distinct(table.columns.doc_id)
case_ids = query.filter(
table.columns.state_id == state_id,
table.columns.resident == 'no'
).all()
num_case_ids = len(case_ids)
print("processing %d cases" % (num_case_ids))
for i, case_id in enumerate(case_ids):
yield case_id
if i % 1000 == 0:
print("processed %d / %d docs" % (i, num_case_ids))
|
<commit_before><commit_msg>Add one-off to recalculate migrated cases<commit_after>
|
from django.core.management.base import BaseCommand
from collections import namedtuple
from corehq.apps.userreports.models import AsyncIndicator, get_datasource_config
from corehq.apps.userreports.util import get_indicator_adapter
DOMAIN = 'icds-cas'
DATA_SOURCES = (
'static-icds-cas-static-child_cases_monthly_tableau_v2',
'static-icds-cas-static-ccs_record_cases_monthly_tableau_v2',
)
FakeChange = namedtuple('FakeChange', ['id', 'document'])
CASE_DOC_TYPE = 'CommCareCase'
STATE_IDS = [
'f98e91aa003accb7b849a0f18ebd7039',
'f9b47ea2ee2d8a02acddeeb491d3e175',
'a2fcb186e9be8464e167bb1c56ce8fd9',
'f1cd643f0df908421abd915298ba57bc',
'd982a6fb4cca0824fbde59db18d3800f',
'9cd4fd88d9f047088a377b7e7d144830',
'ea4d587fa93a2ed8300853d51db661ef',
]
class Command(BaseCommand):
help = ""
def handle(self, *args, **options):
fake_change_doc = {'doc_type': CASE_DOC_TYPE, 'domain': DOMAIN}
for data_source_id in DATA_SOURCES:
print("processing data source %s" % data_source_id)
data_source, is_static = get_datasource_config(data_source_id, DOMAIN)
assert is_static
adapter = get_indicator_adapter(data_source)
table = adapter.get_table()
for case_id in self._get_case_ids_to_process(adapter, table):
change = FakeChange(case_id, fake_change_doc)
AsyncIndicator.update_indicators(change, [data_source])
def _get_case_ids_to_process(self, adapter, table):
for state_id in STATE_IDS:
print("processing state %s" % state_id)
query = adapter.session_helper.Session.query(table.columns.doc_id).distinct(table.columns.doc_id)
case_ids = query.filter(
table.columns.state_id == state_id,
table.columns.resident == 'no'
).all()
num_case_ids = len(case_ids)
print("processing %d cases" % (num_case_ids))
for i, case_id in enumerate(case_ids):
yield case_id
if i % 1000 == 0:
print("processed %d / %d docs" % (i, num_case_ids))
|
Add one-off to recalculate migrated casesfrom django.core.management.base import BaseCommand
from collections import namedtuple
from corehq.apps.userreports.models import AsyncIndicator, get_datasource_config
from corehq.apps.userreports.util import get_indicator_adapter
DOMAIN = 'icds-cas'
DATA_SOURCES = (
'static-icds-cas-static-child_cases_monthly_tableau_v2',
'static-icds-cas-static-ccs_record_cases_monthly_tableau_v2',
)
FakeChange = namedtuple('FakeChange', ['id', 'document'])
CASE_DOC_TYPE = 'CommCareCase'
STATE_IDS = [
'f98e91aa003accb7b849a0f18ebd7039',
'f9b47ea2ee2d8a02acddeeb491d3e175',
'a2fcb186e9be8464e167bb1c56ce8fd9',
'f1cd643f0df908421abd915298ba57bc',
'd982a6fb4cca0824fbde59db18d3800f',
'9cd4fd88d9f047088a377b7e7d144830',
'ea4d587fa93a2ed8300853d51db661ef',
]
class Command(BaseCommand):
help = ""
def handle(self, *args, **options):
fake_change_doc = {'doc_type': CASE_DOC_TYPE, 'domain': DOMAIN}
for data_source_id in DATA_SOURCES:
print("processing data source %s" % data_source_id)
data_source, is_static = get_datasource_config(data_source_id, DOMAIN)
assert is_static
adapter = get_indicator_adapter(data_source)
table = adapter.get_table()
for case_id in self._get_case_ids_to_process(adapter, table):
change = FakeChange(case_id, fake_change_doc)
AsyncIndicator.update_indicators(change, [data_source])
def _get_case_ids_to_process(self, adapter, table):
for state_id in STATE_IDS:
print("processing state %s" % state_id)
query = adapter.session_helper.Session.query(table.columns.doc_id).distinct(table.columns.doc_id)
case_ids = query.filter(
table.columns.state_id == state_id,
table.columns.resident == 'no'
).all()
num_case_ids = len(case_ids)
print("processing %d cases" % (num_case_ids))
for i, case_id in enumerate(case_ids):
yield case_id
if i % 1000 == 0:
print("processed %d / %d docs" % (i, num_case_ids))
|
<commit_before><commit_msg>Add one-off to recalculate migrated cases<commit_after>from django.core.management.base import BaseCommand
from collections import namedtuple
from corehq.apps.userreports.models import AsyncIndicator, get_datasource_config
from corehq.apps.userreports.util import get_indicator_adapter
DOMAIN = 'icds-cas'
DATA_SOURCES = (
'static-icds-cas-static-child_cases_monthly_tableau_v2',
'static-icds-cas-static-ccs_record_cases_monthly_tableau_v2',
)
FakeChange = namedtuple('FakeChange', ['id', 'document'])
CASE_DOC_TYPE = 'CommCareCase'
STATE_IDS = [
'f98e91aa003accb7b849a0f18ebd7039',
'f9b47ea2ee2d8a02acddeeb491d3e175',
'a2fcb186e9be8464e167bb1c56ce8fd9',
'f1cd643f0df908421abd915298ba57bc',
'd982a6fb4cca0824fbde59db18d3800f',
'9cd4fd88d9f047088a377b7e7d144830',
'ea4d587fa93a2ed8300853d51db661ef',
]
class Command(BaseCommand):
help = ""
def handle(self, *args, **options):
fake_change_doc = {'doc_type': CASE_DOC_TYPE, 'domain': DOMAIN}
for data_source_id in DATA_SOURCES:
print("processing data source %s" % data_source_id)
data_source, is_static = get_datasource_config(data_source_id, DOMAIN)
assert is_static
adapter = get_indicator_adapter(data_source)
table = adapter.get_table()
for case_id in self._get_case_ids_to_process(adapter, table):
change = FakeChange(case_id, fake_change_doc)
AsyncIndicator.update_indicators(change, [data_source])
def _get_case_ids_to_process(self, adapter, table):
for state_id in STATE_IDS:
print("processing state %s" % state_id)
query = adapter.session_helper.Session.query(table.columns.doc_id).distinct(table.columns.doc_id)
case_ids = query.filter(
table.columns.state_id == state_id,
table.columns.resident == 'no'
).all()
num_case_ids = len(case_ids)
print("processing %d cases" % (num_case_ids))
for i, case_id in enumerate(case_ids):
yield case_id
if i % 1000 == 0:
print("processed %d / %d docs" % (i, num_case_ids))
|
|
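As a usage sketch, the one-off above is exposed as a standard Django management command named after its module (rebuild_for_migration); it takes no arguments and assumes a configured CommCare HQ environment.
from django.core.management import call_command
# Queues AsyncIndicator updates for the matching cases in both monthly data sources.
call_command('rebuild_for_migration')
# Shell equivalent:
#   python manage.py rebuild_for_migration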
12e9814d0225960450bb7cf0fc80502cef13195b
|
rewind/test/test_code.py
|
rewind/test/test_code.py
|
"""Test code format and coding standards."""
import importlib
import inspect
import pkgutil
import unittest
def setUpModule():
global modules
modules = [name for _, name, ispkg in pkgutil.walk_packages(['rewind'],
'rewind.')
if not ispkg and not name.startswith('rewind.test.') and
not name.startswith('rewind.messages.')]
assert modules, "Expected to have found a couple of modules. Did not."
modules = map(importlib.import_module, modules)
def tearDownModule():
"""Clearing up global namespace in test_code."""
global modules
del modules
def _get_public_classes_from_object(obj, prepend_name=''):
classes = [(prepend_name+name, value)
for name, value in inspect.getmembers(obj)
if inspect.isclass(value) and not name.startswith('_')]
result = list(classes)
for name, value in classes:
partialres = _get_public_classes_from_object(value,
'{0}.'.format(name))
result.extend(partialres)
return result
def _get_public_classes():
classes = []
for module in modules:
assert inspect.ismodule(module)
someclasses = _get_public_classes_from_object(module,
'{0}.'.format(module.__name__))
classes.extend(someclasses)
return classes
class TestPydoc(unittest.TestCase):
"""Tests for pydoc."""
def testAllPublicClasses(self):
"""Test that all public classes have a pydoc."""
classes = _get_public_classes()
self.assertNotEqual(len(classes), 0)
for classname, clazz in classes:
doc = inspect.getdoc(clazz)
msg = "{0} lacks a Pydoc string.".format(classname)
self.assertTrue(doc and len(doc) > 4, msg)
|
Test that asserts all public classes have pydoc
|
Test that asserts all public classes have pydoc
|
Python
|
agpl-3.0
|
JensRantil/rewind,JensRantil/rewind-client
|
Test that asserts all public classes have pydoc
|
"""Test code format and coding standards."""
import importlib
import inspect
import pkgutil
import unittest
def setUpModule():
global modules
modules = [name for _, name, ispkg in pkgutil.walk_packages(['rewind'],
'rewind.')
if not ispkg and not name.startswith('rewind.test.') and
not name.startswith('rewind.messages.')]
assert modules, "Expected to have found a couple of modules. Did not."
modules = map(importlib.import_module, modules)
def tearDownModule():
"""Clearing up global namespace in test_code."""
global modules
del modules
def _get_public_classes_from_object(obj, prepend_name=''):
classes = [(prepend_name+name, value)
for name, value in inspect.getmembers(obj)
if inspect.isclass(value) and not name.startswith('_')]
result = list(classes)
for name, value in classes:
partialres = _get_public_classes_from_object(value,
'{0}.'.format(name))
result.extend(partialres)
return result
def _get_public_classes():
classes = []
for module in modules:
assert inspect.ismodule(module)
someclasses = _get_public_classes_from_object(module,
'{0}.'.format(module.__name__))
classes.extend(someclasses)
return classes
class TestPydoc(unittest.TestCase):
"""Tests for pydoc."""
def testAllPublicClasses(self):
"""Test that all public classes have a pydoc."""
classes = _get_public_classes()
self.assertNotEqual(len(classes), 0)
for classname, clazz in classes:
doc = inspect.getdoc(clazz)
msg = "{0} lacks a Pydoc string.".format(classname)
self.assertTrue(doc and len(doc) > 4, msg)
|
<commit_before><commit_msg>Test that asserts all public classes have pydoc<commit_after>
|
"""Test code format and coding standards."""
import importlib
import inspect
import pkgutil
import unittest
def setUpModule():
global modules
modules = [name for _, name, ispkg in pkgutil.walk_packages(['rewind'],
'rewind.')
if not ispkg and not name.startswith('rewind.test.') and
not name.startswith('rewind.messages.')]
assert modules, "Expected to have found a couple of modules. Did not."
modules = map(importlib.import_module, modules)
def tearDownModule():
"""Clearing up global namespace in test_code."""
global modules
del modules
def _get_public_classes_from_object(obj, prepend_name=''):
classes = [(prepend_name+name, value)
for name, value in inspect.getmembers(obj)
if inspect.isclass(value) and not name.startswith('_')]
result = list(classes)
for name, value in classes:
partialres = _get_public_classes_from_object(value,
'{0}.'.format(name))
result.extend(partialres)
return result
def _get_public_classes():
classes = []
for module in modules:
assert inspect.ismodule(module)
someclasses = _get_public_classes_from_object(module,
'{0}.'.format(module.__name__))
classes.extend(someclasses)
return classes
class TestPydoc(unittest.TestCase):
"""Tests for pydoc."""
def testAllPublicClasses(self):
"""Test that all public classes have a pydoc."""
classes = _get_public_classes()
self.assertNotEqual(len(classes), 0)
for classname, clazz in classes:
doc = inspect.getdoc(clazz)
msg = "{0} lacks a Pydoc string.".format(classname)
self.assertTrue(doc and len(doc) > 4, msg)
|
Test that asserts all public classes have pydoc"""Test code format and coding standards."""
import importlib
import inspect
import pkgutil
import unittest
def setUpModule():
global modules
modules = [name for _, name, ispkg in pkgutil.walk_packages(['rewind'],
'rewind.')
if not ispkg and not name.startswith('rewind.test.') and
not name.startswith('rewind.messages.')]
assert modules, "Expected to have found a couple of modules. Did not."
modules = map(importlib.import_module, modules)
def tearDownModule():
"""Clearing up global namespace in test_code."""
global modules
del modules
def _get_public_classes_from_object(obj, prepend_name=''):
classes = [(prepend_name+name, value)
for name, value in inspect.getmembers(obj)
if inspect.isclass(value) and not name.startswith('_')]
result = list(classes)
for name, value in classes:
partialres = _get_public_classes_from_object(value,
'{0}.'.format(name))
result.extend(partialres)
return result
def _get_public_classes():
classes = []
for module in modules:
assert inspect.ismodule(module)
someclasses = _get_public_classes_from_object(module,
'{0}.'.format(module.__name__))
classes.extend(someclasses)
return classes
class TestPydoc(unittest.TestCase):
"""Tests for pydoc."""
def testAllPublicClasses(self):
"""Test that all public classes have a pydoc."""
classes = _get_public_classes()
self.assertNotEqual(len(classes), 0)
for classname, clazz in classes:
doc = inspect.getdoc(clazz)
msg = "{0} lacks a Pydoc string.".format(classname)
self.assertTrue(doc and len(doc) > 4, msg)
|
<commit_before><commit_msg>Test that asserts all public classes have pydoc<commit_after>"""Test code format and coding standards."""
import importlib
import inspect
import pkgutil
import unittest
def setUpModule():
global modules
modules = [name for _, name, ispkg in pkgutil.walk_packages(['rewind'],
'rewind.')
if not ispkg and not name.startswith('rewind.test.') and
not name.startswith('rewind.messages.')]
assert modules, "Expected to have found a couple of modules. Did not."
modules = map(importlib.import_module, modules)
def tearDownModule():
"""Clearing up global namespace in test_code."""
global modules
del modules
def _get_public_classes_from_object(obj, prepend_name=''):
classes = [(prepend_name+name, value)
for name, value in inspect.getmembers(obj)
if inspect.isclass(value) and not name.startswith('_')]
result = list(classes)
for name, value in classes:
partialres = _get_public_classes_from_object(value,
'{0}.'.format(name))
result.extend(partialres)
return result
def _get_public_classes():
classes = []
for module in modules:
assert inspect.ismodule(module)
someclasses = _get_public_classes_from_object(module,
'{0}.'.format(module.__name__))
classes.extend(someclasses)
return classes
class TestPydoc(unittest.TestCase):
"""Tests for pydoc."""
def testAllPublicClasses(self):
"""Test that all public classes have a pydoc."""
classes = _get_public_classes()
self.assertNotEqual(len(classes), 0)
for classname, clazz in classes:
doc = inspect.getdoc(clazz)
msg = "{0} lacks a Pydoc string.".format(classname)
self.assertTrue(doc and len(doc) > 4, msg)
|
|
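A short sketch of running the code-standards check above on its own, assuming the rewind package is importable from the working directory.
import unittest
# Load and run only the pydoc conformance module added in this commit.
suite = unittest.defaultTestLoader.loadTestsFromName('rewind.test.test_code')
unittest.TextTestRunner(verbosity=2).run(suite)
# Shell equivalent:
#   python -m unittest rewind.test.test_code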
447f19638c43cf273b6922796a203d33407bc29e
|
test/util.py
|
test/util.py
|
'''Helper code for theanets unit tests.'''
import numpy as np
class MNIST(object):
NUM_DIGITS = 100
DIGIT_SIZE = 784
def setUp(self):
# we just create some random "mnist digit" data of the right shape.
np.random.seed(3)
self.images = np.random.randn(NUM_DIGITS, DIGIT_SIZE).astype('f')
self.labels = np.random.randint(0, 10, NUM_DIGITS).astype('i')
|
'''Helper code for theanets unit tests.'''
import numpy as np
class MNIST(object):
NUM_DIGITS = 100
DIGIT_SIZE = 784
def setUp(self):
# we just create some random "mnist digit" data of the right shape.
np.random.seed(3)
self.images = np.random.randn(MNIST.NUM_DIGITS, MNIST.DIGIT_SIZE).astype('f')
self.labels = np.random.randint(0, 10, MNIST.NUM_DIGITS).astype('i')
|
Use proper namespace for constants.
|
Use proper namespace for constants.
|
Python
|
mit
|
chrinide/theanets,devdoer/theanets,lmjohns3/theanets
|
'''Helper code for theanets unit tests.'''
import numpy as np
class MNIST(object):
NUM_DIGITS = 100
DIGIT_SIZE = 784
def setUp(self):
# we just create some random "mnist digit" data of the right shape.
np.random.seed(3)
self.images = np.random.randn(NUM_DIGITS, DIGIT_SIZE).astype('f')
self.labels = np.random.randint(0, 10, NUM_DIGITS).astype('i')
Use proper namespace for constants.
|
'''Helper code for theanets unit tests.'''
import numpy as np
class MNIST(object):
NUM_DIGITS = 100
DIGIT_SIZE = 784
def setUp(self):
# we just create some random "mnist digit" data of the right shape.
np.random.seed(3)
self.images = np.random.randn(MNIST.NUM_DIGITS, MNIST.DIGIT_SIZE).astype('f')
self.labels = np.random.randint(0, 10, MNIST.NUM_DIGITS).astype('i')
|
<commit_before>'''Helper code for theanets unit tests.'''
import numpy as np
class MNIST(object):
NUM_DIGITS = 100
DIGIT_SIZE = 784
def setUp(self):
# we just create some random "mnist digit" data of the right shape.
np.random.seed(3)
self.images = np.random.randn(NUM_DIGITS, DIGIT_SIZE).astype('f')
self.labels = np.random.randint(0, 10, NUM_DIGITS).astype('i')
<commit_msg>Use proper namespace for constants.<commit_after>
|
'''Helper code for theanets unit tests.'''
import numpy as np
class MNIST(object):
NUM_DIGITS = 100
DIGIT_SIZE = 784
def setUp(self):
# we just create some random "mnist digit" data of the right shape.
np.random.seed(3)
self.images = np.random.randn(MNIST.NUM_DIGITS, MNIST.DIGIT_SIZE).astype('f')
self.labels = np.random.randint(0, 10, MNIST.NUM_DIGITS).astype('i')
|
'''Helper code for theanets unit tests.'''
import numpy as np
class MNIST(object):
NUM_DIGITS = 100
DIGIT_SIZE = 784
def setUp(self):
# we just create some random "mnist digit" data of the right shape.
np.random.seed(3)
self.images = np.random.randn(NUM_DIGITS, DIGIT_SIZE).astype('f')
self.labels = np.random.randint(0, 10, NUM_DIGITS).astype('i')
Use proper namespace for constants.'''Helper code for theanets unit tests.'''
import numpy as np
class MNIST(object):
NUM_DIGITS = 100
DIGIT_SIZE = 784
def setUp(self):
# we just create some random "mnist digit" data of the right shape.
np.random.seed(3)
self.images = np.random.randn(MNIST.NUM_DIGITS, MNIST.DIGIT_SIZE).astype('f')
self.labels = np.random.randint(0, 10, MNIST.NUM_DIGITS).astype('i')
|
<commit_before>'''Helper code for theanets unit tests.'''
import numpy as np
class MNIST(object):
NUM_DIGITS = 100
DIGIT_SIZE = 784
def setUp(self):
# we just create some random "mnist digit" data of the right shape.
np.random.seed(3)
self.images = np.random.randn(NUM_DIGITS, DIGIT_SIZE).astype('f')
self.labels = np.random.randint(0, 10, NUM_DIGITS).astype('i')
<commit_msg>Use proper namespace for constants.<commit_after>'''Helper code for theanets unit tests.'''
import numpy as np
class MNIST(object):
NUM_DIGITS = 100
DIGIT_SIZE = 784
def setUp(self):
# we just create some random "mnist digit" data of the right shape.
np.random.seed(3)
self.images = np.random.randn(MNIST.NUM_DIGITS, MNIST.DIGIT_SIZE).astype('f')
self.labels = np.random.randint(0, 10, MNIST.NUM_DIGITS).astype('i')
|
bfeb07b70237dfae49eb18cc44c7150360c06fbd
|
PRESUBMIT.py
|
PRESUBMIT.py
|
# Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
"""Top-level presubmit script for Dart.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
def CheckChangeOnCommit(input_api, output_api):
results = []
status_check = input_api.canned_checks.CheckTreeIsOpen(
input_api,
output_api,
json_url='http://dart-status.appspot.com/current?format=json')
results.extend(status_check)
return results
|
Add gcl presubmit script to the dart src tree.
|
Add gcl presubmit script to the dart src tree.
Currently I just added a tree status check but we can extend this over time.
Review URL: https://chromiumcodereview.appspot.com//10891021
git-svn-id: c93d8a2297af3b929165606efe145742a534bc71@11575 260f80e4-7a28-3924-810f-c04153c831b5
|
Python
|
bsd-3-clause
|
dart-lang/sdk,dart-lang/sdk,dart-archive/dart-sdk,dart-lang/sdk,dart-archive/dart-sdk,dartino/dart-sdk,dart-archive/dart-sdk,dartino/dart-sdk,dart-archive/dart-sdk,dartino/dart-sdk,dartino/dart-sdk,dart-archive/dart-sdk,dartino/dart-sdk,dartino/dart-sdk,dart-lang/sdk,dart-archive/dart-sdk,dart-lang/sdk,dart-archive/dart-sdk,dartino/dart-sdk,dartino/dart-sdk,dartino/dart-sdk,dart-archive/dart-sdk,dart-lang/sdk,dart-lang/sdk,dart-lang/sdk,dart-archive/dart-sdk
|
Add gcl presubmit script to the dart src tree.
Currently I just added a tree status check but we can extend this over time.
Review URL: https://chromiumcodereview.appspot.com//10891021
git-svn-id: c93d8a2297af3b929165606efe145742a534bc71@11575 260f80e4-7a28-3924-810f-c04153c831b5
|
# Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
"""Top-level presubmit script for Dart.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
def CheckChangeOnCommit(input_api, output_api):
results = []
status_check = input_api.canned_checks.CheckTreeIsOpen(
input_api,
output_api,
json_url='http://dart-status.appspot.com/current?format=json')
results.extend(status_check)
return results
|
<commit_before><commit_msg>Add gcl presubmit script to the dart src tree.
Currently I just added a tree status check but we can extend this over time.
Review URL: https://chromiumcodereview.appspot.com//10891021
git-svn-id: c93d8a2297af3b929165606efe145742a534bc71@11575 260f80e4-7a28-3924-810f-c04153c831b5<commit_after>
|
# Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
"""Top-level presubmit script for Dart.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
def CheckChangeOnCommit(input_api, output_api):
results = []
status_check = input_api.canned_checks.CheckTreeIsOpen(
input_api,
output_api,
json_url='http://dart-status.appspot.com/current?format=json')
results.extend(status_check)
return results
|
Add gcl presubmit script to the dart src tree.
Currently I just added a tree status check but we can extend this over time.
Review URL: https://chromiumcodereview.appspot.com//10891021
git-svn-id: c93d8a2297af3b929165606efe145742a534bc71@11575 260f80e4-7a28-3924-810f-c04153c831b5# Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
"""Top-level presubmit script for Dart.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
def CheckChangeOnCommit(input_api, output_api):
results = []
status_check = input_api.canned_checks.CheckTreeIsOpen(
input_api,
output_api,
json_url='http://dart-status.appspot.com/current?format=json')
results.extend(status_check)
return results
|
<commit_before><commit_msg>Add gcl presubmit script to the dart src tree.
Currently I just added a tree status check but we can extend this over time.
Review URL: https://chromiumcodereview.appspot.com//10891021
git-svn-id: c93d8a2297af3b929165606efe145742a534bc71@11575 260f80e4-7a28-3924-810f-c04153c831b5<commit_after># Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
"""Top-level presubmit script for Dart.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
def CheckChangeOnCommit(input_api, output_api):
results = []
status_check = input_api.canned_checks.CheckTreeIsOpen(
input_api,
output_api,
json_url='http://dart-status.appspot.com/current?format=json')
results.extend(status_check)
return results
|
|
8db65c9f6ec67e188dd6cd11f7a7933d371e323d
|
feed/tests/test_contactview.py
|
feed/tests/test_contactview.py
|
from django.contrib.auth.models import User
from django.test import TestCase
from rest_framework.test import APIRequestFactory
from feed.views import ContactViewSet
from workflow.models import Contact, Country, Organization, TolaUser, \
WorkflowLevel1, WorkflowTeam
class ContactViewsTest(TestCase):
def setUp(self):
self.user = User.objects.create_user('john', 'lennon@thebeatles.com',
'johnpassword')
self.user.is_superuser = True
self.user.is_staff = True
self.user.save()
self.country = Country.objects.create(country='Afghanistan', code='AF')
Contact.objects.bulk_create([
Contact(name='Contact_0', country=self.country),
Contact(name='Contact_1', country=self.country),
])
factory = APIRequestFactory()
self.request = factory.get('/api/contact/')
def test_list_contact_superuser(self):
self.request.user = self.user
view = ContactViewSet.as_view({'get': 'list'})
response = view(self.request)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 2)
def test_list_contact_normaluser(self):
self.user.is_superuser = False
self.user.is_staff = False
self.user.save()
organization = Organization.objects.create(name="TestOrg")
TolaUser.objects.create(user=self.user, organization=organization)
self.request.user = self.user
view = ContactViewSet.as_view({'get': 'list'})
response = view(self.request)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 0)
def test_list_contact_normaluser_one_result(self):
self.user.is_superuser = False
self.user.is_staff = False
self.user.save()
organization = Organization.objects.create(name="TestOrg")
tola_user = TolaUser.objects.create(user=self.user,
organization=organization)
wflvl1 = WorkflowLevel1.objects.create(name='WorkflowLevel1',
organization=organization)
WorkflowTeam.objects.create(workflow_user=tola_user,
workflowlevel1=wflvl1)
Contact.objects.create(name='Contact_0', country=self.country,
organization=organization,
workflowlevel1=wflvl1)
self.request.user = self.user
view = ContactViewSet.as_view({'get': 'list'})
response = view(self.request)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 1)
|
Add unit test for Contact view
|
Add unit test for Contact view
|
Python
|
apache-2.0
|
toladata/TolaActivity,toladata/TolaActivity,toladata/TolaActivity,toladata/TolaActivity
|
Add unit test for Contact view
|
from django.contrib.auth.models import User
from django.test import TestCase
from rest_framework.test import APIRequestFactory
from feed.views import ContactViewSet
from workflow.models import Contact, Country, Organization, TolaUser, \
WorkflowLevel1, WorkflowTeam
class ContactViewsTest(TestCase):
def setUp(self):
self.user = User.objects.create_user('john', 'lennon@thebeatles.com',
'johnpassword')
self.user.is_superuser = True
self.user.is_staff = True
self.user.save()
self.country = Country.objects.create(country='Afghanistan', code='AF')
Contact.objects.bulk_create([
Contact(name='Contact_0', country=self.country),
Contact(name='Contact_1', country=self.country),
])
factory = APIRequestFactory()
self.request = factory.get('/api/contact/')
def test_list_contact_superuser(self):
self.request.user = self.user
view = ContactViewSet.as_view({'get': 'list'})
response = view(self.request)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 2)
def test_list_contact_normaluser(self):
self.user.is_superuser = False
self.user.is_staff = False
self.user.save()
organization = Organization.objects.create(name="TestOrg")
TolaUser.objects.create(user=self.user, organization=organization)
self.request.user = self.user
view = ContactViewSet.as_view({'get': 'list'})
response = view(self.request)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 0)
def test_list_contact_normaluser_one_result(self):
self.user.is_superuser = False
self.user.is_staff = False
self.user.save()
organization = Organization.objects.create(name="TestOrg")
tola_user = TolaUser.objects.create(user=self.user,
organization=organization)
wflvl1 = WorkflowLevel1.objects.create(name='WorkflowLevel1',
organization=organization)
WorkflowTeam.objects.create(workflow_user=tola_user,
workflowlevel1=wflvl1)
Contact.objects.create(name='Contact_0', country=self.country,
organization=organization,
workflowlevel1=wflvl1)
self.request.user = self.user
view = ContactViewSet.as_view({'get': 'list'})
response = view(self.request)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 1)
|
<commit_before><commit_msg>Add unit test for Contact view<commit_after>
|
from django.contrib.auth.models import User
from django.test import TestCase
from rest_framework.test import APIRequestFactory
from feed.views import ContactViewSet
from workflow.models import Contact, Country, Organization, TolaUser, \
WorkflowLevel1, WorkflowTeam
class ContactViewsTest(TestCase):
def setUp(self):
self.user = User.objects.create_user('john', 'lennon@thebeatles.com',
'johnpassword')
self.user.is_superuser = True
self.user.is_staff = True
self.user.save()
self.country = Country.objects.create(country='Afghanistan', code='AF')
Contact.objects.bulk_create([
Contact(name='Contact_0', country=self.country),
Contact(name='Contact_1', country=self.country),
])
factory = APIRequestFactory()
self.request = factory.get('/api/contact/')
def test_list_contact_superuser(self):
self.request.user = self.user
view = ContactViewSet.as_view({'get': 'list'})
response = view(self.request)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 2)
def test_list_contact_normaluser(self):
self.user.is_superuser = False
self.user.is_staff = False
self.user.save()
organization = Organization.objects.create(name="TestOrg")
TolaUser.objects.create(user=self.user, organization=organization)
self.request.user = self.user
view = ContactViewSet.as_view({'get': 'list'})
response = view(self.request)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 0)
def test_list_contact_normaluser_one_result(self):
self.user.is_superuser = False
self.user.is_staff = False
self.user.save()
organization = Organization.objects.create(name="TestOrg")
tola_user = TolaUser.objects.create(user=self.user,
organization=organization)
wflvl1 = WorkflowLevel1.objects.create(name='WorkflowLevel1',
organization=organization)
WorkflowTeam.objects.create(workflow_user=tola_user,
workflowlevel1=wflvl1)
Contact.objects.create(name='Contact_0', country=self.country,
organization=organization,
workflowlevel1=wflvl1)
self.request.user = self.user
view = ContactViewSet.as_view({'get': 'list'})
response = view(self.request)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 1)
|
Add unit test for Contact viewfrom django.contrib.auth.models import User
from django.test import TestCase
from rest_framework.test import APIRequestFactory
from feed.views import ContactViewSet
from workflow.models import Contact, Country, Organization, TolaUser, \
WorkflowLevel1, WorkflowTeam
class ContactViewsTest(TestCase):
def setUp(self):
self.user = User.objects.create_user('john', 'lennon@thebeatles.com',
'johnpassword')
self.user.is_superuser = True
self.user.is_staff = True
self.user.save()
self.country = Country.objects.create(country='Afghanistan', code='AF')
Contact.objects.bulk_create([
Contact(name='Contact_0', country=self.country),
Contact(name='Contact_1', country=self.country),
])
factory = APIRequestFactory()
self.request = factory.get('/api/contact/')
def test_list_contact_superuser(self):
self.request.user = self.user
view = ContactViewSet.as_view({'get': 'list'})
response = view(self.request)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 2)
def test_list_contact_normaluser(self):
self.user.is_superuser = False
self.user.is_staff = False
self.user.save()
organization = Organization.objects.create(name="TestOrg")
TolaUser.objects.create(user=self.user, organization=organization)
self.request.user = self.user
view = ContactViewSet.as_view({'get': 'list'})
response = view(self.request)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 0)
def test_list_contact_normaluser_one_result(self):
self.user.is_superuser = False
self.user.is_staff = False
self.user.save()
organization = Organization.objects.create(name="TestOrg")
tola_user = TolaUser.objects.create(user=self.user,
organization=organization)
wflvl1 = WorkflowLevel1.objects.create(name='WorkflowLevel1',
organization=organization)
WorkflowTeam.objects.create(workflow_user=tola_user,
workflowlevel1=wflvl1)
Contact.objects.create(name='Contact_0', country=self.country,
organization=organization,
workflowlevel1=wflvl1)
self.request.user = self.user
view = ContactViewSet.as_view({'get': 'list'})
response = view(self.request)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 1)
|
<commit_before><commit_msg>Add unit test for Contact view<commit_after>from django.contrib.auth.models import User
from django.test import TestCase
from rest_framework.test import APIRequestFactory
from feed.views import ContactViewSet
from workflow.models import Contact, Country, Organization, TolaUser, \
WorkflowLevel1, WorkflowTeam
class ContactViewsTest(TestCase):
def setUp(self):
self.user = User.objects.create_user('john', 'lennon@thebeatles.com',
'johnpassword')
self.user.is_superuser = True
self.user.is_staff = True
self.user.save()
self.country = Country.objects.create(country='Afghanistan', code='AF')
Contact.objects.bulk_create([
Contact(name='Contact_0', country=self.country),
Contact(name='Contact_1', country=self.country),
])
factory = APIRequestFactory()
self.request = factory.get('/api/contact/')
def test_list_contact_superuser(self):
self.request.user = self.user
view = ContactViewSet.as_view({'get': 'list'})
response = view(self.request)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 2)
def test_list_contact_normaluser(self):
self.user.is_superuser = False
self.user.is_staff = False
self.user.save()
organization = Organization.objects.create(name="TestOrg")
TolaUser.objects.create(user=self.user, organization=organization)
self.request.user = self.user
view = ContactViewSet.as_view({'get': 'list'})
response = view(self.request)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 0)
def test_list_contact_normaluser_one_result(self):
self.user.is_superuser = False
self.user.is_staff = False
self.user.save()
organization = Organization.objects.create(name="TestOrg")
tola_user = TolaUser.objects.create(user=self.user,
organization=organization)
wflvl1 = WorkflowLevel1.objects.create(name='WorkflowLevel1',
organization=organization)
WorkflowTeam.objects.create(workflow_user=tola_user,
workflowlevel1=wflvl1)
Contact.objects.create(name='Contact_0', country=self.country,
organization=organization,
workflowlevel1=wflvl1)
self.request.user = self.user
view = ContactViewSet.as_view({'get': 'list'})
response = view(self.request)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 1)
|
|
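A hedged sketch of driving just the Contact view suite above through Django's test runner programmatically; it assumes DJANGO_SETTINGS_MODULE points at the TolaActivity settings.
import django
from django.conf import settings
from django.test.utils import get_runner
django.setup()  # requires DJANGO_SETTINGS_MODULE to be set for the project
TestRunner = get_runner(settings)
failures = TestRunner().run_tests(['feed.tests.test_contactview'])
print('failures: %d' % failures)
# Shell equivalent:
#   python manage.py test feed.tests.test_contactview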
f609654c78fb547d8de7545350f59562499d9397
|
ycml/scripts/partition_instances.py
|
ycml/scripts/partition_instances.py
|
"""
This script is different from `ycml.scripts.partition_lines` because it takes labels into account and produces stratified partitions.
"""
from argparse import ArgumentParser
import json
import logging
from sklearn.model_selection import train_test_split
from ycml.utils import load_instances
from ycml.utils import URIFileType
logger = logging.getLogger(__name__)
def main():
parser = ArgumentParser(description='Script to partition instances in a stratified manner.')
parser.add_argument('-i', '--instances', type=URIFileType('r'), nargs='+', metavar='<instances>', help='List of instance files to partition.')
parser.add_argument('-l', '--label-key', type=str, metavar='<key>', default='labels', help='The key name for the label.')
parser.add_argument('-s', '--train-size', type=float, required=True, default=0.7, metavar='<N>', help='Proportions of instances to use for training set.')
parser.add_argument('-o', '--output', type=URIFileType('w'), nargs=2, required=True, metavar='<output>', help='Save partitioned instances here.')
A = parser.parse_args()
logging.basicConfig(format='%(asctime)-15s [%(name)s-%(process)d] %(levelname)s: %(message)s', level=logging.INFO)
X, Y_labels = load_instances(A.instances, labels_field=A.label_key)
X_train, X_test = train_test_split(X, train_size=A.train_size, stratify=Y_labels)
for o in X_train:
A.output[0].write(json.dumps(o))
A.output[0].write('\n')
#end for
logger.info('{} training instances written to <{}>.'.format(A.output[0].name))
for o in X_test:
A.output[1].write(json.dumps(o))
A.output[1].write('\n')
#end for
logger.info('{} evaluation instances written to <{}>.'.format(A.output[1].name))
#end def
if __name__ == '__main__': main()
|
Add script for stratified partitioning instances
|
Add script for stratified partitioning instances
|
Python
|
apache-2.0
|
skylander86/ycml
|
Add script for stratified partitioning instances
|
"""
This script is different from `ycml.scripts.partition_lines` because it takes labels into account and produces stratified partitions.
"""
from argparse import ArgumentParser
import json
import logging
from sklearn.model_selection import train_test_split
from ycml.utils import load_instances
from ycml.utils import URIFileType
logger = logging.getLogger(__name__)
def main():
parser = ArgumentParser(description='Script to partition instances in a stratified manner.')
parser.add_argument('-i', '--instances', type=URIFileType('r'), nargs='+', metavar='<instances>', help='List of instance files to partition.')
parser.add_argument('-l', '--label-key', type=str, metavar='<key>', default='labels', help='The key name for the label.')
parser.add_argument('-s', '--train-size', type=float, required=True, default=0.7, metavar='<N>', help='Proportions of instances to use for training set.')
parser.add_argument('-o', '--output', type=URIFileType('w'), nargs=2, required=True, metavar='<output>', help='Save partitioned instances here.')
A = parser.parse_args()
logging.basicConfig(format='%(asctime)-15s [%(name)s-%(process)d] %(levelname)s: %(message)s', level=logging.INFO)
X, Y_labels = load_instances(A.instances, labels_field=A.label_key)
X_train, X_test = train_test_split(X, train_size=A.train_size, stratify=Y_labels)
for o in X_train:
A.output[0].write(json.dumps(o))
A.output[0].write('\n')
#end for
logger.info('{} training instances written to <{}>.'.format(A.output[0].name))
for o in X_test:
A.output[1].write(json.dumps(o))
A.output[1].write('\n')
#end for
logger.info('{} evaluation instances written to <{}>.'.format(A.output[1].name))
#end def
if __name__ == '__main__': main()
|
<commit_before><commit_msg>Add script for stratified partitioning instances<commit_after>
|
"""
This script is different from `ycml.scripts.partition_lines` because it takes labels into account and produces stratified partitions.
"""
from argparse import ArgumentParser
import json
import logging
from sklearn.model_selection import train_test_split
from ycml.utils import load_instances
from ycml.utils import URIFileType
logger = logging.getLogger(__name__)
def main():
parser = ArgumentParser(description='Script to partition instances in a stratified manner.')
parser.add_argument('-i', '--instances', type=URIFileType('r'), nargs='+', metavar='<instances>', help='List of instance files to partition.')
parser.add_argument('-l', '--label-key', type=str, metavar='<key>', default='labels', help='The key name for the label.')
parser.add_argument('-s', '--train-size', type=float, required=True, default=0.7, metavar='<N>', help='Proportions of instances to use for training set.')
parser.add_argument('-o', '--output', type=URIFileType('w'), nargs=2, required=True, metavar='<output>', help='Save partitioned instances here.')
A = parser.parse_args()
logging.basicConfig(format='%(asctime)-15s [%(name)s-%(process)d] %(levelname)s: %(message)s', level=logging.INFO)
X, Y_labels = load_instances(A.instances, labels_field=A.label_key)
X_train, X_test = train_test_split(X, train_size=A.train_size, stratify=Y_labels)
for o in X_train:
A.output[0].write(json.dumps(o))
A.output[0].write('\n')
#end for
logger.info('{} training instances written to <{}>.'.format(A.output[0].name))
for o in X_test:
A.output[1].write(json.dumps(o))
A.output[1].write('\n')
#end for
logger.info('{} evaluation instances written to <{}>.'.format(A.output[1].name))
#end def
if __name__ == '__main__': main()
|
Add script for stratified partitioning instances"""
This script is different from `ycml.scripts.partition_lines` because it takes labels into account and produces stratified partitions.
"""
from argparse import ArgumentParser
import json
import logging
from sklearn.model_selection import train_test_split
from ycml.utils import load_instances
from ycml.utils import URIFileType
logger = logging.getLogger(__name__)
def main():
parser = ArgumentParser(description='Script to partition instances in a stratified manner.')
parser.add_argument('-i', '--instances', type=URIFileType('r'), nargs='+', metavar='<instances>', help='List of instance files to partition.')
parser.add_argument('-l', '--label-key', type=str, metavar='<key>', default='labels', help='The key name for the label.')
parser.add_argument('-s', '--train-size', type=float, required=True, default=0.7, metavar='<N>', help='Proportions of instances to use for training set.')
parser.add_argument('-o', '--output', type=URIFileType('w'), nargs=2, required=True, metavar='<output>', help='Save partitioned instances here.')
A = parser.parse_args()
logging.basicConfig(format='%(asctime)-15s [%(name)s-%(process)d] %(levelname)s: %(message)s', level=logging.INFO)
X, Y_labels = load_instances(A.instances, labels_field=A.label_key)
X_train, X_test = train_test_split(X, train_size=A.train_size, stratify=Y_labels)
for o in X_train:
A.output[0].write(json.dumps(o))
A.output[0].write('\n')
#end for
logger.info('{} training instances written to <{}>.'.format(A.output[0].name))
for o in X_test:
A.output[1].write(json.dumps(o))
A.output[1].write('\n')
#end for
logger.info('{} evaluation instances written to <{}>.'.format(A.output[1].name))
#end def
if __name__ == '__main__': main()
|
<commit_before><commit_msg>Add script for stratified partitioning instances<commit_after>"""
This script is different from `ycml.scripts.partition_lines` because it takes labels into account and produces stratified partitions.
"""
from argparse import ArgumentParser
import json
import logging
from sklearn.model_selection import train_test_split
from ycml.utils import load_instances
from ycml.utils import URIFileType
logger = logging.getLogger(__name__)
def main():
parser = ArgumentParser(description='Script to partition instances in a stratified manner.')
parser.add_argument('-i', '--instances', type=URIFileType('r'), nargs='+', metavar='<instances>', help='List of instance files to partition.')
parser.add_argument('-l', '--label-key', type=str, metavar='<key>', default='labels', help='The key name for the label.')
parser.add_argument('-s', '--train-size', type=float, required=True, default=0.7, metavar='<N>', help='Proportions of instances to use for training set.')
parser.add_argument('-o', '--output', type=URIFileType('w'), nargs=2, required=True, metavar='<output>', help='Save partitioned instances here.')
A = parser.parse_args()
logging.basicConfig(format='%(asctime)-15s [%(name)s-%(process)d] %(levelname)s: %(message)s', level=logging.INFO)
X, Y_labels = load_instances(A.instances, labels_field=A.label_key)
X_train, X_test = train_test_split(X, train_size=A.train_size, stratify=Y_labels)
for o in X_train:
A.output[0].write(json.dumps(o))
A.output[0].write('\n')
#end for
logger.info('{} training instances written to <{}>.'.format(A.output[0].name))
for o in X_test:
A.output[1].write(json.dumps(o))
A.output[1].write('\n')
#end for
logger.info('{} evaluation instances written to <{}>.'.format(A.output[1].name))
#end def
if __name__ == '__main__': main()
|
|
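A usage sketch for the partitioning script above; the file names are illustrative, while the flags mirror the argparse options defined in the record.
import sys
from ycml.scripts.partition_instances import main
# Stratified 70/30 split of labelled JSON-lines instances (file names are examples).
sys.argv = ['partition_instances', '-i', 'instances.json', '-l', 'labels',
            '-s', '0.7', '-o', 'train.json', 'evaluate.json']
main()
# Shell equivalent:
#   python -m ycml.scripts.partition_instances -i instances.json -l labels -s 0.7 -o train.json evaluate.json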
76a4f530d433dac0bc409963384891abaa45bbbf
|
python/robotics/sensors/mcp3008_spi_reader.py
|
python/robotics/sensors/mcp3008_spi_reader.py
|
import spidev
class MCP3008SpiReader(object):
def __init__(self, device_id):
self.device = spidev.SpiDev()
self.device.open(0, device_id)
self.device.max_speed_hz = 1000000
def read(self, adc_id):
raw_data = self.device.xfer2([1, 8 + adc_id << 4, 0])
adc_out = ((raw_data[1] & 3) << 8) + raw_data[2]
return adc_out
|
Add MCP3008 spi interface implementation
|
Add MCP3008 spi interface implementation
|
Python
|
mit
|
asydorchuk/robotics,asydorchuk/robotics
|
Add MCP3008 spi interface implementation
|
import spidev
class MCP3008SpiReader(object):
def __init__(self, device_id):
self.device = spidev.SpiDev()
self.device.open(0, device_id)
self.device.max_speed_hz = 1000000
def read(self, adc_id):
raw_data = self.device.xfer2([1, 8 + adc_id << 4, 0])
adc_out = ((raw_data[1] & 3) << 8) + raw_data[2]
return adc_out
|
<commit_before><commit_msg>Add MCP3008 spi interface implementation<commit_after>
|
import spidev
class MCP3008SpiReader(object):
def __init__(self, device_id):
self.device = spidev.SpiDev()
self.device.open(0, device_id)
self.device.max_speed_hz = 1000000
def read(self, adc_id):
raw_data = self.device.xfer2([1, 8 + adc_id << 4, 0])
adc_out = ((raw_data[1] & 3) << 8) + raw_data[2]
return adc_out
|
Add MCP3008 spi interface implementationimport spidev
class MCP3008SpiReader(object):
def __init__(self, device_id):
self.device = spidev.SpiDev()
self.device.open(0, device_id)
self.device.max_speed_hz = 1000000
def read(self, adc_id):
raw_data = self.device.xfer2([1, 8 + adc_id << 4, 0])
adc_out = ((raw_data[1] & 3) << 8) + raw_data[2]
return adc_out
|
<commit_before><commit_msg>Add MCP3008 spi interface implementation<commit_after>import spidev
class MCP3008SpiReader(object):
def __init__(self, device_id):
self.device = spidev.SpiDev()
self.device.open(0, device_id)
self.device.max_speed_hz = 1000000
def read(self, adc_id):
raw_data = self.device.xfer2([1, 8 + adc_id << 4, 0])
adc_out = ((raw_data[1] & 3) << 8) + raw_data[2]
return adc_out
|
|
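A minimal usage sketch for the reader above; it assumes SPI is enabled on the host (for example, a Raspberry Pi) with the MCP3008 wired to chip-select 0, and the scaling assumes a 3.3 V reference.
from robotics.sensors.mcp3008_spi_reader import MCP3008SpiReader  # import path assumed from the record's layout
# Read ADC channel 0 and convert the 10-bit result (0..1023) to a voltage.
reader = MCP3008SpiReader(device_id=0)
raw = reader.read(0)
voltage = raw * 3.3 / 1023.0
print('channel 0: raw=%d, voltage=%.3f V' % (raw, voltage))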
e1a799a5379a10336b73891b2f59bd9fe18f7a91
|
test/field/test_period.py
|
test/field/test_period.py
|
# encoding: utf-8
from __future__ import unicode_literals
from datetime import timedelta
from common import FieldExam
from marrow.mongo.field import Period
class TestHourPeriodField(FieldExam):
__field__ = Period
__kwargs__ = {'hours': 1}
def test_delta(self, Sample):
assert Sample.field.delta == timedelta(hours=1)
class TestMinutePeriodField(FieldExam):
__field__ = Period
__kwargs__ = {'minutes': 10}
def test_delta(self, Sample):
assert Sample.field.delta == timedelta(minutes=10)
class TestSecondPeriodField(FieldExam):
__field__ = Period
__kwargs__ = {'seconds': 15}
def test_delta(self, Sample):
assert Sample.field.delta == timedelta(seconds=15)
|
Add Period delta extraction test.
|
Add Period delta extraction test.
|
Python
|
mit
|
marrow/mongo
|
Add Period delta extraction test.
|
# encoding: utf-8
from __future__ import unicode_literals
from datetime import timedelta
from common import FieldExam
from marrow.mongo.field import Period
class TestHourPeriodField(FieldExam):
__field__ = Period
__kwargs__ = {'hours': 1}
def test_delta(self, Sample):
assert Sample.field.delta == timedelta(hours=1)
class TestMinutePeriodField(FieldExam):
__field__ = Period
__kwargs__ = {'minutes': 10}
def test_delta(self, Sample):
assert Sample.field.delta == timedelta(minutes=10)
class TestSecondPeriodField(FieldExam):
__field__ = Period
__kwargs__ = {'seconds': 15}
def test_delta(self, Sample):
assert Sample.field.delta == timedelta(seconds=15)
|
<commit_before><commit_msg>Add Period delta extraction test.<commit_after>
|
# encoding: utf-8
from __future__ import unicode_literals
from datetime import timedelta
from common import FieldExam
from marrow.mongo.field import Period
class TestHourPeriodField(FieldExam):
__field__ = Period
__kwargs__ = {'hours': 1}
def test_delta(self, Sample):
assert Sample.field.delta == timedelta(hours=1)
class TestMinutePeriodField(FieldExam):
__field__ = Period
__kwargs__ = {'minutes': 10}
def test_delta(self, Sample):
assert Sample.field.delta == timedelta(minutes=10)
class TestSecondPeriodField(FieldExam):
__field__ = Period
__kwargs__ = {'seconds': 15}
def test_delta(self, Sample):
assert Sample.field.delta == timedelta(seconds=15)
|
Add Period delta extraction test.# encoding: utf-8
from __future__ import unicode_literals
from datetime import timedelta
from common import FieldExam
from marrow.mongo.field import Period
class TestHourPeriodField(FieldExam):
__field__ = Period
__kwargs__ = {'hours': 1}
def test_delta(self, Sample):
assert Sample.field.delta == timedelta(hours=1)
class TestMinutePeriodField(FieldExam):
__field__ = Period
__kwargs__ = {'minutes': 10}
def test_delta(self, Sample):
assert Sample.field.delta == timedelta(minutes=10)
class TestSecondPeriodField(FieldExam):
__field__ = Period
__kwargs__ = {'seconds': 15}
def test_delta(self, Sample):
assert Sample.field.delta == timedelta(seconds=15)
|
<commit_before><commit_msg>Add Period delta extraction test.<commit_after># encoding: utf-8
from __future__ import unicode_literals
from datetime import timedelta
from common import FieldExam
from marrow.mongo.field import Period
class TestHourPeriodField(FieldExam):
__field__ = Period
__kwargs__ = {'hours': 1}
def test_delta(self, Sample):
assert Sample.field.delta == timedelta(hours=1)
class TestMinutePeriodField(FieldExam):
__field__ = Period
__kwargs__ = {'minutes': 10}
def test_delta(self, Sample):
assert Sample.field.delta == timedelta(minutes=10)
class TestSecondPeriodField(FieldExam):
__field__ = Period
__kwargs__ = {'seconds': 15}
def test_delta(self, Sample):
assert Sample.field.delta == timedelta(seconds=15)
|
|
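For orientation, a rough sketch of the document each FieldExam case above effectively declares; treat the Document import and the exact declaration style as assumptions about marrow.mongo's public API rather than something shown in the record.
from datetime import timedelta
from marrow.mongo import Document  # assumed public import path
from marrow.mongo.field import Period
class Sample(Document):
    field = Period(hours=1)  # mirrors the hours=1 case above
assert Sample.field.delta == timedelta(hours=1)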
2bdadadbfc50aa1a99752705f96358a1076e1951
|
openstack/tests/functional/telemetry/v2/test_resource.py
|
openstack/tests/functional/telemetry/v2/test_resource.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.tests.functional import base
class TestResource(base.BaseFunctionalTest):
def test_list(self):
ids = [o.resource_id for o in self.conn.telemetry.resources()]
self.assertNotEqual(0, len(ids))
|
Add functional tests for telemetry resource
|
Add functional tests for telemetry resource
Change-Id: I8192452971a0f04fbd6a040c3c048f9284d58bb3
|
Python
|
apache-2.0
|
stackforge/python-openstacksdk,mtougeron/python-openstacksdk,briancurtin/python-openstacksdk,openstack/python-openstacksdk,briancurtin/python-openstacksdk,mtougeron/python-openstacksdk,openstack/python-openstacksdk,dudymas/python-openstacksdk,dudymas/python-openstacksdk,stackforge/python-openstacksdk,dtroyer/python-openstacksdk,dtroyer/python-openstacksdk
|
Add functional tests for telemetry resource
Change-Id: I8192452971a0f04fbd6a040c3c048f9284d58bb3
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.tests.functional import base
class TestResource(base.BaseFunctionalTest):
def test_list(self):
ids = [o.resource_id for o in self.conn.telemetry.resources()]
self.assertNotEqual(0, len(ids))
|
<commit_before><commit_msg>Add functional tests for telementry resource
Change-Id: I8192452971a0f04fbd6a040c3c048f9284d58bb3<commit_after>
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.tests.functional import base
class TestResource(base.BaseFunctionalTest):
def test_list(self):
ids = [o.resource_id for o in self.conn.telemetry.resources()]
self.assertNotEqual(0, len(ids))
|
Add functional tests for telementry resource
Change-Id: I8192452971a0f04fbd6a040c3c048f9284d58bb3# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.tests.functional import base
class TestResource(base.BaseFunctionalTest):
def test_list(self):
ids = [o.resource_id for o in self.conn.telemetry.resources()]
self.assertNotEqual(0, len(ids))
|
<commit_before><commit_msg>Add functional tests for telementry resource
Change-Id: I8192452971a0f04fbd6a040c3c048f9284d58bb3<commit_after># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.tests.functional import base
class TestResource(base.BaseFunctionalTest):
def test_list(self):
ids = [o.resource_id for o in self.conn.telemetry.resources()]
self.assertNotEqual(0, len(ids))
|
|
d621f68444ac9a8fb3bdffb86065297af0cb20ca
|
examples/plots/US_Counties.py
|
examples/plots/US_Counties.py
|
# Copyright (c) 2018 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
US Counties
===========
Demonstrate how to plot US counties at all three available resolutions.
"""
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
from metpy.plots import USCOUNTIES
###########################################
proj = ccrs.LambertConformal(central_longitude=-85.0, central_latitude=45.0)
fig = plt.figure(figsize=(12, 9))
ax1 = fig.add_subplot(1, 3, 1, projection=proj)
ax2 = fig.add_subplot(1, 3, 2, projection=proj)
ax3 = fig.add_subplot(1, 3, 3, projection=proj)
for scale, axis in zip(['20m', '5m', '500k'], [ax1, ax2, ax3]):
axis.set_extent([270.25, 270.9, 38.15, 38.75], ccrs.Geodetic())
axis.add_feature(USCOUNTIES.with_scale(scale), edgecolor='black')
|
Add example of using counties with different resolutions.
|
Add example of using counties with different resolutions.
|
Python
|
bsd-3-clause
|
jrleeman/MetPy,dopplershift/MetPy,dopplershift/MetPy,ahaberlie/MetPy,ahaberlie/MetPy,jrleeman/MetPy,Unidata/MetPy,Unidata/MetPy,ShawnMurd/MetPy
|
Add example of using counties with different resolutions.
|
# Copyright (c) 2018 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
US Counties
===========
Demonstrate how to plot US counties at all three available resolutions.
"""
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
from metpy.plots import USCOUNTIES
###########################################
proj = ccrs.LambertConformal(central_longitude=-85.0, central_latitude=45.0)
fig = plt.figure(figsize=(12, 9))
ax1 = fig.add_subplot(1, 3, 1, projection=proj)
ax2 = fig.add_subplot(1, 3, 2, projection=proj)
ax3 = fig.add_subplot(1, 3, 3, projection=proj)
for scale, axis in zip(['20m', '5m', '500k'], [ax1, ax2, ax3]):
axis.set_extent([270.25, 270.9, 38.15, 38.75], ccrs.Geodetic())
axis.add_feature(USCOUNTIES.with_scale(scale), edgecolor='black')
|
<commit_before><commit_msg>Add example of using counties with different resolutions.<commit_after>
|
# Copyright (c) 2018 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
US Counties
===========
Demonstrate how to plot US counties at all three available resolutions.
"""
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
from metpy.plots import USCOUNTIES
###########################################
proj = ccrs.LambertConformal(central_longitude=-85.0, central_latitude=45.0)
fig = plt.figure(figsize=(12, 9))
ax1 = fig.add_subplot(1, 3, 1, projection=proj)
ax2 = fig.add_subplot(1, 3, 2, projection=proj)
ax3 = fig.add_subplot(1, 3, 3, projection=proj)
for scale, axis in zip(['20m', '5m', '500k'], [ax1, ax2, ax3]):
axis.set_extent([270.25, 270.9, 38.15, 38.75], ccrs.Geodetic())
axis.add_feature(USCOUNTIES.with_scale(scale), edgecolor='black')
|
Add example of using counties with different resolutions.# Copyright (c) 2018 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
US Counties
===========
Demonstrate how to plot US counties at all three available resolutions.
"""
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
from metpy.plots import USCOUNTIES
###########################################
proj = ccrs.LambertConformal(central_longitude=-85.0, central_latitude=45.0)
fig = plt.figure(figsize=(12, 9))
ax1 = fig.add_subplot(1, 3, 1, projection=proj)
ax2 = fig.add_subplot(1, 3, 2, projection=proj)
ax3 = fig.add_subplot(1, 3, 3, projection=proj)
for scale, axis in zip(['20m', '5m', '500k'], [ax1, ax2, ax3]):
axis.set_extent([270.25, 270.9, 38.15, 38.75], ccrs.Geodetic())
axis.add_feature(USCOUNTIES.with_scale(scale), edgecolor='black')
|
<commit_before><commit_msg>Add example of using counties with different resolutions.<commit_after># Copyright (c) 2018 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
US Counties
===========
Demonstrate how to plot US counties at all three available resolutions.
"""
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
from metpy.plots import USCOUNTIES
###########################################
proj = ccrs.LambertConformal(central_longitude=-85.0, central_latitude=45.0)
fig = plt.figure(figsize=(12, 9))
ax1 = fig.add_subplot(1, 3, 1, projection=proj)
ax2 = fig.add_subplot(1, 3, 2, projection=proj)
ax3 = fig.add_subplot(1, 3, 3, projection=proj)
for scale, axis in zip(['20m', '5m', '500k'], [ax1, ax2, ax3]):
axis.set_extent([270.25, 270.9, 38.15, 38.75], ccrs.Geodetic())
axis.add_feature(USCOUNTIES.with_scale(scale), edgecolor='black')
|
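A small follow-up, not part of the original commit: if this example is run with a non-interactive Matplotlib backend, the three-panel comparison can simply be written to disk. The file name below is made up for illustration; fig is the Figure created in the script above.

# Hypothetical addition for non-interactive runs; reuses the fig object built above.
fig.suptitle('US county boundaries at 20m, 5m and 500k resolutions')
fig.savefig('us_counties_resolutions.png', dpi=150, bbox_inches='tight')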
|
4cf369610a00b9c04727bfa61e83ae2961adc480
|
scripts/missing-layer-was-really-a-duplicate.py
|
scripts/missing-layer-was-really-a-duplicate.py
|
#!/usr/bin/python
# One missing layer was really a duplicate - so:
#
# - Remove that missing layer image directory
# - Rename all the higher directories down one
#
# You also need to remove references to that layer from the
# broken_slice table, and decrease all (location.location).z values
# that are greater than or equal to layer_to_remove_z, and change the
# dimensions of the stack in the stack table.
#
# DELETE FROM broken_slice WHERE index = 189 AND stack_id IN (4, 9);
# UPDATE location SET location.z = ((location.location).z - 50) WHERE (location.location).z >= 9450.0 AND project_id = 4;
# UPDATE stack SET dimension.z = ((stack.dimension).z - 1) WHERE id IN (4, 9);
#
# Coincidentally, Albert pointed out that the z calibration was set
# wrongly, so I subsequently used these commands to correct them:
#
# UPDATE stack SET resolution.z = ((stack.resolution).z * 0.9) WHERE id IN (4, 9);
# UPDATE location SET location.z = ((location.location).z * 0.9) WHERE project_id = 4;
import glob, os, re, sys, subprocess
layer_to_remove_z = 9450.0
layer_to_remove = int(round(layer_to_remove_z/50.0))
directories = filter(lambda x: re.match('\d+$', x), os.listdir('.'))
directories = [int(x, 10) for x in directories]
print directories
directories = [x for x in directories if x > layer_to_remove]
directories.sort()
directory_mapping = zip(directories, (x - 1 for x in directories))
subprocess.check_call(["rmdir", str(layer_to_remove)])
for t in directory_mapping:
print "Will rename", t[0], "to", t[1]
subprocess.check_call(["mv", str(t[0]), str(t[1])])
|
Add a script to rename directories if a missing layer was really a removed duplicate
|
Add a script to rename directories if a missing layer was really a removed duplicate
This is quite specific to a situation we encountered, where
a layer that I regarded as missing was really a duplicated
layer that was removed without adjusting the z coordinates.
This script contains in comments at the top the corresponding
SQL that should be used if you have to rename the layers in
this way.
|
Python
|
agpl-3.0
|
fzadow/CATMAID,fzadow/CATMAID,htem/CATMAID,fzadow/CATMAID,fzadow/CATMAID,htem/CATMAID,htem/CATMAID,htem/CATMAID
|
Add a script to rename directories if a missing layer was really a removed duplicate
This is quite specific to a situation we encountered, where
a layer that I regarded as missing was really a duplicated
layer that was removed without adjusting the z coordinates.
This script contains in comments at the top the corresponding
SQL that should be used if you have to rename the layers in
this way.
|
#!/usr/bin/python
# One missing layer was really a duplicate - so:
#
# - Remove that missing layer image directory
# - Rename all the higher directories down one
#
# You also need to remove references to that layer from the
# broken_slice table, and decrease all (location.location).z values
# that are greater than or equal to layer_to_remove_z, and change the
# dimensions of the stack in the stack table.
#
# DELETE FROM broken_slice WHERE index = 189 AND stack_id IN (4, 9);
# UPDATE location SET location.z = ((location.location).z - 50) WHERE (location.location).z >= 9450.0 AND project_id = 4;
# UPDATE stack SET dimension.z = ((stack.dimension).z - 1) WHERE id IN (4, 9);
#
# Coincidentally, Albert pointed out that the z calibration was set
# wrongly, so I subsequently used these commands to correct them:
#
# UPDATE stack SET resolution.z = ((stack.resolution).z * 0.9) WHERE id IN (4, 9);
# UPDATE location SET location.z = ((location.location).z * 0.9) WHERE project_id = 4;
import glob, os, re, sys, subprocess
layer_to_remove_z = 9450.0
layer_to_remove = int(round(layer_to_remove_z/50.0))
directories = filter(lambda x: re.match('\d+$', x), os.listdir('.'))
directories = [int(x, 10) for x in directories]
print directories
directories = [x for x in directories if x > layer_to_remove]
directories.sort()
directory_mapping = zip(directories, (x - 1 for x in directories))
subprocess.check_call(["rmdir", str(layer_to_remove)])
for t in directory_mapping:
print "Will rename", t[0], "to", t[1]
subprocess.check_call(["mv", str(t[0]), str(t[1])])
|
<commit_before><commit_msg>Add a script to rename directories if a missing layer was really a removed duplicate
This is quite specific to a situation we encountered, where
a layer that I regarded as missing was really a duplicated
layer that was removed without adjusting the z coordinates.
This script contains in comments at the top the corresponding
SQL that should be used if you have to rename the layers in
this way.<commit_after>
|
#!/usr/bin/python
# One missing layer was really a duplicate - so:
#
# - Remove that missing layer image directory
# - Rename all the higher directories down one
#
# You also need to remove references to that layer from the
# broken_slice table, and decrease all (location.location).z values
# that are greater than or equal to layer_to_remove_z, and change the
# dimensions of the stack in the stack table.
#
# DELETE FROM broken_slice WHERE index = 189 AND stack_id IN (4, 9);
# UPDATE location SET location.z = ((location.location).z - 50) WHERE (location.location).z >= 9450.0 AND project_id = 4;
# UPDATE stack SET dimension.z = ((stack.dimension).z - 1) WHERE id IN (4, 9);
#
# Coincidentally, Albert pointed out that the z calibration was set
# wrongly, so I subsequently used these commands to correct them:
#
# UPDATE stack SET resolution.z = ((stack.resolution).z * 0.9) WHERE id IN (4, 9);
# UPDATE location SET location.z = ((location.location).z * 0.9) WHERE project_id = 4;
import glob, os, re, sys, subprocess
layer_to_remove_z = 9450.0
layer_to_remove = int(round(layer_to_remove_z/50.0))
directories = filter(lambda x: re.match('\d+$', x), os.listdir('.'))
directories = [int(x, 10) for x in directories]
print directories
directories = [x for x in directories if x > layer_to_remove]
directories.sort()
directory_mapping = zip(directories, (x - 1 for x in directories))
subprocess.check_call(["rmdir", str(layer_to_remove)])
for t in directory_mapping:
print "Will rename", t[0], "to", t[1]
subprocess.check_call(["mv", str(t[0]), str(t[1])])
|
Add a script to rename directories if a missing layer was really a removed duplicate
This is quite specific to a situation we encountered, where
a layer that I regarded as missing was really a duplicated
layer that was removed without adjusting the z coordinates.
This script contains in comments at the top the corresponding
SQL that should be used if you have to rename the layers in
this way.#!/usr/bin/python
# One missing layer was really a duplicate - so:
#
# - Remove that missing layer image directory
# - Rename all the higher directories down one
#
# You also need to remove references to that layer from the
# broken_slice table, and decrease all (location.location).z values
# that are greater than or equal to layer_to_remove_z, and change the
# dimensions of the stack in the stack table.
#
# DELETE FROM broken_slice WHERE index = 189 AND stack_id IN (4, 9);
# UPDATE location SET location.z = ((location.location).z - 50) WHERE (location.location).z >= 9450.0 AND project_id = 4;
# UPDATE stack SET dimension.z = ((stack.dimension).z - 1) WHERE id IN (4, 9);
#
# Coincidentally, Albert pointed out that the z calibration was set
# wrongly, so I subsequently used these commands to correct them:
#
# UPDATE stack SET resolution.z = ((stack.resolution).z * 0.9) WHERE id IN (4, 9);
# UPDATE location SET location.z = ((location.location).z * 0.9) WHERE project_id = 4;
import glob, os, re, sys, subprocess
layer_to_remove_z = 9450.0
layer_to_remove = int(round(layer_to_remove_z/50.0))
directories = filter(lambda x: re.match('\d+$', x), os.listdir('.'))
directories = [int(x, 10) for x in directories]
print directories
directories = [x for x in directories if x > layer_to_remove]
directories.sort()
directory_mapping = zip(directories, (x - 1 for x in directories))
subprocess.check_call(["rmdir", str(layer_to_remove)])
for t in directory_mapping:
print "Will rename", t[0], "to", t[1]
subprocess.check_call(["mv", str(t[0]), str(t[1])])
|
<commit_before><commit_msg>Add a script to rename directories if a missing layer was really a removed duplicate
This is quite specific to a situation we encountered, where
a layer that I regarded as missing was really a duplicated
layer that was removed without adjusting the z coordinates.
This script contains in comments at the top the corresponding
SQL that should be used if you have to rename the layers in
this way.<commit_after>#!/usr/bin/python
# One missing layer was really a duplicate - so:
#
# - Remove that missing layer image directory
# - Rename all the higher directories down one
#
# You also need to remove references to that layer from the
# broken_slice table, and decrease all (location.location).z values
# that are greater than or equal to layer_to_remove_z, and change the
# dimensions of the stack in the stack table.
#
# DELETE FROM broken_slice WHERE index = 189 AND stack_id IN (4, 9);
# UPDATE location SET location.z = ((location.location).z - 50) WHERE (location.location).z >= 9450.0 AND project_id = 4;
# UPDATE stack SET dimension.z = ((stack.dimension).z - 1) WHERE id IN (4, 9);
#
# Coincidentally, Albert pointed out that the z calibration was set
# wrongly, so I subsequently used these commands to correct them:
#
# UPDATE stack SET resolution.z = ((stack.resolution).z * 0.9) WHERE id IN (4, 9);
# UPDATE location SET location.z = ((location.location).z * 0.9) WHERE project_id = 4;
import glob, os, re, sys, subprocess
layer_to_remove_z = 9450.0
layer_to_remove = int(round(layer_to_remove_z/50.0))
directories = filter(lambda x: re.match('\d+$', x), os.listdir('.'))
directories = [int(x, 10) for x in directories]
print directories
directories = [x for x in directories if x > layer_to_remove]
directories.sort()
directory_mapping = zip(directories, (x - 1 for x in directories))
subprocess.check_call(["rmdir", str(layer_to_remove)])
for t in directory_mapping:
print "Will rename", t[0], "to", t[1]
subprocess.check_call(["mv", str(t[0]), str(t[1])])
|
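To make the renaming arithmetic above concrete, here is a small worked example with made-up directory numbers (it is not part of the committed script): with the hard-coded section spacing of 50, layer_to_remove_z = 9450.0 maps to directory 189, and every higher-numbered directory slides down by one.

# Illustrative check of the mapping the script computes before calling mv.
layer_to_remove = int(round(9450.0 / 50.0))                      # 189
directories = [190, 191, 192]                                    # directories above the removed layer
directory_mapping = list(zip(directories, (x - 1 for x in directories)))
print(directory_mapping)                                         # [(190, 189), (191, 190), (192, 191)]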
|
e52a5864a651605a73a30f5a34708a7faa04343d
|
pivoteer/migrations/0003_migrate_pivoteer_indicator.py
|
pivoteer/migrations/0003_migrate_pivoteer_indicator.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.db.models import Q
import datetime
def populate_pivoteer_indicator(apps, schema_editor):
print("entering populate_pivoteer_indicator")
IndicatorRecord = apps.get_model("pivoteer","IndicatorRecord")
TaskTracker = apps.get_model("pivoteer","TaskTracker")
time_frame = datetime.datetime.utcnow() + datetime.timedelta(days=-365)
taskcount = TaskTracker.objects.filter(type="Recent", date__gt=time_frame).count()
print("Tasks count: ", taskcount)
for task in TaskTracker.objects.filter(type="Recent", date__gt=time_frame):
indicator = task.keyword
print("processing indicator " + indicator)
IndicatorRecord.objects.filter(Q(record_type="HR"),
Q(indicator__isnull=True),
Q(info__contains=indicator)).update(indicator=indicator)
print("Updated indicator record for indicator", indicator)
print("Migration completed for Indicator Records")
class Migration(migrations.Migration):
dependencies = [
('pivoteer', '0002_addfield_indicator'),
]
operations = [
migrations.RunPython(populate_pivoteer_indicator),
]
|
Add migration script to update indicators
|
Add migration script to update indicators
|
Python
|
mit
|
gdit-cnd/RAPID,gdit-cnd/RAPID,LindaTNguyen/RAPID,gdit-cnd/RAPID,gdit-cnd/RAPID,LindaTNguyen/RAPID,LindaTNguyen/RAPID,LindaTNguyen/RAPID,LindaTNguyen/RAPID,gdit-cnd/RAPID
|
Add migration script to update indicators
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.db.models import Q
import datetime
def populate_pivoteer_indicator(apps, schema_editor):
print("entering populate_pivoteer_indicator")
IndicatorRecord = apps.get_model("pivoteer","IndicatorRecord")
TaskTracker = apps.get_model("pivoteer","TaskTracker")
time_frame = datetime.datetime.utcnow() + datetime.timedelta(days=-365)
taskcount = TaskTracker.objects.filter(type="Recent", date__gt=time_frame).count()
print("Tasks count: ", taskcount)
for task in TaskTracker.objects.filter(type="Recent", date__gt=time_frame):
indicator = task.keyword
print("processing indicator " + indicator)
IndicatorRecord.objects.filter(Q(record_type="HR"),
Q(indicator__isnull=True),
Q(info__contains=indicator)).update(indicator=indicator)
print("Updated indicator record for indicator", indicator)
print("Migration completed for Indicator Records")
class Migration(migrations.Migration):
dependencies = [
('pivoteer', '0002_addfield_indicator'),
]
operations = [
migrations.RunPython(populate_pivoteer_indicator),
]
|
<commit_before><commit_msg>Add migration script to update indicators<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.db.models import Q
import datetime
def populate_pivoteer_indicator(apps, schema_editor):
print("entering populate_pivoteer_indicator")
IndicatorRecord = apps.get_model("pivoteer","IndicatorRecord")
TaskTracker = apps.get_model("pivoteer","TaskTracker")
time_frame = datetime.datetime.utcnow() + datetime.timedelta(days=-365)
taskcount = TaskTracker.objects.filter(type="Recent", date__gt=time_frame).count()
print("Tasks count: ", taskcount)
for task in TaskTracker.objects.filter(type="Recent", date__gt=time_frame):
indicator = task.keyword
print("processing indicator " + indicator)
IndicatorRecord.objects.filter(Q(record_type="HR"),
Q(indicator__isnull=True),
Q(info__contains=indicator)).update(indicator=indicator)
print("Updated indicator record for indicator", indicator)
print("Migration completed for Indicator Records")
class Migration(migrations.Migration):
dependencies = [
('pivoteer', '0002_addfield_indicator'),
]
operations = [
migrations.RunPython(populate_pivoteer_indicator),
]
|
Add migration script to update indicators# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.db.models import Q
import datetime
def populate_pivoteer_indicator(apps, schema_editor):
print("entering populate_pivoteer_indicator")
IndicatorRecord = apps.get_model("pivoteer","IndicatorRecord")
TaskTracker = apps.get_model("pivoteer","TaskTracker")
time_frame = datetime.datetime.utcnow() + datetime.timedelta(days=-365)
taskcount = TaskTracker.objects.filter(type="Recent", date__gt=time_frame).count()
print("Tasks count: ", taskcount)
for task in TaskTracker.objects.filter(type="Recent", date__gt=time_frame):
indicator = task.keyword
print("processing indicator " + indicator)
IndicatorRecord.objects.filter(Q(record_type="HR"),
Q(indicator__isnull=True),
Q(info__contains=indicator)).update(indicator=indicator)
print("Updated indicator record for indicator", indicator)
print("Migration completed for Indicator Records")
class Migration(migrations.Migration):
dependencies = [
('pivoteer', '0002_addfield_indicator'),
]
operations = [
migrations.RunPython(populate_pivoteer_indicator),
]
|
<commit_before><commit_msg>Add migration script to update indicators<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.db.models import Q
import datetime
def populate_pivoteer_indicator(apps, schema_editor):
print("entering populate_pivoteer_indicator")
IndicatorRecord = apps.get_model("pivoteer","IndicatorRecord")
TaskTracker = apps.get_model("pivoteer","TaskTracker")
time_frame = datetime.datetime.utcnow() + datetime.timedelta(days=-365)
taskcount = TaskTracker.objects.filter(type="Recent", date__gt=time_frame).count()
print("Tasks count: ", taskcount)
for task in TaskTracker.objects.filter(type="Recent", date__gt=time_frame):
indicator = task.keyword
print("processing indicator " + indicator)
IndicatorRecord.objects.filter(Q(record_type="HR"),
Q(indicator__isnull=True),
Q(info__contains=indicator)).update(indicator=indicator)
print("Updated indicator record for indicator", indicator)
print("Migration completed for Indicator Records")
class Migration(migrations.Migration):
dependencies = [
('pivoteer', '0002_addfield_indicator'),
]
operations = [
migrations.RunPython(populate_pivoteer_indicator),
]
|
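One optional refinement, offered as a sketch rather than a change to the recorded migration: RunPython also accepts a reverse callable, so the data migration can be declared reversible even when undoing it should do nothing. This assumes Django 1.8 or later, where RunPython.noop is available.

# Hypothetical variant of the operations list with an explicit no-op reverse step.
operations = [
    migrations.RunPython(populate_pivoteer_indicator, migrations.RunPython.noop),
]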
|
215f71b95911a579df919e9b81ae75feac208259
|
spraakbanken/s5/spr_local/make_count_files.py
|
spraakbanken/s5/spr_local/make_count_files.py
|
#!/usr/bin/env python3
import argparse
import collections
import os
from fractions import Fraction
def main(countfile, origcount, order, outdir):
vocab = {"<s>": 0, "</s>": 1}
counters = [None]
for _ in range(order+1):
counters.append(collections.Counter())
def map_vocab(line):
for w in set(line):
if w not in vocab:
vocab[w] = len(vocab)
return [vocab[w] for w in line]
for line in open(countfile, encoding='utf-8'):
parts = line.split()
words = parts[:-1]
count = int(parts[-1])
iwords = map_vocab(words)
always_one = False
if iwords[0] == 0 and iwords[-1] == 1:
always_one = True
for to in range(1, order+1):
for s in range(0, len(iwords)-to+1):
tt = tuple(iwords[s:s+to])
parts = 1
if iwords[0] == 0 and s+to < origcount:
parts += origcount - (s+to)
if iwords[-1] == 1 and s > 0:
parts += s
if 0 in tt or 1 in tt or always_one:
counters[to][tt] += count
else:
counters[to][tt] += parts * Fraction(count,(origcount-to+1))
rev_vocab = [k for k,v in sorted(vocab.items(), key=lambda x: x[1])]
for i in range(1, order+1):
with open(os.path.join(outdir, '{}count'.format(i)), 'w', encoding='utf-8') as of:
for k, c in counters[i].most_common():
print("{} {}".format(" ".join(rev_vocab[j] for j in k), int(c)), file=of)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Make count files')
parser.add_argument('countfile')
parser.add_argument('origcount', type=int)
parser.add_argument('order', type=int)
parser.add_argument('outdir')
args = parser.parse_args()
main(args.countfile, args.origcount, args.order, args.outdir)
|
Make count files which works correctly for words, but not yet for morphed count files
|
Make count files which works correctly for words, but not yet for morphed count files
|
Python
|
apache-2.0
|
psmit/kaldi-recipes,phsmit/kaldi-recipes,psmit/kaldi-recipes,phsmit/kaldi-recipes,psmit/kaldi-recipes
|
Make count files which works correctly for words, but not yet for morphed count files
|
#!/usr/bin/env python3
import argparse
import collections
import os
from fractions import Fraction
def main(countfile, origcount, order, outdir):
vocab = {"<s>": 0, "</s>": 1}
counters = [None]
for _ in range(order+1):
counters.append(collections.Counter())
def map_vocab(line):
for w in set(line):
if w not in vocab:
vocab[w] = len(vocab)
return [vocab[w] for w in line]
for line in open(countfile, encoding='utf-8'):
parts = line.split()
words = parts[:-1]
count = int(parts[-1])
iwords = map_vocab(words)
always_one = False
if iwords[0] == 0 and iwords[-1] == 1:
always_one = True
for to in range(1, order+1):
for s in range(0, len(iwords)-to+1):
tt = tuple(iwords[s:s+to])
parts = 1
if iwords[0] == 0 and s+to < origcount:
parts += origcount - (s+to)
if iwords[-1] == 1 and s > 0:
parts += s
if 0 in tt or 1 in tt or always_one:
counters[to][tt] += count
else:
counters[to][tt] += parts * Fraction(count,(origcount-to+1))
rev_vocab = [k for k,v in sorted(vocab.items(), key=lambda x: x[1])]
for i in range(1, order+1):
with open(os.path.join(outdir, '{}count'.format(i)), 'w', encoding='utf-8') as of:
for k, c in counters[i].most_common():
print("{} {}".format(" ".join(rev_vocab[j] for j in k), int(c)), file=of)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Make count files')
parser.add_argument('countfile')
parser.add_argument('origcount', type=int)
parser.add_argument('order', type=int)
parser.add_argument('outdir')
args = parser.parse_args()
main(args.countfile, args.origcount, args.order, args.outdir)
|
<commit_before><commit_msg>Make count files which works correctly for words, but not yet for morphed count files<commit_after>
|
#!/usr/bin/env python3
import argparse
import collections
import os
from fractions import Fraction
def main(countfile, origcount, order, outdir):
vocab = {"<s>": 0, "</s>": 1}
counters = [None]
for _ in range(order+1):
counters.append(collections.Counter())
def map_vocab(line):
for w in set(line):
if w not in vocab:
vocab[w] = len(vocab)
return [vocab[w] for w in line]
for line in open(countfile, encoding='utf-8'):
parts = line.split()
words = parts[:-1]
count = int(parts[-1])
iwords = map_vocab(words)
always_one = False
if iwords[0] == 0 and iwords[-1] == 1:
always_one = True
for to in range(1, order+1):
for s in range(0, len(iwords)-to+1):
tt = tuple(iwords[s:s+to])
parts = 1
if iwords[0] == 0 and s+to < origcount:
parts += origcount - (s+to)
if iwords[-1] == 1 and s > 0:
parts += s
if 0 in tt or 1 in tt or always_one:
counters[to][tt] += count
else:
counters[to][tt] += parts * Fraction(count,(origcount-to+1))
rev_vocab = [k for k,v in sorted(vocab.items(), key=lambda x: x[1])]
for i in range(1, order+1):
with open(os.path.join(outdir, '{}count'.format(i)), 'w', encoding='utf-8') as of:
for k, c in counters[i].most_common():
print("{} {}".format(" ".join(rev_vocab[j] for j in k), int(c)), file=of)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Make count files')
parser.add_argument('countfile')
parser.add_argument('origcount', type=int)
parser.add_argument('order', type=int)
parser.add_argument('outdir')
args = parser.parse_args()
main(args.countfile, args.origcount, args.order, args.outdir)
|
Make count files which works correctly for words, but not yet for morphed count files#!/usr/bin/env python3
import argparse
import collections
import os
from fractions import Fraction
def main(countfile, origcount, order, outdir):
vocab = {"<s>": 0, "</s>": 1}
counters = [None]
for _ in range(order+1):
counters.append(collections.Counter())
def map_vocab(line):
for w in set(line):
if w not in vocab:
vocab[w] = len(vocab)
return [vocab[w] for w in line]
for line in open(countfile, encoding='utf-8'):
parts = line.split()
words = parts[:-1]
count = int(parts[-1])
iwords = map_vocab(words)
always_one = False
if iwords[0] == 0 and iwords[-1] == 1:
always_one = True
for to in range(1, order+1):
for s in range(0, len(iwords)-to+1):
tt = tuple(iwords[s:s+to])
parts = 1
if iwords[0] == 0 and s+to < origcount:
parts += origcount - (s+to)
if iwords[-1] == 1 and s > 0:
parts += s
if 0 in tt or 1 in tt or always_one:
counters[to][tt] += count
else:
counters[to][tt] += parts * Fraction(count,(origcount-to+1))
rev_vocab = [k for k,v in sorted(vocab.items(), key=lambda x: x[1])]
for i in range(1, order+1):
with open(os.path.join(outdir, '{}count'.format(i)), 'w', encoding='utf-8') as of:
for k, c in counters[i].most_common():
print("{} {}".format(" ".join(rev_vocab[j] for j in k), int(c)), file=of)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Make count files')
parser.add_argument('countfile')
parser.add_argument('origcount', type=int)
parser.add_argument('order', type=int)
parser.add_argument('outdir')
args = parser.parse_args()
main(args.countfile, args.origcount, args.order, args.outdir)
|
<commit_before><commit_msg>Make count files which works correctly for words, but not yet for morphed count files<commit_after>#!/usr/bin/env python3
import argparse
import collections
import os
from fractions import Fraction
def main(countfile, origcount, order, outdir):
vocab = {"<s>": 0, "</s>": 1}
counters = [None]
for _ in range(order+1):
counters.append(collections.Counter())
def map_vocab(line):
for w in set(line):
if w not in vocab:
vocab[w] = len(vocab)
return [vocab[w] for w in line]
for line in open(countfile, encoding='utf-8'):
parts = line.split()
words = parts[:-1]
count = int(parts[-1])
iwords = map_vocab(words)
always_one = False
if iwords[0] == 0 and iwords[-1] == 1:
always_one = True
for to in range(1, order+1):
for s in range(0, len(iwords)-to+1):
tt = tuple(iwords[s:s+to])
parts = 1
if iwords[0] == 0 and s+to < origcount:
parts += origcount - (s+to)
if iwords[-1] == 1 and s > 0:
parts += s
if 0 in tt or 1 in tt or always_one:
counters[to][tt] += count
else:
counters[to][tt] += parts * Fraction(count,(origcount-to+1))
rev_vocab = [k for k,v in sorted(vocab.items(), key=lambda x: x[1])]
for i in range(1, order+1):
with open(os.path.join(outdir, '{}count'.format(i)), 'w', encoding='utf-8') as of:
for k, c in counters[i].most_common():
print("{} {}".format(" ".join(rev_vocab[j] for j in k), int(c)), file=of)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Make count files')
parser.add_argument('countfile')
parser.add_argument('origcount', type=int)
parser.add_argument('order', type=int)
parser.add_argument('outdir')
args = parser.parse_args()
main(args.countfile, args.origcount, args.order, args.outdir)
|
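For reference, a hypothetical invocation of the script above; the positional arguments mirror the argparse definition (countfile, origcount, order, outdir), and the file and directory names are invented for the example. The output directory must already exist, since the script only opens files inside it.

# Illustrative driver, assuming the counts file holds lines like "<s> the cat sat </s> 12".
from make_count_files import main
main("sentence.counts", 5, 3, "counts_out")   # countfile, origcount, order, outdir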
|
3f61c9a552720db462ecfdecc652caac469754b4
|
add-alias.py
|
add-alias.py
|
#!/usr/bin/python
import MySQLdb
# Ask for root password in mysql
passwd = raw_input("Enter password for user root in mysql: ")
# Open database connection
db = MySQLdb.connect("localhost","root", passwd,"servermail" )
# Input file to read from
filename = raw_input("Enter file to read for entries: ")
fh = open(filename)
# Enter name of mailing list
alias = raw_input("Enter name of alias: ")
# prepare a cursor object using cursor() method
cursor = db.cursor()
for line in fh:
line = line.strip()
# Read last id number
# cursor.execute("SELECT * FROM virtual_aliases")
# id = cursor.rowcount + 1
# Prepare SQL query to INSERT a record into the database.
sql = "INSERT INTO virtual_aliases (domain_id, source, destination) \
VALUES ('%d', '%s', '%s')" % \
(1, alias, line)
try:
# Execute the SQL command
cursor.execute(sql)
# Commit your changes in the database
db.commit()
except:
# Rollback in case there is any error
db.rollback()
# disconnect from server
db.close()
|
Add script for adding addresses from file to mailing list on server
|
Add script for adding addresses from file to mailing list on server
|
Python
|
mit
|
svenpruefer/scripts,svenpruefer/scripts
|
Add script for adding addresses from file to mailing list on server
|
#!/usr/bin/python
import MySQLdb
# Ask for root password in mysql
passwd = raw_input("Enter password for user root in mysql: ")
# Open database connection
db = MySQLdb.connect("localhost","root", passwd,"servermail" )
# Input file to read from
filename = raw_input("Enter file to read for entries: ")
fh = open(filename)
# Enter name of mailing list
alias = raw_input("Enter name of alias: ")
# prepare a cursor object using cursor() method
cursor = db.cursor()
for line in fh:
line = line.strip()
# Read last id number
# cursor.execute("SELECT * FROM virtual_aliases")
# id = cursor.rowcount + 1
# Prepare SQL query to INSERT a record into the database.
sql = "INSERT INTO virtual_aliases (domain_id, source, destination) \
VALUES ('%d', '%s', '%s')" % \
(1, alias, line)
try:
# Execute the SQL command
cursor.execute(sql)
# Commit your changes in the database
db.commit()
except:
# Rollback in case there is any error
db.rollback()
# disconnect from server
db.close()
|
<commit_before><commit_msg>Add script for adding addresses from file to mailing list on server<commit_after>
|
#!/usr/bin/python
import MySQLdb
# Ask for root password in mysql
passwd = raw_input("Enter password for user root in mysql: ")
# Open database connection
db = MySQLdb.connect("localhost","root", passwd,"servermail" )
# Input file to read from
filename = raw_input("Enter file to read for entries: ")
fh = open(filename)
# Enter name of mailing list
alias = raw_input("Enter name of alias: ")
# prepare a cursor object using cursor() method
cursor = db.cursor()
for line in fh:
line = line.strip()
# Read last id number
# cursor.execute("SELECT * FROM virtual_aliases")
# id = cursor.rowcount + 1
# Prepare SQL query to INSERT a record into the database.
sql = "INSERT INTO virtual_aliases (domain_id, source, destination) \
VALUES ('%d', '%s', '%s')" % \
(1, alias, line)
try:
# Execute the SQL command
cursor.execute(sql)
# Commit your changes in the database
db.commit()
except:
# Rollback in case there is any error
db.rollback()
# disconnect from server
db.close()
|
Add script for adding addresses from file to mailing list on server#!/usr/bin/python
import MySQLdb
# Ask for root password in mysql
passwd = raw_input("Enter password for user root in mysql: ")
# Open database connection
db = MySQLdb.connect("localhost","root", passwd,"servermail" )
# Input file to read from
filename = raw_input("Enter file to read for entries: ")
fh = open(filename)
# Enter name of mailing list
alias = raw_input("Enter name of alias: ")
# prepare a cursor object using cursor() method
cursor = db.cursor()
for line in fh:
line = line.strip()
# Read last id number
# cursor.execute("SELECT * FROM virtual_aliases")
# id = cursor.rowcount + 1
# Prepare SQL query to INSERT a record into the database.
sql = "INSERT INTO virtual_aliases (domain_id, source, destination) \
VALUES ('%d', '%s', '%s')" % \
(1, alias, line)
try:
# Execute the SQL command
cursor.execute(sql)
# Commit your changes in the database
db.commit()
except:
# Rollback in case there is any error
db.rollback()
# disconnect from server
db.close()
|
<commit_before><commit_msg>Add script for adding addresses from file to mailing list on server<commit_after>#!/usr/bin/python
import MySQLdb
# Ask for root password in mysql
passwd = raw_input("Enter password for user root in mysql: ")
# Open database connection
db = MySQLdb.connect("localhost","root", passwd,"servermail" )
# Input file to read from
filename = raw_input("Enter file to read for entries: ")
fh = open(filename)
# Enter name of mailing list
alias = raw_input("Enter name of alias: ")
# prepare a cursor object using cursor() method
cursor = db.cursor()
for line in fh:
line = line.strip()
# Read last id number
# cursor.execute("SELECT * FROM virtual_aliases")
# id = cursor.rowcount + 1
# Prepare SQL query to INSERT a record into the database.
sql = "INSERT INTO virtual_aliases (domain_id, source, destination) \
VALUES ('%d', '%s', '%s')" % \
(1, alias, line)
try:
# Execute the SQL command
cursor.execute(sql)
# Commit your changes in the database
db.commit()
except:
# Rollback in case there is any error
db.rollback()
# disconnect from server
db.close()
|
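A side note on the INSERT above, given as a sketch rather than a change to the recorded commit: MySQLdb can bind the values itself, which avoids quoting problems when an alias or address contains special characters. The table and column names are taken from the script; everything else is illustrative.

# Parameterised form of the same INSERT; MySQLdb escapes alias and line for us.
sql = ("INSERT INTO virtual_aliases (domain_id, source, destination) "
       "VALUES (%s, %s, %s)")
cursor.execute(sql, (1, alias, line))
db.commit()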
|
545f3e613d759483e0dc87f58e915da903209861
|
distance_to_camera.py
|
distance_to_camera.py
|
# Import image library necessary for file
import SimpleCV
import sys
from SimpleCV import Image
def check_image(image_path):
#Find file by path and import. File currently resides in same directory.
image = Image(image_path)
# grey = image.grayscale()
instruction = "go"
array_bounds_possible_widths = [image.width / 4, image.width / 4 * 3]
shapes_pic_attributes = image.size()
shapes_pic_size = shapes_pic_attributes[0] * shapes_pic_attributes[1]
# dist = img.colorDistance(SimpleCV.Color.Black).dilate(2)
blobs = image.findBlobs()
#check if the blob is in the middle 1/2 of screen and is too high
for blob in blobs[:-1]:
# print blob.contains()
if (blob.coordinates()[0] > array_bounds_possible_widths[0] and blob.coordinates()[0] < array_bounds_possible_widths[1]) and (blob.height() > image.height / 5 and blob.coordinates()[1] > image.height / 5 * 2):
# print grey.height
print blob.coordinates()[1]
print "Blob is in the way!!"
blob.draw(color=(255,0,0))
instruction = "stop"
# Display the blob until you click on the left side of the picture.
display = SimpleCV.Display()
while display.isNotDone():
image.show()
if display.mouseLeft:
break
return instruction
print sys.argv[1]
instruction = check_image(sys.argv[1])
print instruction
# def check_width(image, blob):
# print check_width
# def check_height(image, blob):
# return (blob.height() > image.height / 1.5) and blob
|
Add distance to camera document
|
Add distance to camera document
|
Python
|
mit
|
jwarshaw/RaspberryDrive
|
Add distance to camera document
|
# Import image library necessary for file
import SimpleCV
import sys
from SimpleCV import Image
def check_image(image_path):
#Find file by path and import. File currently resides in same directory.
image = Image(image_path)
# grey = image.grayscale()
instruction = "go"
array_bounds_possible_widths = [image.width / 4, image.width / 4 * 3]
shapes_pic_attributes = image.size()
shapes_pic_size = shapes_pic_attributes[0] * shapes_pic_attributes[1]
# dist = img.colorDistance(SimpleCV.Color.Black).dilate(2)
blobs = image.findBlobs()
#check if the blob is in the middle 1/2 of screen and is too high
for blob in blobs[:-1]:
# print blob.contains()
if (blob.coordinates()[0] > array_bounds_possible_widths[0] and blob.coordinates()[0] < array_bounds_possible_widths[1]) and (blob.height() > image.height / 5 and blob.coordinates()[1] > image.height / 5 * 2):
# print grey.height
print blob.coordinates()[1]
print "Blob is in the way!!"
blob.draw(color=(255,0,0))
instruction = "stop"
# Display the blob until you click on the left side of the picture.
display = SimpleCV.Display()
while display.isNotDone():
image.show()
if display.mouseLeft:
break
return instruction
print sys.argv[1]
instruction = check_image(sys.argv[1])
print instruction
# def check_width(image, blob):
# print check_width
# def check_height(image, blob):
# return (blob.height() > image.height / 1.5) and blob
|
<commit_before><commit_msg>Add distance to camera document<commit_after>
|
# Import image library necessary for file
import SimpleCV
import sys
from SimpleCV import Image
def check_image(image_path):
#Find file by path and import. File currently resides in same directory.
image = Image(image_path)
# grey = image.grayscale()
instruction = "go"
array_bounds_possible_widths = [image.width / 4, image.width / 4 * 3]
shapes_pic_attributes = image.size()
shapes_pic_size = shapes_pic_attributes[0] * shapes_pic_attributes[1]
# dist = img.colorDistance(SimpleCV.Color.Black).dilate(2)
blobs = image.findBlobs()
#check if the blob is in the middle 1/2 of screen and is too high
for blob in blobs[:-1]:
# print blob.contains()
if (blob.coordinates()[0] > array_bounds_possible_widths[0] and blob.coordinates()[0] < array_bounds_possible_widths[1]) and (blob.height() > image.height / 5 and blob.coordinates()[1] > image.height / 5 * 2):
# print grey.height
print blob.coordinates()[1]
print "Blob is in the way!!"
blob.draw(color=(255,0,0))
instruction = "stop"
# Display the blob until you click on the left side of the picture.
display = SimpleCV.Display()
while display.isNotDone():
image.show()
if display.mouseLeft:
break
return instruction
print sys.argv[1]
instruction = check_image(sys.argv[1])
print instruction
# def check_width(image, blob):
# print check_width
# def check_height(image, blob):
# return (blob.height() > image.height / 1.5) and blob
|
Add distance to camera document# Import image library necessary for file
import SimpleCV
import sys
from SimpleCV import Image
def check_image(image_path):
#Find file by path and import. File currently resides in same directory.
image = Image(image_path)
# grey = image.grayscale()
instruction = "go"
array_bounds_possible_widths = [image.width / 4, image.width / 4 * 3]
shapes_pic_attributes = image.size()
shapes_pic_size = shapes_pic_attributes[0] * shapes_pic_attributes[1]
# dist = img.colorDistance(SimpleCV.Color.Black).dilate(2)
blobs = image.findBlobs()
#check if the blob is in the middle 1/2 of screen and is too high
for blob in blobs[:-1]:
# print blob.contains()
if (blob.coordinates()[0] > array_bounds_possible_widths[0] and blob.coordinates()[0] < array_bounds_possible_widths[1]) and (blob.height() > image.height / 5 and blob.coordinates()[1] > image.height / 5 * 2):
# print grey.height
print blob.coordinates()[1]
print "Blob is in the way!!"
blob.draw(color=(255,0,0))
instruction = "stop"
# Display the blob until you click on the left side of the picture.
display = SimpleCV.Display()
while display.isNotDone():
image.show()
if display.mouseLeft:
break
return instruction
print sys.argv[1]
instruction = check_image(sys.argv[1])
print instruction
# def check_width(image, blob):
# print check_width
# def check_height(image, blob):
# return (blob.height() > image.height / 1.5) and blob
|
<commit_before><commit_msg>Add distance to camera document<commit_after># Import image library necessary for file
import SimpleCV
import sys
from SimpleCV import Image
def check_image(image_path):
#Find file by path and import. File currently resides in same directory.
image = Image(image_path)
# grey = image.grayscale()
instruction = "go"
array_bounds_possible_widths = [image.width / 4, image.width / 4 * 3]
shapes_pic_attributes = image.size()
shapes_pic_size = shapes_pic_attributes[0] * shapes_pic_attributes[1]
# dist = img.colorDistance(SimpleCV.Color.Black).dilate(2)
blobs = image.findBlobs()
#check if the blob is in the middle 1/2 of screen and is too high
for blob in blobs[:-1]:
# print blob.contains()
if (blob.coordinates()[0] > array_bounds_possible_widths[0] and blob.coordinates()[0] < array_bounds_possible_widths[1]) and (blob.height() > image.height / 5 and blob.coordinates()[1] > image.height / 5 * 2):
# print grey.height
print blob.coordinates()[1]
print "Blob is in the way!!"
blob.draw(color=(255,0,0))
instruction = "stop"
# Display the blob until you click on the left side of the picture.
display = SimpleCV.Display()
while display.isNotDone():
image.show()
if display.mouseLeft:
break
return instruction
print sys.argv[1]
instruction = check_image(sys.argv[1])
print instruction
# def check_width(image, blob):
# print check_width
# def check_height(image, blob):
# return (blob.height() > image.height / 1.5) and blob
|
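A quick numeric illustration of the thresholds computed in check_image, using a hypothetical 640x480 frame (these numbers are not in the original file): the blob must sit in the middle half of the image horizontally, be taller than a fifth of the frame, and lie more than two fifths of the way down before the instruction flips to "stop".

# Worked example of the bounds for a 640x480 image, using integer division for clarity.
width, height = 640, 480
array_bounds_possible_widths = [width // 4, width // 4 * 3]   # [160, 480]
min_blob_height = height // 5                                  # 96
min_blob_y = height // 5 * 2                                   # 192
print(array_bounds_possible_widths, min_blob_height, min_blob_y)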
|
7cd93733913d4f221afed7c437f5e6ce9c0d8986
|
test/run_tests.py
|
test/run_tests.py
|
#!/usr/bin/python
from common import *
from test_cpu_collector import *
from test_disk import *
from test_disk_space_collector import *
from test_disk_usage_collector import *
from test_filestat_collector import *
from test_load_average_collector import *
from test_memory_collector import *
from test_network_collector import *
from test_sockstat_collector import *
from test_tcp_collector import *
from test_vmstat_collector import *
################################################################################
if __name__ == "__main__":
unittest.main()
|
Add a way to run all the tests at once
|
Add a way to run all the tests at once
|
Python
|
mit
|
mfriedenhagen/Diamond,MediaMath/Diamond,skbkontur/Diamond,rtoma/Diamond,zoidbergwill/Diamond,disqus/Diamond,jriguera/Diamond,hamelg/Diamond,szibis/Diamond,skbkontur/Diamond,rtoma/Diamond,bmhatfield/Diamond,hamelg/Diamond,tusharmakkar08/Diamond,datafiniti/Diamond,MichaelDoyle/Diamond,timchenxiaoyu/Diamond,TAKEALOT/Diamond,jaingaurav/Diamond,ramjothikumar/Diamond,EzyInsights/Diamond,zoidbergwill/Diamond,datafiniti/Diamond,hvnsweeting/Diamond,Basis/Diamond,h00dy/Diamond,socialwareinc/Diamond,krbaker/Diamond,jriguera/Diamond,cannium/Diamond,TAKEALOT/Diamond,tuenti/Diamond,mfriedenhagen/Diamond,cannium/Diamond,Ensighten/Diamond,Ensighten/Diamond,gg7/diamond,dcsquared13/Diamond,sebbrandt87/Diamond,janisz/Diamond-1,eMerzh/Diamond-1,tuenti/Diamond,Clever/Diamond,sebbrandt87/Diamond,Ssawa/Diamond,joel-airspring/Diamond,saucelabs/Diamond,signalfx/Diamond,krbaker/Diamond,tellapart/Diamond,Netuitive/netuitive-diamond,dcsquared13/Diamond,MediaMath/Diamond,zoidbergwill/Diamond,MediaMath/Diamond,timchenxiaoyu/Diamond,Nihn/Diamond-1,janisz/Diamond-1,eMerzh/Diamond-1,TinLe/Diamond,janisz/Diamond-1,MichaelDoyle/Diamond,codepython/Diamond,bmhatfield/Diamond,thardie/Diamond,python-diamond/Diamond,Nihn/Diamond-1,TinLe/Diamond,cannium/Diamond,stuartbfox/Diamond,cannium/Diamond,hamelg/Diamond,mzupan/Diamond,Netuitive/netuitive-diamond,Ssawa/Diamond,works-mobile/Diamond,eMerzh/Diamond-1,mzupan/Diamond,cannium/Diamond,Precis/Diamond,Slach/Diamond,jumping/Diamond,CYBERBUGJR/Diamond,actmd/Diamond,hvnsweeting/Diamond,socialwareinc/Diamond,Nihn/Diamond-1,saucelabs/Diamond,acquia/Diamond,Slach/Diamond,h00dy/Diamond,Netuitive/netuitive-diamond,krbaker/Diamond,MichaelDoyle/Diamond,EzyInsights/Diamond,Slach/Diamond,disqus/Diamond,jriguera/Diamond,tuenti/Diamond,codepython/Diamond,mfriedenhagen/Diamond,tusharmakkar08/Diamond,skbkontur/Diamond,jaingaurav/Diamond,skbkontur/Diamond,Ormod/Diamond,TinLe/Diamond,works-mobile/Diamond,joel-airspring/Diamond,signalfx/Diamond,h00dy/Diamond,TinLe/Diamond,codepython/Diamond,actmd/Diamond,ramjothikumar/Diamond,datafiniti/Diamond,russss/Diamond,sebbrandt87/Diamond,tuenti/Diamond,Clever/Diamond,jaingaurav/Diamond,thardie/Diamond,Clever/Diamond,krbaker/Diamond,jaingaurav/Diamond,signalfx/Diamond,disqus/Diamond,ramjothikumar/Diamond,tellapart/Diamond,anandbhoraskar/Diamond,CYBERBUGJR/Diamond,joel-airspring/Diamond,hvnsweeting/Diamond,actmd/Diamond,gg7/diamond,bmhatfield/Diamond,Ormod/Diamond,Precis/Diamond,EzyInsights/Diamond,janisz/Diamond-1,stuartbfox/Diamond,jriguera/Diamond,anandbhoraskar/Diamond,stuartbfox/Diamond,Ssawa/Diamond,acquia/Diamond,zoidbergwill/Diamond,Ensighten/Diamond,Basis/Diamond,dcsquared13/Diamond,tusharmakkar08/Diamond,Netuitive/Diamond,ceph/Diamond,acquia/Diamond,thardie/Diamond,Precis/Diamond,Netuitive/netuitive-diamond,timchenxiaoyu/Diamond,tusharmakkar08/Diamond,jumping/Diamond,saucelabs/Diamond,mzupan/Diamond,MediaMath/Diamond,python-diamond/Diamond,gg7/diamond,Ormod/Diamond,socialwareinc/Diamond,TAKEALOT/Diamond,Netuitive/Diamond,russss/Diamond,TAKEALOT/Diamond,works-mobile/Diamond,ceph/Diamond,timchenxiaoyu/Diamond,metamx/Diamond,signalfx/Diamond,anandbhoraskar/Diamond,jumping/Diamond,Clever/Diamond,Basis/Diamond,russss/Diamond,metamx/Diamond,Netuitive/Diamond,EzyInsights/Diamond,CYBERBUGJR/Diamond,acquia/Diamond,Basis/Diamond,saucelabs/Diamond,socialwareinc/Diamond,ceph/Diamond,Ormod/Diamond,Netuitive/Diamond,metamx/Diamond,hvnsweeting/Diamond,tellapart/Diamond,mfriedenhagen/Diamond,thardie/Diamond,python-diamond/Diamond,stuartbfox/Diamond,gg7/diamond,CYBERBUGJR/Diamond,ramjothikumar/Diamond,szibis/Diamond,hamelg/Diamond
|
Add a way to run all the tests at once
|
#!/usr/bin/python
from common import *
from test_cpu_collector import *
from test_disk import *
from test_disk_space_collector import *
from test_disk_usage_collector import *
from test_filestat_collector import *
from test_load_average_collector import *
from test_memory_collector import *
from test_network_collector import *
from test_sockstat_collector import *
from test_tcp_collector import *
from test_vmstat_collector import *
################################################################################
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add a way to run all the tests at once<commit_after>
|
#!/usr/bin/python
from common import *
from test_cpu_collector import *
from test_disk import *
from test_disk_space_collector import *
from test_disk_usage_collector import *
from test_filestat_collector import *
from test_load_average_collector import *
from test_memory_collector import *
from test_network_collector import *
from test_sockstat_collector import *
from test_tcp_collector import *
from test_vmstat_collector import *
################################################################################
if __name__ == "__main__":
unittest.main()
|
Add a way to run all the tests at once#!/usr/bin/python
from common import *
from test_cpu_collector import *
from test_disk import *
from test_disk_space_collector import *
from test_disk_usage_collector import *
from test_filestat_collector import *
from test_load_average_collector import *
from test_memory_collector import *
from test_network_collector import *
from test_sockstat_collector import *
from test_tcp_collector import *
from test_vmstat_collector import *
################################################################################
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add a way to run all the tests at once<commit_after>#!/usr/bin/python
from common import *
from test_cpu_collector import *
from test_disk import *
from test_disk_space_collector import *
from test_disk_usage_collector import *
from test_filestat_collector import *
from test_load_average_collector import *
from test_memory_collector import *
from test_network_collector import *
from test_sockstat_collector import *
from test_tcp_collector import *
from test_vmstat_collector import *
################################################################################
if __name__ == "__main__":
unittest.main()
|
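As an aside, and purely as an assumption about the test layout rather than something this commit does: the same modules can usually be collected without the star imports by letting unittest discover them.

# Hypothetical discovery-based runner; assumes the test_*.py files sit next to this script.
import unittest

if __name__ == "__main__":
    suite = unittest.defaultTestLoader.discover(".", pattern="test_*.py")
    unittest.TextTestRunner(verbosity=1).run(suite)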
|
6eedc6c949a4c9abfe2cbe72370788f3955daa8e
|
scripts/set_turbines.py
|
scripts/set_turbines.py
|
#!/usr/bin/env python
"""
Generate fvOptions and topoSetDict for turbines
"""
from __future__ import division, print_function
import numpy as np
import os
import sys
import argparse
def make_fvOptions(args):
"""Create `fvOptions` for turbines from template."""
print("Generating fvOptions with:")
for k, v in args.items():
print(" " + k + ":", v)
with open("system/fvOptions.template") as f:
template = f.read()
with open("system/fvOptions", "w") as f:
f.write(template.format(**args))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--turbine1_active", default="on")
parser.add_argument("--turbine1_x", default=0)
parser.add_argument("--turbine1_tsr", default=6.0)
parser.add_argument("--turbine2_active", default="on")
parser.add_argument("--turbine2_x", default=2.682)
parser.add_argument("--turbine2_tsr", default=4.0)
args = parser.parse_args()
make_fvOptions(vars(args))
|
Add script to set fvOptions
|
Add script to set fvOptions
|
Python
|
mit
|
petebachant/NTNU-HAWT-turbinesFoam,petebachant/NTNU-HAWT-turbinesFoam,petebachant/NTNU-HAWT-turbinesFoam
|
Add script to set fvOptions
|
#!/usr/bin/env python
"""
Generate fvOptions and topoSetDict for turbines
"""
from __future__ import division, print_function
import numpy as np
import os
import sys
import argparse
def make_fvOptions(args):
"""Create `fvOptions` for turbines from template."""
print("Generating fvOptions with:")
for k, v in args.items():
print(" " + k + ":", v)
with open("system/fvOptions.template") as f:
template = f.read()
with open("system/fvOptions", "w") as f:
f.write(template.format(**args))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--turbine1_active", default="on")
parser.add_argument("--turbine1_x", default=0)
parser.add_argument("--turbine1_tsr", default=6.0)
parser.add_argument("--turbine2_active", default="on")
parser.add_argument("--turbine2_x", default=2.682)
parser.add_argument("--turbine2_tsr", default=4.0)
args = parser.parse_args()
make_fvOptions(vars(args))
|
<commit_before><commit_msg>Add script to set fvOptions<commit_after>
|
#!/usr/bin/env python
"""
Generate fvOptions and topoSetDict for turbines
"""
from __future__ import division, print_function
import numpy as np
import os
import sys
import argparse
def make_fvOptions(args):
"""Create `fvOptions` for turbines from template."""
print("Generating fvOptions with:")
for k, v in args.items():
print(" " + k + ":", v)
with open("system/fvOptions.template") as f:
template = f.read()
with open("system/fvOptions", "w") as f:
f.write(template.format(**args))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--turbine1_active", default="on")
parser.add_argument("--turbine1_x", default=0)
parser.add_argument("--turbine1_tsr", default=6.0)
parser.add_argument("--turbine2_active", default="on")
parser.add_argument("--turbine2_x", default=2.682)
parser.add_argument("--turbine2_tsr", default=4.0)
args = parser.parse_args()
make_fvOptions(vars(args))
|
Add script to set fvOptions#!/usr/bin/env python
"""
Generate fvOptions and topoSetDict for turbines
"""
from __future__ import division, print_function
import numpy as np
import os
import sys
import argparse
def make_fvOptions(args):
"""Create `fvOptions` for turbines from template."""
print("Generating fvOptions with:")
for k, v in args.items():
print(" " + k + ":", v)
with open("system/fvOptions.template") as f:
template = f.read()
with open("system/fvOptions", "w") as f:
f.write(template.format(**args))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--turbine1_active", default="on")
parser.add_argument("--turbine1_x", default=0)
parser.add_argument("--turbine1_tsr", default=6.0)
parser.add_argument("--turbine2_active", default="on")
parser.add_argument("--turbine2_x", default=2.682)
parser.add_argument("--turbine2_tsr", default=4.0)
args = parser.parse_args()
make_fvOptions(vars(args))
|
<commit_before><commit_msg>Add script to set fvOptions<commit_after>#!/usr/bin/env python
"""
Generate fvOptions and topoSetDict for turbines
"""
from __future__ import division, print_function
import numpy as np
import os
import sys
import argparse
def make_fvOptions(args):
"""Create `fvOptions` for turbines from template."""
print("Generating fvOptions with:")
for k, v in args.items():
print(" " + k + ":", v)
with open("system/fvOptions.template") as f:
template = f.read()
with open("system/fvOptions", "w") as f:
f.write(template.format(**args))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--turbine1_active", default="on")
parser.add_argument("--turbine1_x", default=0)
parser.add_argument("--turbine1_tsr", default=6.0)
parser.add_argument("--turbine2_active", default="on")
parser.add_argument("--turbine2_x", default=2.682)
parser.add_argument("--turbine2_tsr", default=4.0)
args = parser.parse_args()
make_fvOptions(vars(args))
|
|
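As a rough illustration of what make_fvOptions above does, here is a minimal sketch of the str.format substitution; the template body is an assumption, since the real system/fvOptions.template in the case directory is not part of this record.

# Hypothetical template fragment; the actual fvOptions.template defines the
# full turbinesFoam actuator-line dictionaries. Doubled braces stay literal.
template = (
    "turbine1\n"
    "{{\n"
    "    active          {turbine1_active};\n"
    "    tipSpeedRatio   {turbine1_tsr};\n"
    "    originX         {turbine1_x};\n"
    "}}\n"
)
args = {"turbine1_active": "on", "turbine1_x": 0, "turbine1_tsr": 6.0}
print(template.format(**args))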
917a5e4385d7ec58fe6e867d581bfde0a8cfe174
|
tools/pip-scan.py
|
tools/pip-scan.py
|
import os
import sys
import re
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir,
'anvil',
'__init__.py')):
sys.path.insert(0, possible_topdir)
from anvil import shell as sh
from anvil.packaging.helpers import pip_helper
from anvil import utils
def main():
if len(sys.argv) < 3:
print("%s distro_yaml root_dir ..." % sys.argv[0])
return 1
root_dirs = sys.argv[2:]
requires_files = []
for d in root_dirs:
all_contents = sh.listdir(d, recursive=True, files_only=True)
requires_files = [sh.abspth(f) for f in all_contents
if re.search("(test|pip)[-]requires", f, re.I)]
requires_files = sorted(list(set(requires_files)))
requirements = []
for fn in requires_files:
requirements.extend(pip_helper.parse_requirements(sh.load_file(fn)))
requirements = set(requirements)
yaml_fn = sh.abspth(sys.argv[1])
distro_yaml = utils.load_yaml(yaml_fn)
print("Comparing pips/pip2pkgs in %s to those found in %s" % (sys.argv[1], requires_files))
components = distro_yaml.get('components', {})
all_known_names = []
for (_c, details) in components.items():
pip2pkgs = details.get('pip_to_package', [])
pips = details.get('pips', [])
for item in pip2pkgs:
all_known_names.append(item['name'].lower().strip())
for item in pips:
all_known_names.append(item['name'].lower().strip())
all_known_names = sorted(list(set(all_known_names)))
not_needed = []
for n in all_known_names:
if n not in requirements:
not_needed.append(n)
if not_needed:
print("The following distro yaml mappings may not be needed:")
for n in sorted(not_needed):
print(" + %s" % (n))
not_found = []
for n in requirements:
name = n.key.lower().strip()
if name not in all_known_names:
not_found.append(name)
if not_found:
print("The following distro yaml mappings may be required but where not found:")
for n in sorted(not_found):
print(" + %s" % (n))
return len(not_found) + len(not_needed)
if __name__ == "__main__":
sys.exit(main())
|
Add a pip scanning tool that can compare what's in a distro yaml against what source pip/test-requires may desire
|
Add a pip scanning tool that can compare what's in a distro yaml against what source pip/test-requires may desire
|
Python
|
apache-2.0
|
stackforge/anvil,mc2014/anvil,mc2014/anvil,stackforge/anvil
|
Add a pip scanning tool that can compare what's in a distro yaml against what source pip/test-requires may desire
|
import os
import sys
import re
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir,
'anvil',
'__init__.py')):
sys.path.insert(0, possible_topdir)
from anvil import shell as sh
from anvil.packaging.helpers import pip_helper
from anvil import utils
def main():
if len(sys.argv) < 3:
print("%s distro_yaml root_dir ..." % sys.argv[0])
return 1
root_dirs = sys.argv[2:]
requires_files = []
for d in root_dirs:
all_contents = sh.listdir(d, recursive=True, files_only=True)
requires_files = [sh.abspth(f) for f in all_contents
if re.search("(test|pip)[-]requires", f, re.I)]
requires_files = sorted(list(set(requires_files)))
requirements = []
for fn in requires_files:
requirements.extend(pip_helper.parse_requirements(sh.load_file(fn)))
requirements = set(requirements)
yaml_fn = sh.abspth(sys.argv[1])
distro_yaml = utils.load_yaml(yaml_fn)
print("Comparing pips/pip2pkgs in %s to those found in %s" % (sys.argv[1], requires_files))
components = distro_yaml.get('components', {})
all_known_names = []
for (_c, details) in components.items():
pip2pkgs = details.get('pip_to_package', [])
pips = details.get('pips', [])
for item in pip2pkgs:
all_known_names.append(item['name'].lower().strip())
for item in pips:
all_known_names.append(item['name'].lower().strip())
all_known_names = sorted(list(set(all_known_names)))
not_needed = []
for n in all_known_names:
if n not in requirements:
not_needed.append(n)
if not_needed:
print("The following distro yaml mappings may not be needed:")
for n in sorted(not_needed):
print(" + %s" % (n))
not_found = []
for n in requirements:
name = n.key.lower().strip()
if name not in all_known_names:
not_found.append(name)
if not_found:
print("The following distro yaml mappings may be required but where not found:")
for n in sorted(not_found):
print(" + %s" % (n))
return len(not_found) + len(not_needed)
if __name__ == "__main__":
sys.exit(main())
|
<commit_before><commit_msg>Add a pip scanning tool that can compare what's in a distro yaml against what source pip/test-requires may desire<commit_after>
|
import os
import sys
import re
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir,
'anvil',
'__init__.py')):
sys.path.insert(0, possible_topdir)
from anvil import shell as sh
from anvil.packaging.helpers import pip_helper
from anvil import utils
def main():
if len(sys.argv) < 3:
print("%s distro_yaml root_dir ..." % sys.argv[0])
return 1
root_dirs = sys.argv[2:]
requires_files = []
for d in root_dirs:
all_contents = sh.listdir(d, recursive=True, files_only=True)
requires_files = [sh.abspth(f) for f in all_contents
if re.search("(test|pip)[-]requires", f, re.I)]
requires_files = sorted(list(set(requires_files)))
requirements = []
for fn in requires_files:
requirements.extend(pip_helper.parse_requirements(sh.load_file(fn)))
requirements = set(requirements)
yaml_fn = sh.abspth(sys.argv[1])
distro_yaml = utils.load_yaml(yaml_fn)
print("Comparing pips/pip2pkgs in %s to those found in %s" % (sys.argv[1], requires_files))
components = distro_yaml.get('components', {})
all_known_names = []
for (_c, details) in components.items():
pip2pkgs = details.get('pip_to_package', [])
pips = details.get('pips', [])
for item in pip2pkgs:
all_known_names.append(item['name'].lower().strip())
for item in pips:
all_known_names.append(item['name'].lower().strip())
all_known_names = sorted(list(set(all_known_names)))
not_needed = []
for n in all_known_names:
if n not in requirements:
not_needed.append(n)
if not_needed:
print("The following distro yaml mappings may not be needed:")
for n in sorted(not_needed):
print(" + %s" % (n))
not_found = []
for n in requirements:
name = n.key.lower().strip()
if name not in all_known_names:
not_found.append(name)
if not_found:
print("The following distro yaml mappings may be required but where not found:")
for n in sorted(not_found):
print(" + %s" % (n))
return len(not_found) + len(not_needed)
if __name__ == "__main__":
sys.exit(main())
|
Add a pip scanning tool that can compare what's in a distro yaml against what source pip/test-requires may desireimport os
import sys
import re
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir,
'anvil',
'__init__.py')):
sys.path.insert(0, possible_topdir)
from anvil import shell as sh
from anvil.packaging.helpers import pip_helper
from anvil import utils
def main():
if len(sys.argv) < 3:
print("%s distro_yaml root_dir ..." % sys.argv[0])
return 1
root_dirs = sys.argv[2:]
requires_files = []
for d in root_dirs:
all_contents = sh.listdir(d, recursive=True, files_only=True)
requires_files = [sh.abspth(f) for f in all_contents
if re.search("(test|pip)[-]requires", f, re.I)]
requires_files = sorted(list(set(requires_files)))
requirements = []
for fn in requires_files:
requirements.extend(pip_helper.parse_requirements(sh.load_file(fn)))
requirements = set(requirements)
yaml_fn = sh.abspth(sys.argv[1])
distro_yaml = utils.load_yaml(yaml_fn)
print("Comparing pips/pip2pkgs in %s to those found in %s" % (sys.argv[1], requires_files))
components = distro_yaml.get('components', {})
all_known_names = []
for (_c, details) in components.items():
pip2pkgs = details.get('pip_to_package', [])
pips = details.get('pips', [])
for item in pip2pkgs:
all_known_names.append(item['name'].lower().strip())
for item in pips:
all_known_names.append(item['name'].lower().strip())
all_known_names = sorted(list(set(all_known_names)))
not_needed = []
for n in all_known_names:
if n not in requirements:
not_needed.append(n)
if not_needed:
print("The following distro yaml mappings may not be needed:")
for n in sorted(not_needed):
print(" + %s" % (n))
not_found = []
for n in requirements:
name = n.key.lower().strip()
if name not in all_known_names:
not_found.append(name)
if not_found:
print("The following distro yaml mappings may be required but where not found:")
for n in sorted(not_found):
print(" + %s" % (n))
return len(not_found) + len(not_needed)
if __name__ == "__main__":
sys.exit(main())
|
<commit_before><commit_msg>Add a pip scanning tool that can compare what's in a distro yaml against what source pip/test-requires may desire<commit_after>import os
import sys
import re
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir,
'anvil',
'__init__.py')):
sys.path.insert(0, possible_topdir)
from anvil import shell as sh
from anvil.packaging.helpers import pip_helper
from anvil import utils
def main():
if len(sys.argv) < 3:
print("%s distro_yaml root_dir ..." % sys.argv[0])
return 1
root_dirs = sys.argv[2:]
requires_files = []
for d in root_dirs:
all_contents = sh.listdir(d, recursive=True, files_only=True)
requires_files = [sh.abspth(f) for f in all_contents
if re.search("(test|pip)[-]requires", f, re.I)]
requires_files = sorted(list(set(requires_files)))
requirements = []
for fn in requires_files:
requirements.extend(pip_helper.parse_requirements(sh.load_file(fn)))
requirements = set(requirements)
yaml_fn = sh.abspth(sys.argv[1])
distro_yaml = utils.load_yaml(yaml_fn)
print("Comparing pips/pip2pkgs in %s to those found in %s" % (sys.argv[1], requires_files))
components = distro_yaml.get('components', {})
all_known_names = []
for (_c, details) in components.items():
pip2pkgs = details.get('pip_to_package', [])
pips = details.get('pips', [])
for item in pip2pkgs:
all_known_names.append(item['name'].lower().strip())
for item in pips:
all_known_names.append(item['name'].lower().strip())
all_known_names = sorted(list(set(all_known_names)))
not_needed = []
for n in all_known_names:
if n not in requirements:
not_needed.append(n)
if not_needed:
print("The following distro yaml mappings may not be needed:")
for n in sorted(not_needed):
print(" + %s" % (n))
not_found = []
for n in requirements:
name = n.key.lower().strip()
if name not in all_known_names:
not_found.append(name)
if not_found:
print("The following distro yaml mappings may be required but where not found:")
for n in sorted(not_found):
print(" + %s" % (n))
return len(not_found) + len(not_needed)
if __name__ == "__main__":
sys.exit(main())
|
|
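The heart of the tool above is a two-way set difference between requirement names and the yaml's pip mappings. A standalone sketch with made-up package names, since anvil's pip_helper and shell wrappers are not available outside that tree:

# Standalone sketch of the comparison logic; package names are invented and the
# parsing normally done by anvil's pip_helper/utils is skipped here.
required = {"pyyaml", "netifaces", "iso8601"}   # names parsed from pip/test-requires files
known = {"pyyaml", "netifaces", "cheetah"}      # names listed under pips/pip_to_package
not_needed = sorted(known - required)           # yaml mappings with no matching requirement
not_found = sorted(required - known)            # requirements with no yaml mapping
print("possibly unneeded mappings:", not_needed)
print("missing mappings:", not_found)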
dd296e1de08e512d5f308736e9f5767c7034d457
|
support/compute-powers.py
|
support/compute-powers.py
|
#!/usr/bin/env python
# Compute 10 ** exp with exp in the range [min_exponent, max_exponent] and print
# normalized (with most-significant bit equal to 1) significands in hexadecimal.
from __future__ import print_function
min_exponent = -348
max_exponent = 340
step = 8
significand_size = 64
for exp in range(min_exponent, max_exponent + 1, step):
n = 10 ** exp if exp >= 0 else 2 ** 2000 / 10 ** -exp
k = significand_size + 1
# Convert to binary and round.
n = (int('{:0<{}b}'.format(n, k)[:k], 2) + 1) / 2
print('{:0<#16x}ull'.format(n))
|
Add a script to compute powers of 10
|
Add a script to compute powers of 10
|
Python
|
bsd-2-clause
|
alabuzhev/fmt,alabuzhev/fmt,alabuzhev/fmt,cppformat/cppformat,cppformat/cppformat,cppformat/cppformat
|
Add a script to compute powers of 10
|
#!/usr/bin/env python
# Compute 10 ** exp with exp in the range [min_exponent, max_exponent] and print
# normalized (with most-significant bit equal to 1) significands in hexadecimal.
from __future__ import print_function
min_exponent = -348
max_exponent = 340
step = 8
significand_size = 64
for exp in range(min_exponent, max_exponent + 1, step):
n = 10 ** exp if exp >= 0 else 2 ** 2000 / 10 ** -exp
k = significand_size + 1
# Convert to binary and round.
n = (int('{:0<{}b}'.format(n, k)[:k], 2) + 1) / 2
print('{:0<#16x}ull'.format(n))
|
<commit_before><commit_msg>Add a script to compute powers of 10<commit_after>
|
#!/usr/bin/env python
# Compute 10 ** exp with exp in the range [min_exponent, max_exponent] and print
# normalized (with most-significant bit equal to 1) significands in hexadecimal.
from __future__ import print_function
min_exponent = -348
max_exponent = 340
step = 8
significand_size = 64
for exp in range(min_exponent, max_exponent + 1, step):
n = 10 ** exp if exp >= 0 else 2 ** 2000 / 10 ** -exp
k = significand_size + 1
# Convert to binary and round.
n = (int('{:0<{}b}'.format(n, k)[:k], 2) + 1) / 2
print('{:0<#16x}ull'.format(n))
|
Add a script to compute powers of 10#!/usr/bin/env python
# Compute 10 ** exp with exp in the range [min_exponent, max_exponent] and print
# normalized (with most-significant bit equal to 1) significands in hexadecimal.
from __future__ import print_function
min_exponent = -348
max_exponent = 340
step = 8
significand_size = 64
for exp in range(min_exponent, max_exponent + 1, step):
n = 10 ** exp if exp >= 0 else 2 ** 2000 / 10 ** -exp
k = significand_size + 1
# Convert to binary and round.
n = (int('{:0<{}b}'.format(n, k)[:k], 2) + 1) / 2
print('{:0<#16x}ull'.format(n))
|
<commit_before><commit_msg>Add a script to compute powers of 10<commit_after>#!/usr/bin/env python
# Compute 10 ** exp with exp in the range [min_exponent, max_exponent] and print
# normalized (with most-significant bit equal to 1) significands in hexadecimal.
from __future__ import print_function
min_exponent = -348
max_exponent = 340
step = 8
significand_size = 64
for exp in range(min_exponent, max_exponent + 1, step):
n = 10 ** exp if exp >= 0 else 2 ** 2000 / 10 ** -exp
k = significand_size + 1
# Convert to binary and round.
n = (int('{:0<{}b}'.format(n, k)[:k], 2) + 1) / 2
print('{:0<#16x}ull'.format(n))
|
|
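To make the string-formatting trick above concrete, a worked example for a single exponent (written with explicit floor division so it also runs on Python 3; the script itself relies on Python 2 integer division):

significand_size = 64
n = 10 ** 8                               # the value for exp = 8
k = significand_size + 1                  # keep one extra bit for rounding
top_bits = '{:0<{}b}'.format(n, k)[:k]    # leading k bits, zero-padded on the right
rounded = (int(top_bits, 2) + 1) // 2     # drop the extra bit, rounding halves up
print('{:0<#16x}ull'.format(rounded))     # 64-bit normalized significand in hex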
02b661fda99e5fb44f07fd139f138b2534454b2e
|
tests/test_sorting_and_searching/test_counting_sort.py
|
tests/test_sorting_and_searching/test_counting_sort.py
|
import unittest
from aids.sorting_and_searching.counting_sort import counting_sort
class CountingSortTestCase(unittest.TestCase):
'''
Unit tests for counting sort
'''
def setUp(self):
self.example = [4,3,2,1,4,3,2,4,3,4]
def test_selection_sort(self):
self.assertEqual(counting_sort(self.example), [1, 2, 2, 3, 3, 3, 4, 4, 4, 4])
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
|
Add unit tests for counting sort
|
Add unit tests for counting sort
|
Python
|
mit
|
ueg1990/aids
|
Add unit tests for counting sort
|
import unittest
from aids.sorting_and_searching.counting_sort import counting_sort
class CountingSortTestCase(unittest.TestCase):
'''
Unit tests for counting sort
'''
def setUp(self):
self.example = [4,3,2,1,4,3,2,4,3,4]
def test_selection_sort(self):
self.assertEqual(counting_sort(self.example), [1, 2, 2, 3, 3, 3, 4, 4, 4, 4])
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add unit tests for counting sort<commit_after>
|
import unittest
from aids.sorting_and_searching.counting_sort import counting_sort
class CountingSortTestCase(unittest.TestCase):
'''
Unit tests for counting sort
'''
def setUp(self):
self.example = [4,3,2,1,4,3,2,4,3,4]
def test_selection_sort(self):
self.assertEqual(counting_sort(self.example), [1, 2, 2, 3, 3, 3, 4, 4, 4, 4])
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
|
Add unit tests for counting sortimport unittest
from aids.sorting_and_searching.counting_sort import counting_sort
class CountingSortTestCase(unittest.TestCase):
'''
Unit tests for counting sort
'''
def setUp(self):
self.example = [4,3,2,1,4,3,2,4,3,4]
def test_selection_sort(self):
self.assertEqual(counting_sort(self.example), [1, 2, 2, 3, 3, 3, 4, 4, 4, 4])
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add unit tests for counting sort<commit_after>import unittest
from aids.sorting_and_searching.counting_sort import counting_sort
class CountingSortTestCase(unittest.TestCase):
'''
Unit tests for counting sort
'''
def setUp(self):
self.example = [4,3,2,1,4,3,2,4,3,4]
def test_selection_sort(self):
self.assertEqual(counting_sort(self.example), [1, 2, 2, 3, 3, 3, 4, 4, 4, 4])
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
|
|
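For reference, a minimal counting_sort that the test above would accept; the actual implementation in aids.sorting_and_searching.counting_sort is not shown in this record and may differ.

def counting_sort(items):
    # Count occurrences of each value between min and max, then replay them in order.
    if not items:
        return []
    low, high = min(items), max(items)
    counts = [0] * (high - low + 1)
    for x in items:
        counts[x - low] += 1
    result = []
    for offset, count in enumerate(counts):
        result.extend([low + offset] * count)
    return result

assert counting_sort([4, 3, 2, 1, 4, 3, 2, 4, 3, 4]) == [1, 2, 2, 3, 3, 3, 4, 4, 4, 4]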
1b639f98af0070e9b5ba0bc12d536be22a219402
|
chrome/PRESUBMIT.py
|
chrome/PRESUBMIT.py
|
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Makes sure that the chrome/ code is cpplint clean."""
INCLUDE_CPP_FILES_ONLY = (
r'.*\.cc$', r'.*\.h$'
)
EXCLUDE = (
# Objective C confuses everything.
r'.*cocoa.*',
r'.*_mac\.(cc|h)$',
r'.*_mac_.*',
# All the messages files do weird multiple include trickery
r'.*_messages_internal\.h$',
r'render_messages.h$',
# Autogenerated window resources files are off limits
r'.*resource.h$',
# GTK macros in C-ish header code cause false positives
r'gtk_.*\.h$',
# Header trickery
r'.*-inl\.h$',
# Templates
r'sigslotrepeater\.h$',
# GCC attribute trickery
r'sel_main\.cc$',
# Mozilla code
r'mork_reader\.h$',
r'mork_reader\.cc$',
r'nss_decryptor_linux\.cc$',
# Has safe printf usage that cpplint complains about
r'safe_browsing_util\.cc$',
# Too much math on one line?
r'bloom_filter\.cc$',
# Bogus ifdef tricks
r'renderer_webkitclient_impl\.cc$',
r'temp_scaffolding_stubs\.h$',
# Lines > 100 chars
r'gcapi\.cc$',
)
def CheckChangeOnUpload(input_api, output_api):
results = []
black_list = input_api.DEFAULT_BLACK_LIST + EXCLUDE
sources = lambda x: input_api.FilterSourceFile(
x, white_list=INCLUDE_CPP_FILES_ONLY, black_list=black_list)
results.extend(input_api.canned_checks.CheckChangeLintsClean(
input_api, output_api, sources))
return results
|
Call the new presubmit checks from chrome/ code, with a blacklist.
|
Call the new presubmit checks from chrome/ code, with a blacklist.
Review URL: http://codereview.chromium.org/400014
git-svn-id: dd90618784b6a4b323ea0c23a071cb1c9e6f2ac7@32190 4ff67af0-8c30-449e-8e8b-ad334ec8d88c
|
Python
|
bsd-3-clause
|
wistoch/meego-app-browser,wistoch/meego-app-browser,wistoch/meego-app-browser,wistoch/meego-app-browser,wistoch/meego-app-browser,wistoch/meego-app-browser,wistoch/meego-app-browser,wistoch/meego-app-browser,wistoch/meego-app-browser,wistoch/meego-app-browser
|
Call the new presubmit checks from chrome/ code, with a blacklist.
Review URL: http://codereview.chromium.org/400014
git-svn-id: dd90618784b6a4b323ea0c23a071cb1c9e6f2ac7@32190 4ff67af0-8c30-449e-8e8b-ad334ec8d88c
|
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Makes sure that the chrome/ code is cpplint clean."""
INCLUDE_CPP_FILES_ONLY = (
r'.*\.cc$', r'.*\.h$'
)
EXCLUDE = (
# Objective C confuses everything.
r'.*cocoa.*',
r'.*_mac\.(cc|h)$',
r'.*_mac_.*',
# All the messages files do weird multiple include trickery
r'.*_messages_internal\.h$',
r'render_messages.h$',
# Autogenerated window resources files are off limits
r'.*resource.h$',
# GTK macros in C-ish header code cause false positives
r'gtk_.*\.h$',
# Header trickery
r'.*-inl\.h$',
# Templates
r'sigslotrepeater\.h$',
# GCC attribute trickery
r'sel_main\.cc$',
# Mozilla code
r'mork_reader\.h$',
r'mork_reader\.cc$',
r'nss_decryptor_linux\.cc$',
# Has safe printf usage that cpplint complains about
r'safe_browsing_util\.cc$',
# Too much math on one line?
r'bloom_filter\.cc$',
# Bogus ifdef tricks
r'renderer_webkitclient_impl\.cc$',
r'temp_scaffolding_stubs\.h$',
# Lines > 100 chars
r'gcapi\.cc$',
)
def CheckChangeOnUpload(input_api, output_api):
results = []
black_list = input_api.DEFAULT_BLACK_LIST + EXCLUDE
sources = lambda x: input_api.FilterSourceFile(
x, white_list=INCLUDE_CPP_FILES_ONLY, black_list=black_list)
results.extend(input_api.canned_checks.CheckChangeLintsClean(
input_api, output_api, sources))
return results
|
<commit_before><commit_msg>Call the new presubmit checks from chrome/ code, with a blacklist.
Review URL: http://codereview.chromium.org/400014
git-svn-id: dd90618784b6a4b323ea0c23a071cb1c9e6f2ac7@32190 4ff67af0-8c30-449e-8e8b-ad334ec8d88c<commit_after>
|
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Makes sure that the chrome/ code is cpplint clean."""
INCLUDE_CPP_FILES_ONLY = (
r'.*\.cc$', r'.*\.h$'
)
EXCLUDE = (
# Objective C confuses everything.
r'.*cocoa.*',
r'.*_mac\.(cc|h)$',
r'.*_mac_.*',
# All the messages files do weird multiple include trickery
r'.*_messages_internal\.h$',
r'render_messages.h$',
# Autogenerated window resources files are off limits
r'.*resource.h$',
# GTK macros in C-ish header code cause false positives
r'gtk_.*\.h$',
# Header trickery
r'.*-inl\.h$',
# Templates
r'sigslotrepeater\.h$',
# GCC attribute trickery
r'sel_main\.cc$',
# Mozilla code
r'mork_reader\.h$',
r'mork_reader\.cc$',
r'nss_decryptor_linux\.cc$',
# Has safe printf usage that cpplint complains about
r'safe_browsing_util\.cc$',
# Too much math on one line?
r'bloom_filter\.cc$',
# Bogus ifdef tricks
r'renderer_webkitclient_impl\.cc$',
r'temp_scaffolding_stubs\.h$',
# Lines > 100 chars
r'gcapi\.cc$',
)
def CheckChangeOnUpload(input_api, output_api):
results = []
black_list = input_api.DEFAULT_BLACK_LIST + EXCLUDE
sources = lambda x: input_api.FilterSourceFile(
x, white_list=INCLUDE_CPP_FILES_ONLY, black_list=black_list)
results.extend(input_api.canned_checks.CheckChangeLintsClean(
input_api, output_api, sources))
return results
|
Call the new presubmit checks from chrome/ code, with a blacklist.
Review URL: http://codereview.chromium.org/400014
git-svn-id: dd90618784b6a4b323ea0c23a071cb1c9e6f2ac7@32190 4ff67af0-8c30-449e-8e8b-ad334ec8d88c# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Makes sure that the chrome/ code is cpplint clean."""
INCLUDE_CPP_FILES_ONLY = (
r'.*\.cc$', r'.*\.h$'
)
EXCLUDE = (
# Objective C confuses everything.
r'.*cocoa.*',
r'.*_mac\.(cc|h)$',
r'.*_mac_.*',
# All the messages files do weird multiple include trickery
r'.*_messages_internal\.h$',
r'render_messages.h$',
# Autogenerated window resources files are off limits
r'.*resource.h$',
# GTK macros in C-ish header code cause false positives
r'gtk_.*\.h$',
# Header trickery
r'.*-inl\.h$',
# Templates
r'sigslotrepeater\.h$',
# GCC attribute trickery
r'sel_main\.cc$',
# Mozilla code
r'mork_reader\.h$',
r'mork_reader\.cc$',
r'nss_decryptor_linux\.cc$',
# Has safe printf usage that cpplint complains about
r'safe_browsing_util\.cc$',
# Too much math on one line?
r'bloom_filter\.cc$',
# Bogus ifdef tricks
r'renderer_webkitclient_impl\.cc$',
r'temp_scaffolding_stubs\.h$',
# Lines > 100 chars
r'gcapi\.cc$',
)
def CheckChangeOnUpload(input_api, output_api):
results = []
black_list = input_api.DEFAULT_BLACK_LIST + EXCLUDE
sources = lambda x: input_api.FilterSourceFile(
x, white_list=INCLUDE_CPP_FILES_ONLY, black_list=black_list)
results.extend(input_api.canned_checks.CheckChangeLintsClean(
input_api, output_api, sources))
return results
|
<commit_before><commit_msg>Call the new presubmit checks from chrome/ code, with a blacklist.
Review URL: http://codereview.chromium.org/400014
git-svn-id: dd90618784b6a4b323ea0c23a071cb1c9e6f2ac7@32190 4ff67af0-8c30-449e-8e8b-ad334ec8d88c<commit_after># Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Makes sure that the chrome/ code is cpplint clean."""
INCLUDE_CPP_FILES_ONLY = (
r'.*\.cc$', r'.*\.h$'
)
EXCLUDE = (
# Objective C confuses everything.
r'.*cocoa.*',
r'.*_mac\.(cc|h)$',
r'.*_mac_.*',
# All the messages files do weird multiple include trickery
r'.*_messages_internal\.h$',
r'render_messages.h$',
# Autogenerated window resources files are off limits
r'.*resource.h$',
# GTK macros in C-ish header code cause false positives
r'gtk_.*\.h$',
# Header trickery
r'.*-inl\.h$',
# Templates
r'sigslotrepeater\.h$',
# GCC attribute trickery
r'sel_main\.cc$',
# Mozilla code
r'mork_reader\.h$',
r'mork_reader\.cc$',
r'nss_decryptor_linux\.cc$',
# Has safe printf usage that cpplint complains about
r'safe_browsing_util\.cc$',
# Too much math on one line?
r'bloom_filter\.cc$',
# Bogus ifdef tricks
r'renderer_webkitclient_impl\.cc$',
r'temp_scaffolding_stubs\.h$',
# Lines > 100 chars
r'gcapi\.cc$',
)
def CheckChangeOnUpload(input_api, output_api):
results = []
black_list = input_api.DEFAULT_BLACK_LIST + EXCLUDE
sources = lambda x: input_api.FilterSourceFile(
x, white_list=INCLUDE_CPP_FILES_ONLY, black_list=black_list)
results.extend(input_api.canned_checks.CheckChangeLintsClean(
input_api, output_api, sources))
return results
|
|
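A rough sketch of how the white_list/black_list regexes above decide which files get linted; the real input_api.FilterSourceFile lives in depot_tools and also folds in its DEFAULT_BLACK_LIST, so this is only an approximation.

import re

INCLUDE = (r'.*\.cc$', r'.*\.h$')                  # abbreviated from the tuples above
EXCLUDE_PATTERNS = (r'.*cocoa.*', r'gtk_.*\.h$')

def should_lint(path):
    if not any(re.search(p, path) for p in INCLUDE):
        return False
    return not any(re.search(p, path) for p in EXCLUDE_PATTERNS)

print(should_lint("browser/history/history.cc"))   # True: a .cc file with no exclusion
print(should_lint("browser/gtk_util.h"))           # False: matches the gtk_ exclusion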
de0cd54000878638ea88a48b4c8b7dcc6ecfe04d
|
pySDC/projects/PinTSimE/switch_estimator.py
|
pySDC/projects/PinTSimE/switch_estimator.py
|
import numpy as np
import scipy as sp
from pySDC.core.ConvergenceController import ConvergenceController
from pySDC.core.Lagrange import LagrangeApproximation
class SwitchEstimator(ConvergenceController):
"""
Method to estimate a discrete event (switch)
"""
def setup(self, controller, params, description):
self.switch_detected = False
return {'control_order': 100, **params}
def get_new_step_size(self, controller, S):
self.switch_detected = False # reset between steps
L = S.levels[0]
for m in range(len(L.u)):
if L.u[m][1] - L.prob.params.V_ref < 0:
m_guess = m - 1
self.switch_detected = True
break
if self.switch_detected:
t_interp = [L.time + L.dt * L.sweep.coll.nodes[m] for m in range(len(L.sweep.coll.nodes))]
vC = []
for m in range(1, len(L.u)):
vC.append(L.u[m][1])
#p = sp.interpolate.interp1d(t_interp, vC, 'cubic', bounds_error=False)
approx = LagrangeApproximation(t_interp, weightComputation='AUTO')
def switch_examiner(x):
"""
Routine to define root problem
"""
#return L.prob.params.V_ref - p(x)
return L.prob.params.V_ref - approx.getInterpolationMatrix(x).dot(vC)
t_switch, info, _, _ = sp.optimize.fsolve(switch_examiner, t_interp[m_guess], full_output=True)
self.t_switch = t_switch[0]
if L.time < self.t_switch < L.time + L.dt and not np.isclose(self.t_switch - L.time, L.dt, atol=1e-2):
print('Switch located at time: {}'.format(self.t_switch))
L.status.dt_new = self.t_switch - L.time
else:
self.switch_detected = False
def determine_restart(self, controller, S):
if self.switch_detected:
S.status.restart = True
S.status.force_done = True
super(SwitchEstimator, self).determine_restart(controller, S)
|
Change interpolation using TL's LagrangeApproximation
|
Change interpolation using TL's LagrangeApproximation
|
Python
|
bsd-2-clause
|
Parallel-in-Time/pySDC,Parallel-in-Time/pySDC
|
Change interpolation using TL's LagrangeApproximation
|
import numpy as np
import scipy as sp
from pySDC.core.ConvergenceController import ConvergenceController
from pySDC.core.Lagrange import LagrangeApproximation
class SwitchEstimator(ConvergenceController):
"""
Method to estimate a discrete event (switch)
"""
def setup(self, controller, params, description):
self.switch_detected = False
return {'control_order': 100, **params}
def get_new_step_size(self, controller, S):
self.switch_detected = False # reset between steps
L = S.levels[0]
for m in range(len(L.u)):
if L.u[m][1] - L.prob.params.V_ref < 0:
m_guess = m - 1
self.switch_detected = True
break
if self.switch_detected:
t_interp = [L.time + L.dt * L.sweep.coll.nodes[m] for m in range(len(L.sweep.coll.nodes))]
vC = []
for m in range(1, len(L.u)):
vC.append(L.u[m][1])
#p = sp.interpolate.interp1d(t_interp, vC, 'cubic', bounds_error=False)
approx = LagrangeApproximation(t_interp, weightComputation='AUTO')
def switch_examiner(x):
"""
Routine to define root problem
"""
#return L.prob.params.V_ref - p(x)
return L.prob.params.V_ref - approx.getInterpolationMatrix(x).dot(vC)
t_switch, info, _, _ = sp.optimize.fsolve(switch_examiner, t_interp[m_guess], full_output=True)
self.t_switch = t_switch[0]
if L.time < self.t_switch < L.time + L.dt and not np.isclose(self.t_switch - L.time, L.dt, atol=1e-2):
print('Switch located at time: {}'.format(self.t_switch))
L.status.dt_new = self.t_switch - L.time
else:
self.switch_detected = False
def determine_restart(self, controller, S):
if self.switch_detected:
S.status.restart = True
S.status.force_done = True
super(SwitchEstimator, self).determine_restart(controller, S)
|
<commit_before><commit_msg>Change interpolation using TL's LagrangeApproximation<commit_after>
|
import numpy as np
import scipy as sp
from pySDC.core.ConvergenceController import ConvergenceController
from pySDC.core.Lagrange import LagrangeApproximation
class SwitchEstimator(ConvergenceController):
"""
Method to estimate a discrete event (switch)
"""
def setup(self, controller, params, description):
self.switch_detected = False
return {'control_order': 100, **params}
def get_new_step_size(self, controller, S):
self.switch_detected = False # reset between steps
L = S.levels[0]
for m in range(len(L.u)):
if L.u[m][1] - L.prob.params.V_ref < 0:
m_guess = m - 1
self.switch_detected = True
break
if self.switch_detected:
t_interp = [L.time + L.dt * L.sweep.coll.nodes[m] for m in range(len(L.sweep.coll.nodes))]
vC = []
for m in range(1, len(L.u)):
vC.append(L.u[m][1])
#p = sp.interpolate.interp1d(t_interp, vC, 'cubic', bounds_error=False)
approx = LagrangeApproximation(t_interp, weightComputation='AUTO')
def switch_examiner(x):
"""
Routine to define root problem
"""
#return L.prob.params.V_ref - p(x)
return L.prob.params.V_ref - approx.getInterpolationMatrix(x).dot(vC)
t_switch, info, _, _ = sp.optimize.fsolve(switch_examiner, t_interp[m_guess], full_output=True)
self.t_switch = t_switch[0]
if L.time < self.t_switch < L.time + L.dt and not np.isclose(self.t_switch - L.time, L.dt, atol=1e-2):
print('Switch located at time: {}'.format(self.t_switch))
L.status.dt_new = self.t_switch - L.time
else:
self.switch_detected = False
def determine_restart(self, controller, S):
if self.switch_detected:
S.status.restart = True
S.status.force_done = True
super(SwitchEstimator, self).determine_restart(controller, S)
|
Change interpolation using TL's LagrangeApproximationimport numpy as np
import scipy as sp
from pySDC.core.ConvergenceController import ConvergenceController
from pySDC.core.Lagrange import LagrangeApproximation
class SwitchEstimator(ConvergenceController):
"""
Method to estimate a discrete event (switch)
"""
def setup(self, controller, params, description):
self.switch_detected = False
return {'control_order': 100, **params}
def get_new_step_size(self, controller, S):
self.switch_detected = False # reset between steps
L = S.levels[0]
for m in range(len(L.u)):
if L.u[m][1] - L.prob.params.V_ref < 0:
m_guess = m - 1
self.switch_detected = True
break
if self.switch_detected:
t_interp = [L.time + L.dt * L.sweep.coll.nodes[m] for m in range(len(L.sweep.coll.nodes))]
vC = []
for m in range(1, len(L.u)):
vC.append(L.u[m][1])
#p = sp.interpolate.interp1d(t_interp, vC, 'cubic', bounds_error=False)
approx = LagrangeApproximation(t_interp, weightComputation='AUTO')
def switch_examiner(x):
"""
Routine to define root problem
"""
#return L.prob.params.V_ref - p(x)
return L.prob.params.V_ref - approx.getInterpolationMatrix(x).dot(vC)
t_switch, info, _, _ = sp.optimize.fsolve(switch_examiner, t_interp[m_guess], full_output=True)
self.t_switch = t_switch[0]
if L.time < self.t_switch < L.time + L.dt and not np.isclose(self.t_switch - L.time, L.dt, atol=1e-2):
print('Switch located at time: {}'.format(self.t_switch))
L.status.dt_new = self.t_switch - L.time
else:
self.switch_detected = False
def determine_restart(self, controller, S):
if self.switch_detected:
S.status.restart = True
S.status.force_done = True
super(SwitchEstimator, self).determine_restart(controller, S)
|
<commit_before><commit_msg>Change interpolation using TL's LagrangeApproximation<commit_after>import numpy as np
import scipy as sp
from pySDC.core.ConvergenceController import ConvergenceController
from pySDC.core.Lagrange import LagrangeApproximation
class SwitchEstimator(ConvergenceController):
"""
Method to estimate a discrete event (switch)
"""
def setup(self, controller, params, description):
self.switch_detected = False
return {'control_order': 100, **params}
def get_new_step_size(self, controller, S):
self.switch_detected = False # reset between steps
L = S.levels[0]
for m in range(len(L.u)):
if L.u[m][1] - L.prob.params.V_ref < 0:
m_guess = m - 1
self.switch_detected = True
break
if self.switch_detected:
t_interp = [L.time + L.dt * L.sweep.coll.nodes[m] for m in range(len(L.sweep.coll.nodes))]
vC = []
for m in range(1, len(L.u)):
vC.append(L.u[m][1])
#p = sp.interpolate.interp1d(t_interp, vC, 'cubic', bounds_error=False)
approx = LagrangeApproximation(t_interp, weightComputation='AUTO')
def switch_examiner(x):
"""
Routine to define root problem
"""
#return L.prob.params.V_ref - p(x)
return L.prob.params.V_ref - approx.getInterpolationMatrix(x).dot(vC)
t_switch, info, _, _ = sp.optimize.fsolve(switch_examiner, t_interp[m_guess], full_output=True)
self.t_switch = t_switch[0]
if L.time < self.t_switch < L.time + L.dt and not np.isclose(self.t_switch - L.time, L.dt, atol=1e-2):
print('Switch located at time: {}'.format(self.t_switch))
L.status.dt_new = self.t_switch - L.time
else:
self.switch_detected = False
def determine_restart(self, controller, S):
if self.switch_detected:
S.status.restart = True
S.status.force_done = True
super(SwitchEstimator, self).determine_restart(controller, S)
|
|
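The idea behind the change above, in a self-contained form: interpolate the collocation values of the capacitor voltage, then root-find the crossing with V_ref. The numbers are invented and SciPy's barycentric interpolator stands in for pySDC's LagrangeApproximation.

import numpy as np
import scipy.optimize as opt
from scipy.interpolate import BarycentricInterpolator

t_nodes = np.array([0.1, 0.3, 0.7, 0.9])   # collocation times within one step (made up)
v_c = np.array([1.20, 1.05, 0.92, 0.85])   # capacitor voltage at those times (made up)
V_ref = 1.0

p = BarycentricInterpolator(t_nodes, v_c)
t_switch = opt.fsolve(lambda t: V_ref - p(t), x0=0.3)[0]
print("estimated switch time:", t_switch)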
09647b8726e95c84cee4ed4e35e14c514b145577
|
Utils/py/BallDetection/Evaluation/evaluate_image_log.py
|
Utils/py/BallDetection/Evaluation/evaluate_image_log.py
|
import os
import cppyy
def get_naoth_dir():
script_path = os.path.abspath(__file__)
return os.path.abspath(os.path.join(script_path, "../../../../../"))
def init_simulator():
naoth_dir = get_naoth_dir()
    # load shared library: all dependent libraries should be found automatically
cppyy.load_library(os.path.join(naoth_dir, "NaoTHSoccer/dist/Native/libdummysimulator.so"))
# add relevant include paths to allow mapping our code
cppyy.add_include_path(os.path.join(naoth_dir, "Framework/Commons/Source"))
cppyy.add_include_path(os.path.join(naoth_dir, "NaoTHSoccer/Source"))
# include platform
cppyy.include(os.path.join(naoth_dir, "Framework/Commons/Source/PlatformInterface/Platform.h"))
cppyy.include(os.path.join(naoth_dir, "Framework/Platforms/Source/DummySimulator/DummySimulator.h"))
# change working directory so that the configuration is found
os.chdir(os.path.join(naoth_dir, "NaoTHSoccer"))
# start dummy simulator
sim = cppyy.gbl.DummySimulator(False, False, 5401)
cppyy.gbl.naoth.Platform.getInstance().init(sim)
if __name__ == "__main__":
init_simulator()
|
Add initial code for whole image evaluation script
|
Add initial code for whole image evaluation script
|
Python
|
apache-2.0
|
BerlinUnited/NaoTH,BerlinUnited/NaoTH,BerlinUnited/NaoTH,BerlinUnited/NaoTH,BerlinUnited/NaoTH,BerlinUnited/NaoTH,BerlinUnited/NaoTH
|
Add initial code for whole image evaluation script
|
import os
import cppyy
def get_naoth_dir():
script_path = os.path.abspath(__file__)
return os.path.abspath(os.path.join(script_path, "../../../../../"))
def init_simulator():
naoth_dir = get_naoth_dir()
    # load shared library: all dependent libraries should be found automatically
cppyy.load_library(os.path.join(naoth_dir, "NaoTHSoccer/dist/Native/libdummysimulator.so"))
# add relevant include paths to allow mapping our code
cppyy.add_include_path(os.path.join(naoth_dir, "Framework/Commons/Source"))
cppyy.add_include_path(os.path.join(naoth_dir, "NaoTHSoccer/Source"))
# include platform
cppyy.include(os.path.join(naoth_dir, "Framework/Commons/Source/PlatformInterface/Platform.h"))
cppyy.include(os.path.join(naoth_dir, "Framework/Platforms/Source/DummySimulator/DummySimulator.h"))
# change working directory so that the configuration is found
os.chdir(os.path.join(naoth_dir, "NaoTHSoccer"))
# start dummy simulator
sim = cppyy.gbl.DummySimulator(False, False, 5401)
cppyy.gbl.naoth.Platform.getInstance().init(sim)
if __name__ == "__main__":
init_simulator()
|
<commit_before><commit_msg>Add initial code for whole image evaluation script<commit_after>
|
import os
import cppyy
def get_naoth_dir():
script_path = os.path.abspath(__file__)
return os.path.abspath(os.path.join(script_path, "../../../../../"))
def init_simulator():
naoth_dir = get_naoth_dir()
    # load shared library: all dependent libraries should be found automatically
cppyy.load_library(os.path.join(naoth_dir, "NaoTHSoccer/dist/Native/libdummysimulator.so"))
# add relevant include paths to allow mapping our code
cppyy.add_include_path(os.path.join(naoth_dir, "Framework/Commons/Source"))
cppyy.add_include_path(os.path.join(naoth_dir, "NaoTHSoccer/Source"))
# include platform
cppyy.include(os.path.join(naoth_dir, "Framework/Commons/Source/PlatformInterface/Platform.h"))
cppyy.include(os.path.join(naoth_dir, "Framework/Platforms/Source/DummySimulator/DummySimulator.h"))
# change working directory so that the configuration is found
os.chdir(os.path.join(naoth_dir, "NaoTHSoccer"))
# start dummy simulator
sim = cppyy.gbl.DummySimulator(False, False, 5401)
cppyy.gbl.naoth.Platform.getInstance().init(sim)
if __name__ == "__main__":
init_simulator()
|
Add initial code for whole image evaluation scriptimport os
import cppyy
def get_naoth_dir():
script_path = os.path.abspath(__file__)
return os.path.abspath(os.path.join(script_path, "../../../../../"))
def init_simulator():
naoth_dir = get_naoth_dir()
    # load shared library: all dependent libraries should be found automatically
cppyy.load_library(os.path.join(naoth_dir, "NaoTHSoccer/dist/Native/libdummysimulator.so"))
# add relevant include paths to allow mapping our code
cppyy.add_include_path(os.path.join(naoth_dir, "Framework/Commons/Source"))
cppyy.add_include_path(os.path.join(naoth_dir, "NaoTHSoccer/Source"))
# include platform
cppyy.include(os.path.join(naoth_dir, "Framework/Commons/Source/PlatformInterface/Platform.h"))
cppyy.include(os.path.join(naoth_dir, "Framework/Platforms/Source/DummySimulator/DummySimulator.h"))
# change working directory so that the configuration is found
os.chdir(os.path.join(naoth_dir, "NaoTHSoccer"))
# start dummy simulator
sim = cppyy.gbl.DummySimulator(False, False, 5401)
cppyy.gbl.naoth.Platform.getInstance().init(sim)
if __name__ == "__main__":
init_simulator()
|
<commit_before><commit_msg>Add initial code for whole image evaluation script<commit_after>import os
import cppyy
def get_naoth_dir():
script_path = os.path.abspath(__file__)
return os.path.abspath(os.path.join(script_path, "../../../../../"))
def init_simulator():
naoth_dir = get_naoth_dir()
    # load shared library: all dependent libraries should be found automatically
cppyy.load_library(os.path.join(naoth_dir, "NaoTHSoccer/dist/Native/libdummysimulator.so"))
# add relevant include paths to allow mapping our code
cppyy.add_include_path(os.path.join(naoth_dir, "Framework/Commons/Source"))
cppyy.add_include_path(os.path.join(naoth_dir, "NaoTHSoccer/Source"))
# include platform
cppyy.include(os.path.join(naoth_dir, "Framework/Commons/Source/PlatformInterface/Platform.h"))
cppyy.include(os.path.join(naoth_dir, "Framework/Platforms/Source/DummySimulator/DummySimulator.h"))
# change working directory so that the configuration is found
os.chdir(os.path.join(naoth_dir, "NaoTHSoccer"))
# start dummy simulator
sim = cppyy.gbl.DummySimulator(False, False, 5401)
cppyy.gbl.naoth.Platform.getInstance().init(sim)
if __name__ == "__main__":
init_simulator()
|
|
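A worked example of the path arithmetic in get_naoth_dir above; the path is hypothetical, the point is that the five '..' segments climb from the script's location back to the repository root.

import os

# Hypothetical checkout location of the script.
script_path = "/home/nao/naoth/Utils/py/BallDetection/Evaluation/evaluate_image_log.py"
naoth_dir = os.path.abspath(os.path.join(script_path, "../../../../../"))
print(naoth_dir)  # -> /home/nao/naoth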
4dc9c81a5a1917310a8fa0da688c1c6c94e746f3
|
dbconnect.py
|
dbconnect.py
|
import os
from shutil import copyfile
from datetime import datetime
from infi.clickhouse_orm import models, fields, engines
from infi.clickhouse_orm.database import Database
if not os.path.isfile("settings.py"):
copyfile("settings.py.example", "settings.py")
from settings import (CLICKHOUSEIP, CLICKHOUSEPORT,
CLICKHOUSEUSER, CLICKHOUSEPASSWORD)
class BearRequests(models.Model):
EventDate = fields.DateField()
RequestTime = fields.DateTimeField()
RequestPath = fields.StringField()
RequestCommand = fields.StringField()
RequestVersion = fields.StringField()
RequestRaw = fields.StringField()
ProbeName = fields.StringField()
RequestDetectionID = fields.Int32Field()
BotIP = fields.StringField()
BotCountry = fields.StringField()
BotUA = fields.StringField()
BotContinent = fields.StringField()
BotTracert = fields.StringField()
BotDNSName = fields.StringField()
engine = engines.MergeTree('EventDate', ('RequestTime', 'BotIP'))
def Insert(Bear):
date = Bear.timestamp.replace(';', ':')
date = datetime.strptime(date, "%Y-%m-%d %H:%M:%S.%f")
db = Database('Honeypot', db_url=CLICKHOUSEIP + ':' + CLICKHOUSEPORT,
username=CLICKHOUSEUSER, password=CLICKHOUSEPASSWORD)
DBBear = BearRequests(
EventDate=date.date(),
RequestTime=date,
RequestPath=Bear.path,
RequestCommand=Bear.command,
RequestVersion=Bear.version,
RequestRaw=Bear.rawrequest,
ProbeName=Bear.hostname,
RequestDetectionID=Bear.isDetected,
BotIP=Bear.ip,
BotCountry=Bear.country,
BotUA=Bear.ua,
BotContinent=Bear.continent,
BotTracert=Bear.tracert,
BotDNSName=Bear.dnsname,
)
db.insert({DBBear, })
# db = Database('Honeypot')
# print db.select("SELECT * FROM $table", model_class=BearRequests)
|
Add db connector and insert function
|
Add db connector and insert function
|
Python
|
mit
|
Zloool/manyfaced-honeypot
|
Add db connector and insert function
|
import os
from shutil import copyfile
from datetime import datetime
from infi.clickhouse_orm import models, fields, engines
from infi.clickhouse_orm.database import Database
if not os.path.isfile("settings.py"):
copyfile("settings.py.example", "settings.py")
from settings import (CLICKHOUSEIP, CLICKHOUSEPORT,
CLICKHOUSEUSER, CLICKHOUSEPASSWORD)
class BearRequests(models.Model):
EventDate = fields.DateField()
RequestTime = fields.DateTimeField()
RequestPath = fields.StringField()
RequestCommand = fields.StringField()
RequestVersion = fields.StringField()
RequestRaw = fields.StringField()
ProbeName = fields.StringField()
RequestDetectionID = fields.Int32Field()
BotIP = fields.StringField()
BotCountry = fields.StringField()
BotUA = fields.StringField()
BotContinent = fields.StringField()
BotTracert = fields.StringField()
BotDNSName = fields.StringField()
engine = engines.MergeTree('EventDate', ('RequestTime', 'BotIP'))
def Insert(Bear):
date = Bear.timestamp.replace(';', ':')
date = datetime.strptime(date, "%Y-%m-%d %H:%M:%S.%f")
db = Database('Honeypot', db_url=CLICKHOUSEIP + ':' + CLICKHOUSEPORT,
username=CLICKHOUSEUSER, password=CLICKHOUSEPASSWORD)
DBBear = BearRequests(
EventDate=date.date(),
RequestTime=date,
RequestPath=Bear.path,
RequestCommand=Bear.command,
RequestVersion=Bear.version,
RequestRaw=Bear.rawrequest,
ProbeName=Bear.hostname,
RequestDetectionID=Bear.isDetected,
BotIP=Bear.ip,
BotCountry=Bear.country,
BotUA=Bear.ua,
BotContinent=Bear.continent,
BotTracert=Bear.tracert,
BotDNSName=Bear.dnsname,
)
db.insert({DBBear, })
# db = Database('Honeypot')
# print db.select("SELECT * FROM $table", model_class=BearRequests)
|
<commit_before><commit_msg>Add db connector and insert function<commit_after>
|
import os
from shutil import copyfile
from datetime import datetime
from infi.clickhouse_orm import models, fields, engines
from infi.clickhouse_orm.database import Database
if not os.path.isfile("settings.py"):
copyfile("settings.py.example", "settings.py")
from settings import (CLICKHOUSEIP, CLICKHOUSEPORT,
CLICKHOUSEUSER, CLICKHOUSEPASSWORD)
class BearRequests(models.Model):
EventDate = fields.DateField()
RequestTime = fields.DateTimeField()
RequestPath = fields.StringField()
RequestCommand = fields.StringField()
RequestVersion = fields.StringField()
RequestRaw = fields.StringField()
ProbeName = fields.StringField()
RequestDetectionID = fields.Int32Field()
BotIP = fields.StringField()
BotCountry = fields.StringField()
BotUA = fields.StringField()
BotContinent = fields.StringField()
BotTracert = fields.StringField()
BotDNSName = fields.StringField()
engine = engines.MergeTree('EventDate', ('RequestTime', 'BotIP'))
def Insert(Bear):
date = Bear.timestamp.replace(';', ':')
date = datetime.strptime(date, "%Y-%m-%d %H:%M:%S.%f")
db = Database('Honeypot', db_url=CLICKHOUSEIP + ':' + CLICKHOUSEPORT,
username=CLICKHOUSEUSER, password=CLICKHOUSEPASSWORD)
DBBear = BearRequests(
EventDate=date.date(),
RequestTime=date,
RequestPath=Bear.path,
RequestCommand=Bear.command,
RequestVersion=Bear.version,
RequestRaw=Bear.rawrequest,
ProbeName=Bear.hostname,
RequestDetectionID=Bear.isDetected,
BotIP=Bear.ip,
BotCountry=Bear.country,
BotUA=Bear.ua,
BotContinent=Bear.continent,
BotTracert=Bear.tracert,
BotDNSName=Bear.dnsname,
)
db.insert({DBBear, })
# db = Database('Honeypot')
# print db.select("SELECT * FROM $table", model_class=BearRequests)
|
Add db connector and insert functionimport os
from shutil import copyfile
from datetime import datetime
from infi.clickhouse_orm import models, fields, engines
from infi.clickhouse_orm.database import Database
if not os.path.isfile("settings.py"):
copyfile("settings.py.example", "settings.py")
from settings import (CLICKHOUSEIP, CLICKHOUSEPORT,
CLICKHOUSEUSER, CLICKHOUSEPASSWORD)
class BearRequests(models.Model):
EventDate = fields.DateField()
RequestTime = fields.DateTimeField()
RequestPath = fields.StringField()
RequestCommand = fields.StringField()
RequestVersion = fields.StringField()
RequestRaw = fields.StringField()
ProbeName = fields.StringField()
RequestDetectionID = fields.Int32Field()
BotIP = fields.StringField()
BotCountry = fields.StringField()
BotUA = fields.StringField()
BotContinent = fields.StringField()
BotTracert = fields.StringField()
BotDNSName = fields.StringField()
engine = engines.MergeTree('EventDate', ('RequestTime', 'BotIP'))
def Insert(Bear):
date = Bear.timestamp.replace(';', ':')
date = datetime.strptime(date, "%Y-%m-%d %H:%M:%S.%f")
db = Database('Honeypot', db_url=CLICKHOUSEIP + ':' + CLICKHOUSEPORT,
username=CLICKHOUSEUSER, password=CLICKHOUSEPASSWORD)
DBBear = BearRequests(
EventDate=date.date(),
RequestTime=date,
RequestPath=Bear.path,
RequestCommand=Bear.command,
RequestVersion=Bear.version,
RequestRaw=Bear.rawrequest,
ProbeName=Bear.hostname,
RequestDetectionID=Bear.isDetected,
BotIP=Bear.ip,
BotCountry=Bear.country,
BotUA=Bear.ua,
BotContinent=Bear.continent,
BotTracert=Bear.tracert,
BotDNSName=Bear.dnsname,
)
db.insert({DBBear, })
# db = Database('Honeypot')
# print db.select("SELECT * FROM $table", model_class=BearRequests)
|
<commit_before><commit_msg>Add db connector and insert function<commit_after>import os
from shutil import copyfile
from datetime import datetime
from infi.clickhouse_orm import models, fields, engines
from infi.clickhouse_orm.database import Database
if not os.path.isfile("settings.py"):
copyfile("settings.py.example", "settings.py")
from settings import (CLICKHOUSEIP, CLICKHOUSEPORT,
CLICKHOUSEUSER, CLICKHOUSEPASSWORD)
class BearRequests(models.Model):
EventDate = fields.DateField()
RequestTime = fields.DateTimeField()
RequestPath = fields.StringField()
RequestCommand = fields.StringField()
RequestVersion = fields.StringField()
RequestRaw = fields.StringField()
ProbeName = fields.StringField()
RequestDetectionID = fields.Int32Field()
BotIP = fields.StringField()
BotCountry = fields.StringField()
BotUA = fields.StringField()
BotContinent = fields.StringField()
BotTracert = fields.StringField()
BotDNSName = fields.StringField()
engine = engines.MergeTree('EventDate', ('RequestTime', 'BotIP'))
def Insert(Bear):
date = Bear.timestamp.replace(';', ':')
date = datetime.strptime(date, "%Y-%m-%d %H:%M:%S.%f")
db = Database('Honeypot', db_url=CLICKHOUSEIP + ':' + CLICKHOUSEPORT,
username=CLICKHOUSEUSER, password=CLICKHOUSEPASSWORD)
DBBear = BearRequests(
EventDate=date.date(),
RequestTime=date,
RequestPath=Bear.path,
RequestCommand=Bear.command,
RequestVersion=Bear.version,
RequestRaw=Bear.rawrequest,
ProbeName=Bear.hostname,
RequestDetectionID=Bear.isDetected,
BotIP=Bear.ip,
BotCountry=Bear.country,
BotUA=Bear.ua,
BotContinent=Bear.continent,
BotTracert=Bear.tracert,
BotDNSName=Bear.dnsname,
)
db.insert({DBBear, })
# db = Database('Honeypot')
# print db.select("SELECT * FROM $table", model_class=BearRequests)
|
|
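A hypothetical caller for Insert above; the real request object is built elsewhere in manyfaced-honeypot, so the attribute names simply mirror what Insert reads, and a reachable ClickHouse instance at the configured address is assumed.

from types import SimpleNamespace

# Stand-in for the honeypot's request object; attribute names mirror Insert's usage.
bear = SimpleNamespace(
    timestamp="2017-03-01 12;34;56.789000",  # ';' separators are normalised by Insert
    path="/admin", command="GET", version="HTTP/1.1",
    rawrequest="GET /admin HTTP/1.1",
    hostname="probe-01", isDetected=3,
    ip="203.0.113.7", country="ZZ", ua="curl/7.0",
    continent="EU", tracert="", dnsname="",
)
Insert(bear)  # parses the timestamp and writes one row into the Honeypot database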
830496bda1f6ba196471ae2b848d756effc2d02b
|
apps/feedback/migrations/0002_auto_20150429_1759.py
|
apps/feedback/migrations/0002_auto_20150429_1759.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('feedback', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='ratinganswer',
name='answer',
field=models.SmallIntegerField(default=0, verbose_name='karakter', choices=[(b'', b''), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5'), (6, b'6')]),
preserve_default=True,
),
]
|
Add auto migration for responsive images
|
Add auto migration for responsive images
|
Python
|
mit
|
dotKom/onlineweb4,dotKom/onlineweb4,dotKom/onlineweb4,dotKom/onlineweb4
|
Add auto migration for responsive images
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('feedback', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='ratinganswer',
name='answer',
field=models.SmallIntegerField(default=0, verbose_name='karakter', choices=[(b'', b''), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5'), (6, b'6')]),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add auto migration for responsive images<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('feedback', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='ratinganswer',
name='answer',
field=models.SmallIntegerField(default=0, verbose_name='karakter', choices=[(b'', b''), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5'), (6, b'6')]),
preserve_default=True,
),
]
|
Add auto migration for responsive images# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('feedback', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='ratinganswer',
name='answer',
field=models.SmallIntegerField(default=0, verbose_name='karakter', choices=[(b'', b''), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5'), (6, b'6')]),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add auto migration for responsive images<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('feedback', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='ratinganswer',
name='answer',
field=models.SmallIntegerField(default=0, verbose_name='karakter', choices=[(b'', b''), (1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5'), (6, b'6')]),
preserve_default=True,
),
]
|
|
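For orientation, the model field that the AlterField above brings the schema in line with might look roughly like this; the real definition lives in apps/feedback/models.py and is not part of this record.

from django.db import models

# Equivalent to the literal choices list in the migration above.
RATING_CHOICES = [(b'', b'')] + [(i, str(i).encode()) for i in range(1, 7)]

class RatingAnswer(models.Model):
    answer = models.SmallIntegerField(default=0, verbose_name='karakter',
                                      choices=RATING_CHOICES)

    class Meta:
        app_label = 'feedback'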
a48c6a0edaa4b64cb0f93ab5c95f635318b8fc72
|
iotendpoints/endpoints/plugins/platecamera.py
|
iotendpoints/endpoints/plugins/platecamera.py
|
"""
PLATECAMERA endpoint.
You must declare environment variable PLATECAMERA_URL to activate this plugin.
If you are running development server (`python manage.py runserver`), you can
`export PLATECAMERA_URL=path_without_leading_slash`
before starting runserver.
If you use supervisord to keep gunicorn or similar running, you can add line
`environment = PLATECAMERA_URL=path_without_leading_slash` in your supervisor/site.conf.
Raw gunicorn seems to accept `--env PLATECAMERA_URL=path_without_leading_slash` command line argument.
"""
import os
from django.conf.urls import url
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from endpoints.utils import BasePlugin
from endpoints.utils import get_setting
from endpoints.views import dump_request
ENV_NAME = 'PLATECAMERA_URL'
URL = get_setting(ENV_NAME)
class Plugin(BasePlugin):
"""
Example plugin. Checks if endpoint's URL has been set in env.
"""
name = 'PLATECAMERA'
viewname = 'platecamerahandler'
def __init__(self):
"""Check that `ENV_NAME` is in env variables."""
super().__init__()
if URL is not None:
self.in_use = True
def register(self):
print('Registering plugin "{}"'.format(self.name))
def get_urlpatterns(self):
if self.in_use is False:
print('{} environment variable is not set. PLATECAMERA endpoint is not in use.'.format(ENV_NAME))
urlpatterns = []
else:
url_pattern = r'^{}$'.format(URL)
urlpatterns = [
url(url_pattern, self.view_func, name=self.viewname),
]
return urlpatterns
@csrf_exempt
def view_func(self, request):
res = dump_request(request, postfix='democam')
return HttpResponse('OK')
|
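For illustration, how the environment variable above turns into a routable pattern; the value is made up and get_setting is assumed to behave like os.environ.get.

import os
import re

os.environ["PLATECAMERA_URL"] = "camera/plates"        # hypothetical deployment value
url_path = os.environ.get("PLATECAMERA_URL")           # what get_setting would return
url_pattern = r'^{}$'.format(url_path)
assert re.match(url_pattern, "camera/plates")          # only this exact path is served
print(url_pattern)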
Add register plate camera endpoint placeholder
|
Add register plate camera endpoint placeholder
|
Python
|
mit
|
aapris/IoT-Web-Experiments
|
Add register plate camera endpoint placeholder
|
"""
PLATECAMERA endpoint.
You must declare environment variable PLATECAMERA_URL to activate this plugin.
If you are running development server (`python manage.py runserver`), you can
`export PLATECAMERA_URL=path_without_leading_slash`
before starting runserver.
If you use supervisord to keep gunicorn or similar running, you can add line
`environment = PLATECAMERA_URL=path_without_leading_slash` in your supervisor/site.conf.
Raw gunicorn seems to accept `--env PLATECAMERA_URL=path_without_leading_slash` command line argument.
"""
import os
from django.conf.urls import url
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from endpoints.utils import BasePlugin
from endpoints.utils import get_setting
from endpoints.views import dump_request
ENV_NAME = 'PLATECAMERA_URL'
URL = get_setting(ENV_NAME)
class Plugin(BasePlugin):
"""
Example plugin. Checks if endpoint's URL has been set in env.
"""
name = 'PLATECAMERA'
viewname = 'platecamerahandler'
def __init__(self):
"""Check that `ENV_NAME` is in env variables."""
super().__init__()
if URL is not None:
self.in_use = True
def register(self):
print('Registering plugin "{}"'.format(self.name))
def get_urlpatterns(self):
if self.in_use is False:
print('{} environment variable is not set. PLATECAMERA endpoint is not in use.'.format(ENV_NAME))
urlpatterns = []
else:
url_pattern = r'^{}$'.format(URL)
urlpatterns = [
url(url_pattern, self.view_func, name=self.viewname),
]
return urlpatterns
@csrf_exempt
def view_func(self, request):
res = dump_request(request, postfix='democam')
return HttpResponse('OK')
|
<commit_before><commit_msg>Add register plate camera endpoint placeholder<commit_after>
|
"""
PLATECAMERA endpoint.
You must declare environment variable PLATECAMERA_URL to activate this plugin.
If you are running the development server (`python manage.py runserver`), you can
`export PLATECAMERA_URL=path_without_leading_slash`
before starting runserver.
If you use supervisord to keep gunicorn or similar running, you can add the line
`environment = PLATECAMERA_URL=path_without_leading_slash` in your supervisor/site.conf.
Raw gunicorn seems to accept the `--env PLATECAMERA_URL=path_without_leading_slash` command line argument.
"""
import os
from django.conf.urls import url
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from endpoints.utils import BasePlugin
from endpoints.utils import get_setting
from endpoints.views import dump_request
ENV_NAME = 'PLATECAMERA_URL'
URL = get_setting(ENV_NAME)
class Plugin(BasePlugin):
"""
Example plugin. Checks if endpoint's URL has been set in env.
"""
name = 'PLATECAMERA'
viewname = 'platecamerahandler'
def __init__(self):
"""Check that `ENV_NAME` is in env variables."""
super().__init__()
if URL is not None:
self.in_use = True
def register(self):
print('Registering plugin "{}"'.format(self.name))
def get_urlpatterns(self):
if self.in_use is False:
print('{} environment variable is not set. PLATECAMERA endpoint is not in use.'.format(ENV_NAME))
urlpatterns = []
else:
url_pattern = r'^{}$'.format(URL)
urlpatterns = [
url(url_pattern, self.view_func, name=self.viewname),
]
return urlpatterns
@csrf_exempt
def view_func(self, request):
res = dump_request(request, postfix='democam')
return HttpResponse('OK')
|
Add register plate camera endpoint placeholder"""
PLATECAMERA endpoint.
You must declare environment variable PLATECAMERA_URL to activate this plugin.
If you are running the development server (`python manage.py runserver`), you can
`export PLATECAMERA_URL=path_without_leading_slash`
before starting runserver.
If you use supervisord to keep gunicorn or similar running, you can add the line
`environment = PLATECAMERA_URL=path_without_leading_slash` in your supervisor/site.conf.
Raw gunicorn seems to accept the `--env PLATECAMERA_URL=path_without_leading_slash` command line argument.
"""
import os
from django.conf.urls import url
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from endpoints.utils import BasePlugin
from endpoints.utils import get_setting
from endpoints.views import dump_request
ENV_NAME = 'PLATECAMERA_URL'
URL = get_setting(ENV_NAME)
class Plugin(BasePlugin):
"""
Example plugin. Checks if endpoint's URL has been set in env.
"""
name = 'PLATECAMERA'
viewname = 'platecamerahandler'
def __init__(self):
"""Check that `ENV_NAME` is in env variables."""
super().__init__()
if URL is not None:
self.in_use = True
def register(self):
print('Registering plugin "{}"'.format(self.name))
def get_urlpatterns(self):
if self.in_use is False:
print('{} environment variable is not set. PLATECAMERA endpoint is not in use.'.format(ENV_NAME))
urlpatterns = []
else:
url_pattern = r'^{}$'.format(URL)
urlpatterns = [
url(url_pattern, self.view_func, name=self.viewname),
]
return urlpatterns
@csrf_exempt
def view_func(self, request):
res = dump_request(request, postfix='democam')
return HttpResponse('OK')
|
<commit_before><commit_msg>Add register plate camera endpoint placeholder<commit_after>"""
PLATECAMERA endpoint.
You must declare environment variable PLATECAMERA_URL to activate this plugin.
If you are running the development server (`python manage.py runserver`), you can
`export PLATECAMERA_URL=path_without_leading_slash`
before starting runserver.
If you use supervisord to keep gunicorn or similar running, you can add the line
`environment = PLATECAMERA_URL=path_without_leading_slash` in your supervisor/site.conf.
Raw gunicorn seems to accept the `--env PLATECAMERA_URL=path_without_leading_slash` command line argument.
"""
import os
from django.conf.urls import url
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from endpoints.utils import BasePlugin
from endpoints.utils import get_setting
from endpoints.views import dump_request
ENV_NAME = 'PLATECAMERA_URL'
URL = get_setting(ENV_NAME)
class Plugin(BasePlugin):
"""
Example plugin. Checks if endpoint's URL has been set in env.
"""
name = 'PLATECAMERA'
viewname = 'platecamerahandler'
def __init__(self):
"""Check that `ENV_NAME` is in env variables."""
super().__init__()
if URL is not None:
self.in_use = True
def register(self):
print('Registering plugin "{}"'.format(self.name))
def get_urlpatterns(self):
if self.in_use is False:
print('{} environment variable is not set. PLATECAMERA endpoint is not in use.'.format(ENV_NAME))
urlpatterns = []
else:
url_pattern = r'^{}$'.format(URL)
urlpatterns = [
url(url_pattern, self.view_func, name=self.viewname),
]
return urlpatterns
@csrf_exempt
def view_func(self, request):
res = dump_request(request, postfix='democam')
return HttpResponse('OK')
|
|
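One detail of the plugin above worth spelling out: `URL = get_setting(ENV_NAME)` runs at import time, so `PLATECAMERA_URL` must already be present in the process environment when Django imports the module; otherwise the plugin stays inactive and `get_urlpatterns()` returns an empty list. A minimal illustration of that constraint (the path value is made up; in practice you would export the variable from the shell, supervisor, or gunicorn exactly as the docstring describes):

import os

# Hypothetical example value. It has to be in the environment before the plugin
# module is imported, because the module reads PLATECAMERA_URL once at import time.
os.environ.setdefault('PLATECAMERA_URL', 'platecamera/upload')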
34f032df44ee34779dae7f9876b6ca80a935de55
|
bedrock/mozorg/models.py
|
bedrock/mozorg/models.py
|
from django.core.cache import cache
from django.db import models
from django.db.utils import DatabaseError
from picklefield import PickledObjectField
from django_extensions.db.fields import ModificationDateTimeField
class TwitterCacheManager(models.Manager):
def get_tweets_for(self, account):
cache_key = 'tweets-for-' + str(account)
tweets = cache.get(cache_key)
if tweets is None:
try:
tweets = TwitterCache.objects.get(account=account).tweets
except (TwitterCache.DoesNotExist, DatabaseError):
# TODO: see if we should catch other errors
tweets = []
cache.set(cache_key, tweets, 60 * 60 * 6) # 6 hours, same as cron
return tweets
class TwitterCache(models.Model):
account = models.CharField(max_length=100, db_index=True, unique=True)
tweets = PickledObjectField(default=list)
updated = ModificationDateTimeField()
objects = TwitterCacheManager()
def __unicode__(self):
return u'Tweets from @' + self.account
|
from django.core.cache import cache
from django.db import models
from django.db.utils import DatabaseError
from picklefield import PickledObjectField
from django_extensions.db.fields import ModificationDateTimeField
class TwitterCacheManager(models.Manager):
def get_tweets_for(self, account):
cache_key = 'tweets-for-' + str(account)
tweets = cache.get(cache_key)
if tweets is None:
try:
tweets = self.get(account=account).tweets
except (TwitterCache.DoesNotExist, DatabaseError):
# TODO: see if we should catch other errors
tweets = []
cache.set(cache_key, tweets, 60 * 60 * 6) # 6 hours, same as cron
return tweets
class TwitterCache(models.Model):
account = models.CharField(max_length=100, db_index=True, unique=True)
tweets = PickledObjectField(default=list)
updated = ModificationDateTimeField()
objects = TwitterCacheManager()
def __unicode__(self):
return u'Tweets from @' + self.account
|
Fix a potential error in the TwitterCacheManager.
|
Fix a potential error in the TwitterCacheManager.
This looks like it would fail at some point. Python
magic is keeping it going I guess. Referencing the manager
instance from the manager class seems bad though.
|
Python
|
mpl-2.0
|
TheoChevalier/bedrock,jacshfr/mozilla-bedrock,gerv/bedrock,dudepare/bedrock,jgmize/bedrock,Jobava/bedrock,glogiotatidis/bedrock,mermi/bedrock,malena/bedrock,schalkneethling/bedrock,jgmize/bedrock,kyoshino/bedrock,CSCI-462-01-2017/bedrock,davehunt/bedrock,TheJJ100100/bedrock,bensternthal/bedrock,malena/bedrock,mermi/bedrock,jacshfr/mozilla-bedrock,hoosteeno/bedrock,TheJJ100100/bedrock,jpetto/bedrock,TheJJ100100/bedrock,mahinthjoe/bedrock,analytics-pros/mozilla-bedrock,bensternthal/bedrock,CSCI-462-01-2017/bedrock,mozilla/bedrock,schalkneethling/bedrock,schalkneethling/bedrock,chirilo/bedrock,yglazko/bedrock,jpetto/bedrock,CSCI-462-01-2017/bedrock,sgarrity/bedrock,ericawright/bedrock,analytics-pros/mozilla-bedrock,bensternthal/bedrock,sylvestre/bedrock,ericawright/bedrock,hoosteeno/bedrock,jpetto/bedrock,hoosteeno/bedrock,petabyte/bedrock,Sancus/bedrock,pascalchevrel/bedrock,sgarrity/bedrock,yglazko/bedrock,rishiloyola/bedrock,pascalchevrel/bedrock,TheoChevalier/bedrock,yglazko/bedrock,Sancus/bedrock,hoosteeno/bedrock,mozilla/bedrock,pmclanahan/bedrock,sylvestre/bedrock,petabyte/bedrock,jacshfr/mozilla-bedrock,pmclanahan/bedrock,gauthierm/bedrock,flodolo/bedrock,andreadelrio/bedrock,ericawright/bedrock,kyoshino/bedrock,Jobava/bedrock,TheoChevalier/bedrock,craigcook/bedrock,jacshfr/mozilla-bedrock,SujaySKumar/bedrock,rishiloyola/bedrock,chirilo/bedrock,gauthierm/bedrock,l-hedgehog/bedrock,jacshfr/mozilla-bedrock,marcoscaceres/bedrock,alexgibson/bedrock,chirilo/bedrock,mkmelin/bedrock,malena/bedrock,mozilla/bedrock,davehunt/bedrock,yglazko/bedrock,pascalchevrel/bedrock,jgmize/bedrock,alexgibson/bedrock,marcoscaceres/bedrock,mermi/bedrock,marcoscaceres/bedrock,TheJJ100100/bedrock,TheoChevalier/bedrock,mahinthjoe/bedrock,SujaySKumar/bedrock,glogiotatidis/bedrock,alexgibson/bedrock,kyoshino/bedrock,gerv/bedrock,SujaySKumar/bedrock,petabyte/bedrock,gerv/bedrock,pascalchevrel/bedrock,gauthierm/bedrock,Jobava/bedrock,mkmelin/bedrock,chirilo/bedrock,rishiloyola/bedrock,bensternthal/bedrock,l-hedgehog/bedrock,MichaelKohler/bedrock,pmclanahan/bedrock,sgarrity/bedrock,malena/bedrock,glogiotatidis/bedrock,Sancus/bedrock,l-hedgehog/bedrock,MichaelKohler/bedrock,Sancus/bedrock,MichaelKohler/bedrock,dudepare/bedrock,MichaelKohler/bedrock,mermi/bedrock,alexgibson/bedrock,l-hedgehog/bedrock,analytics-pros/mozilla-bedrock,davehunt/bedrock,glogiotatidis/bedrock,flodolo/bedrock,flodolo/bedrock,CSCI-462-01-2017/bedrock,pmclanahan/bedrock,mahinthjoe/bedrock,craigcook/bedrock,dudepare/bedrock,SujaySKumar/bedrock,jpetto/bedrock,ericawright/bedrock,gerv/bedrock,andreadelrio/bedrock,andreadelrio/bedrock,analytics-pros/mozilla-bedrock,dudepare/bedrock,mkmelin/bedrock,marcoscaceres/bedrock,davehunt/bedrock,schalkneethling/bedrock,sgarrity/bedrock,craigcook/bedrock,andreadelrio/bedrock,mkmelin/bedrock,craigcook/bedrock,gauthierm/bedrock,sylvestre/bedrock,sylvestre/bedrock,kyoshino/bedrock,Jobava/bedrock,rishiloyola/bedrock,mozilla/bedrock,mahinthjoe/bedrock,flodolo/bedrock,petabyte/bedrock,jgmize/bedrock
|
from django.core.cache import cache
from django.db import models
from django.db.utils import DatabaseError
from picklefield import PickledObjectField
from django_extensions.db.fields import ModificationDateTimeField
class TwitterCacheManager(models.Manager):
def get_tweets_for(self, account):
cache_key = 'tweets-for-' + str(account)
tweets = cache.get(cache_key)
if tweets is None:
try:
tweets = TwitterCache.objects.get(account=account).tweets
except (TwitterCache.DoesNotExist, DatabaseError):
# TODO: see if we should catch other errors
tweets = []
cache.set(cache_key, tweets, 60 * 60 * 6) # 6 hours, same as cron
return tweets
class TwitterCache(models.Model):
account = models.CharField(max_length=100, db_index=True, unique=True)
tweets = PickledObjectField(default=list)
updated = ModificationDateTimeField()
objects = TwitterCacheManager()
def __unicode__(self):
return u'Tweets from @' + self.account
Fix a potential error in the TwitterCacheManager.
This looks like it would fail at some point. Python
magic is keeping it going I guess. Referencing the manager
instance from the manager class seems bad though.
|
from django.core.cache import cache
from django.db import models
from django.db.utils import DatabaseError
from picklefield import PickledObjectField
from django_extensions.db.fields import ModificationDateTimeField
class TwitterCacheManager(models.Manager):
def get_tweets_for(self, account):
cache_key = 'tweets-for-' + str(account)
tweets = cache.get(cache_key)
if tweets is None:
try:
tweets = self.get(account=account).tweets
except (TwitterCache.DoesNotExist, DatabaseError):
# TODO: see if we should catch other errors
tweets = []
cache.set(cache_key, tweets, 60 * 60 * 6) # 6 hours, same as cron
return tweets
class TwitterCache(models.Model):
account = models.CharField(max_length=100, db_index=True, unique=True)
tweets = PickledObjectField(default=list)
updated = ModificationDateTimeField()
objects = TwitterCacheManager()
def __unicode__(self):
return u'Tweets from @' + self.account
|
<commit_before>from django.core.cache import cache
from django.db import models
from django.db.utils import DatabaseError
from picklefield import PickledObjectField
from django_extensions.db.fields import ModificationDateTimeField
class TwitterCacheManager(models.Manager):
def get_tweets_for(self, account):
cache_key = 'tweets-for-' + str(account)
tweets = cache.get(cache_key)
if tweets is None:
try:
tweets = TwitterCache.objects.get(account=account).tweets
except (TwitterCache.DoesNotExist, DatabaseError):
# TODO: see if we should catch other errors
tweets = []
cache.set(cache_key, tweets, 60 * 60 * 6) # 6 hours, same as cron
return tweets
class TwitterCache(models.Model):
account = models.CharField(max_length=100, db_index=True, unique=True)
tweets = PickledObjectField(default=list)
updated = ModificationDateTimeField()
objects = TwitterCacheManager()
def __unicode__(self):
return u'Tweets from @' + self.account
<commit_msg>Fix a potential error in the TwitterCacheManager.
This looks like it would fail at some point. Python
magic is keeping it going I guess. Referencing the manager
instance from the manager class seems bad though.<commit_after>
|
from django.core.cache import cache
from django.db import models
from django.db.utils import DatabaseError
from picklefield import PickledObjectField
from django_extensions.db.fields import ModificationDateTimeField
class TwitterCacheManager(models.Manager):
def get_tweets_for(self, account):
cache_key = 'tweets-for-' + str(account)
tweets = cache.get(cache_key)
if tweets is None:
try:
tweets = self.get(account=account).tweets
except (TwitterCache.DoesNotExist, DatabaseError):
# TODO: see if we should catch other errors
tweets = []
cache.set(cache_key, tweets, 60 * 60 * 6) # 6 hours, same as cron
return tweets
class TwitterCache(models.Model):
account = models.CharField(max_length=100, db_index=True, unique=True)
tweets = PickledObjectField(default=list)
updated = ModificationDateTimeField()
objects = TwitterCacheManager()
def __unicode__(self):
return u'Tweets from @' + self.account
|
from django.core.cache import cache
from django.db import models
from django.db.utils import DatabaseError
from picklefield import PickledObjectField
from django_extensions.db.fields import ModificationDateTimeField
class TwitterCacheManager(models.Manager):
def get_tweets_for(self, account):
cache_key = 'tweets-for-' + str(account)
tweets = cache.get(cache_key)
if tweets is None:
try:
tweets = TwitterCache.objects.get(account=account).tweets
except (TwitterCache.DoesNotExist, DatabaseError):
# TODO: see if we should catch other errors
tweets = []
cache.set(cache_key, tweets, 60 * 60 * 6) # 6 hours, same as cron
return tweets
class TwitterCache(models.Model):
account = models.CharField(max_length=100, db_index=True, unique=True)
tweets = PickledObjectField(default=list)
updated = ModificationDateTimeField()
objects = TwitterCacheManager()
def __unicode__(self):
return u'Tweets from @' + self.account
Fix a potential error in the TwitterCacheManager.
This looks like it would fail at some point. Python
magic is keeping it going I guess. Referencing the manager
instance from the manager class seems bad though.from django.core.cache import cache
from django.db import models
from django.db.utils import DatabaseError
from picklefield import PickledObjectField
from django_extensions.db.fields import ModificationDateTimeField
class TwitterCacheManager(models.Manager):
def get_tweets_for(self, account):
cache_key = 'tweets-for-' + str(account)
tweets = cache.get(cache_key)
if tweets is None:
try:
tweets = self.get(account=account).tweets
except (TwitterCache.DoesNotExist, DatabaseError):
# TODO: see if we should catch other errors
tweets = []
cache.set(cache_key, tweets, 60 * 60 * 6) # 6 hours, same as cron
return tweets
class TwitterCache(models.Model):
account = models.CharField(max_length=100, db_index=True, unique=True)
tweets = PickledObjectField(default=list)
updated = ModificationDateTimeField()
objects = TwitterCacheManager()
def __unicode__(self):
return u'Tweets from @' + self.account
|
<commit_before>from django.core.cache import cache
from django.db import models
from django.db.utils import DatabaseError
from picklefield import PickledObjectField
from django_extensions.db.fields import ModificationDateTimeField
class TwitterCacheManager(models.Manager):
def get_tweets_for(self, account):
cache_key = 'tweets-for-' + str(account)
tweets = cache.get(cache_key)
if tweets is None:
try:
tweets = TwitterCache.objects.get(account=account).tweets
except (TwitterCache.DoesNotExist, DatabaseError):
# TODO: see if we should catch other errors
tweets = []
cache.set(cache_key, tweets, 60 * 60 * 6) # 6 hours, same as cron
return tweets
class TwitterCache(models.Model):
account = models.CharField(max_length=100, db_index=True, unique=True)
tweets = PickledObjectField(default=list)
updated = ModificationDateTimeField()
objects = TwitterCacheManager()
def __unicode__(self):
return u'Tweets from @' + self.account
<commit_msg>Fix a potential error in the TwitterCacheManager.
This looks like it would fail at some point. Python
magic is keeping it going I guess. Referencing the manager
instance from the manager class seems bad though.<commit_after>from django.core.cache import cache
from django.db import models
from django.db.utils import DatabaseError
from picklefield import PickledObjectField
from django_extensions.db.fields import ModificationDateTimeField
class TwitterCacheManager(models.Manager):
def get_tweets_for(self, account):
cache_key = 'tweets-for-' + str(account)
tweets = cache.get(cache_key)
if tweets is None:
try:
tweets = self.get(account=account).tweets
except (TwitterCache.DoesNotExist, DatabaseError):
# TODO: see if we should catch other errors
tweets = []
cache.set(cache_key, tweets, 60 * 60 * 6) # 6 hours, same as cron
return tweets
class TwitterCache(models.Model):
account = models.CharField(max_length=100, db_index=True, unique=True)
tweets = PickledObjectField(default=list)
updated = ModificationDateTimeField()
objects = TwitterCacheManager()
def __unicode__(self):
return u'Tweets from @' + self.account
|
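For context on the bedrock change above: inside a custom manager method, `self` is already the manager bound to the model, so `self.get(...)` queries the right table without reaching back out to `TwitterCache.objects` at module scope. A small, self-contained illustration of the same pattern on an unrelated model (not part of bedrock):

from django.db import models

class ArticleManager(models.Manager):
    def published(self):
        # `self` is the manager attached to Article, so query through it
        # rather than referring back to Article.objects.
        return self.filter(status='published')

class Article(models.Model):
    status = models.CharField(max_length=20)
    objects = ArticleManager()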
84246e5a9e05b582f822b32789e3a05455556b00
|
py/testdir_single_jvm/test_rf_VA_simple_example.py
|
py/testdir_single_jvm/test_rf_VA_simple_example.py
|
import sys
import json
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_import as h2i
#
# This is intended to be the simplest possible RF example.
# Look at sandbox/commands.log for REST API requests to H2O.
#
print "--------------------------------------------------------------------------------"
print "BUILDING CLOUD"
print "--------------------------------------------------------------------------------"
h2o.build_cloud(node_count=2, java_heap_GB=2)
# False == Use VA form of algorithms (when available) (e.g. RF1).
# True == Use FVec form of algorithm (e.g. DRF2).
h2o.beta_features = False
print "--------------------------------------------------------------------------------"
print "PARSING DATASET"
print "--------------------------------------------------------------------------------"
#
# What this really ends up doing is a REST API PostFile.json request.
#
csvPathname = 'iris/iris2.csv'
parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname, schema='put')
print "--------------------------------------------------------------------------------"
print "RUNNING RF"
print "--------------------------------------------------------------------------------"
#
# For valid kwargs, look at h2o.py random_forest() params_dict variable.
# beta_features==False means Value Array (e.g. RF1).
# beta_features==True means Fluid Vec (e.g. DRF2).
#
timeoutSecs = 20
if (h2o.beta_features):
kwargs = {"ntrees": 6}
else:
kwargs = {"ntree": 6}
rf_json_response = h2o_cmd.runRF(parseResult=parseResult, timeoutSecs=timeoutSecs, **kwargs)
print json.dumps(rf_json_response, indent=4)
print "--------------------------------------------------------------------------------"
print "SHUTTING DOWN"
print "--------------------------------------------------------------------------------"
h2o.check_sandbox_for_errors()
h2o.tear_down_cloud()
|
Add simple python RF example.
|
Add simple python RF example.
|
Python
|
apache-2.0
|
h2oai/h2o-2,calvingit21/h2o-2,h2oai/h2o-2,calvingit21/h2o-2,elkingtonmcb/h2o-2,eg-zhang/h2o-2,calvingit21/h2o-2,eg-zhang/h2o-2,vbelakov/h2o,100star/h2o,rowhit/h2o-2,100star/h2o,eg-zhang/h2o-2,111t8e/h2o-2,h2oai/h2o,elkingtonmcb/h2o-2,111t8e/h2o-2,rowhit/h2o-2,111t8e/h2o-2,100star/h2o,100star/h2o,elkingtonmcb/h2o-2,elkingtonmcb/h2o-2,h2oai/h2o,111t8e/h2o-2,rowhit/h2o-2,100star/h2o,h2oai/h2o,eg-zhang/h2o-2,eg-zhang/h2o-2,vbelakov/h2o,elkingtonmcb/h2o-2,rowhit/h2o-2,calvingit21/h2o-2,eg-zhang/h2o-2,h2oai/h2o,h2oai/h2o,h2oai/h2o-2,vbelakov/h2o,elkingtonmcb/h2o-2,eg-zhang/h2o-2,h2oai/h2o,calvingit21/h2o-2,rowhit/h2o-2,rowhit/h2o-2,vbelakov/h2o,h2oai/h2o,111t8e/h2o-2,100star/h2o,111t8e/h2o-2,vbelakov/h2o,rowhit/h2o-2,h2oai/h2o,h2oai/h2o,h2oai/h2o-2,100star/h2o,111t8e/h2o-2,h2oai/h2o-2,calvingit21/h2o-2,rowhit/h2o-2,calvingit21/h2o-2,h2oai/h2o-2,111t8e/h2o-2,vbelakov/h2o,elkingtonmcb/h2o-2,elkingtonmcb/h2o-2,elkingtonmcb/h2o-2,vbelakov/h2o,111t8e/h2o-2,rowhit/h2o-2,vbelakov/h2o,vbelakov/h2o,calvingit21/h2o-2,h2oai/h2o,eg-zhang/h2o-2,vbelakov/h2o,h2oai/h2o-2,elkingtonmcb/h2o-2,rowhit/h2o-2,h2oai/h2o-2,100star/h2o,eg-zhang/h2o-2,100star/h2o,111t8e/h2o-2,calvingit21/h2o-2,calvingit21/h2o-2,eg-zhang/h2o-2,h2oai/h2o-2,h2oai/h2o-2
|
Add simple python RF example.
|
import sys
import json
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_import as h2i
#
# This is intended to be the simplest possible RF example.
# Look at sandbox/commands.log for REST API requests to H2O.
#
print "--------------------------------------------------------------------------------"
print "BUILDING CLOUD"
print "--------------------------------------------------------------------------------"
h2o.build_cloud(node_count=2, java_heap_GB=2)
# False == Use VA form of algorithms (when available) (e.g. RF1).
# True == Use FVec form of algorithm (e.g. DRF2).
h2o.beta_features = False
print "--------------------------------------------------------------------------------"
print "PARSING DATASET"
print "--------------------------------------------------------------------------------"
#
# What this really ends up doing is a REST API PostFile.json request.
#
csvPathname = 'iris/iris2.csv'
parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname, schema='put')
print "--------------------------------------------------------------------------------"
print "RUNNING RF"
print "--------------------------------------------------------------------------------"
#
# For valid kwargs, look at h2o.py random_forest() params_dict variable.
# beta_features==False means Value Array (e.g. RF1).
# beta_features==True means Fluid Vec (e.g. DRF2).
#
timeoutSecs = 20
if (h2o.beta_features):
kwargs = {"ntrees": 6}
else:
kwargs = {"ntree": 6}
rf_json_response = h2o_cmd.runRF(parseResult=parseResult, timeoutSecs=timeoutSecs, **kwargs)
print json.dumps(rf_json_response, indent=4)
print "--------------------------------------------------------------------------------"
print "SHUTTING DOWN"
print "--------------------------------------------------------------------------------"
h2o.check_sandbox_for_errors()
h2o.tear_down_cloud()
|
<commit_before><commit_msg>Add simple python RF example.<commit_after>
|
import sys
import json
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_import as h2i
#
# This is intended to be the simplest possible RF example.
# Look at sandbox/commands.log for REST API requests to H2O.
#
print "--------------------------------------------------------------------------------"
print "BUILDING CLOUD"
print "--------------------------------------------------------------------------------"
h2o.build_cloud(node_count=2, java_heap_GB=2)
# False == Use VA form of algorithms (when available) (e.g. RF1).
# True == Use FVec form of algorithm (e.g. DRF2).
h2o.beta_features = False
print "--------------------------------------------------------------------------------"
print "PARSING DATASET"
print "--------------------------------------------------------------------------------"
#
# What this really ends up doing is a REST API PostFile.json request.
#
csvPathname = 'iris/iris2.csv'
parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname, schema='put')
print "--------------------------------------------------------------------------------"
print "RUNNING RF"
print "--------------------------------------------------------------------------------"
#
# For valid kwargs, look at h2o.py random_forest() params_dict variable.
# beta_features==False means Value Array (e.g. RF1).
# beta_features==True means Fluid Vec (e.g. DRF2).
#
timeoutSecs = 20
if (h2o.beta_features):
kwargs = {"ntrees": 6}
else:
kwargs = {"ntree": 6}
rf_json_response = h2o_cmd.runRF(parseResult=parseResult, timeoutSecs=timeoutSecs, **kwargs)
print json.dumps(rf_json_response, indent=4)
print "--------------------------------------------------------------------------------"
print "SHUTTING DOWN"
print "--------------------------------------------------------------------------------"
h2o.check_sandbox_for_errors()
h2o.tear_down_cloud()
|
Add simple python RF example.import sys
import json
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_import as h2i
#
# This is intended to be the simplest possible RF example.
# Look at sandbox/commands.log for REST API requests to H2O.
#
print "--------------------------------------------------------------------------------"
print "BUILDING CLOUD"
print "--------------------------------------------------------------------------------"
h2o.build_cloud(node_count=2, java_heap_GB=2)
# False == Use VA form of algorithms (when available) (e.g. RF1).
# True == Use FVec form of algorithm (e.g. DRF2).
h2o.beta_features = False
print "--------------------------------------------------------------------------------"
print "PARSING DATASET"
print "--------------------------------------------------------------------------------"
#
# What this really ends up doing is a REST API PostFile.json request.
#
csvPathname = 'iris/iris2.csv'
parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname, schema='put')
print "--------------------------------------------------------------------------------"
print "RUNNING RF"
print "--------------------------------------------------------------------------------"
#
# For valid kwargs, look at h2o.py random_forest() params_dict variable.
# beta_features==False means Value Array (e.g. RF1).
# beta_features==True means Fluid Vec (e.g. DRF2).
#
timeoutSecs = 20
if (h2o.beta_features):
kwargs = {"ntrees": 6}
else:
kwargs = {"ntree": 6}
rf_json_response = h2o_cmd.runRF(parseResult=parseResult, timeoutSecs=timeoutSecs, **kwargs)
print json.dumps(rf_json_response, indent=4)
print "--------------------------------------------------------------------------------"
print "SHUTTING DOWN"
print "--------------------------------------------------------------------------------"
h2o.check_sandbox_for_errors()
h2o.tear_down_cloud()
|
<commit_before><commit_msg>Add simple python RF example.<commit_after>import sys
import json
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_import as h2i
#
# This is intended to be the simplest possible RF example.
# Look at sandbox/commands.log for REST API requests to H2O.
#
print "--------------------------------------------------------------------------------"
print "BUILDING CLOUD"
print "--------------------------------------------------------------------------------"
h2o.build_cloud(node_count=2, java_heap_GB=2)
# False == Use VA form of algorithms (when available) (e.g. RF1).
# True == Use FVec form of algorithm (e.g. DRF2).
h2o.beta_features = False
print "--------------------------------------------------------------------------------"
print "PARSING DATASET"
print "--------------------------------------------------------------------------------"
#
# What this really ends up doing is a REST API PostFile.json request.
#
csvPathname = 'iris/iris2.csv'
parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname, schema='put')
print "--------------------------------------------------------------------------------"
print "RUNNING RF"
print "--------------------------------------------------------------------------------"
#
# For valid kwargs, look at h2o.py random_forest() params_dict variable.
# beta_features==False means Value Array (e.g. RF1).
# beta_features==True means Fluid Vec (e.g. DRF2).
#
timeoutSecs = 20
if (h2o.beta_features):
kwargs = {"ntrees": 6}
else:
kwargs = {"ntree": 6}
rf_json_response = h2o_cmd.runRF(parseResult=parseResult, timeoutSecs=timeoutSecs, **kwargs)
print json.dumps(rf_json_response, indent=4)
print "--------------------------------------------------------------------------------"
print "SHUTTING DOWN"
print "--------------------------------------------------------------------------------"
h2o.check_sandbox_for_errors()
h2o.tear_down_cloud()
|
|
a4369afae529297fd684eafbb75654f2bd27b780
|
python/check_arch_api.py
|
python/check_arch_api.py
|
""" Script to do Arch API sanity checking.
This Python script can be used to do some sanity checking of either wire to
wire connectivity or bel pin wire connectivity.
Wire to wire connectivity is tested by supplying a source and destination wire
and verifying that a pip exists that connects those wires.
Bel pin wire connectivity is tested by supplying a bel and pin name and the
connected wire.
Invoke in a working directory that contains a file named "test_data.yaml":
${NEXTPNR} --run ${NEXTPNR_SRC}/check_arch_api.py
"test_data.yaml" should contain the test vectors for the wire to wire or bel
pin connectivity tests. Example test_data.yaml:
pip_test:
- src_wire: CLBLM_R_X11Y93/CLBLM_L_D3
dst_wire: SLICE_X15Y93.SLICEL/D3
bel_pin_test:
- bel: SLICE_X15Y93.SLICEL/D6LUT
pin: A3
wire: SLICE_X15Y93.SLICEL/D3
"""
import yaml
def check_arch_api(ctx):
pips_tested = 0
bel_pins_tested = 0
with open('test_data.yaml', 'r') as f:
test_data = yaml.safe_load(f.read())
if 'pip_test' in test_data:
for pip_test in test_data['pip_test']:
pip = None
for pip_name in ctx.getPipsDownhill(pip_test['src_wire']):
if ctx.getPipDstWire(pip_name) == pip_test['dst_wire']:
pip = pip_name
src_wire = ctx.getPipSrcWire(pip_name)
assert src_wire == pip_test['src_wire'], (
src_wire, pip_test['src_wire'])
assert pip is not None
pips_tested += 1
if 'bel_pin_test' in test_data:
for bel_pin_test in test_data['bel_pin_test']:
wire_name = ctx.getBelPinWire(bel_pin_test['bel'], bel_pin_test['pin'])
assert bel_pin_test['wire'] == wire_name, (bel_pin_test['wire'], wire_name)
bel_pins_tested += 1
print('Tested {} pips and {} bel pins'.format(pips_tested, bel_pins_tested))
check_arch_api(ctx)
|
Add simple python file for doing Arch API sanity checks.
|
Add simple python file for doing Arch API sanity checks.
Signed-off-by: Keith Rothman <1bc19627a439baf17510dc2d0b2d250c96d445a5@users.noreply.github.com>
|
Python
|
isc
|
YosysHQ/nextpnr,SymbiFlow/nextpnr,YosysHQ/nextpnr,YosysHQ/nextpnr,SymbiFlow/nextpnr,YosysHQ/nextpnr,SymbiFlow/nextpnr,SymbiFlow/nextpnr
|
Add simple python file for doing Arch API sanity checks.
Signed-off-by: Keith Rothman <1bc19627a439baf17510dc2d0b2d250c96d445a5@users.noreply.github.com>
|
""" Script to do Arch API sanity checking.
This Python script can be used to do some sanity checking of either wire to
wire connectivity or bel pin wire connectivity.
Wire to wire connectivity is tested by supplying a source and destination wire
and verifying that a pip exists that connects those wires.
Bel pin wire connectivity is tested by supplying a bel and pin name and the
connected wire.
Invoke in a working directory that contains a file named "test_data.yaml":
${NEXTPNR} --run ${NEXTPNR_SRC}/check_arch_api.py
"test_data.yaml" should contain the test vectors for the wire to wire or bel
pin connectivity tests. Example test_data.yaml:
pip_test:
- src_wire: CLBLM_R_X11Y93/CLBLM_L_D3
dst_wire: SLICE_X15Y93.SLICEL/D3
bel_pin_test:
- bel: SLICE_X15Y93.SLICEL/D6LUT
pin: A3
wire: SLICE_X15Y93.SLICEL/D3
"""
import yaml
def check_arch_api(ctx):
pips_tested = 0
bel_pins_tested = 0
with open('test_data.yaml', 'r') as f:
test_data = yaml.safe_load(f.read())
if 'pip_test' in test_data:
for pip_test in test_data['pip_test']:
pip = None
for pip_name in ctx.getPipsDownhill(pip_test['src_wire']):
if ctx.getPipDstWire(pip_name) == pip_test['dst_wire']:
pip = pip_name
src_wire = ctx.getPipSrcWire(pip_name)
assert src_wire == pip_test['src_wire'], (
src_wire, pip_test['src_wire'])
assert pip is not None
pips_tested += 1
if 'bel_pin_test' in test_data:
for bel_pin_test in test_data['bel_pin_test']:
wire_name = ctx.getBelPinWire(bel_pin_test['bel'], bel_pin_test['pin'])
assert bel_pin_test['wire'] == wire_name, (bel_pin_test['wire'], wire_name)
bel_pins_tested += 1
print('Tested {} pips and {} bel pins'.format(pips_tested, bel_pins_tested))
check_arch_api(ctx)
|
<commit_before><commit_msg>Add simple python file for doing Arch API sanity checks.
Signed-off-by: Keith Rothman <1bc19627a439baf17510dc2d0b2d250c96d445a5@users.noreply.github.com><commit_after>
|
""" Script to do Arch API sanity checking.
This Python script can be used to do some sanity checking of either wire to
wire connectivity or bel pin wire connectivity.
Wire to wire connectivity is tested by supplying a source and destination wire
and verifying that a pip exists that connects those wires.
Bel pin wire connectivity is tested by supplying a bel and pin name and the
connected wire.
Invoke in a working directory that contains a file named "test_data.yaml":
${NEXTPNR} --run ${NEXTPNR_SRC}/check_arch_api.py
"test_data.yaml" should contain the test vectors for the wire to wire or bel
pin connectivity tests. Example test_data.yaml:
pip_test:
- src_wire: CLBLM_R_X11Y93/CLBLM_L_D3
dst_wire: SLICE_X15Y93.SLICEL/D3
bel_pin_test:
- bel: SLICE_X15Y93.SLICEL/D6LUT
pin: A3
wire: SLICE_X15Y93.SLICEL/D3
"""
import yaml
def check_arch_api(ctx):
pips_tested = 0
bel_pins_tested = 0
with open('test_data.yaml', 'r') as f:
test_data = yaml.safe_load(f.read())
if 'pip_test' in test_data:
for pip_test in test_data['pip_test']:
pip = None
for pip_name in ctx.getPipsDownhill(pip_test['src_wire']):
if ctx.getPipDstWire(pip_name) == pip_test['dst_wire']:
pip = pip_name
src_wire = ctx.getPipSrcWire(pip_name)
assert src_wire == pip_test['src_wire'], (
src_wire, pip_test['src_wire'])
assert pip is not None
pips_tested += 1
if 'bel_pin_test' in test_data:
for bel_pin_test in test_data['bel_pin_test']:
wire_name = ctx.getBelPinWire(bel_pin_test['bel'], bel_pin_test['pin'])
assert bel_pin_test['wire'] == wire_name, (bel_pin_test['wire'], wire_name)
bel_pins_tested += 1
print('Tested {} pips and {} bel pins'.format(pips_tested, bel_pins_tested))
check_arch_api(ctx)
|
Add simple python file for doing Arch API sanity checks.
Signed-off-by: Keith Rothman <1bc19627a439baf17510dc2d0b2d250c96d445a5@users.noreply.github.com>""" Script to do Arch API sanity checking.
This Python script can be used to do some sanity checking of either wire to
wire connectivity or bel pin wire connectivity.
Wire to wire connectivity is tested by supplying a source and destination wire
and verifying that a pip exists that connects those wires.
Bel pin wire connectivity is tested by supplying a bel and pin name and the
connected wire.
Invoke in a working directory that contains a file named "test_data.yaml":
${NEXTPNR} --run ${NEXTPNR_SRC}/check_arch_api.py
"test_data.yaml" should contain the test vectors for the wire to wire or bel
pin connectivity tests. Example test_data.yaml:
pip_test:
- src_wire: CLBLM_R_X11Y93/CLBLM_L_D3
dst_wire: SLICE_X15Y93.SLICEL/D3
bel_pin_test:
- bel: SLICE_X15Y93.SLICEL/D6LUT
pin: A3
wire: SLICE_X15Y93.SLICEL/D3
"""
import yaml
def check_arch_api(ctx):
pips_tested = 0
bel_pins_tested = 0
with open('test_data.yaml', 'r') as f:
test_data = yaml.safe_load(f.read())
if 'pip_test' in test_data:
for pip_test in test_data['pip_test']:
pip = None
for pip_name in ctx.getPipsDownhill(pip_test['src_wire']):
if ctx.getPipDstWire(pip_name) == pip_test['dst_wire']:
pip = pip_name
src_wire = ctx.getPipSrcWire(pip_name)
assert src_wire == pip_test['src_wire'], (
src_wire, pip_test['src_wire'])
assert pip is not None
pips_tested += 1
if 'bel_pin_test' in test_data:
for bel_pin_test in test_data['bel_pin_test']:
wire_name = ctx.getBelPinWire(bel_pin_test['bel'], bel_pin_test['pin'])
assert bel_pin_test['wire'] == wire_name, (bel_pin_test['wire'], wire_name)
bel_pins_tested += 1
print('Tested {} pips and {} bel pins'.format(pips_tested, bel_pins_tested))
check_arch_api(ctx)
|
<commit_before><commit_msg>Add simple python file for doing Arch API sanity checks.
Signed-off-by: Keith Rothman <1bc19627a439baf17510dc2d0b2d250c96d445a5@users.noreply.github.com><commit_after>""" Script to do Arch API sanity checking.
This Python script can be used to do some sanity checking of either wire to
wire connectivity or bel pin wire connectivity.
Wire to wire connectivity is tested by supplying a source and destination wire
and verifying that a pip exists that connects those wires.
Bel pin wire connectivity is tested by supplying a bel and pin name and the
connected wire.
Invoke in a working directory that contains a file named "test_data.yaml":
${NEXTPNR} --run ${NEXTPNR_SRC}/check_arch_api.py
"test_data.yaml" should contain the test vectors for the wire to wire or bel
pin connectivity tests. Example test_data.yaml:
pip_test:
- src_wire: CLBLM_R_X11Y93/CLBLM_L_D3
dst_wire: SLICE_X15Y93.SLICEL/D3
bel_pin_test:
- bel: SLICE_X15Y93.SLICEL/D6LUT
pin: A3
wire: SLICE_X15Y93.SLICEL/D3
"""
import yaml
def check_arch_api(ctx):
pips_tested = 0
bel_pins_tested = 0
with open('test_data.yaml', 'r') as f:
test_data = yaml.safe_load(f.read())
if 'pip_test' in test_data:
for pip_test in test_data['pip_test']:
pip = None
for pip_name in ctx.getPipsDownhill(pip_test['src_wire']):
if ctx.getPipDstWire(pip_name) == pip_test['dst_wire']:
pip = pip_name
src_wire = ctx.getPipSrcWire(pip_name)
assert src_wire == pip_test['src_wire'], (
src_wire, pip_test['src_wire'])
assert pip is not None
pips_tested += 1
if 'bel_pin_test' in test_data:
for bel_pin_test in test_data['bel_pin_test']:
wire_name = ctx.getBelPinWire(bel_pin_test['bel'], bel_pin_test['pin'])
assert bel_pin_test['wire'] == wire_name, (bel_pin_test['wire'], wire_name)
bel_pins_tested += 1
print('Tested {} pips and {} bel pins'.format(pips_tested, bel_pins_tested))
check_arch_api(ctx)
|
|
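If it helps to see the on-disk shape `check_arch_api.py` expects, the docstring's example can be produced with a few lines of PyYAML; the wire and bel names below are simply the ones from the docstring and have no meaning outside that example device:

import yaml

test_data = {
    'pip_test': [
        {'src_wire': 'CLBLM_R_X11Y93/CLBLM_L_D3',
         'dst_wire': 'SLICE_X15Y93.SLICEL/D3'},
    ],
    'bel_pin_test': [
        {'bel': 'SLICE_X15Y93.SLICEL/D6LUT',
         'pin': 'A3',
         'wire': 'SLICE_X15Y93.SLICEL/D3'},
    ],
}

# Writes a test_data.yaml equivalent to the example in the script's docstring.
with open('test_data.yaml', 'w') as f:
    yaml.safe_dump(test_data, f, default_flow_style=False)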
49da24fcd637735311810ffc531da757bb3f8666
|
scripts/monitoring/cron-send-node-taints-status.py
|
scripts/monitoring/cron-send-node-taints-status.py
|
#!/usr/bin/env python
""" Node taints check for OpenShift V3 """
# Adding the ignore because it does not like the naming of the script
# to be different than the class name
# pylint: disable=invalid-name
# pylint: disable=wrong-import-position
# pylint: disable=broad-except
# pylint: disable=line-too-long
import argparse
import time
import logging
logging.basicConfig(
format='%(asctime)s - %(relativeCreated)6d - %(levelname)-8s - %(message)s',
)
logger = logging.getLogger()
logger.setLevel(logging.WARN)
# Our jenkins server does not include these rpms.
# In the future we might move this to a container where these
# libs might exist
#pylint: disable=import-error
from openshift_tools.monitoring.ocutil import OCUtil
from openshift_tools.monitoring.metric_sender import MetricSender
ocutil = OCUtil()
def runOCcmd_yaml(cmd, base_cmd='oc'):
""" log commands through ocutil """
logger.info(base_cmd + " " + cmd)
ocy_time = time.time()
ocy_result = ocutil.run_user_cmd_yaml(cmd, base_cmd=base_cmd, )
logger.info("oc command took %s seconds", str(time.time() - ocy_time))
return ocy_result
def parse_args():
""" parse the args from the cli """
parser = argparse.ArgumentParser(description='Check the taint status of all nodes')
parser.add_argument('-v', '--verbose', action='count', default=0,
help='verbosity level, specify multiple')
args = parser.parse_args()
if args.verbose > 0:
logger.setLevel(logging.INFO)
if args.verbose > 1:
logger.setLevel(logging.DEBUG)
return args
def check_taint_status():
"""Get the taint info of all nodes."""
result_status = 0
node_info = runOCcmd_yaml("get node ")
for item in node_info['items']:
logger.info("Checking node: %s", item['metadata']['name'])
if "taints" in item['spec']:
taints = item['spec']['taints']
for taint in taints:
result_status = result_status + 1
logger.warn("Node %s has unexpected taint: %s=%s:%s", item['metadata']['name'], taint['key'], taint['value'], taint['effect'])
return result_status
def main():
""" Check all node taints and see if any node has a problem """
args = parse_args()
logger.debug("args: ")
logger.debug(args)
taint_status = check_taint_status()
#send the value to zabbix
mts = MetricSender(verbose=args.verbose)
mts.add_metric({'openshift.nodes.taint.status': taint_status})
mts.send_metrics()
if __name__ == "__main__":
main()
|
Add monitor script for taints
|
Add monitor script for taints
|
Python
|
apache-2.0
|
drewandersonnz/openshift-tools,openshift/openshift-tools,blrm/openshift-tools,drewandersonnz/openshift-tools,blrm/openshift-tools,blrm/openshift-tools,drewandersonnz/openshift-tools,blrm/openshift-tools,openshift/openshift-tools,drewandersonnz/openshift-tools,drewandersonnz/openshift-tools,blrm/openshift-tools,openshift/openshift-tools,openshift/openshift-tools,blrm/openshift-tools,openshift/openshift-tools,drewandersonnz/openshift-tools,openshift/openshift-tools
|
Add monitor script for taints
|
#!/usr/bin/env python
""" Node taints check for OpenShift V3 """
# Adding the ignore because it does not like the naming of the script
# to be different than the class name
# pylint: disable=invalid-name
# pylint: disable=wrong-import-position
# pylint: disable=broad-except
# pylint: disable=line-too-long
import argparse
import time
import logging
logging.basicConfig(
format='%(asctime)s - %(relativeCreated)6d - %(levelname)-8s - %(message)s',
)
logger = logging.getLogger()
logger.setLevel(logging.WARN)
# Our jenkins server does not include these rpms.
# In the future we might move this to a container where these
# libs might exist
#pylint: disable=import-error
from openshift_tools.monitoring.ocutil import OCUtil
from openshift_tools.monitoring.metric_sender import MetricSender
ocutil = OCUtil()
def runOCcmd_yaml(cmd, base_cmd='oc'):
""" log commands through ocutil """
logger.info(base_cmd + " " + cmd)
ocy_time = time.time()
ocy_result = ocutil.run_user_cmd_yaml(cmd, base_cmd=base_cmd, )
logger.info("oc command took %s seconds", str(time.time() - ocy_time))
return ocy_result
def parse_args():
""" parse the args from the cli """
parser = argparse.ArgumentParser(description='Check the taint status of all nodes')
parser.add_argument('-v', '--verbose', action='count', default=0,
help='verbosity level, specify multiple')
args = parser.parse_args()
if args.verbose > 0:
logger.setLevel(logging.INFO)
if args.verbose > 1:
logger.setLevel(logging.DEBUG)
return args
def check_taint_status():
"""Get the taint info of all nodes."""
result_status = 0
node_info = runOCcmd_yaml("get node ")
for item in node_info['items']:
logger.info("Checking node: %s", item['metadata']['name'])
if "taints" in item['spec']:
taints = item['spec']['taints']
for taint in taints:
result_status = result_status + 1
logger.warn("Node %s has unexpected taint: %s=%s:%s", item['metadata']['name'], taint['key'], taint['value'], taint['effect'])
return result_status
def main():
""" Check all node taints and see if any node has a problem """
args = parse_args()
logger.debug("args: ")
logger.debug(args)
taint_status = check_taint_status()
#send the value to zabbix
mts = MetricSender(verbose=args.verbose)
mts.add_metric({'openshift.nodes.taint.status': taint_status})
mts.send_metrics()
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add monitor script for taints<commit_after>
|
#!/usr/bin/env python
""" Node taints check for OpenShift V3 """
# Adding the ignore because it does not like the naming of the script
# to be different than the class name
# pylint: disable=invalid-name
# pylint: disable=wrong-import-position
# pylint: disable=broad-except
# pylint: disable=line-too-long
import argparse
import time
import logging
logging.basicConfig(
format='%(asctime)s - %(relativeCreated)6d - %(levelname)-8s - %(message)s',
)
logger = logging.getLogger()
logger.setLevel(logging.WARN)
# Our jenkins server does not include these rpms.
# In the future we might move this to a container where these
# libs might exist
#pylint: disable=import-error
from openshift_tools.monitoring.ocutil import OCUtil
from openshift_tools.monitoring.metric_sender import MetricSender
ocutil = OCUtil()
def runOCcmd_yaml(cmd, base_cmd='oc'):
""" log commands through ocutil """
logger.info(base_cmd + " " + cmd)
ocy_time = time.time()
ocy_result = ocutil.run_user_cmd_yaml(cmd, base_cmd=base_cmd, )
logger.info("oc command took %s seconds", str(time.time() - ocy_time))
return ocy_result
def parse_args():
""" parse the args from the cli """
parser = argparse.ArgumentParser(description='Check the taint status of all nodes')
parser.add_argument('-v', '--verbose', action='count', default=0,
help='verbosity level, specify multiple')
args = parser.parse_args()
if args.verbose > 0:
logger.setLevel(logging.INFO)
if args.verbose > 1:
logger.setLevel(logging.DEBUG)
return args
def check_taint_status():
"""Get the taint info of all nodes."""
result_status = 0
node_info = runOCcmd_yaml("get node ")
for item in node_info['items']:
logger.info("Checking node: %s", item['metadata']['name'])
if "taints" in item['spec']:
taints = item['spec']['taints']
for taint in taints:
result_status = result_status + 1
logger.warn("Node %s has unexpected taint: %s=%s:%s", item['metadata']['name'], taint['key'], taint['value'], taint['effect'])
return result_status
def main():
""" Check all node taints and see if any node has a problem """
args = parse_args()
logger.debug("args: ")
logger.debug(args)
taint_status = check_taint_status()
#send the value to zabbix
mts = MetricSender(verbose=args.verbose)
mts.add_metric({'openshift.nodes.taint.status': taint_status})
mts.send_metrics()
if __name__ == "__main__":
main()
|
Add monitor script for taints#!/usr/bin/env python
""" Node taints check for OpenShift V3 """
# Adding the ignore because it does not like the naming of the script
# to be different than the class name
# pylint: disable=invalid-name
# pylint: disable=wrong-import-position
# pylint: disable=broad-except
# pylint: disable=line-too-long
import argparse
import time
import logging
logging.basicConfig(
format='%(asctime)s - %(relativeCreated)6d - %(levelname)-8s - %(message)s',
)
logger = logging.getLogger()
logger.setLevel(logging.WARN)
# Our jenkins server does not include these rpms.
# In the future we might move this to a container where these
# libs might exist
#pylint: disable=import-error
from openshift_tools.monitoring.ocutil import OCUtil
from openshift_tools.monitoring.metric_sender import MetricSender
ocutil = OCUtil()
def runOCcmd_yaml(cmd, base_cmd='oc'):
""" log commands through ocutil """
logger.info(base_cmd + " " + cmd)
ocy_time = time.time()
ocy_result = ocutil.run_user_cmd_yaml(cmd, base_cmd=base_cmd, )
logger.info("oc command took %s seconds", str(time.time() - ocy_time))
return ocy_result
def parse_args():
""" parse the args from the cli """
parser = argparse.ArgumentParser(description='Check the taint status of all nodes')
parser.add_argument('-v', '--verbose', action='count', default=0,
help='verbosity level, specify multiple')
args = parser.parse_args()
if args.verbose > 0:
logger.setLevel(logging.INFO)
if args.verbose > 1:
logger.setLevel(logging.DEBUG)
return args
def check_taint_status():
"""Get the taint info of all nodes."""
result_status = 0
node_info = runOCcmd_yaml("get node ")
for item in node_info['items']:
logger.info("Checking node: %s", item['metadata']['name'])
if "taints" in item['spec']:
taints = item['spec']['taints']
for taint in taints:
result_status = result_status + 1
logger.warn("Node %s has unexpected taint: %s=%s:%s", item['metadata']['name'], taint['key'], taint['value'], taint['effect'])
return result_status
def main():
""" Check all node taints and see if any node has a problem """
args = parse_args()
logger.debug("args: ")
logger.debug(args)
taint_status = check_taint_status()
#send the value to zabbix
mts = MetricSender(verbose=args.verbose)
mts.add_metric({'openshift.nodes.taint.status': taint_status})
mts.send_metrics()
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add monitor script for taints<commit_after>#!/usr/bin/env python
""" Node taints check for OpenShift V3 """
# Adding the ignore because it does not like the naming of the script
# to be different than the class name
# pylint: disable=invalid-name
# pylint: disable=wrong-import-position
# pylint: disable=broad-except
# pylint: disable=line-too-long
import argparse
import time
import logging
logging.basicConfig(
format='%(asctime)s - %(relativeCreated)6d - %(levelname)-8s - %(message)s',
)
logger = logging.getLogger()
logger.setLevel(logging.WARN)
# Our jenkins server does not include these rpms.
# In the future we might move this to a container where these
# libs might exist
#pylint: disable=import-error
from openshift_tools.monitoring.ocutil import OCUtil
from openshift_tools.monitoring.metric_sender import MetricSender
ocutil = OCUtil()
def runOCcmd_yaml(cmd, base_cmd='oc'):
""" log commands through ocutil """
logger.info(base_cmd + " " + cmd)
ocy_time = time.time()
ocy_result = ocutil.run_user_cmd_yaml(cmd, base_cmd=base_cmd, )
logger.info("oc command took %s seconds", str(time.time() - ocy_time))
return ocy_result
def parse_args():
""" parse the args from the cli """
parser = argparse.ArgumentParser(description='Check the taint status of all nodes')
parser.add_argument('-v', '--verbose', action='count', default=0,
help='verbosity level, specify multiple')
args = parser.parse_args()
if args.verbose > 0:
logger.setLevel(logging.INFO)
if args.verbose > 1:
logger.setLevel(logging.DEBUG)
return args
def check_taint_status():
"""Get the taint info of all nodes."""
result_status = 0
node_info = runOCcmd_yaml("get node ")
for item in node_info['items']:
logger.info("Checking node: %s", item['metadata']['name'])
if "taints" in item['spec']:
taints = item['spec']['taints']
for taint in taints:
result_status = result_status + 1
logger.warn("Node %s has unexpected taint: %s=%s:%s", item['metadata']['name'], taint['key'], taint['value'], taint['effect'])
return result_status
def main():
""" Check all node taints and see if any node has a problem """
args = parse_args()
logger.debug("args: ")
logger.debug(args)
taint_status = check_taint_status()
#send the value to zabbix
mts = MetricSender(verbose=args.verbose)
mts.add_metric({'openshift.nodes.taint.status': taint_status})
mts.send_metrics()
if __name__ == "__main__":
main()
|
|
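For readers unfamiliar with the structure the taint check walks: each item returned by `oc get node` carries an optional `spec.taints` list of key/value/effect mappings (the standard Kubernetes node spec). A self-contained sketch of the same counting logic against hard-coded nodes, with made-up names and taint values:

node_info = {
    'items': [
        {'metadata': {'name': 'example-node-1'},
         'spec': {'taints': [
             {'key': 'dedicated', 'value': 'infra', 'effect': 'NoSchedule'},
         ]}},
        {'metadata': {'name': 'example-node-2'},
         'spec': {}},
    ],
}

result_status = 0
for item in node_info['items']:
    if 'taints' in item['spec']:
        for taint in item['spec']['taints']:
            result_status += 1

print(result_status)  # -> 1; only example-node-1 is tainted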
f263fc11503fd774eff63d8ebd716fe3c2b61218
|
iatidataquality/users.py
|
iatidataquality/users.py
|
# IATI Data Quality, tools for Data QA on IATI-formatted publications
# by Mark Brough, Martin Keegan, Ben Webb and Jennifer Smith
#
# Copyright (C) 2013 Publish What You Fund
#
# This programme is free software; you may redistribute and/or modify
# it under the terms of the GNU Affero General Public License v3.0
from flask import Flask, render_template, flash, request, Markup, \
session, redirect, url_for, escape, Response, abort, send_file
from flask.ext.sqlalchemy import SQLAlchemy
from iatidataquality import app
from iatidataquality import db
import usermanagement
from iatidq import dqusers
import unicodecsv
@app.route("/users/")
@app.route("/user/<username>/")
@usermanagement.perms_required()
def users(username=None):
if username:
user=dqusers.user_by_username(username)
return render_template("user.html", user=user)
else:
users=dqusers.user()
return render_template("users.html", users=users)
@app.route("/users/new/", methods=['POST', 'GET'])
@app.route("/users/<username>/edit/", methods=['POST', 'GET'])
@usermanagement.perms_required()
def users_edit(username=None):
if username:
user = dqusers.user_by_username(username)
if request.method == 'POST':
return "handling edit"
if aggregationtype:
flash('Successfully updated user.', 'success')
else:
aggregationtype = {}
flash('Could not update user.', 'error')
else:
if request.method == 'POST':
user = dqusers.addUser({
'username': request.form['username'],
'password': request.form['password'],
'name': request.form['name'],
'email_address': request.form['email_address']
})
if user:
flash('Successfully added new user', 'success')
else:
flash('Could not add user.', 'error')
else:
user = {}
return render_template("users_edit.html",
user=user)
|
Add basic user management controller
|
Add basic user management controller
|
Python
|
agpl-3.0
|
pwyf/IATI-Data-Quality,pwyf/IATI-Data-Quality,pwyf/IATI-Data-Quality,pwyf/IATI-Data-Quality
|
Add basic user management controller
|
# IATI Data Quality, tools for Data QA on IATI-formatted publications
# by Mark Brough, Martin Keegan, Ben Webb and Jennifer Smith
#
# Copyright (C) 2013 Publish What You Fund
#
# This programme is free software; you may redistribute and/or modify
# it under the terms of the GNU Affero General Public License v3.0
from flask import Flask, render_template, flash, request, Markup, \
session, redirect, url_for, escape, Response, abort, send_file
from flask.ext.sqlalchemy import SQLAlchemy
from iatidataquality import app
from iatidataquality import db
import usermanagement
from iatidq import dqusers
import unicodecsv
@app.route("/users/")
@app.route("/user/<username>/")
@usermanagement.perms_required()
def users(username=None):
if username:
user=dqusers.user_by_username(username)
return render_template("user.html", user=user)
else:
users=dqusers.user()
return render_template("users.html", users=users)
@app.route("/users/new/", methods=['POST', 'GET'])
@app.route("/users/<username>/edit/", methods=['POST', 'GET'])
@usermanagement.perms_required()
def users_edit(username=None):
if username:
user = dqusers.user_by_username(username)
if request.method == 'POST':
return "handling edit"
if aggregationtype:
flash('Successfully updated user.', 'success')
else:
aggregationtype = {}
flash('Could not update user.', 'error')
else:
if request.method == 'POST':
user = dqusers.addUser({
'username': request.form['username'],
'password': request.form['password'],
'name': request.form['name'],
'email_address': request.form['email_address']
})
if user:
flash('Successfully added new user', 'success')
else:
flash('Could not add user.', 'error')
else:
user = {}
return render_template("users_edit.html",
user=user)
|
<commit_before><commit_msg>Add basic user management controller<commit_after>
|
# IATI Data Quality, tools for Data QA on IATI-formatted publications
# by Mark Brough, Martin Keegan, Ben Webb and Jennifer Smith
#
# Copyright (C) 2013 Publish What You Fund
#
# This programme is free software; you may redistribute and/or modify
# it under the terms of the GNU Affero General Public License v3.0
from flask import Flask, render_template, flash, request, Markup, \
session, redirect, url_for, escape, Response, abort, send_file
from flask.ext.sqlalchemy import SQLAlchemy
from iatidataquality import app
from iatidataquality import db
import usermanagement
from iatidq import dqusers
import unicodecsv
@app.route("/users/")
@app.route("/user/<username>/")
@usermanagement.perms_required()
def users(username=None):
if username:
user=dqusers.user_by_username(username)
return render_template("user.html", user=user)
else:
users=dqusers.user()
return render_template("users.html", users=users)
@app.route("/users/new/", methods=['POST', 'GET'])
@app.route("/users/<username>/edit/", methods=['POST', 'GET'])
@usermanagement.perms_required()
def users_edit(username=None):
if username:
user = dqusers.user_by_username(username)
if request.method == 'POST':
return "handling edit"
if aggregationtype:
flash('Successfully updated user.', 'success')
else:
aggregationtype = {}
flash('Could not update user.', 'error')
else:
if request.method == 'POST':
user = dqusers.addUser({
'username': request.form['username'],
'password': request.form['password'],
'name': request.form['name'],
'email_address': request.form['email_address']
})
if user:
flash('Successfully added new user', 'success')
else:
flash('Could not add user.', 'error')
else:
user = {}
return render_template("users_edit.html",
user=user)
|
Add basic user management controller
# IATI Data Quality, tools for Data QA on IATI-formatted publications
# by Mark Brough, Martin Keegan, Ben Webb and Jennifer Smith
#
# Copyright (C) 2013 Publish What You Fund
#
# This programme is free software; you may redistribute and/or modify
# it under the terms of the GNU Affero General Public License v3.0
from flask import Flask, render_template, flash, request, Markup, \
session, redirect, url_for, escape, Response, abort, send_file
from flask.ext.sqlalchemy import SQLAlchemy
from iatidataquality import app
from iatidataquality import db
import usermanagement
from iatidq import dqusers
import unicodecsv
@app.route("/users/")
@app.route("/user/<username>/")
@usermanagement.perms_required()
def users(username=None):
if username:
user=dqusers.user_by_username(username)
return render_template("user.html", user=user)
else:
users=dqusers.user()
return render_template("users.html", users=users)
@app.route("/users/new/", methods=['POST', 'GET'])
@app.route("/users/<username>/edit/", methods=['POST', 'GET'])
@usermanagement.perms_required()
def users_edit(username=None):
if username:
user = dqusers.user_by_username(username)
if request.method == 'POST':
return "handling edit"
# TODO: update the user from the submitted form and flash success or failure
else:
if request.method == 'POST':
user = dqusers.addUser({
'username': request.form['username'],
'password': request.form['password'],
'name': request.form['name'],
'email_address': request.form['email_address']
})
if user:
flash('Successfully added new user', 'success')
else:
flash('Could not add user', 'error')
else:
user = {}
return render_template("users_edit.html",
user=user)
|
<commit_before><commit_msg>Add basic user management controller<commit_after>
# IATI Data Quality, tools for Data QA on IATI-formatted publications
# by Mark Brough, Martin Keegan, Ben Webb and Jennifer Smith
#
# Copyright (C) 2013 Publish What You Fund
#
# This programme is free software; you may redistribute and/or modify
# it under the terms of the GNU Affero General Public License v3.0
from flask import Flask, render_template, flash, request, Markup, \
session, redirect, url_for, escape, Response, abort, send_file
from flask.ext.sqlalchemy import SQLAlchemy
from iatidataquality import app
from iatidataquality import db
import usermanagement
from iatidq import dqusers
import unicodecsv
@app.route("/users/")
@app.route("/user/<username>/")
@usermanagement.perms_required()
def users(username=None):
if username:
user=dqusers.user_by_username(username)
return render_template("user.html", user=user)
else:
users=dqusers.user()
return render_template("users.html", users=users)
@app.route("/users/new/", methods=['POST', 'GET'])
@app.route("/users/<username>/edit/", methods=['POST', 'GET'])
@usermanagement.perms_required()
def users_edit(username=None):
if username:
user = dqusers.user_by_username(username)
if request.method == 'POST':
return "handling edit"
# TODO: update the user from the submitted form and flash success or failure
else:
if request.method == 'POST':
user = dqusers.addUser({
'username': request.form['username'],
'password': request.form['password'],
'name': request.form['name'],
'email_address': request.form['email_address']
})
if user:
flash('Successfully added new user', 'success')
else:
flash('Could not add user', 'error')
else:
user = {}
return render_template("users_edit.html",
user=user)
|
|
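The edit branch in users_edit above is still a stub (it returns "handling edit"). A minimal sketch of how it could flash success or failure, reusing the imports from the listing above; dqusers.updateUser is a hypothetical helper (not part of the original module) assumed to return the updated user or None:

def handle_user_edit(username):
    # Hypothetical helper: dqusers.updateUser(username, fields) -> user or None.
    updated = dqusers.updateUser(username, {
        'name': request.form['name'],
        'email_address': request.form['email_address'],
    })
    if updated:
        flash('Successfully updated user.', 'success')
    else:
        flash('Could not update user.', 'error')
    return render_template("users_edit.html",
                           user=updated or dqusers.user_by_username(username))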
1dab0e7ae96dcf14ee5edfcb88d1af08d327345e
|
jacquard/experiments/tests/test_specialise_constraints.py
|
jacquard/experiments/tests/test_specialise_constraints.py
|
from unittest.mock import Mock
from jacquard.cli import main
from jacquard.storage import DummyStore
DUMMY_DATA_PRE_LAUNCH = {
'experiments/foo': {
'branches': [
{'id': 'bar', 'settings': {'key': 'value'}},
],
'constraints': {
'era': 'new',
},
},
}
def test_constraints_are_specialised_on_launch():
config = Mock()
config.storage = DummyStore('', data=DUMMY_DATA_PRE_LAUNCH)
main(('launch', 'foo'), config=config)
bucket_zero = config.storage['buckets/0']
(name, settings, constraints) = bucket_zero['entries'][0]
assert 'era' not in constraints
|
Add a test that proves that constraints are not specialised
|
Add a test that proves that constraints are not specialised
|
Python
|
mit
|
prophile/jacquard,prophile/jacquard
|
Add a test that proves that constraints are not specialised
|
from unittest.mock import Mock
from jacquard.cli import main
from jacquard.storage import DummyStore
DUMMY_DATA_PRE_LAUNCH = {
'experiments/foo': {
'branches': [
{'id': 'bar', 'settings': {'key': 'value'}},
],
'constraints': {
'era': 'new',
},
},
}
def test_constraints_are_specialised_on_launch():
config = Mock()
config.storage = DummyStore('', data=DUMMY_DATA_PRE_LAUNCH)
main(('launch', 'foo'), config=config)
bucket_zero = config.storage['buckets/0']
(name, settings, constraints) = bucket_zero['entries'][0]
assert 'era' not in constraints
|
<commit_before><commit_msg>Add a test that proves that constraints are not specialised<commit_after>
|
from unittest.mock import Mock
from jacquard.cli import main
from jacquard.storage import DummyStore
DUMMY_DATA_PRE_LAUNCH = {
'experiments/foo': {
'branches': [
{'id': 'bar', 'settings': {'key': 'value'}},
],
'constraints': {
'era': 'new',
},
},
}
def test_constraints_are_specialised_on_launch():
config = Mock()
config.storage = DummyStore('', data=DUMMY_DATA_PRE_LAUNCH)
main(('launch', 'foo'), config=config)
bucket_zero = config.storage['buckets/0']
(name, settings, constraints) = bucket_zero['entries'][0]
assert 'era' not in constraints
|
Add a test that proves that constraints are not specialisedfrom unittest.mock import Mock
from jacquard.cli import main
from jacquard.storage import DummyStore
DUMMY_DATA_PRE_LAUNCH = {
'experiments/foo': {
'branches': [
{'id': 'bar', 'settings': {'key': 'value'}},
],
'constraints': {
'era': 'new',
},
},
}
def test_constraints_are_specialised_on_launch():
config = Mock()
config.storage = DummyStore('', data=DUMMY_DATA_PRE_LAUNCH)
main(('launch', 'foo'), config=config)
bucket_zero = config.storage['buckets/0']
(name, settings, constraints) = bucket_zero['entries'][0]
assert 'era' not in constraints
|
<commit_before><commit_msg>Add a test that proves that constraints are not specialised<commit_after>from unittest.mock import Mock
from jacquard.cli import main
from jacquard.storage import DummyStore
DUMMY_DATA_PRE_LAUNCH = {
'experiments/foo': {
'branches': [
{'id': 'bar', 'settings': {'key': 'value'}},
],
'constraints': {
'era': 'new',
},
},
}
def test_constraints_are_specialised_on_launch():
config = Mock()
config.storage = DummyStore('', data=DUMMY_DATA_PRE_LAUNCH)
main(('launch', 'foo'), config=config)
bucket_zero = config.storage['buckets/0']
(name, settings, constraints) = bucket_zero['entries'][0]
assert 'era' not in constraints
|
|
b84c137c131ad309997bd77e47b929cfdcf2eb3c
|
Week01/Problem03/cyu_03.py
|
Week01/Problem03/cyu_03.py
|
#!/usr/bin/env python3
"""This script is written by Chuanping Yu, on Jul 24, 2017,
for the Assignment#1 in IDEaS workshop"""
#Problem 3
I = 3
N = 600851475143
F = []
while I*I <= N:
if N%I == 0:
N = int(N/I)
F.append(I)
else:
I = I + 1
if N > 1:
F.append(N)
print(F)
|
Add Chuanping Yu's solutions to Problem03
|
Add Chuanping Yu's solutions to Problem03
|
Python
|
bsd-3-clause
|
GT-IDEaS/SkillsWorkshop2017,GT-IDEaS/SkillsWorkshop2017,GT-IDEaS/SkillsWorkshop2017
|
Add Chuanping Yu's solutions to Problem03
|
#!/usr/bin/env python3
"""This script is written by Chuanping Yu, on Jul 24, 2017,
for the Assignment#1 in IDEaS workshop"""
#Problem 3
I = 3
N = 600851475143
F = []
while I*I <= N:
if N%I == 0:
N = int(N/I)
F.append(I)
else:
I = I + 1
if N > 1:
F.append(N)
print(F)
|
<commit_before><commit_msg>Add Chuanping Yu's solutions to Problem03<commit_after>
|
#!/usr/bin/env python3
"""This script is written by Chuanping Yu, on Jul 24, 2017,
for the Assignment#1 in IDEaS workshop"""
#Problem 3
I = 3
N = 600851475143
F = []
while I*I <= N:
if N%I == 0:
N = int(N/I)
F.append(I)
else:
I = I + 1
if N > 1:
F.append(N)
print(F)
|
Add Chuanping Yu's solutions to Problem03#!/usr/bin/env python3
"""This script is written by Chuanping Yu, on Jul 24, 2017,
for the Assignment#1 in IDEaS workshop"""
#Problem 3
I = 3
N = 600851475143
F = []
while I*I <= N:
if N%I == 0:
N = int(N/I)
F.append(I)
else:
I = I + 1
if N > 1:
F.append(N)
print(F)
|
<commit_before><commit_msg>Add Chuanping Yu's solutions to Problem03<commit_after>#!/usr/bin/env python3
"""This script is written by Chuanping Yu, on Jul 24, 2017,
for the Assignment#1 in IDEaS workshop"""
#Problem 3
I = 3
N = 600851475143
F = []
while I*I <= N:
if N%I == 0:
N = int(N/I)
F.append(I)
else:
I = I + 1
if N > 1:
F.append(N)
print(F)
|
|
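The script above factors 600851475143 by trial division. A small sketch of the same algorithm as a reusable function, starting at 2 so even inputs are also handled (the original starts at 3, which is fine for this odd target):

def prime_factors(n):
    # Trial division, as in the script above.
    factors = []
    i = 2
    while i * i <= n:
        if n % i == 0:
            n //= i
            factors.append(i)
        else:
            i += 1
    if n > 1:
        factors.append(n)
    return factors

print(prime_factors(600851475143))  # expected: [71, 839, 1471, 6857]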
9f47f2174215eb49f3060669db66c7145e2ee7e5
|
tests/qtgui/qgraphicsitem_isblocked_test.py
|
tests/qtgui/qgraphicsitem_isblocked_test.py
|
#!/usr/bin/python
import unittest
from PySide import QtGui, QtCore
from helper import UsesQApplication
class Item(QtGui.QGraphicsItem):
def __init__(self):
QtGui.QGraphicsItem.__init__(self)
def boundingRect(self):
return QtCore.QRectF(0, 0, 100, 100)
def paint(self, painter, option, widget):
painter.setBrush(QtGui.QColor(255, 255, 255))
painter.drawRect(0, 0, 100, 100)
class QGraphicsViewIsBlockedTest(UsesQApplication):
def testIsBlockedByModalPanel(self):
item = Item()
item.isBlockedByModalPanel()
if __name__ == "__main__":
unittest.main()
|
Add unit test for QGraphicsItem.isBlockedByModalPanel()
|
Add unit test for QGraphicsItem.isBlockedByModalPanel()
|
Python
|
lgpl-2.1
|
gbaty/pyside2,qtproject/pyside-pyside,pankajp/pyside,RobinD42/pyside,enthought/pyside,BadSingleton/pyside2,pankajp/pyside,M4rtinK/pyside-bb10,BadSingleton/pyside2,M4rtinK/pyside-android,PySide/PySide,gbaty/pyside2,RobinD42/pyside,qtproject/pyside-pyside,M4rtinK/pyside-bb10,IronManMark20/pyside2,BadSingleton/pyside2,M4rtinK/pyside-bb10,RobinD42/pyside,PySide/PySide,gbaty/pyside2,M4rtinK/pyside-android,pankajp/pyside,M4rtinK/pyside-android,pankajp/pyside,M4rtinK/pyside-android,IronManMark20/pyside2,enthought/pyside,RobinD42/pyside,BadSingleton/pyside2,RobinD42/pyside,enthought/pyside,M4rtinK/pyside-bb10,RobinD42/pyside,IronManMark20/pyside2,enthought/pyside,qtproject/pyside-pyside,pankajp/pyside,IronManMark20/pyside2,enthought/pyside,BadSingleton/pyside2,enthought/pyside,enthought/pyside,PySide/PySide,qtproject/pyside-pyside,PySide/PySide,IronManMark20/pyside2,gbaty/pyside2,RobinD42/pyside,M4rtinK/pyside-android,M4rtinK/pyside-bb10,M4rtinK/pyside-bb10,qtproject/pyside-pyside,gbaty/pyside2,PySide/PySide,M4rtinK/pyside-android
|
Add unit test for QGraphicsItem.isBlockedByModalPanel()
|
#!/usr/bin/python
import unittest
from PySide import QtGui, QtCore
from helper import UsesQApplication
class Item(QtGui.QGraphicsItem):
def __init__(self):
QtGui.QGraphicsItem.__init__(self)
def boundingRect(self):
return QtCore.QRectF(0, 0, 100, 100)
def paint(self, painter, option, widget):
painter.setBrush(QtGui.QColor(255, 255, 255))
painter.drawRect(0, 0, 100, 100)
class QGraphicsViewIsBlockedTest(UsesQApplication):
def testIsBlockedByModalPanel(self):
item = Item()
item.isBlockedByModalPanel()
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add unit test for QGraphicsItem.isBlockedByModalPanel()<commit_after>
|
#!/usr/bin/python
import unittest
from PySide import QtGui, QtCore
from helper import UsesQApplication
class Item(QtGui.QGraphicsItem):
def __init__(self):
QtGui.QGraphicsItem.__init__(self)
def boundingRect(self):
return QtCore.QRectF(0, 0, 100, 100)
def paint(self, painter, option, widget):
painter.setBrush(QtGui.QColor(255, 255, 255))
painter.drawRect(0, 0, 100, 100)
class QGraphicsViewIsBlockedTest(UsesQApplication):
def testIsBlockedByModalPanel(self):
item = Item()
item.isBlockedByModalPanel()
if __name__ == "__main__":
unittest.main()
|
Add unit test for QGraphicsItem.isBlockedByModalPanel()#!/usr/bin/python
import unittest
from PySide import QtGui, QtCore
from helper import UsesQApplication
class Item(QtGui.QGraphicsItem):
def __init__(self):
QtGui.QGraphicsItem.__init__(self)
def boundingRect(self):
return QtCore.QRectF(0, 0, 100, 100)
def paint(self, painter, option, widget):
painter.setBrush(QtGui.QColor(255, 255, 255))
painter.drawRect(0, 0, 100, 100)
class QGraphicsViewIsBlockedTest(UsesQApplication):
def testIsBlockedByModalPanel(self):
item = Item()
item.isBlockedByModalPanel()
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add unit test for QGraphicsItem.isBlockedByModalPanel()<commit_after>#!/usr/bin/python
import unittest
from PySide import QtGui, QtCore
from helper import UsesQApplication
class Item(QtGui.QGraphicsItem):
def __init__(self):
QtGui.QGraphicsItem.__init__(self)
def boundingRect(self):
return QtCore.QRectF(0, 0, 100, 100)
def paint(self, painter, option, widget):
painter.setBrush(QtGui.QColor(255, 255, 255))
painter.drawRect(0, 0, 100, 100)
class QGraphicsViewIsBlockedTest(UsesQApplication):
def testIsBlockedByModalPanel(self):
item = Item()
item.isBlockedByModalPanel()
if __name__ == "__main__":
unittest.main()
|
|
348171aa88ff4c7117613491bb98368dd0053778
|
tests/test_dockerizer/test_docker_images.py
|
tests/test_dockerizer/test_docker_images.py
|
import pytest
from django.conf import settings
from docker_images.image_info import get_image_name, get_image_info, get_tagged_image
from factories.factory_build_jobs import BuildJobFactory
from tests.utils import BaseTest
@pytest.mark.dockerizer_mark
class TestDockerImageInfo(BaseTest):
def setUp(self):
super().setUp()
self.build_job = BuildJobFactory()
def test_get_image_name(self):
image_name = get_image_name(self.build_job)
expected_name = '{}/{}'.format(settings.REGISTRY_HOST, self.build_job.project.name)
assert image_name == expected_name
def test_get_image_image_info(self):
image_info = get_image_info(self.build_job)
assert image_info[0] == get_image_name(self.build_job)
assert image_info[1] == self.build_job.uuid.hex
def test_get_tagged_image(self):
tagged_image = get_tagged_image(self.build_job)
image_name = get_image_info(self.build_job)
assert tagged_image == ':'.join(image_name)
|
Add docker image info tests
|
Add docker image info tests
|
Python
|
apache-2.0
|
polyaxon/polyaxon,polyaxon/polyaxon,polyaxon/polyaxon
|
Add docker image info tests
|
import pytest
from django.conf import settings
from docker_images.image_info import get_image_name, get_image_info, get_tagged_image
from factories.factory_build_jobs import BuildJobFactory
from tests.utils import BaseTest
@pytest.mark.dockerizer_mark
class TestDockerImageInfo(BaseTest):
def setUp(self):
super().setUp()
self.build_job = BuildJobFactory()
def test_get_image_name(self):
image_name = get_image_name(self.build_job)
expected_name = '{}/{}'.format(settings.REGISTRY_HOST, self.build_job.project.name)
assert image_name == expected_name
def test_get_image_image_info(self):
image_info = get_image_info(self.build_job)
assert image_info[0] == get_image_name(self.build_job)
assert image_info[1] == self.build_job.uuid.hex
def test_get_tagged_image(self):
tagged_image = get_tagged_image(self.build_job)
image_name = get_image_info(self.build_job)
assert tagged_image == ':'.join(image_name)
|
<commit_before><commit_msg>Add docker image info tests<commit_after>
|
import pytest
from django.conf import settings
from docker_images.image_info import get_image_name, get_image_info, get_tagged_image
from factories.factory_build_jobs import BuildJobFactory
from tests.utils import BaseTest
@pytest.mark.dockerizer_mark
class TestDockerImageInfo(BaseTest):
def setUp(self):
super().setUp()
self.build_job = BuildJobFactory()
def test_get_image_name(self):
image_name = get_image_name(self.build_job)
expected_name = '{}/{}'.format(settings.REGISTRY_HOST, self.build_job.project.name)
assert image_name == expected_name
def test_get_image_image_info(self):
image_info = get_image_info(self.build_job)
assert image_info[0] == get_image_name(self.build_job)
assert image_info[1] == self.build_job.uuid.hex
def test_get_tagged_image(self):
tagged_image = get_tagged_image(self.build_job)
image_name = get_image_info(self.build_job)
assert tagged_image == ':'.join(image_name)
|
Add docker image info testsimport pytest
from django.conf import settings
from docker_images.image_info import get_image_name, get_image_info, get_tagged_image
from factories.factory_build_jobs import BuildJobFactory
from tests.utils import BaseTest
@pytest.mark.dockerizer_mark
class TestDockerImageInfo(BaseTest):
def setUp(self):
super().setUp()
self.build_job = BuildJobFactory()
def test_get_image_name(self):
image_name = get_image_name(self.build_job)
expected_name = '{}/{}'.format(settings.REGISTRY_HOST, self.build_job.project.name)
assert image_name == expected_name
def test_get_image_image_info(self):
image_info = get_image_info(self.build_job)
assert image_info[0] == get_image_name(self.build_job)
assert image_info[1] == self.build_job.uuid.hex
def test_get_tagged_image(self):
tagged_image = get_tagged_image(self.build_job)
image_name = get_image_info(self.build_job)
assert tagged_image == ':'.join(image_name)
|
<commit_before><commit_msg>Add docker image info tests<commit_after>import pytest
from django.conf import settings
from docker_images.image_info import get_image_name, get_image_info, get_tagged_image
from factories.factory_build_jobs import BuildJobFactory
from tests.utils import BaseTest
@pytest.mark.dockerizer_mark
class TestDockerImageInfo(BaseTest):
def setUp(self):
super().setUp()
self.build_job = BuildJobFactory()
def test_get_image_name(self):
image_name = get_image_name(self.build_job)
expected_name = '{}/{}'.format(settings.REGISTRY_HOST, self.build_job.project.name)
assert image_name == expected_name
def test_get_image_image_info(self):
image_info = get_image_info(self.build_job)
assert image_info[0] == get_image_name(self.build_job)
assert image_info[1] == self.build_job.uuid.hex
def test_get_tagged_image(self):
tagged_image = get_tagged_image(self.build_job)
image_name = get_image_info(self.build_job)
assert tagged_image == ':'.join(image_name)
|
|
46a88692a4e90ae2fa40a71243c49f0530067bed
|
thinc/tests/integration/test_roundtrip_bytes.py
|
thinc/tests/integration/test_roundtrip_bytes.py
|
from ...neural import Maxout
from ...api import chain
def test_simple_model_roundtrip_bytes():
model = Maxout(5, 10, pieces=2)
model.b += 1
data = model.to_bytes()
model.b -= 1
model = model.from_bytes(data)
assert model.b[0, 0] == 1
def test_multi_model_roundtrip_bytes():
model = chain(Maxout(5, 10, pieces=2), Maxout(2, 3))
model._layers[0].b += 1
model._layers[1].b += 2
data = model.to_bytes()
model._layers[0].b -= 1
model._layers[1].b -= 2
model = model.from_bytes(data)
assert model._layers[0].b[0, 0] == 1
assert model._layers[1].b[0, 0] == 2
def test_multi_model_load_missing_dims():
model = chain(Maxout(5, 10, pieces=2), Maxout(2, 3))
model._layers[0].b += 1
model._layers[1].b += 2
data = model.to_bytes()
model2 = chain(Maxout(5), Maxout())
model2 = model2.from_bytes(data)
assert model2._layers[0].b[0, 0] == 1
assert model2._layers[1].b[0, 0] == 2
|
Add test for byte serialisation
|
Add test for byte serialisation
|
Python
|
mit
|
explosion/thinc,explosion/thinc,spacy-io/thinc,spacy-io/thinc,explosion/thinc,spacy-io/thinc,explosion/thinc
|
Add test for byte serialisation
|
from ...neural import Maxout
from ...api import chain
def test_simple_model_roundtrip_bytes():
model = Maxout(5, 10, pieces=2)
model.b += 1
data = model.to_bytes()
model.b -= 1
model = model.from_bytes(data)
assert model.b[0, 0] == 1
def test_multi_model_roundtrip_bytes():
model = chain(Maxout(5, 10, pieces=2), Maxout(2, 3))
model._layers[0].b += 1
model._layers[1].b += 2
data = model.to_bytes()
model._layers[0].b -= 1
model._layers[1].b -= 2
model = model.from_bytes(data)
assert model._layers[0].b[0, 0] == 1
assert model._layers[1].b[0, 0] == 2
def test_multi_model_load_missing_dims():
model = chain(Maxout(5, 10, pieces=2), Maxout(2, 3))
model._layers[0].b += 1
model._layers[1].b += 2
data = model.to_bytes()
model2 = chain(Maxout(5), Maxout())
model2 = model2.from_bytes(data)
assert model2._layers[0].b[0, 0] == 1
assert model2._layers[1].b[0, 0] == 2
|
<commit_before><commit_msg>Add test for byte serialisation<commit_after>
|
from ...neural import Maxout
from ...api import chain
def test_simple_model_roundtrip_bytes():
model = Maxout(5, 10, pieces=2)
model.b += 1
data = model.to_bytes()
model.b -= 1
model = model.from_bytes(data)
assert model.b[0, 0] == 1
def test_multi_model_roundtrip_bytes():
model = chain(Maxout(5, 10, pieces=2), Maxout(2, 3))
model._layers[0].b += 1
model._layers[1].b += 2
data = model.to_bytes()
model._layers[0].b -= 1
model._layers[1].b -= 2
model = model.from_bytes(data)
assert model._layers[0].b[0, 0] == 1
assert model._layers[1].b[0, 0] == 2
def test_multi_model_load_missing_dims():
model = chain(Maxout(5, 10, pieces=2), Maxout(2, 3))
model._layers[0].b += 1
model._layers[1].b += 2
data = model.to_bytes()
model2 = chain(Maxout(5), Maxout())
model2 = model2.from_bytes(data)
assert model2._layers[0].b[0, 0] == 1
assert model2._layers[1].b[0, 0] == 2
|
Add test for byte serialisationfrom ...neural import Maxout
from ...api import chain
def test_simple_model_roundtrip_bytes():
model = Maxout(5, 10, pieces=2)
model.b += 1
data = model.to_bytes()
model.b -= 1
model = model.from_bytes(data)
assert model.b[0, 0] == 1
def test_multi_model_roundtrip_bytes():
model = chain(Maxout(5, 10, pieces=2), Maxout(2, 3))
model._layers[0].b += 1
model._layers[1].b += 2
data = model.to_bytes()
model._layers[0].b -= 1
model._layers[1].b -= 2
model = model.from_bytes(data)
assert model._layers[0].b[0, 0] == 1
assert model._layers[1].b[0, 0] == 2
def test_multi_model_load_missing_dims():
model = chain(Maxout(5, 10, pieces=2), Maxout(2, 3))
model._layers[0].b += 1
model._layers[1].b += 2
data = model.to_bytes()
model2 = chain(Maxout(5), Maxout())
model2 = model2.from_bytes(data)
assert model2._layers[0].b[0, 0] == 1
assert model2._layers[1].b[0, 0] == 2
|
<commit_before><commit_msg>Add test for byte serialisation<commit_after>from ...neural import Maxout
from ...api import chain
def test_simple_model_roundtrip_bytes():
model = Maxout(5, 10, pieces=2)
model.b += 1
data = model.to_bytes()
model.b -= 1
model = model.from_bytes(data)
assert model.b[0, 0] == 1
def test_multi_model_roundtrip_bytes():
model = chain(Maxout(5, 10, pieces=2), Maxout(2, 3))
model._layers[0].b += 1
model._layers[1].b += 2
data = model.to_bytes()
model._layers[0].b -= 1
model._layers[1].b -= 2
model = model.from_bytes(data)
assert model._layers[0].b[0, 0] == 1
assert model._layers[1].b[0, 0] == 2
def test_multi_model_load_missing_dims():
model = chain(Maxout(5, 10, pieces=2), Maxout(2, 3))
model._layers[0].b += 1
model._layers[1].b += 2
data = model.to_bytes()
model2 = chain(Maxout(5), Maxout())
model2 = model2.from_bytes(data)
assert model2._layers[0].b[0, 0] == 1
assert model2._layers[1].b[0, 0] == 2
|
|
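The round-trip tests above keep the serialised model in memory. A minimal sketch for persisting it to disk with plain file I/O, using only the to_bytes/from_bytes calls shown in the tests (paths are illustrative):

def save_model(model, path):
    # Write the serialised weights produced by model.to_bytes().
    with open(path, 'wb') as f:
        f.write(model.to_bytes())

def load_model(model, path):
    # from_bytes returns the restored model, matching its use in the tests above.
    with open(path, 'rb') as f:
        return model.from_bytes(f.read())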
871ec80a78ef2caaaea8882e9c2846b064eb7b96
|
trytond_nereid/tests/__init__.py
|
trytond_nereid/tests/__init__.py
|
# -*- coding: utf-8 -*-
"""
__init__
Nereid Tryton module test cases
:copyright: (c) 2011 by Openlabs Technologies & Consulting (P) Limited
:license: BSD, see LICENSE for more details.
"""
from unittest import TestSuite
from .configuration import suite as configuration_test_suite
from .test_currency import suite as currency_test_suite
from .test_language import suite as language_test_suite
suite = TestSuite()
suite.addTests([
configuration_test_suite,
currency_test_suite,
language_test_suite
])
|
Add a consolidated test suite which could be imported by the tryton test suite
|
Add a consolidated test suite which could be imported by the tryton test suite
|
Python
|
bsd-3-clause
|
fulfilio/nereid,usudaysingh/nereid,prakashpp/nereid,riteshshrv/nereid,riteshshrv/nereid,usudaysingh/nereid,fulfilio/nereid,prakashpp/nereid
|
Add a consolidated test suite which could be imported by the tryton test suite
|
# -*- coding: utf-8 -*-
"""
__init__
Nereid Tryton module test cases
:copyright: (c) 2011 by Openlabs Technologies & Consulting (P) Limited
:license: BSD, see LICENSE for more details.
"""
from unittest import TestSuite
from .configuration import suite as configuration_test_suite
from .test_currency import suite as currency_test_suite
from .test_language import suite as language_test_suite
suite = TestSuite()
suite.addTests([
configuration_test_suite,
currency_test_suite,
language_test_suite
])
|
<commit_before><commit_msg>Add a consolidated test suite which could be imported by the tryton test suite<commit_after>
|
# -*- coding: utf-8 -*-
"""
__init__
Nereid Tryton module test cases
:copyright: (c) 2011 by Openlabs Technologies & Consulting (P) Limited
:license: BSD, see LICENSE for more details.
"""
from unittest import TestSuite
from .configuration import suite as configuration_test_suite
from .test_currency import suite as currency_test_suite
from .test_language import suite as language_test_suite
suite = TestSuite()
suite.addTests([
configuration_test_suite,
currency_test_suite,
language_test_suite
])
|
Add a consolidated test suite which could be imported by the tryton test suite# -*- coding: utf-8 -*-
"""
__init__
Nereid Tryton module test cases
:copyright: (c) 2011 by Openlabs Technologies & Consulting (P) Limited
:license: BSD, see LICENSE for more details.
"""
from unittest import TestSuite
from .configuration import suite as configuration_test_suite
from .test_currency import suite as currency_test_suite
from .test_language import suite as language_test_suite
suite = TestSuite()
suite.addTests([
configuration_test_suite,
currency_test_suite,
language_test_suite
])
|
<commit_before><commit_msg>Add a consolidated test suite which could be imported by the tryton test suite<commit_after># -*- coding: utf-8 -*-
"""
__init__
Nereid Tryton module test cases
:copyright: (c) 2011 by Openlabs Technologies & Consulting (P) Limited
:license: BSD, see LICENSE for more details.
"""
from unittest import TestSuite
from .configuration import suite as configuration_test_suite
from .test_currency import suite as currency_test_suite
from .test_language import suite as language_test_suite
suite = TestSuite()
suite.addTests([
configuration_test_suite,
currency_test_suite,
language_test_suite
])
|
|
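The consolidated suite above can also be run directly with the standard library runner; a minimal sketch, where the import path simply follows the file location trytond_nereid/tests/__init__.py:

import unittest
from trytond_nereid.tests import suite

unittest.TextTestRunner(verbosity=2).run(suite)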
07ecc937e0dec60fa39f614826b985bb90d3cf77
|
brukerTitleScanner.py
|
brukerTitleScanner.py
|
import glob
import sys
import os
#directory = input("Enter Directory: ")
#print("Directory: ", directory)
folder = "/home/benno/Dropbox/RESEARCH/bullet/experiments/H2O17atC60/20170510/H2OC60ODCB20170408/"
#folder = askdirectory()
subdirectories = [x[0] for x in os.walk(folder)]
experiments = set()
for i in subdirectories:
# now add these to a set.
experiments.add(i.split(folder)[1].split("/")[0])
for i in experiments:
try:
pathToTitle = folder + i + '/pdata/1/title'
titleFile = open(pathToTitle, mode='r')
#self.files.append(titleFile)
title = list(titleFile)
titleF = [line.strip() for line in title]
print("\n\n {} \n========================".format(i))
for i in title:
print(i.rstrip())
except:
print("Some Error")
print("No title file.")
|
Add tool for inspection of bruker directories
|
Add tool for inspection of bruker directories
|
Python
|
mit
|
bennomeier/pyNMR,kourk0am/pyNMR
|
Add tool for inspection of bruker directories
|
import glob
import sys
import os
#directory = input("Enter Directory: ")
#print("Directory: ", directory)
folder = "/home/benno/Dropbox/RESEARCH/bullet/experiments/H2O17atC60/20170510/H2OC60ODCB20170408/"
#folder = askdirectory()
subdirectories = [x[0] for x in os.walk(folder)]
experiments = set()
for i in subdirectories:
# now add these to a set.
experiments.add(i.split(folder)[1].split("/")[0])
for i in experiments:
try:
pathToTitle = folder + i + '/pdata/1/title'
titleFile = open(pathToTitle, mode='r')
#self.files.append(titleFile)
title = list(titleFile)
titleF = [line.strip() for line in title]
print("\n\n {} \n========================".format(i))
for i in title:
print(i.rstrip())
except:
print("Some Error")
print("No title file.")
|
<commit_before><commit_msg>Add tool for inspection of bruker directories<commit_after>
|
import glob
import sys
import os
#directory = input("Enter Directory: ")
#print("Directory: ", directory)
folder = "/home/benno/Dropbox/RESEARCH/bullet/experiments/H2O17atC60/20170510/H2OC60ODCB20170408/"
#folder = askdirectory()
subdirectories = [x[0] for x in os.walk(folder)]
experiments = set()
for i in subdirectories:
# now add these to a set.
experiments.add(i.split(folder)[1].split("/")[0])
for i in experiments:
try:
pathToTitle = folder + i + '/pdata/1/title'
titleFile = open(pathToTitle, mode='r')
#self.files.append(titleFile)
title = list(titleFile)
titleF = [line.strip() for line in title]
print("\n\n {} \n========================".format(i))
for i in title:
print(i.rstrip())
except:
print("Some Error")
print("No title file.")
|
Add tool for inspection of bruker directoriesimport glob
import sys
import os
#directory = input("Enter Directory: ")
#print("Directory: ", directory)
folder = "/home/benno/Dropbox/RESEARCH/bullet/experiments/H2O17atC60/20170510/H2OC60ODCB20170408/"
#folder = askdirectory()
subdirectories = [x[0] for x in os.walk(folder)]
experiments = set()
for i in subdirectories:
# now add these to a set.
experiments.add(i.split(folder)[1].split("/")[0])
for i in experiments:
try:
pathToTitle = folder + i + '/pdata/1/title'
titleFile = open(pathToTitle, mode='r')
#self.files.append(titleFile)
title = list(titleFile)
titleF = [line.strip() for line in title]
print("\n\n {} \n========================".format(i))
for i in title:
print(i.rstrip())
except:
print("Some Error")
print("No title file.")
|
<commit_before><commit_msg>Add tool for inspection of bruker directories<commit_after>import glob
import sys
import os
#directory = input("Enter Directory: ")
#print("Directory: ", directory)
folder = "/home/benno/Dropbox/RESEARCH/bullet/experiments/H2O17atC60/20170510/H2OC60ODCB20170408/"
#folder = askdirectory()
subdirectories = [x[0] for x in os.walk(folder)]
experiments = set()
for i in subdirectories:
# now add these to a set.
experiments.add(i.split(folder)[1].split("/")[0])
for i in experiments:
try:
pathToTitle = folder + i + '/pdata/1/title'
titleFile = open(pathToTitle, mode='r')
#self.files.append(titleFile)
title = list(titleFile)
titleF = [line.strip() for line in title]
print("\n\n {} \n========================".format(i))
for i in title:
print(i.rstrip())
except:
print("Some Error")
print("No title file.")
|
|
bad78ce8eaddb26cf4e9ffc30851ff5e58513f17
|
scripts/compare_wave_and_flac_reads.py
|
scripts/compare_wave_and_flac_reads.py
|
from pathlib import Path
import random
import time
import soundfile as sf
DIR_PATH = Path('/Users/harold/Desktop/NFC/FLAC Test')
# DIR_PATH = Path('/Volumes/Recordings1/FLAC Test')
FILE_NAME_STEM = 'FLOOD-21C_20180901_194500'
CLIP_COUNT = 10000
CLIP_DURATION = .6
SAMPLE_RATE = 24000
CLIP_LENGTH = int(round(CLIP_DURATION * SAMPLE_RATE))
def main():
start_indices = generate_clip_start_indices()
read_clips(start_indices, 'flac')
read_clips(start_indices, 'wav')
def read_clips(start_indices, extension):
file_path = get_file_path(extension)
with sf.SoundFile(file_path) as file_:
start_time = time.time()
for start_index in start_indices:
samples = read(file_, start_index, CLIP_LENGTH)
# print(samples[:10])
delta_time = time.time() - start_time
rate = CLIP_COUNT / delta_time
print(
f'Read {CLIP_COUNT} clips in {delta_time:.1f} seconds, '
f'a rate of {rate:.1f} clips per second.')
def generate_clip_start_indices():
file_path = get_file_path('wav')
with sf.SoundFile(file_path) as file_:
frame_count = file_.frames
population = range(frame_count - CLIP_LENGTH)
start_indices = random.choices(population, k=CLIP_COUNT)
return start_indices
def get_file_path(extension):
file_name = f'{FILE_NAME_STEM}.{extension}'
return DIR_PATH / file_name
def read(file_, start_index, length):
file_.seek(start_index)
return file_.read(length, dtype='int16')
if __name__ == '__main__':
main()
|
Add script that compares WAVE and FLAC reads.
|
Add script that compares WAVE and FLAC reads.
|
Python
|
mit
|
HaroldMills/Vesper,HaroldMills/Vesper,HaroldMills/Vesper,HaroldMills/Vesper,HaroldMills/Vesper
|
Add script that compares WAVE and FLAC reads.
|
from pathlib import Path
import random
import time
import soundfile as sf
DIR_PATH = Path('/Users/harold/Desktop/NFC/FLAC Test')
# DIR_PATH = Path('/Volumes/Recordings1/FLAC Test')
FILE_NAME_STEM = 'FLOOD-21C_20180901_194500'
CLIP_COUNT = 10000
CLIP_DURATION = .6
SAMPLE_RATE = 24000
CLIP_LENGTH = int(round(CLIP_DURATION * SAMPLE_RATE))
def main():
start_indices = generate_clip_start_indices()
read_clips(start_indices, 'flac')
read_clips(start_indices, 'wav')
def read_clips(start_indices, extension):
file_path = get_file_path(extension)
with sf.SoundFile(file_path) as file_:
start_time = time.time()
for start_index in start_indices:
samples = read(file_, start_index, CLIP_LENGTH)
# print(samples[:10])
delta_time = time.time() - start_time
rate = CLIP_COUNT / delta_time
print(
f'Read {CLIP_COUNT} clips in {delta_time:.1f} seconds, '
f'a rate of {rate:.1f} clips per second.')
def generate_clip_start_indices():
file_path = get_file_path('wav')
with sf.SoundFile(file_path) as file_:
frame_count = file_.frames
population = range(frame_count - CLIP_LENGTH)
start_indices = random.choices(population, k=CLIP_COUNT)
return start_indices
def get_file_path(extension):
file_name = f'{FILE_NAME_STEM}.{extension}'
return DIR_PATH / file_name
def read(file_, start_index, length):
file_.seek(start_index)
return file_.read(length, dtype='int16')
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script that compares WAVE and FLAC reads.<commit_after>
|
from pathlib import Path
import random
import time
import soundfile as sf
DIR_PATH = Path('/Users/harold/Desktop/NFC/FLAC Test')
# DIR_PATH = Path('/Volumes/Recordings1/FLAC Test')
FILE_NAME_STEM = 'FLOOD-21C_20180901_194500'
CLIP_COUNT = 10000
CLIP_DURATION = .6
SAMPLE_RATE = 24000
CLIP_LENGTH = int(round(CLIP_DURATION * SAMPLE_RATE))
def main():
start_indices = generate_clip_start_indices()
read_clips(start_indices, 'flac')
read_clips(start_indices, 'wav')
def read_clips(start_indices, extension):
file_path = get_file_path(extension)
with sf.SoundFile(file_path) as file_:
start_time = time.time()
for start_index in start_indices:
samples = read(file_, start_index, CLIP_LENGTH)
# print(samples[:10])
delta_time = time.time() - start_time
rate = CLIP_COUNT / delta_time
print(
f'Read {CLIP_COUNT} clips in {delta_time:.1f} seconds, '
f'a rate of {rate:.1f} clips per second.')
def generate_clip_start_indices():
file_path = get_file_path('wav')
with sf.SoundFile(file_path) as file_:
frame_count = file_.frames
population = range(frame_count - CLIP_LENGTH)
start_indices = random.choices(population, k=CLIP_COUNT)
return start_indices
def get_file_path(extension):
file_name = f'{FILE_NAME_STEM}.{extension}'
return DIR_PATH / file_name
def read(file_, start_index, length):
file_.seek(start_index)
return file_.read(length, dtype='int16')
if __name__ == '__main__':
main()
|
Add script that compares WAVE and FLAC reads.from pathlib import Path
import random
import time
import soundfile as sf
DIR_PATH = Path('/Users/harold/Desktop/NFC/FLAC Test')
# DIR_PATH = Path('/Volumes/Recordings1/FLAC Test')
FILE_NAME_STEM = 'FLOOD-21C_20180901_194500'
CLIP_COUNT = 10000
CLIP_DURATION = .6
SAMPLE_RATE = 24000
CLIP_LENGTH = int(round(CLIP_DURATION * SAMPLE_RATE))
def main():
start_indices = generate_clip_start_indices()
read_clips(start_indices, 'flac')
read_clips(start_indices, 'wav')
def read_clips(start_indices, extension):
file_path = get_file_path(extension)
with sf.SoundFile(file_path) as file_:
start_time = time.time()
for start_index in start_indices:
samples = read(file_, start_index, CLIP_LENGTH)
# print(samples[:10])
delta_time = time.time() - start_time
rate = CLIP_COUNT / delta_time
print(
f'Read {CLIP_COUNT} clips in {delta_time:.1f} seconds, '
f'a rate of {rate:.1f} clips per second.')
def generate_clip_start_indices():
file_path = get_file_path('wav')
with sf.SoundFile(file_path) as file_:
frame_count = file_.frames
population = range(frame_count - CLIP_LENGTH)
start_indices = random.choices(population, k=CLIP_COUNT)
return start_indices
def get_file_path(extension):
file_name = f'{FILE_NAME_STEM}.{extension}'
return DIR_PATH / file_name
def read(file_, start_index, length):
file_.seek(start_index)
return file_.read(length, dtype='int16')
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script that compares WAVE and FLAC reads.<commit_after>from pathlib import Path
import random
import time
import soundfile as sf
DIR_PATH = Path('/Users/harold/Desktop/NFC/FLAC Test')
# DIR_PATH = Path('/Volumes/Recordings1/FLAC Test')
FILE_NAME_STEM = 'FLOOD-21C_20180901_194500'
CLIP_COUNT = 10000
CLIP_DURATION = .6
SAMPLE_RATE = 24000
CLIP_LENGTH = int(round(CLIP_DURATION * SAMPLE_RATE))
def main():
start_indices = generate_clip_start_indices()
read_clips(start_indices, 'flac')
read_clips(start_indices, 'wav')
def read_clips(start_indices, extension):
file_path = get_file_path(extension)
with sf.SoundFile(file_path) as file_:
start_time = time.time()
for start_index in start_indices:
samples = read(file_, start_index, CLIP_LENGTH)
# print(samples[:10])
delta_time = time.time() - start_time
rate = CLIP_COUNT / delta_time
print(
f'Read {CLIP_COUNT} clips in {delta_time:.1f} seconds, '
f'a rate of {rate:.1f} clips per second.')
def generate_clip_start_indices():
file_path = get_file_path('wav')
with sf.SoundFile(file_path) as file_:
frame_count = file_.frames
population = range(frame_count - CLIP_LENGTH)
start_indices = random.choices(population, k=CLIP_COUNT)
return start_indices
def get_file_path(extension):
file_name = f'{FILE_NAME_STEM}.{extension}'
return DIR_PATH / file_name
def read(file_, start_index, length):
file_.seek(start_index)
return file_.read(length, dtype='int16')
if __name__ == '__main__':
main()
|
|
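The comparison above assumes both a .wav and a .flac copy of the same recording exist. A small sketch for producing the FLAC copy with the same soundfile library (paths are illustrative):

import soundfile as sf

def make_flac_copy(wav_path, flac_path):
    # Read the WAVE samples and re-encode them losslessly as FLAC.
    data, sample_rate = sf.read(wav_path, dtype='int16')
    sf.write(flac_path, data, sample_rate, format='FLAC')

make_flac_copy('FLOOD-21C_20180901_194500.wav', 'FLOOD-21C_20180901_194500.flac')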
c1f888e30651867933ad1e38bebeec2a597ef96d
|
support/infernal2rfam.py
|
support/infernal2rfam.py
|
"""
Copyright [2009-2019] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
Script to convert infernal output files to full_region
"""
# -------------------------------------------------------------------------
import sys
from utils import infernal_utils as iu
# -------------------------------------------------------------------------
if __name__ == '__main__':
input_file = sys.argv[1]
if "--tbl" in sys.argv or "--tblout" in sys.argv:
iu.tblout_to_full_region(input_file, dest_dir=None)
elif "-o" in sys.argv or "--out" in sys.argv:
iu.infernal_to_full_region(input_file, dest_dir=None, filename=None)
else:
print "\nWrong input!\n"
print "Usage infernal_file [-o|--tbl]\n"
print "\n-o (--out): parse infernal output format"
print "\n--tbl (--tblout): parse infernal tblout format"
|
Add script to convert infernal output to full_region
|
Add script to convert infernal output to full_region
|
Python
|
apache-2.0
|
Rfam/rfam-production,Rfam/rfam-production,Rfam/rfam-production
|
Add script to convert infernal output to full_region
|
"""
Copyright [2009-2019] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
Script to convert infernal output files to full_region
"""
# -------------------------------------------------------------------------
import sys
from utils import infernal_utils as iu
# -------------------------------------------------------------------------
if __name__ == '__main__':
input_file = sys.argv[1]
if "--tbl" in sys.argv or "--tblout" in sys.argv:
iu.tblout_to_full_region(input_file, dest_dir=None)
elif "-o" in sys.argv or "--out" in sys.argv:
iu.infernal_to_full_region(input_file, dest_dir=None, filename=None)
else:
print "\nWrong input!\n"
print "Usage infernal_file [-o|--tbl]\n"
print "\n-o (--out): parse infernal output format"
print "\n--tbl (--tblout): parse infernal tblout format"
|
<commit_before><commit_msg>Add script to convert infernal output to full_region<commit_after>
|
"""
Copyright [2009-2019] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
Script to convert infernal output files to full_region
"""
# -------------------------------------------------------------------------
import sys
from utils import infernal_utils as iu
# -------------------------------------------------------------------------
if __name__ == '__main__':
input_file = sys.argv[1]
if "--tbl" in sys.argv or "--tblout" in sys.argv:
iu.tblout_to_full_region(input_file, dest_dir=None)
elif "-o" in sys.argv or "--out" in sys.argv:
iu.infernal_to_full_region(input_file, dest_dir=None, filename=None)
else:
print "\nWrong input!\n"
print "Usage infernal_file [-o|--tbl]\n"
print "\n-o (--out): parse infernal output format"
print "\n--tbl (--tblout): parse infernal tblout format"
|
Add script to convert infernal output to full_region"""
Copyright [2009-2019] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
Script to convert infernal output files to full_region
"""
# -------------------------------------------------------------------------
import sys
from utils import infernal_utils as iu
# -------------------------------------------------------------------------
if __name__ == '__main__':
input_file = sys.argv[1]
if "--tbl" in sys.argv or "--tblout" in sys.argv:
iu.tblout_to_full_region(input_file, dest_dir=None)
elif "-o" in sys.argv or "--out" in sys.argv:
iu.infernal_to_full_region(input_file, dest_dir=None, filename=None)
else:
print "\nWrong input!\n"
print "Usage infernal_file [-o|--tbl]\n"
print "\n-o (--out): parse infernal output format"
print "\n--tbl (--tblout): parse infernal tblout format"
|
<commit_before><commit_msg>Add script to convert infernal output to full_region<commit_after>"""
Copyright [2009-2019] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
Script to convert infernal output files to full_region
"""
# -------------------------------------------------------------------------
import sys
from utils import infernal_utils as iu
# -------------------------------------------------------------------------
if __name__ == '__main__':
input_file = sys.argv[1]
if "--tbl" in sys.argv or "--tblout" in sys.argv:
iu.tblout_to_full_region(input_file, dest_dir=None)
elif "-o" in sys.argv or "--out" in sys.argv:
iu.infernal_to_full_region(input_file, dest_dir=None, filename=None)
else:
print "\nWrong input!\n"
print "Usage infernal_file [-o|--tbl]\n"
print "\n-o (--out): parse infernal output format"
print "\n--tbl (--tblout): parse infernal tblout format"
|
|
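The converter script above is command-line driven, e.g. python infernal2rfam.py infernal_output --tbl. The same two modes can be called programmatically with the functions it already uses (the input filenames here are illustrative):

from utils import infernal_utils as iu

# --tbl / --tblout mode: parse infernal tblout format
iu.tblout_to_full_region("infernal.tblout", dest_dir=None)

# -o / --out mode: parse infernal output format
iu.infernal_to_full_region("infernal.out", dest_dir=None, filename=None)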
2bb893673be54286baec3bf11039ccc636ffe6f4
|
tests/test_converters.py
|
tests/test_converters.py
|
import unittest
from werkzeug.routing import ValidationError
from app import create_app, db
from app.models import Noun, Verb
from app.converters import WordClassConverter
class TestUtils(unittest.TestCase):
def setUp(self):
self.app = create_app("testing")
self.app_ctx = self.app.app_context()
self.app_ctx.push()
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
self.app_ctx.pop()
def test_word_class_converter(self):
word_class_converter = WordClassConverter(None)
self.assertEquals(word_class_converter.to_python("noun"), Noun)
self.assertEquals(word_class_converter.to_python("verb"), Verb)
self.assertRaises(ValidationError, word_class_converter.to_python, "invalid_word_class")
|
Add test for word class converter
|
Add test for word class converter
|
Python
|
mit
|
Encrylize/MyDictionary,Encrylize/MyDictionary,Encrylize/MyDictionary
|
Add test for word class converter
|
import unittest
from werkzeug.routing import ValidationError
from app import create_app, db
from app.models import Noun, Verb
from app.converters import WordClassConverter
class TestUtils(unittest.TestCase):
def setUp(self):
self.app = create_app("testing")
self.app_ctx = self.app.app_context()
self.app_ctx.push()
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
self.app_ctx.pop()
def test_word_class_converter(self):
word_class_converter = WordClassConverter(None)
self.assertEquals(word_class_converter.to_python("noun"), Noun)
self.assertEquals(word_class_converter.to_python("verb"), Verb)
self.assertRaises(ValidationError, word_class_converter.to_python, "invalid_word_class")
|
<commit_before><commit_msg>Add test for word class converter<commit_after>
|
import unittest
from werkzeug.routing import ValidationError
from app import create_app, db
from app.models import Noun, Verb
from app.converters import WordClassConverter
class TestUtils(unittest.TestCase):
def setUp(self):
self.app = create_app("testing")
self.app_ctx = self.app.app_context()
self.app_ctx.push()
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
self.app_ctx.pop()
def test_word_class_converter(self):
word_class_converter = WordClassConverter(None)
self.assertEquals(word_class_converter.to_python("noun"), Noun)
self.assertEquals(word_class_converter.to_python("verb"), Verb)
self.assertRaises(ValidationError, word_class_converter.to_python, "invalid_word_class")
|
Add test for word class converterimport unittest
from werkzeug.routing import ValidationError
from app import create_app, db
from app.models import Noun, Verb
from app.converters import WordClassConverter
class TestUtils(unittest.TestCase):
def setUp(self):
self.app = create_app("testing")
self.app_ctx = self.app.app_context()
self.app_ctx.push()
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
self.app_ctx.pop()
def test_word_class_converter(self):
word_class_converter = WordClassConverter(None)
self.assertEquals(word_class_converter.to_python("noun"), Noun)
self.assertEquals(word_class_converter.to_python("verb"), Verb)
self.assertRaises(ValidationError, word_class_converter.to_python, "invalid_word_class")
|
<commit_before><commit_msg>Add test for word class converter<commit_after>import unittest
from werkzeug.routing import ValidationError
from app import create_app, db
from app.models import Noun, Verb
from app.converters import WordClassConverter
class TestUtils(unittest.TestCase):
def setUp(self):
self.app = create_app("testing")
self.app_ctx = self.app.app_context()
self.app_ctx.push()
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
self.app_ctx.pop()
def test_word_class_converter(self):
word_class_converter = WordClassConverter(None)
self.assertEquals(word_class_converter.to_python("noun"), Noun)
self.assertEquals(word_class_converter.to_python("verb"), Verb)
self.assertRaises(ValidationError, word_class_converter.to_python, "invalid_word_class")
|
|
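The tests above only call WordClassConverter.to_python directly. A minimal sketch of how such a converter is typically registered with Flask's URL map (route and endpoint names here are illustrative, not taken from the project):

from flask import Flask
from app.converters import WordClassConverter

app = Flask(__name__)
app.url_map.converters['word_class'] = WordClassConverter

@app.route('/words/<word_class:model>/')
def list_words(model):
    # model is the Noun or Verb class returned by the converter's to_python().
    return model.__name__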
cb9a3e5b17eb0049e0e9318c92b4e37b505df058
|
setup.py
|
setup.py
|
#!/usr/bin/env python
from setuptools import setup
install_requires = ['Django>=1.5']
try:
from collections import OrderedDict
except ImportError:
install_requires.append('ordereddict>=1.1')
setup(
name='django-auth-policy',
version='0.9.4',
zip_safe=False,
description='Enforces a couple of common authentication policies for the '
'Django web framework.',
author='Fox-IT B.V.',
author_email='fox@fox-it.com',
maintainer='Rudolph Froger',
maintainer_email='rudolphfroger@estrate.nl',
url='https://github.com/rudolphfroger/django-auth-policy',
license='BSD',
packages=['django_auth_policy'],
package_data={'django_auth_policy': ['locale/*/LC_MESSAGES/*.mo',
'locale/*/LC_MESSAGES/*.po']},
install_requires=install_requires,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Django',
'Topic :: Security',
'Topic :: Internet :: WWW/HTTP :: Session',
'Topic :: Software Development :: Libraries :: Python Modules'
],
)
|
#!/usr/bin/env python
from setuptools import setup, find_packages
install_requires = ['Django>=1.5']
try:
from collections import OrderedDict
except ImportError:
install_requires.append('ordereddict>=1.1')
setup(
name='django-auth-policy',
version='0.9.5',
zip_safe=False,
description='Enforces a couple of common authentication policies for the '
'Django web framework.',
author='Fox-IT B.V.',
author_email='fox@fox-it.com',
maintainer='Rudolph Froger',
maintainer_email='rudolphfroger@estrate.nl',
url='https://github.com/rudolphfroger/django-auth-policy',
license='BSD',
packages=find_packages(exclude=["testsite", "testsite.*", "*.tests", "*.tests.*", "tests.*", "tests"]),
package_data={'django_auth_policy': ['locale/*/LC_MESSAGES/*.mo',
'locale/*/LC_MESSAGES/*.po']},
install_requires=install_requires,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Django',
'Topic :: Security',
'Topic :: Internet :: WWW/HTTP :: Session',
'Topic :: Software Development :: Libraries :: Python Modules'
],
)
|
Use find_packages to find migrations and management commands
|
Use find_packages to find migrations and management commands
|
Python
|
bsd-3-clause
|
mcella/django-auth-policy,mcella/django-auth-policy,Dreamsolution/django-auth-policy,Dreamsolution/django-auth-policy
|
#!/usr/bin/env python
from setuptools import setup
install_requires = ['Django>=1.5']
try:
from collections import OrderedDict
except ImportError:
install_requires.append('ordereddict>=1.1')
setup(
name='django-auth-policy',
version='0.9.4',
zip_safe=False,
description='Enforces a couple of common authentication policies for the '
'Django web framework.',
author='Fox-IT B.V.',
author_email='fox@fox-it.com',
maintainer='Rudolph Froger',
maintainer_email='rudolphfroger@estrate.nl',
url='https://github.com/rudolphfroger/django-auth-policy',
license='BSD',
packages=['django_auth_policy'],
package_data={'django_auth_policy': ['locale/*/LC_MESSAGES/*.mo',
'locale/*/LC_MESSAGES/*.po']},
install_requires=install_requires,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Django',
'Topic :: Security',
'Topic :: Internet :: WWW/HTTP :: Session',
'Topic :: Software Development :: Libraries :: Python Modules'
],
)
Use find_packages to find migrations and management commands
|
#!/usr/bin/env python
from setuptools import setup, find_packages
install_requires = ['Django>=1.5']
try:
from collections import OrderedDict
except ImportError:
install_requires.append('ordereddict>=1.1')
setup(
name='django-auth-policy',
version='0.9.5',
zip_safe=False,
description='Enforces a couple of common authentication policies for the '
'Django web framework.',
author='Fox-IT B.V.',
author_email='fox@fox-it.com',
maintainer='Rudolph Froger',
maintainer_email='rudolphfroger@estrate.nl',
url='https://github.com/rudolphfroger/django-auth-policy',
license='BSD',
packages=find_packages(exclude=["testsite", "testsite.*", "*.tests", "*.tests.*", "tests.*", "tests"]),
package_data={'django_auth_policy': ['locale/*/LC_MESSAGES/*.mo',
'locale/*/LC_MESSAGES/*.po']},
install_requires=install_requires,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Django',
'Topic :: Security',
'Topic :: Internet :: WWW/HTTP :: Session',
'Topic :: Software Development :: Libraries :: Python Modules'
],
)
|
<commit_before>#!/usr/bin/env python
from setuptools import setup
install_requires = ['Django>=1.5']
try:
from collections import OrderedDict
except ImportError:
install_requires.append('ordereddict>=1.1')
setup(
name='django-auth-policy',
version='0.9.4',
zip_safe=False,
description='Enforces a couple of common authentication policies for the '
'Django web framework.',
author='Fox-IT B.V.',
author_email='fox@fox-it.com',
maintainer='Rudolph Froger',
maintainer_email='rudolphfroger@estrate.nl',
url='https://github.com/rudolphfroger/django-auth-policy',
license='BSD',
packages=['django_auth_policy'],
package_data={'django_auth_policy': ['locale/*/LC_MESSAGES/*.mo',
'locale/*/LC_MESSAGES/*.po']},
install_requires=install_requires,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Django',
'Topic :: Security',
'Topic :: Internet :: WWW/HTTP :: Session',
'Topic :: Software Development :: Libraries :: Python Modules'
],
)
<commit_msg>Use find_packages to find migrations and management commands<commit_after>
|
#!/usr/bin/env python
from setuptools import setup, find_packages
install_requires = ['Django>=1.5']
try:
from collections import OrderedDict
except ImportError:
install_requires.append('ordereddict>=1.1')
setup(
name='django-auth-policy',
version='0.9.5',
zip_safe=False,
description='Enforces a couple of common authentication policies for the '
'Django web framework.',
author='Fox-IT B.V.',
author_email='fox@fox-it.com',
maintainer='Rudolph Froger',
maintainer_email='rudolphfroger@estrate.nl',
url='https://github.com/rudolphfroger/django-auth-policy',
license='BSD',
packages=find_packages(exclude=["testsite", "testsite.*", "*.tests", "*.tests.*", "tests.*", "tests"]),
package_data={'django_auth_policy': ['locale/*/LC_MESSAGES/*.mo',
'locale/*/LC_MESSAGES/*.po']},
install_requires=install_requires,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Django',
'Topic :: Security',
'Topic :: Internet :: WWW/HTTP :: Session',
'Topic :: Software Development :: Libraries :: Python Modules'
],
)
|
#!/usr/bin/env python
from setuptools import setup
install_requires = ['Django>=1.5']
try:
from collections import OrderedDict
except ImportError:
install_requires.append('ordereddict>=1.1')
setup(
name='django-auth-policy',
version='0.9.4',
zip_safe=False,
description='Enforces a couple of common authentication policies for the '
'Django web framework.',
author='Fox-IT B.V.',
author_email='fox@fox-it.com',
maintainer='Rudolph Froger',
maintainer_email='rudolphfroger@estrate.nl',
url='https://github.com/rudolphfroger/django-auth-policy',
license='BSD',
packages=['django_auth_policy'],
package_data={'django_auth_policy': ['locale/*/LC_MESSAGES/*.mo',
'locale/*/LC_MESSAGES/*.po']},
install_requires=install_requires,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Django',
'Topic :: Security',
'Topic :: Internet :: WWW/HTTP :: Session',
'Topic :: Software Development :: Libraries :: Python Modules'
],
)
Use find_packages to find migrations and management commands
#!/usr/bin/env python
from setuptools import setup, find_packages
install_requires = ['Django>=1.5']
try:
from collections import OrderedDict
except ImportError:
install_requires.append('ordereddict>=1.1')
setup(
name='django-auth-policy',
version='0.9.5',
zip_safe=False,
description='Enforces a couple of common authentication policies for the '
'Django web framework.',
author='Fox-IT B.V.',
author_email='fox@fox-it.com',
maintainer='Rudolph Froger',
maintainer_email='rudolphfroger@estrate.nl',
url='https://github.com/rudolphfroger/django-auth-policy',
license='BSD',
packages=find_packages(exclude=["testsite", "testsite.*", "*.tests", "*.tests.*", "tests.*", "tests"]),
package_data={'django_auth_policy': ['locale/*/LC_MESSAGES/*.mo',
'locale/*/LC_MESSAGES/*.po']},
install_requires=install_requires,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Django',
'Topic :: Security',
'Topic :: Internet :: WWW/HTTP :: Session',
'Topic :: Software Development :: Libraries :: Python Modules'
],
)
|
<commit_before>#!/usr/bin/env python
from setuptools import setup
install_requires = ['Django>=1.5']
try:
from collections import OrderedDict
except ImportError:
install_requires.append('ordereddict>=1.1')
setup(
name='django-auth-policy',
version='0.9.4',
zip_safe=False,
description='Enforces a couple of common authentication policies for the '
'Django web framework.',
author='Fox-IT B.V.',
author_email='fox@fox-it.com',
maintainer='Rudolph Froger',
maintainer_email='rudolphfroger@estrate.nl',
url='https://github.com/rudolphfroger/django-auth-policy',
license='BSD',
packages=['django_auth_policy'],
package_data={'django_auth_policy': ['locale/*/LC_MESSAGES/*.mo',
'locale/*/LC_MESSAGES/*.po']},
install_requires=install_requires,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Django',
'Topic :: Security',
'Topic :: Internet :: WWW/HTTP :: Session',
'Topic :: Software Development :: Libraries :: Python Modules'
],
)
<commit_msg>Use find_packages to find migrations and management commands<commit_after>#!/usr/bin/env python
from setuptools import setup, find_packages
install_requires = ['Django>=1.5']
try:
from collections import OrderedDict
except ImportError:
install_requires.append('ordereddict>=1.1')
setup(
name='django-auth-policy',
version='0.9.5',
zip_safe=False,
description='Enforces a couple of common authentication policies for the '
'Django web framework.',
author='Fox-IT B.V.',
author_email='fox@fox-it.com',
maintainer='Rudolph Froger',
maintainer_email='rudolphfroger@estrate.nl',
url='https://github.com/rudolphfroger/django-auth-policy',
license='BSD',
packages=find_packages(exclude=["testsite", "testsite.*", "*.tests", "*.tests.*", "tests.*", "tests"]),
package_data={'django_auth_policy': ['locale/*/LC_MESSAGES/*.mo',
'locale/*/LC_MESSAGES/*.po']},
install_requires=install_requires,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Django',
'Topic :: Security',
'Topic :: Internet :: WWW/HTTP :: Session',
'Topic :: Software Development :: Libraries :: Python Modules'
],
)
|
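For the find_packages change above, a minimal sketch of the difference it makes (hypothetical package layout; names are illustrative, not taken from the actual repository):

from setuptools import find_packages

# An explicit packages=['django_auth_policy'] ships only the top-level package.
# find_packages() also discovers nested packages such as migrations and
# management commands, e.g. for a layout like
#   django_auth_policy/__init__.py
#   django_auth_policy/migrations/__init__.py
#   django_auth_policy/management/__init__.py
#   django_auth_policy/management/commands/__init__.py
print(find_packages(exclude=["testsite", "testsite.*", "tests", "tests.*"]))
# -> ['django_auth_policy', 'django_auth_policy.migrations',
#     'django_auth_policy.management', 'django_auth_policy.management.commands']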
c0b15483dc9f08cd203242e55cadf1101e98f692
|
helpers/graph_creator.py
|
helpers/graph_creator.py
|
import os
from alpha_vantage.timeseries import TimeSeries
from alpha_vantage.techindicators import TechIndicators
import matplotlib.pyplot as plt
if __name__ == '__main__':
"""
Simple script to create the reference pictures
"""
    ts = TimeSeries(key=os.environ['API_KEY'], output_format='pandas')
data, meta_data = ts.get_intraday(symbol='MSFT',interval='1min', outputsize='full')
data['close'].plot()
plt.savefig('../images/docs_ts_msft_example.png')
ti = TechIndicators(key=os.environ['API_KEY'], output_format='pandas')
data, meta_data = ti.get_bbands(symbol='MSFT', interval='1min')
plt.savefig('../images/docs_ti_msft_example.png')
|
Add helpers for creating images
|
chore: Add helpers for creating images
|
Python
|
mit
|
RomelTorres/alpha_vantage
|
chore: Add helpers for creating images
|
import os
from alpha_vantage.timeseries import TimeSeries
from alpha_vantage.techindicators import TechIndicators
import matplotlib.pyplot as plt
if __name__ == '__main__':
"""
Simple script to create the reference pictures
"""
    ts = TimeSeries(key=os.environ['API_KEY'], output_format='pandas')
data, meta_data = ts.get_intraday(symbol='MSFT',interval='1min', outputsize='full')
data['close'].plot()
plt.savefig('../images/docs_ts_msft_example.png')
ti = TechIndicators(key=os.environ['API_KEY'], output_format='pandas')
data, meta_data = ti.get_bbands(symbol='MSFT', interval='1min')
plt.savefig('../images/docs_ti_msft_example.png')
|
<commit_before><commit_msg>chore: Add helpers for creating images<commit_after>
|
import os
from alpha_vantage.timeseries import TimeSeries
from alpha_vantage.techindicators import TechIndicators
import matplotlib.pyplot as plt
if __name__ == '__main__':
"""
Simple script to create the reference pictures
"""
    ts = TimeSeries(key=os.environ['API_KEY'], output_format='pandas')
data, meta_data = ts.get_intraday(symbol='MSFT',interval='1min', outputsize='full')
data['close'].plot()
plt.savefig('../images/docs_ts_msft_example.png')
ti = TechIndicators(key=os.environ['API_KEY'], output_format='pandas')
data, meta_data = ti.get_bbands(symbol='MSFT', interval='1min')
plt.savefig('../images/docs_ti_msft_example.png')
|
chore: Add helpers for creating images
import os
from alpha_vantage.timeseries import TimeSeries
from alpha_vantage.techindicators import TechIndicators
import matplotlib.pyplot as plt
if __name__ == '__main__':
"""
Simple script to create the reference pictures
"""
    ts = TimeSeries(key=os.environ['API_KEY'], output_format='pandas')
data, meta_data = ts.get_intraday(symbol='MSFT',interval='1min', outputsize='full')
data['close'].plot()
plt.savefig('../images/docs_ts_msft_example.png')
ti = TechIndicators(key=os.environ['API_KEY'], output_format='pandas')
data, meta_data = ti.get_bbands(symbol='MSFT', interval='1min')
plt.savefig('../images/docs_ti_msft_example.png')
|
<commit_before><commit_msg>chore: Add helpers for creating images<commit_after>import os
from alpha_vantage.timeseries import TimeSeries
from alpha_vantage.techindicators import TechIndicators
import matplotlib.pyplot as plt
if __name__ == '__main__':
"""
Simple script to create the reference pictures
"""
    ts = TimeSeries(key=os.environ['API_KEY'], output_format='pandas')
data, meta_data = ts.get_intraday(symbol='MSFT',interval='1min', outputsize='full')
data['close'].plot()
plt.savefig('../images/docs_ts_msft_example.png')
ti = TechIndicators(key=os.environ['API_KEY'], output_format='pandas')
data, meta_data = ti.get_bbands(symbol='MSFT', interval='1min')
plt.savefig('../images/docs_ti_msft_example.png')
|
|
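A sketch of how the reference-image helper above could keep the two plots separate (assuming the published alpha_vantage API, where the importable class is TimeSeries and get_bbands returns a plottable DataFrame; column names follow the script above and may differ between library versions):

import os
import matplotlib.pyplot as plt
from alpha_vantage.timeseries import TimeSeries
from alpha_vantage.techindicators import TechIndicators

ts = TimeSeries(key=os.environ['API_KEY'], output_format='pandas')
data, _ = ts.get_intraday(symbol='MSFT', interval='1min', outputsize='full')
data['close'].plot()
plt.savefig('../images/docs_ts_msft_example.png')

plt.clf()  # start from an empty figure so the second image is not the intraday plot again
ti = TechIndicators(key=os.environ['API_KEY'], output_format='pandas')
data, _ = ti.get_bbands(symbol='MSFT', interval='1min')
data.plot()
plt.savefig('../images/docs_ti_msft_example.png')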
6f1578156f3fea396374a1d833fc8014ca07eaf0
|
websockets/test_client_server.py
|
websockets/test_client_server.py
|
import unittest
import tulip
from .client import *
from .server import *
@tulip.coroutine
def echo(ws, uri):
ws.send((yield from ws.recv()))
class ClientServerTests(unittest.TestCase):
def setUp(self):
self.loop = tulip.new_event_loop()
tulip.set_event_loop(self.loop)
server_task = serve(echo, 'localhost', 8642)
self.sockets = self.loop.run_until_complete(server_task)
client_coroutine = connect('ws://localhost:8642/')
self.client = self.loop.run_until_complete(client_coroutine)
def tearDown(self):
self.loop.run_until_complete(self.client.wait_close())
        for socket in self.sockets:
self.loop.stop_serving(socket)
self.loop.close()
def test_basic(self):
self.client.send("Hello!")
reply = self.loop.run_until_complete(self.client.recv())
self.assertEqual(reply, "Hello!")
|
Add basic test for the client and server APIs.
|
Add basic test for the client and server APIs.
|
Python
|
bsd-3-clause
|
aaugustin/websockets,aaugustin/websockets,biddyweb/websockets,aaugustin/websockets,dommert/pywebsockets,aaugustin/websockets,andrewyoung1991/websockets
|
Add basic test for the client and server APIs.
|
import unittest
import tulip
from .client import *
from .server import *
@tulip.coroutine
def echo(ws, uri):
ws.send((yield from ws.recv()))
class ClientServerTests(unittest.TestCase):
def setUp(self):
self.loop = tulip.new_event_loop()
tulip.set_event_loop(self.loop)
server_task = serve(echo, 'localhost', 8642)
self.sockets = self.loop.run_until_complete(server_task)
client_coroutine = connect('ws://localhost:8642/')
self.client = self.loop.run_until_complete(client_coroutine)
def tearDown(self):
self.loop.run_until_complete(self.client.wait_close())
        for socket in self.sockets:
self.loop.stop_serving(socket)
self.loop.close()
def test_basic(self):
self.client.send("Hello!")
reply = self.loop.run_until_complete(self.client.recv())
self.assertEqual(reply, "Hello!")
|
<commit_before><commit_msg>Add basic test for the client and server APIs.<commit_after>
|
import unittest
import tulip
from .client import *
from .server import *
@tulip.coroutine
def echo(ws, uri):
ws.send((yield from ws.recv()))
class ClientServerTests(unittest.TestCase):
def setUp(self):
self.loop = tulip.new_event_loop()
tulip.set_event_loop(self.loop)
server_task = serve(echo, 'localhost', 8642)
self.sockets = self.loop.run_until_complete(server_task)
client_coroutine = connect('ws://localhost:8642/')
self.client = self.loop.run_until_complete(client_coroutine)
def tearDown(self):
self.loop.run_until_complete(self.client.wait_close())
        for socket in self.sockets:
self.loop.stop_serving(socket)
self.loop.close()
def test_basic(self):
self.client.send("Hello!")
reply = self.loop.run_until_complete(self.client.recv())
self.assertEqual(reply, "Hello!")
|
Add basic test for the client and server APIs.
import unittest
import tulip
from .client import *
from .server import *
@tulip.coroutine
def echo(ws, uri):
ws.send((yield from ws.recv()))
class ClientServerTests(unittest.TestCase):
def setUp(self):
self.loop = tulip.new_event_loop()
tulip.set_event_loop(self.loop)
server_task = serve(echo, 'localhost', 8642)
self.sockets = self.loop.run_until_complete(server_task)
client_coroutine = connect('ws://localhost:8642/')
self.client = self.loop.run_until_complete(client_coroutine)
def tearDown(self):
self.loop.run_until_complete(self.client.wait_close())
        for socket in self.sockets:
self.loop.stop_serving(socket)
self.loop.close()
def test_basic(self):
self.client.send("Hello!")
reply = self.loop.run_until_complete(self.client.recv())
self.assertEqual(reply, "Hello!")
|
<commit_before><commit_msg>Add basic test for the client and server APIs.<commit_after>import unittest
import tulip
from .client import *
from .server import *
@tulip.coroutine
def echo(ws, uri):
ws.send((yield from ws.recv()))
class ClientServerTests(unittest.TestCase):
def setUp(self):
self.loop = tulip.new_event_loop()
tulip.set_event_loop(self.loop)
server_task = serve(echo, 'localhost', 8642)
self.sockets = self.loop.run_until_complete(server_task)
client_coroutine = connect('ws://localhost:8642/')
self.client = self.loop.run_until_complete(client_coroutine)
def tearDown(self):
self.loop.run_until_complete(self.client.wait_close())
        for socket in self.sockets:
self.loop.stop_serving(socket)
self.loop.close()
def test_basic(self):
self.client.send("Hello!")
reply = self.loop.run_until_complete(self.client.recv())
self.assertEqual(reply, "Hello!")
|
|
173bc64a3322402a9b06dbba2e618013e1204bc8
|
tests/test_ghostscript.py
|
tests/test_ghostscript.py
|
import subprocess
import unittest
class GhostscriptTest(unittest.TestCase):
def test_installed(self):
process = subprocess.Popen(
['gs', '--version'],
stdin=None,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, stderr = process.communicate()
self.assertEqual(process.returncode, 0)
self.assertEqual(stderr, "")
self.assertRegexpMatches(stdout, r'9\.\d\d')
|
Add a test to check that ghostscript is installed.
|
Add a test to check that ghostscript is installed.
|
Python
|
mit
|
YPlan/treepoem
|
Add a test to check that ghostscript is installed.
|
import subprocess
import unittest
class GhostscriptTest(unittest.TestCase):
def test_installed(self):
process = subprocess.Popen(
['gs', '--version'],
stdin=None,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, stderr = process.communicate()
self.assertEqual(process.returncode, 0)
self.assertEqual(stderr, "")
self.assertRegexpMatches(stdout, r'9\.\d\d')
|
<commit_before><commit_msg>Add a test to check that ghostscript is installed.<commit_after>
|
import subprocess
import unittest
class GhostscriptTest(unittest.TestCase):
def test_installed(self):
process = subprocess.Popen(
['gs', '--version'],
stdin=None,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, stderr = process.communicate()
self.assertEqual(process.returncode, 0)
self.assertEqual(stderr, "")
self.assertRegexpMatches(stdout, r'9\.\d\d')
|
Add a test to check that ghostscript is installed.
import subprocess
import unittest
class GhostscriptTest(unittest.TestCase):
def test_installed(self):
process = subprocess.Popen(
['gs', '--version'],
stdin=None,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, stderr = process.communicate()
self.assertEqual(process.returncode, 0)
self.assertEqual(stderr, "")
self.assertRegexpMatches(stdout, r'9\.\d\d')
|
<commit_before><commit_msg>Add a test to check that ghostscript is installed.<commit_after>import subprocess
import unittest
class GhostscriptTest(unittest.TestCase):
def test_installed(self):
process = subprocess.Popen(
['gs', '--version'],
stdin=None,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, stderr = process.communicate()
self.assertEqual(process.returncode, 0)
self.assertEqual(stderr, "")
self.assertRegexpMatches(stdout, r'9\.\d\d')
|
|
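A variant of the ghostscript check written for Python 3, where subprocess output is bytes unless text mode is requested (an illustrative sketch assuming Python >= 3.7, not the project's actual test):

import re
import subprocess

def installed_ghostscript_version():
    # text=True decodes stdout/stderr to str; check=True raises on a non-zero exit code
    result = subprocess.run(
        ['gs', '--version'],
        capture_output=True,
        text=True,
        check=True,
    )
    assert result.stderr == ''
    assert re.match(r'9\.\d\d', result.stdout)
    return result.stdout.strip()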
468edaf50f1cbad52a61c5d5cd160de0fa128cbc
|
tests.py
|
tests.py
|
import unittest
from app import app
class TestScorepy(unittest.TestCase):
def setUp(self):
self.app = app.test_client()
self.app.testing = True
def tearDown(self):
pass
def test_index_response(self):
response = self.app.get('/')
self.assertEqual(response.status_code, 200)
if __name__ == '__main__':
unittest.main()
|
Add basic unit test for index status code
|
Add basic unit test for index status code
|
Python
|
mit
|
rtfoley/scorepy,rtfoley/scorepy,rtfoley/scorepy
|
Add basic unit test for index status code
|
import unittest
from app import app
class TestScorepy(unittest.TestCase):
def setUp(self):
self.app = app.test_client()
self.app.testing = True
def tearDown(self):
pass
def test_index_response(self):
response = self.app.get('/')
self.assertEqual(response.status_code, 200)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add basic unit test for index status code<commit_after>
|
import unittest
from app import app
class TestScorepy(unittest.TestCase):
def setUp(self):
self.app = app.test_client()
self.app.testing = True
def tearDown(self):
pass
def test_index_response(self):
response = self.app.get('/')
self.assertEqual(response.status_code, 200)
if __name__ == '__main__':
unittest.main()
|
Add basic unit test for index status code
import unittest
from app import app
class TestScorepy(unittest.TestCase):
def setUp(self):
self.app = app.test_client()
self.app.testing = True
def tearDown(self):
pass
def test_index_response(self):
response = self.app.get('/')
self.assertEqual(response.status_code, 200)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add basic unit test for index status code<commit_after>import unittest
from app import app
class TestScorepy(unittest.TestCase):
def setUp(self):
self.app = app.test_client()
self.app.testing = True
def tearDown(self):
pass
def test_index_response(self):
response = self.app.get('/')
self.assertEqual(response.status_code, 200)
if __name__ == '__main__':
unittest.main()
|
|
5743f3e2a895c913dee0d4ff784970cd5c25a945
|
tools/perf/perf_tools/pica.py
|
tools/perf/perf_tools/pica.py
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from telemetry.core import util
from telemetry.page import page_measurement
from telemetry.page import page_set
class Pica(page_measurement.PageMeasurement):
def CreatePageSet(self, _, options):
return page_set.PageSet.FromDict({
'archive_data_file': '../data/pica.json',
'pages': [
{
'url': 'http://www.polymer-project.org/' +
'polymer-all/projects/pica/index.html'
}
]
}, os.path.abspath(__file__))
def CustomizeBrowserOptions(self, options):
# Pica requires non-deterministic Date and Math.random calls
# See http://crbug.com/255641
options.wpr_make_javascript_deterministic = False
def WillNavigateToPage(self, page, tab):
page.script_to_evaluate_on_commit = """
document.addEventListener('WebComponentsReady', function() {
var unused = document.body.offsetHeight; // force layout
window.__pica_load_time = performance.now();
});
"""
def MeasurePage(self, _, tab, results):
def _IsDone():
return tab.EvaluateJavaScript('window.__pica_load_time != undefined')
util.WaitFor(_IsDone, 60)
result = int(tab.EvaluateJavaScript('__pica_load_time'))
results.Add('Total', 'ms', result)
|
Add Telemetry measurement for Polymer demo app
|
Add Telemetry measurement for Polymer demo app
This is a rough first version that simply measures time until
first full layout.
Current timing of ten runs:
*RESULT Total: Total= [2828,2292,2477,2454,2491,2463,2515,2501,2484,2595] ms
Avg Total: 2510.000000ms
Sd Total: 134.763167ms
R=tonyg
Review URL: https://chromiumcodereview.appspot.com/18118005
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@209306 0039d316-1c4b-4281-b951-d872f2087c98
|
Python
|
bsd-3-clause
|
fujunwei/chromium-crosswalk,anirudhSK/chromium,mohamed--abdel-maksoud/chromium.src,anirudhSK/chromium,hgl888/chromium-crosswalk-efl,PeterWangIntel/chromium-crosswalk,patrickm/chromium.src,dushu1203/chromium.src,TheTypoMaster/chromium-crosswalk,dushu1203/chromium.src,ltilve/chromium,M4sse/chromium.src,PeterWangIntel/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,dushu1203/chromium.src,mogoweb/chromium-crosswalk,chuan9/chromium-crosswalk,jaruba/chromium.src,ltilve/chromium,littlstar/chromium.src,Chilledheart/chromium,hgl888/chromium-crosswalk,mogoweb/chromium-crosswalk,littlstar/chromium.src,M4sse/chromium.src,hgl888/chromium-crosswalk-efl,Chilledheart/chromium,ltilve/chromium,M4sse/chromium.src,ondra-novak/chromium.src,markYoungH/chromium.src,ondra-novak/chromium.src,ondra-novak/chromium.src,bright-sparks/chromium-spacewalk,bright-sparks/chromium-spacewalk,krieger-od/nwjs_chromium.src,mogoweb/chromium-crosswalk,hgl888/chromium-crosswalk-efl,hgl888/chromium-crosswalk-efl,mohamed--abdel-maksoud/chromium.src,fujunwei/chromium-crosswalk,patrickm/chromium.src,chuan9/chromium-crosswalk,ondra-novak/chromium.src,Fireblend/chromium-crosswalk,Jonekee/chromium.src,hgl888/chromium-crosswalk-efl,Pluto-tv/chromium-crosswalk,krieger-od/nwjs_chromium.src,markYoungH/chromium.src,crosswalk-project/chromium-crosswalk-efl,Jonekee/chromium.src,bright-sparks/chromium-spacewalk,jaruba/chromium.src,M4sse/chromium.src,mohamed--abdel-maksoud/chromium.src,TheTypoMaster/chromium-crosswalk,jaruba/chromium.src,axinging/chromium-crosswalk,chuan9/chromium-crosswalk,ondra-novak/chromium.src,Fireblend/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,patrickm/chromium.src,krieger-od/nwjs_chromium.src,ondra-novak/chromium.src,hgl888/chromium-crosswalk-efl,chuan9/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,dednal/chromium.src,patrickm/chromium.src,markYoungH/chromium.src,krieger-od/nwjs_chromium.src,littlstar/chromium.src,Fireblend/chromium-crosswalk,chuan9/chromium-crosswalk,dednal/chromium.src,ltilve/chromium,markYoungH/chromium.src,fujunwei/chromium-crosswalk,M4sse/chromium.src,dednal/chromium.src,ltilve/chromium,crosswalk-project/chromium-crosswalk-efl,ChromiumWebApps/chromium,fujunwei/chromium-crosswalk,Pluto-tv/chromium-crosswalk,hgl888/chromium-crosswalk,krieger-od/nwjs_chromium.src,dushu1203/chromium.src,fujunwei/chromium-crosswalk,Jonekee/chromium.src,Fireblend/chromium-crosswalk,dushu1203/chromium.src,patrickm/chromium.src,ltilve/chromium,Fireblend/chromium-crosswalk,dednal/chromium.src,Just-D/chromium-1,littlstar/chromium.src,mogoweb/chromium-crosswalk,jaruba/chromium.src,fujunwei/chromium-crosswalk,mogoweb/chromium-crosswalk,markYoungH/chromium.src,Just-D/chromium-1,littlstar/chromium.src,axinging/chromium-crosswalk,anirudhSK/chromium,dushu1203/chromium.src,Jonekee/chromium.src,ChromiumWebApps/chromium,Pluto-tv/chromium-crosswalk,mogoweb/chromium-crosswalk,ChromiumWebApps/chromium,Just-D/chromium-1,Chilledheart/chromium,bright-sparks/chromium-spacewalk,dushu1203/chromium.src,M4sse/chromium.src,PeterWangIntel/chromium-crosswalk,Fireblend/chromium-crosswalk,axinging/chromium-crosswalk,dushu1203/chromium.src,axinging/chromium-crosswalk,axinging/chromium-crosswalk,hgl888/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,TheTypoMaster/chromium-crosswalk,jaruba/chromium.src,ltilve/chromium,dednal/chromium.src,markYoungH/chromium.src,jaruba/chromium.src,Jonekee/chromium.src,M4sse/chromium.src,dednal/chromium.src,jaruba/chromium.src,M4sse/chromium.src,chuan9/chromium-crosswalk,patrickm/
chromium.src,Jonekee/chromium.src,anirudhSK/chromium,PeterWangIntel/chromium-crosswalk,Pluto-tv/chromium-crosswalk,markYoungH/chromium.src,anirudhSK/chromium,mohamed--abdel-maksoud/chromium.src,Just-D/chromium-1,krieger-od/nwjs_chromium.src,TheTypoMaster/chromium-crosswalk,patrickm/chromium.src,krieger-od/nwjs_chromium.src,bright-sparks/chromium-spacewalk,hgl888/chromium-crosswalk,Chilledheart/chromium,ChromiumWebApps/chromium,Just-D/chromium-1,Pluto-tv/chromium-crosswalk,ltilve/chromium,Chilledheart/chromium,bright-sparks/chromium-spacewalk,axinging/chromium-crosswalk,fujunwei/chromium-crosswalk,fujunwei/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,axinging/chromium-crosswalk,hgl888/chromium-crosswalk-efl,hgl888/chromium-crosswalk,ltilve/chromium,TheTypoMaster/chromium-crosswalk,bright-sparks/chromium-spacewalk,dednal/chromium.src,anirudhSK/chromium,crosswalk-project/chromium-crosswalk-efl,M4sse/chromium.src,Jonekee/chromium.src,anirudhSK/chromium,krieger-od/nwjs_chromium.src,crosswalk-project/chromium-crosswalk-efl,chuan9/chromium-crosswalk,jaruba/chromium.src,dednal/chromium.src,markYoungH/chromium.src,ChromiumWebApps/chromium,markYoungH/chromium.src,dushu1203/chromium.src,ChromiumWebApps/chromium,TheTypoMaster/chromium-crosswalk,Chilledheart/chromium,PeterWangIntel/chromium-crosswalk,M4sse/chromium.src,TheTypoMaster/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,Jonekee/chromium.src,mohamed--abdel-maksoud/chromium.src,mohamed--abdel-maksoud/chromium.src,PeterWangIntel/chromium-crosswalk,littlstar/chromium.src,hgl888/chromium-crosswalk,Pluto-tv/chromium-crosswalk,Pluto-tv/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,krieger-od/nwjs_chromium.src,Just-D/chromium-1,dushu1203/chromium.src,markYoungH/chromium.src,dednal/chromium.src,Jonekee/chromium.src,axinging/chromium-crosswalk,Chilledheart/chromium,mohamed--abdel-maksoud/chromium.src,Chilledheart/chromium,mogoweb/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,ChromiumWebApps/chromium,Pluto-tv/chromium-crosswalk,anirudhSK/chromium,mogoweb/chromium-crosswalk,ondra-novak/chromium.src,Jonekee/chromium.src,ChromiumWebApps/chromium,anirudhSK/chromium,hgl888/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,bright-sparks/chromium-spacewalk,Fireblend/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,Fireblend/chromium-crosswalk,hgl888/chromium-crosswalk-efl,chuan9/chromium-crosswalk,hgl888/chromium-crosswalk-efl,ChromiumWebApps/chromium,patrickm/chromium.src,ChromiumWebApps/chromium,patrickm/chromium.src,dednal/chromium.src,littlstar/chromium.src,hgl888/chromium-crosswalk,hgl888/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,axinging/chromium-crosswalk,Just-D/chromium-1,Pluto-tv/chromium-crosswalk,anirudhSK/chromium,ondra-novak/chromium.src,axinging/chromium-crosswalk,Jonekee/chromium.src,hgl888/chromium-crosswalk-efl,anirudhSK/chromium,ondra-novak/chromium.src,Chilledheart/chromium,anirudhSK/chromium,M4sse/chromium.src,fujunwei/chromium-crosswalk,Just-D/chromium-1,TheTypoMaster/chromium-crosswalk,mogoweb/chromium-crosswalk,dushu1203/chromium.src,krieger-od/nwjs_chromium.src,ChromiumWebApps/chromium,chuan9/chromium-crosswalk,littlstar/chromium.src,axinging/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,Just-D/chromium-1,jaruba/chromium.src,bright-sparks/chromium-spacewalk,ChromiumWebApps/chromium,mohamed--abdel-maksoud/chromium.src,jaruba/chromium.src,mogoweb/chromium-crosswalk,markYoungH/chromium.src,jaruba/chromium.src,krieger-od/nwjs_chromium.src,Fireblend/chromium-crosswalk
,dednal/chromium.src
|
Add Telemetry measurement for Polymer demo app
This is a rough first version that simply measures time until
first full layout.
Current timing of ten runs:
*RESULT Total: Total= [2828,2292,2477,2454,2491,2463,2515,2501,2484,2595] ms
Avg Total: 2510.000000ms
Sd Total: 134.763167ms
R=tonyg
Review URL: https://chromiumcodereview.appspot.com/18118005
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@209306 0039d316-1c4b-4281-b951-d872f2087c98
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from telemetry.core import util
from telemetry.page import page_measurement
from telemetry.page import page_set
class Pica(page_measurement.PageMeasurement):
def CreatePageSet(self, _, options):
return page_set.PageSet.FromDict({
'archive_data_file': '../data/pica.json',
'pages': [
{
'url': 'http://www.polymer-project.org/' +
'polymer-all/projects/pica/index.html'
}
]
}, os.path.abspath(__file__))
def CustomizeBrowserOptions(self, options):
# Pica requires non-deterministic Date and Math.random calls
# See http://crbug.com/255641
options.wpr_make_javascript_deterministic = False
def WillNavigateToPage(self, page, tab):
page.script_to_evaluate_on_commit = """
document.addEventListener('WebComponentsReady', function() {
var unused = document.body.offsetHeight; // force layout
window.__pica_load_time = performance.now();
});
"""
def MeasurePage(self, _, tab, results):
def _IsDone():
return tab.EvaluateJavaScript('window.__pica_load_time != undefined')
util.WaitFor(_IsDone, 60)
result = int(tab.EvaluateJavaScript('__pica_load_time'))
results.Add('Total', 'ms', result)
|
<commit_before><commit_msg>Add Telemetry measurement for Polymer demo app
This is a rough first version that simply measures time until
first full layout.
Current timing of ten runs:
*RESULT Total: Total= [2828,2292,2477,2454,2491,2463,2515,2501,2484,2595] ms
Avg Total: 2510.000000ms
Sd Total: 134.763167ms
R=tonyg
Review URL: https://chromiumcodereview.appspot.com/18118005
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@209306 0039d316-1c4b-4281-b951-d872f2087c98<commit_after>
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from telemetry.core import util
from telemetry.page import page_measurement
from telemetry.page import page_set
class Pica(page_measurement.PageMeasurement):
def CreatePageSet(self, _, options):
return page_set.PageSet.FromDict({
'archive_data_file': '../data/pica.json',
'pages': [
{
'url': 'http://www.polymer-project.org/' +
'polymer-all/projects/pica/index.html'
}
]
}, os.path.abspath(__file__))
def CustomizeBrowserOptions(self, options):
# Pica requires non-deterministic Date and Math.random calls
# See http://crbug.com/255641
options.wpr_make_javascript_deterministic = False
def WillNavigateToPage(self, page, tab):
page.script_to_evaluate_on_commit = """
document.addEventListener('WebComponentsReady', function() {
var unused = document.body.offsetHeight; // force layout
window.__pica_load_time = performance.now();
});
"""
def MeasurePage(self, _, tab, results):
def _IsDone():
return tab.EvaluateJavaScript('window.__pica_load_time != undefined')
util.WaitFor(_IsDone, 60)
result = int(tab.EvaluateJavaScript('__pica_load_time'))
results.Add('Total', 'ms', result)
|
Add Telemetry measurement for Polymer demo app
This is a rough first version that simply measures time until
first full layout.
Current timing of ten runs:
*RESULT Total: Total= [2828,2292,2477,2454,2491,2463,2515,2501,2484,2595] ms
Avg Total: 2510.000000ms
Sd Total: 134.763167ms
R=tonyg
Review URL: https://chromiumcodereview.appspot.com/18118005
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@209306 0039d316-1c4b-4281-b951-d872f2087c98
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from telemetry.core import util
from telemetry.page import page_measurement
from telemetry.page import page_set
class Pica(page_measurement.PageMeasurement):
def CreatePageSet(self, _, options):
return page_set.PageSet.FromDict({
'archive_data_file': '../data/pica.json',
'pages': [
{
'url': 'http://www.polymer-project.org/' +
'polymer-all/projects/pica/index.html'
}
]
}, os.path.abspath(__file__))
def CustomizeBrowserOptions(self, options):
# Pica requires non-deterministic Date and Math.random calls
# See http://crbug.com/255641
options.wpr_make_javascript_deterministic = False
def WillNavigateToPage(self, page, tab):
page.script_to_evaluate_on_commit = """
document.addEventListener('WebComponentsReady', function() {
var unused = document.body.offsetHeight; // force layout
window.__pica_load_time = performance.now();
});
"""
def MeasurePage(self, _, tab, results):
def _IsDone():
return tab.EvaluateJavaScript('window.__pica_load_time != undefined')
util.WaitFor(_IsDone, 60)
result = int(tab.EvaluateJavaScript('__pica_load_time'))
results.Add('Total', 'ms', result)
|
<commit_before><commit_msg>Add Telemetry measurement for Polymer demo app
This is a rough first version that simply measures time until
first full layout.
Current timing of ten runs:
*RESULT Total: Total= [2828,2292,2477,2454,2491,2463,2515,2501,2484,2595] ms
Avg Total: 2510.000000ms
Sd Total: 134.763167ms
R=tonyg
Review URL: https://chromiumcodereview.appspot.com/18118005
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@209306 0039d316-1c4b-4281-b951-d872f2087c98<commit_after># Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from telemetry.core import util
from telemetry.page import page_measurement
from telemetry.page import page_set
class Pica(page_measurement.PageMeasurement):
def CreatePageSet(self, _, options):
return page_set.PageSet.FromDict({
'archive_data_file': '../data/pica.json',
'pages': [
{
'url': 'http://www.polymer-project.org/' +
'polymer-all/projects/pica/index.html'
}
]
}, os.path.abspath(__file__))
def CustomizeBrowserOptions(self, options):
# Pica requires non-deterministic Date and Math.random calls
# See http://crbug.com/255641
options.wpr_make_javascript_deterministic = False
def WillNavigateToPage(self, page, tab):
page.script_to_evaluate_on_commit = """
document.addEventListener('WebComponentsReady', function() {
var unused = document.body.offsetHeight; // force layout
window.__pica_load_time = performance.now();
});
"""
def MeasurePage(self, _, tab, results):
def _IsDone():
return tab.EvaluateJavaScript('window.__pica_load_time != undefined')
util.WaitFor(_IsDone, 60)
result = int(tab.EvaluateJavaScript('__pica_load_time'))
results.Add('Total', 'ms', result)
|
|
d7a031b7c701b01f646ea60966f9cab34a076db7
|
tests/test_ensure_ind.py
|
tests/test_ensure_ind.py
|
# -*- coding: utf-8 -*-
# The MIT License (MIT)
# Copyright (c) 2015-2016 MIT Probabilistic Computing Project
# Lead Developer: Feras Saad <fsaad@mit.edu>
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the
# following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
import itertools
import unittest
import numpy as np
from gpmcc.state import State
from gpmcc.utils import validation as vu
class EnsureIndependentTest(unittest.TestCase):
def test_naive_bayes(self):
D = np.random.normal(size=(10,1))
T = np.repeat(D, 10, axis=1)
Ci = list(itertools.combinations(range(10), 2))
state = State(T, ['normal']*10, Ci=Ci, seed=0)
state.transition(N=10, do_progress=0)
vu.validate_crp_constrained_partition(state.Zv, [], Ci)
def test_complex_relationships(self):
D = np.random.normal(size=(10,1))
T = np.repeat(D, 10, axis=1)
Ci = [(2,8), (0,3)]
state = State(T, ['normal']*10, Ci=Ci, seed=0)
state.transition(N=10, do_progress=0)
vu.validate_crp_constrained_partition(state.Zv, [], Ci)
if __name__ == '__main__':
unittest.main()
|
Add test for ENSURE INDEPENDENT. Depdendent is disabled for now, pending conditional models.
|
Add test for ENSURE INDEPENDENT. Depdendent is disabled for now, pending conditional models.
|
Python
|
apache-2.0
|
probcomp/cgpm,probcomp/cgpm
|
Add test for ENSURE INDEPENDENT. Depdendent is disabled for now, pending conditional models.
|
# -*- coding: utf-8 -*-
# The MIT License (MIT)
# Copyright (c) 2015-2016 MIT Probabilistic Computing Project
# Lead Developer: Feras Saad <fsaad@mit.edu>
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the
# following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
import itertools
import unittest
import numpy as np
from gpmcc.state import State
from gpmcc.utils import validation as vu
class EnsureIndependentTest(unittest.TestCase):
def test_naive_bayes(self):
D = np.random.normal(size=(10,1))
T = np.repeat(D, 10, axis=1)
Ci = list(itertools.combinations(range(10), 2))
state = State(T, ['normal']*10, Ci=Ci, seed=0)
state.transition(N=10, do_progress=0)
vu.validate_crp_constrained_partition(state.Zv, [], Ci)
def test_complex_relationships(self):
D = np.random.normal(size=(10,1))
T = np.repeat(D, 10, axis=1)
Ci = [(2,8), (0,3)]
state = State(T, ['normal']*10, Ci=Ci, seed=0)
state.transition(N=10, do_progress=0)
vu.validate_crp_constrained_partition(state.Zv, [], Ci)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test for ENSURE INDEPENDENT. Depdendent is disabled for now, pending conditional models.<commit_after>
|
# -*- coding: utf-8 -*-
# The MIT License (MIT)
# Copyright (c) 2015-2016 MIT Probabilistic Computing Project
# Lead Developer: Feras Saad <fsaad@mit.edu>
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the
# following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
import itertools
import unittest
import numpy as np
from gpmcc.state import State
from gpmcc.utils import validation as vu
class EnsureIndependentTest(unittest.TestCase):
def test_naive_bayes(self):
D = np.random.normal(size=(10,1))
T = np.repeat(D, 10, axis=1)
Ci = list(itertools.combinations(range(10), 2))
state = State(T, ['normal']*10, Ci=Ci, seed=0)
state.transition(N=10, do_progress=0)
vu.validate_crp_constrained_partition(state.Zv, [], Ci)
def test_complex_relationships(self):
D = np.random.normal(size=(10,1))
T = np.repeat(D, 10, axis=1)
Ci = [(2,8), (0,3)]
state = State(T, ['normal']*10, Ci=Ci, seed=0)
state.transition(N=10, do_progress=0)
vu.validate_crp_constrained_partition(state.Zv, [], Ci)
if __name__ == '__main__':
unittest.main()
|
Add test for ENSURE INDEPENDENT. Depdendent is disabled for now, pending conditional models.
# -*- coding: utf-8 -*-
# The MIT License (MIT)
# Copyright (c) 2015-2016 MIT Probabilistic Computing Project
# Lead Developer: Feras Saad <fsaad@mit.edu>
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the
# following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
import itertools
import unittest
import numpy as np
from gpmcc.state import State
from gpmcc.utils import validation as vu
class EnsureIndependentTest(unittest.TestCase):
def test_naive_bayes(self):
D = np.random.normal(size=(10,1))
T = np.repeat(D, 10, axis=1)
Ci = list(itertools.combinations(range(10), 2))
state = State(T, ['normal']*10, Ci=Ci, seed=0)
state.transition(N=10, do_progress=0)
vu.validate_crp_constrained_partition(state.Zv, [], Ci)
def test_complex_relationships(self):
D = np.random.normal(size=(10,1))
T = np.repeat(D, 10, axis=1)
Ci = [(2,8), (0,3)]
state = State(T, ['normal']*10, Ci=Ci, seed=0)
state.transition(N=10, do_progress=0)
vu.validate_crp_constrained_partition(state.Zv, [], Ci)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test for ENSURE INDEPENDENT. Depdendent is disabled for now, pending conditional models.<commit_after># -*- coding: utf-8 -*-
# The MIT License (MIT)
# Copyright (c) 2015-2016 MIT Probabilistic Computing Project
# Lead Developer: Feras Saad <fsaad@mit.edu>
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the
# following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
import itertools
import unittest
import numpy as np
from gpmcc.state import State
from gpmcc.utils import validation as vu
class EnsureIndependentTest(unittest.TestCase):
def test_naive_bayes(self):
D = np.random.normal(size=(10,1))
T = np.repeat(D, 10, axis=1)
Ci = list(itertools.combinations(range(10), 2))
state = State(T, ['normal']*10, Ci=Ci, seed=0)
state.transition(N=10, do_progress=0)
vu.validate_crp_constrained_partition(state.Zv, [], Ci)
def test_complex_relationships(self):
D = np.random.normal(size=(10,1))
T = np.repeat(D, 10, axis=1)
Ci = [(2,8), (0,3)]
state = State(T, ['normal']*10, Ci=Ci, seed=0)
state.transition(N=10, do_progress=0)
vu.validate_crp_constrained_partition(state.Zv, [], Ci)
if __name__ == '__main__':
unittest.main()
|
|
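The ENSURE INDEPENDENT tests above hinge on the constrained-partition check; a small sketch of the assumed semantics (not gpmcc's actual implementation): columns paired in Ci must land in different views of Zv, while columns paired in Cd must share a view.

def check_crp_constrained_partition(Zv, Cd, Ci):
    # Zv maps column index -> view index, as with state.Zv above
    for i, j in Cd:
        assert Zv[i] == Zv[j], 'dependent columns must share a view'
    for i, j in Ci:
        assert Zv[i] != Zv[j], 'independent columns must not share a view'
    return True

# Example: columns 0 and 2 share view 0, columns 1 and 3 share view 1.
check_crp_constrained_partition([0, 1, 0, 1], Cd=[(0, 2)], Ci=[(0, 1), (2, 3)])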
0911e03350962214867918ae8ad16b42ca0cae77
|
tests/test_repository.py
|
tests/test_repository.py
|
# Copyright 2015 Ian Cordasco
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twine import repository
def test_gpg_signature_structure_is_preserved():
data = {
'gpg_signature': ('filename.asc', 'filecontent'),
}
tuples = repository.Repository._convert_data_to_list_of_tuples(data)
assert tuples == [('gpg_signature', ('filename.asc', 'filecontent'))]
def test_content_structure_is_preserved():
data = {
'content': ('filename', 'filecontent'),
}
tuples = repository.Repository._convert_data_to_list_of_tuples(data)
assert tuples == [('content', ('filename', 'filecontent'))]
def test_iterables_are_flattened():
data = {
'platform': ['UNKNOWN'],
}
tuples = repository.Repository._convert_data_to_list_of_tuples(data)
assert tuples == [('platform', 'UNKNOWN')]
data = {
'platform': ['UNKNOWN', 'ANOTHERPLATFORM'],
}
tuples = repository.Repository._convert_data_to_list_of_tuples(data)
assert tuples == [('platform', 'UNKNOWN'),
('platform', 'ANOTHERPLATFORM')]
|
Add tests for GPG signature handling
|
Add tests for GPG signature handling
This will ideally prevent #137 from regressing.
|
Python
|
apache-2.0
|
pypa/twine,sigmavirus24/twine
|
Add tests for GPG signature handling
This will ideally prevent #137 from regressing.
|
# Copyright 2015 Ian Cordasco
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twine import repository
def test_gpg_signature_structure_is_preserved():
data = {
'gpg_signature': ('filename.asc', 'filecontent'),
}
tuples = repository.Repository._convert_data_to_list_of_tuples(data)
assert tuples == [('gpg_signature', ('filename.asc', 'filecontent'))]
def test_content_structure_is_preserved():
data = {
'content': ('filename', 'filecontent'),
}
tuples = repository.Repository._convert_data_to_list_of_tuples(data)
assert tuples == [('content', ('filename', 'filecontent'))]
def test_iterables_are_flattened():
data = {
'platform': ['UNKNOWN'],
}
tuples = repository.Repository._convert_data_to_list_of_tuples(data)
assert tuples == [('platform', 'UNKNOWN')]
data = {
'platform': ['UNKNOWN', 'ANOTHERPLATFORM'],
}
tuples = repository.Repository._convert_data_to_list_of_tuples(data)
assert tuples == [('platform', 'UNKNOWN'),
('platform', 'ANOTHERPLATFORM')]
|
<commit_before><commit_msg>Add tests for GPG signature handling
This will ideally prevent #137 from regressing.<commit_after>
|
# Copyright 2015 Ian Cordasco
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twine import repository
def test_gpg_signature_structure_is_preserved():
data = {
'gpg_signature': ('filename.asc', 'filecontent'),
}
tuples = repository.Repository._convert_data_to_list_of_tuples(data)
assert tuples == [('gpg_signature', ('filename.asc', 'filecontent'))]
def test_content_structure_is_preserved():
data = {
'content': ('filename', 'filecontent'),
}
tuples = repository.Repository._convert_data_to_list_of_tuples(data)
assert tuples == [('content', ('filename', 'filecontent'))]
def test_iterables_are_flattened():
data = {
'platform': ['UNKNOWN'],
}
tuples = repository.Repository._convert_data_to_list_of_tuples(data)
assert tuples == [('platform', 'UNKNOWN')]
data = {
'platform': ['UNKNOWN', 'ANOTHERPLATFORM'],
}
tuples = repository.Repository._convert_data_to_list_of_tuples(data)
assert tuples == [('platform', 'UNKNOWN'),
('platform', 'ANOTHERPLATFORM')]
|
Add tests for GPG signature handling
This will ideally prevent #137 from regressing.
# Copyright 2015 Ian Cordasco
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twine import repository
def test_gpg_signature_structure_is_preserved():
data = {
'gpg_signature': ('filename.asc', 'filecontent'),
}
tuples = repository.Repository._convert_data_to_list_of_tuples(data)
assert tuples == [('gpg_signature', ('filename.asc', 'filecontent'))]
def test_content_structure_is_preserved():
data = {
'content': ('filename', 'filecontent'),
}
tuples = repository.Repository._convert_data_to_list_of_tuples(data)
assert tuples == [('content', ('filename', 'filecontent'))]
def test_iterables_are_flattened():
data = {
'platform': ['UNKNOWN'],
}
tuples = repository.Repository._convert_data_to_list_of_tuples(data)
assert tuples == [('platform', 'UNKNOWN')]
data = {
'platform': ['UNKNOWN', 'ANOTHERPLATFORM'],
}
tuples = repository.Repository._convert_data_to_list_of_tuples(data)
assert tuples == [('platform', 'UNKNOWN'),
('platform', 'ANOTHERPLATFORM')]
|
<commit_before><commit_msg>Add tests for GPG signature handling
This will ideally prevent #137 from regressing.<commit_after># Copyright 2015 Ian Cordasco
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twine import repository
def test_gpg_signature_structure_is_preserved():
data = {
'gpg_signature': ('filename.asc', 'filecontent'),
}
tuples = repository.Repository._convert_data_to_list_of_tuples(data)
assert tuples == [('gpg_signature', ('filename.asc', 'filecontent'))]
def test_content_structure_is_preserved():
data = {
'content': ('filename', 'filecontent'),
}
tuples = repository.Repository._convert_data_to_list_of_tuples(data)
assert tuples == [('content', ('filename', 'filecontent'))]
def test_iterables_are_flattened():
data = {
'platform': ['UNKNOWN'],
}
tuples = repository.Repository._convert_data_to_list_of_tuples(data)
assert tuples == [('platform', 'UNKNOWN')]
data = {
'platform': ['UNKNOWN', 'ANOTHERPLATFORM'],
}
tuples = repository.Repository._convert_data_to_list_of_tuples(data)
assert tuples == [('platform', 'UNKNOWN'),
('platform', 'ANOTHERPLATFORM')]
|
|
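A plausible sketch of the conversion behaviour these twine tests pin down (an illustration of the expected behaviour, not twine's actual implementation): file-upload tuples such as gpg_signature and content are kept intact, while other iterable metadata values are expanded into repeated (key, value) pairs.

def convert_data_to_list_of_tuples(data):
    data_to_send = []
    for key, value in data.items():
        if key in ('gpg_signature', 'content'):
            # keep the (filename, filecontent) tuple as a single field
            data_to_send.append((key, value))
        elif isinstance(value, (list, tuple)):
            # flatten iterables into repeated fields
            data_to_send.extend((key, item) for item in value)
        else:
            data_to_send.append((key, value))
    return data_to_send

# {'platform': ['UNKNOWN', 'ANOTHERPLATFORM']} -> [('platform', 'UNKNOWN'), ('platform', 'ANOTHERPLATFORM')]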
6970a747a002503e8cefb71f178631e74eb110f2
|
thinc/neural/tests/unit/Model/test_properties.py
|
thinc/neural/tests/unit/Model/test_properties.py
|
import pytest
from ....base import Model
from ....ops import NumpyOps
@pytest.fixture
def model():
model = Model(ops=NumpyOps())
return model
def test_can_get_describe_params(model):
describe_params = list(model.describe_params)
def test_cant_set_describe_params(model):
with pytest.raises(AttributeError):
model.describe_params = 'hi'
def test_can_get_shape(model):
shape = model.shape
def test_can_set_shape(model):
model.shape = 'hi'
def test_can_get_input_shape(model):
input_shape = model.input_shape
def test_can_set_input_shape(model):
model.input_shape = (10,)
def test_can_get_output_shape(model):
output_shape = model.output_shape
def test_can_set_output_shape(model):
model.output_shape = (5,)
|
Add tests for Model properties
|
Add tests for Model properties
|
Python
|
mit
|
spacy-io/thinc,explosion/thinc,explosion/thinc,explosion/thinc,spacy-io/thinc,explosion/thinc,spacy-io/thinc
|
Add tests for Model properties
|
import pytest
from ....base import Model
from ....ops import NumpyOps
@pytest.fixture
def model():
model = Model(ops=NumpyOps())
return model
def test_can_get_describe_params(model):
describe_params = list(model.describe_params)
def test_cant_set_describe_params(model):
with pytest.raises(AttributeError):
model.describe_params = 'hi'
def test_can_get_shape(model):
shape = model.shape
def test_can_set_shape(model):
model.shape = 'hi'
def test_can_get_input_shape(model):
input_shape = model.input_shape
def test_can_set_input_shape(model):
model.input_shape = (10,)
def test_can_get_output_shape(model):
output_shape = model.output_shape
def test_can_set_output_shape(model):
model.output_shape = (5,)
|
<commit_before><commit_msg>Add tests for Model properties<commit_after>
|
import pytest
from ....base import Model
from ....ops import NumpyOps
@pytest.fixture
def model():
model = Model(ops=NumpyOps())
return model
def test_can_get_describe_params(model):
describe_params = list(model.describe_params)
def test_cant_set_describe_params(model):
with pytest.raises(AttributeError):
model.describe_params = 'hi'
def test_can_get_shape(model):
shape = model.shape
def test_can_set_shape(model):
model.shape = 'hi'
def test_can_get_input_shape(model):
input_shape = model.input_shape
def test_can_set_input_shape(model):
model.input_shape = (10,)
def test_can_get_output_shape(model):
output_shape = model.output_shape
def test_can_set_output_shape(model):
model.output_shape = (5,)
|
Add tests for Model properties
import pytest
from ....base import Model
from ....ops import NumpyOps
@pytest.fixture
def model():
model = Model(ops=NumpyOps())
return model
def test_can_get_describe_params(model):
describe_params = list(model.describe_params)
def test_cant_set_describe_params(model):
with pytest.raises(AttributeError):
model.describe_params = 'hi'
def test_can_get_shape(model):
shape = model.shape
def test_can_set_shape(model):
model.shape = 'hi'
def test_can_get_input_shape(model):
input_shape = model.input_shape
def test_can_set_input_shape(model):
model.input_shape = (10,)
def test_can_get_output_shape(model):
output_shape = model.output_shape
def test_can_set_output_shape(model):
model.output_shape = (5,)
|
<commit_before><commit_msg>Add tests for Model properties<commit_after>import pytest
from ....base import Model
from ....ops import NumpyOps
@pytest.fixture
def model():
model = Model(ops=NumpyOps())
return model
def test_can_get_describe_params(model):
describe_params = list(model.describe_params)
def test_cant_set_describe_params(model):
with pytest.raises(AttributeError):
model.describe_params = 'hi'
def test_can_get_shape(model):
shape = model.shape
def test_can_set_shape(model):
model.shape = 'hi'
def test_can_get_input_shape(model):
input_shape = model.input_shape
def test_can_set_input_shape(model):
model.input_shape = (10,)
def test_can_get_output_shape(model):
output_shape = model.output_shape
def test_can_set_output_shape(model):
model.output_shape = (5,)
|
|
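For the property tests above, a minimal sketch of why reading describe_params works while assigning to it raises AttributeError (a generic Python property pattern, assumed for illustration rather than thinc's actual Model code):

class ExampleModel(object):
    def __init__(self):
        self.shape = None            # plain attribute: freely readable and settable

    @property
    def describe_params(self):
        # read-only: defining only the getter means assignment raises AttributeError
        yield 'W', (10, 5)
        yield 'b', (5,)

model = ExampleModel()
print(list(model.describe_params))   # [('W', (10, 5)), ('b', (5,))]
try:
    model.describe_params = 'hi'
except AttributeError:
    print('describe_params is read-only')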
d2341921c16d60de2999a428add3f5bb3cf134fd
|
tests/functional/test_cli_verify.py
|
tests/functional/test_cli_verify.py
|
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import unittest
from tests.functional import utils
class VerifyTestCase(unittest.TestCase):
def setUp(self):
super(VerifyTestCase, self).setUp()
self.rally = utils.Rally()
def _verify_start_and_get_results_in_json(self, set_name):
self.rally("verify start %s" % set_name)
results = json.loads(self.rally("verify results --json"))
failed_tests = results["failures"] * 100.0 / results["tests"]
if failed_tests >= 50:
self.fail("Number of failed tests more than 50%.")
show_output = self.rally("verify show")
total_raw = show_output.split("\n").pop(5)[1:-1].replace(" ", "")
total = total_raw.split('|')
self.assertEqual(set_name, total[2])
self.assertEqual(results["tests"], int(total[3]))
self.assertEqual(results["failures"], int(total[4]))
self.assertEqual("finished", total[6])
def test_image_set(self):
self._verify_start_and_get_results_in_json("image")
def test_smoke_set(self):
self._verify_start_and_get_results_in_json("smoke")
|
Add functional tests for rally verify
|
Add functional tests for rally verify
Functional tests for:
- launch verification for "image" set
- launch verification for "smoke" set
Change-Id: Ia7073e1ed57fb3efd18800a51289316a922b9d19
|
Python
|
apache-2.0
|
afaheem88/rally,eayunstack/rally,go-bears/rally,gluke77/rally,vefimova/rally,eayunstack/rally,amit0701/rally,paboldin/rally,pandeyop/rally,vganapath/rally,eonpatapon/rally,pyKun/rally,redhat-openstack/rally,openstack/rally,group-policy/rally,amit0701/rally,openstack/rally,yeming233/rally,eonpatapon/rally,openstack/rally,aplanas/rally,eayunstack/rally,shdowofdeath/rally,pyKun/rally,cernops/rally,vganapath/rally,paboldin/rally,gluke77/rally,vefimova/rally,pandeyop/rally,vganapath/rally,openstack/rally,cernops/rally,vponomaryov/rally,vponomaryov/rally,paboldin/rally,go-bears/rally,shdowofdeath/rally,aforalee/RRally,amit0701/rally,group-policy/rally,afaheem88/rally,vganapath/rally,varunarya10/rally,aforalee/RRally,redhat-openstack/rally,gluke77/rally,aplanas/rally,gluke77/rally,yeming233/rally,group-policy/rally,varunarya10/rally
|
Add functional tests for rally verify
Functional tests for:
- launch verification for "image" set
- launch verification for "smoke" set
Change-Id: Ia7073e1ed57fb3efd18800a51289316a922b9d19
|
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import unittest
from tests.functional import utils
class VerifyTestCase(unittest.TestCase):
def setUp(self):
super(VerifyTestCase, self).setUp()
self.rally = utils.Rally()
def _verify_start_and_get_results_in_json(self, set_name):
self.rally("verify start %s" % set_name)
results = json.loads(self.rally("verify results --json"))
failed_tests = results["failures"] * 100.0 / results["tests"]
if failed_tests >= 50:
self.fail("Number of failed tests more than 50%.")
show_output = self.rally("verify show")
total_raw = show_output.split("\n").pop(5)[1:-1].replace(" ", "")
total = total_raw.split('|')
self.assertEqual(set_name, total[2])
self.assertEqual(results["tests"], int(total[3]))
self.assertEqual(results["failures"], int(total[4]))
self.assertEqual("finished", total[6])
def test_image_set(self):
self._verify_start_and_get_results_in_json("image")
def test_smoke_set(self):
self._verify_start_and_get_results_in_json("smoke")
|
<commit_before><commit_msg>Add functional tests for rally verify
Functional tests for:
- launch verification for "image" set
- launch verification for "smoke" set
Change-Id: Ia7073e1ed57fb3efd18800a51289316a922b9d19<commit_after>
|
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import unittest
from tests.functional import utils
class VerifyTestCase(unittest.TestCase):
def setUp(self):
super(VerifyTestCase, self).setUp()
self.rally = utils.Rally()
def _verify_start_and_get_results_in_json(self, set_name):
self.rally("verify start %s" % set_name)
results = json.loads(self.rally("verify results --json"))
failed_tests = results["failures"] * 100.0 / results["tests"]
if failed_tests >= 50:
self.fail("Number of failed tests more than 50%.")
show_output = self.rally("verify show")
total_raw = show_output.split("\n").pop(5)[1:-1].replace(" ", "")
total = total_raw.split('|')
self.assertEqual(set_name, total[2])
self.assertEqual(results["tests"], int(total[3]))
self.assertEqual(results["failures"], int(total[4]))
self.assertEqual("finished", total[6])
def test_image_set(self):
self._verify_start_and_get_results_in_json("image")
def test_smoke_set(self):
self._verify_start_and_get_results_in_json("smoke")
|
Add functional tests for rally verify
Functional tests for:
- launch verification for "image" set
- launch verification for "smoke" set
Change-Id: Ia7073e1ed57fb3efd18800a51289316a922b9d19# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import unittest
from tests.functional import utils
class VerifyTestCase(unittest.TestCase):
def setUp(self):
super(VerifyTestCase, self).setUp()
self.rally = utils.Rally()
def _verify_start_and_get_results_in_json(self, set_name):
self.rally("verify start %s" % set_name)
results = json.loads(self.rally("verify results --json"))
failed_tests = results["failures"] * 100.0 / results["tests"]
if failed_tests >= 50:
self.fail("Number of failed tests more than 50%.")
show_output = self.rally("verify show")
total_raw = show_output.split("\n").pop(5)[1:-1].replace(" ", "")
total = total_raw.split('|')
self.assertEqual(set_name, total[2])
self.assertEqual(results["tests"], int(total[3]))
self.assertEqual(results["failures"], int(total[4]))
self.assertEqual("finished", total[6])
def test_image_set(self):
self._verify_start_and_get_results_in_json("image")
def test_smoke_set(self):
self._verify_start_and_get_results_in_json("smoke")
|
<commit_before><commit_msg>Add functional tests for rally verify
Functional tests for:
- launch verification for "image" set
- launch verification for "smoke" set
Change-Id: Ia7073e1ed57fb3efd18800a51289316a922b9d19<commit_after># Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import unittest
from tests.functional import utils
class VerifyTestCase(unittest.TestCase):
def setUp(self):
super(VerifyTestCase, self).setUp()
self.rally = utils.Rally()
def _verify_start_and_get_results_in_json(self, set_name):
self.rally("verify start %s" % set_name)
results = json.loads(self.rally("verify results --json"))
failed_tests = results["failures"] * 100.0 / results["tests"]
if failed_tests >= 50:
self.fail("Number of failed tests more than 50%.")
show_output = self.rally("verify show")
total_raw = show_output.split("\n").pop(5)[1:-1].replace(" ", "")
total = total_raw.split('|')
self.assertEqual(set_name, total[2])
self.assertEqual(results["tests"], int(total[3]))
self.assertEqual(results["failures"], int(total[4]))
self.assertEqual("finished", total[6])
def test_image_set(self):
self._verify_start_and_get_results_in_json("image")
def test_smoke_set(self):
self._verify_start_and_get_results_in_json("smoke")
|
|
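The totals check in _verify_start_and_get_results_in_json above works by scraping one row of the ASCII table that `rally verify show` prints. A minimal standalone sketch of that slicing, using a fabricated table row (the real column layout may differ), is:

# Fabricated `rally verify show` totals row; only the column positions the
# test indexes (2 = set name, 3 = tests, 4 = failures, 6 = status) matter here.
show_line = "| abc123 | uuid-1 | smoke | 98 | 12 | 2014-09-01 | finished |"
total = show_line[1:-1].replace(" ", "").split('|')
assert total[2] == "smoke"
assert total[3] == "98" and total[4] == "12"
assert total[6] == "finished"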
f78109b5afb8972b2e5b3115e7d892c50f775040
|
others/HideAndSeek/solve.py
|
others/HideAndSeek/solve.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from itertools import permutations, chain
# Board
Empty = 1
Elephant = 2
Lion = 3
Zebra = 5
Gazelle = 7
Rhino = 11
# E L | G L
# Z G Z | Z E
# R L E | L R G
# -------------
# R E Z |
# G G | R L
# L R | G E Z
board = [Elephant, Empty, Lion, Zebra, Gazelle, Zebra, Rhino, Lion, Elephant,
Gazelle, Empty, Lion, Empty, Zebra, Elephant, Lion, Rhino, Gazelle,
Rhino, Elephant, Zebra, Gazelle, Empty, Gazelle, Lion, Empty, Rhino,
Empty, Empty, Empty, Empty, Rhino, Lion, Gazelle, Elephant, Zebra]
# Masks (distinct with rotations)
# ████
#   ████
#   ████
#
# ██████
#   ██
# ██████
#
# ██  ██
# ██  ██
# ██████
#
# ████
# ██████
# ██  ██
def rotate(mask):
return [mask[2], mask[5], mask[8],
mask[1], mask[4], mask[7],
mask[0], mask[3], mask[6]]
basic_masks = [
[
[0, 0, 1, 1, 0, 0, 1, 0, 0]
],
[
[0, 0, 0, 1, 0, 1, 0, 0, 0]
],
[
[0, 1, 0, 0, 1, 0, 0, 0, 0]
],
[
[0, 0, 1, 0, 0, 0, 0, 1, 0]
]
]
length = len(basic_masks)
for i in range(0, length):
rotated_mask = rotate(basic_masks[i][0])
while rotated_mask not in basic_masks[i]:
basic_masks[i].append(rotated_mask)
rotated_mask = rotate(rotated_mask)
mask_group_permutations = list(permutations(basic_masks))
mask_permutations = []
for mask_group in mask_group_permutations:
for masks in mask_group:
mask_permutations.append(masks)
target = Gazelle ** 5
for m in mask_permutations:
masks = list(chain.from_iterable(m))
if len(masks) == len(board):
result = 1
for i in range(0, len(board)):
result *= board[i] ** masks[i]
if result == target:
print(masks)
|
Add a Python solution for Hide&Seek game
|
Add a Python solution for Hide&Seek game
|
Python
|
cc0-1.0
|
boltomli/PicatEuler
|
Add a Python solution for Hide&Seek game
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from itertools import permutations, chain
# Board
Empty = 1
Elephant = 2
Lion = 3
Zebra = 5
Gazelle = 7
Rhino = 11
# E L | G L
# Z G Z | Z E
# R L E | L R G
# -------------
# R E Z |
# G G | R L
# L R | G E Z
board = [Elephant, Empty, Lion, Zebra, Gazelle, Zebra, Rhino, Lion, Elephant,
Gazelle, Empty, Lion, Empty, Zebra, Elephant, Lion, Rhino, Gazelle,
Rhino, Elephant, Zebra, Gazelle, Empty, Gazelle, Lion, Empty, Rhino,
Empty, Empty, Empty, Empty, Rhino, Lion, Gazelle, Elephant, Zebra]
# Masks (distinct with rotations)
# ████
#   ████
#   ████
#
# ██████
#   ██
# ██████
#
# ██  ██
# ██  ██
# ██████
#
# ████
# ██████
# ██  ██
def rotate(mask):
return [mask[2], mask[5], mask[8],
mask[1], mask[4], mask[7],
mask[0], mask[3], mask[6]]
basic_masks = [
[
[0, 0, 1, 1, 0, 0, 1, 0, 0]
],
[
[0, 0, 0, 1, 0, 1, 0, 0, 0]
],
[
[0, 1, 0, 0, 1, 0, 0, 0, 0]
],
[
[0, 0, 1, 0, 0, 0, 0, 1, 0]
]
]
length = len(basic_masks)
for i in range(0, length):
rotated_mask = rotate(basic_masks[i][0])
while rotated_mask not in basic_masks[i]:
basic_masks[i].append(rotated_mask)
rotated_mask = rotate(rotated_mask)
mask_group_permutations = list(permutations(basic_masks))
mask_permutations = []
for mask_group in mask_group_permutations:
for masks in mask_group:
mask_permutations.append(masks)
target = Gazelle ** 5
for m in mask_permutations:
masks = list(chain.from_iterable(m))
if len(masks) == len(board):
result = 1
for i in range(0, len(board)):
result *= board[i] ** masks[i]
if result == target:
print(masks)
|
<commit_before><commit_msg>Add a Python solution for Hide&Seek game<commit_after>
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from itertools import permutations, chain
# Board
Empty = 1
Elephant = 2
Lion = 3
Zebra = 5
Gazelle = 7
Rhino = 11
# E L | G L
# Z G Z | Z E
# R L E | L R G
# -------------
# R E Z |
# G G | R L
# L R | G E Z
board = [Elephant, Empty, Lion, Zebra, Gazelle, Zebra, Rhino, Lion, Elephant,
Gazelle, Empty, Lion, Empty, Zebra, Elephant, Lion, Rhino, Gazelle,
Rhino, Elephant, Zebra, Gazelle, Empty, Gazelle, Lion, Empty, Rhino,
Empty, Empty, Empty, Empty, Rhino, Lion, Gazelle, Elephant, Zebra]
# Masks (distinct with rotations)
# ████
#   ████
#   ████
#
# ██████
#   ██
# ██████
#
# ██  ██
# ██  ██
# ██████
#
# ████
# ██████
# ██  ██
def rotate(mask):
return [mask[2], mask[5], mask[8],
mask[1], mask[4], mask[7],
mask[0], mask[3], mask[6]]
basic_masks = [
[
[0, 0, 1, 1, 0, 0, 1, 0, 0]
],
[
[0, 0, 0, 1, 0, 1, 0, 0, 0]
],
[
[0, 1, 0, 0, 1, 0, 0, 0, 0]
],
[
[0, 0, 1, 0, 0, 0, 0, 1, 0]
]
]
length = len(basic_masks)
for i in range(0, length):
rotated_mask = rotate(basic_masks[i][0])
while rotated_mask not in basic_masks[i]:
basic_masks[i].append(rotated_mask)
rotated_mask = rotate(rotated_mask)
mask_group_permutations = list(permutations(basic_masks))
mask_permutations = []
for mask_group in mask_group_permutations:
for masks in mask_group:
mask_permutations.append(masks)
target = Gazelle ** 5
for m in mask_permutations:
masks = list(chain.from_iterable(m))
if len(masks) == len(board):
result = 1
for i in range(0, len(board)):
result *= board[i] ** masks[i]
if result == target:
print(masks)
|
Add a Python solution for Hide&Seek game#! /usr/bin/env python
# -*- coding: utf-8 -*-
from itertools import permutations, chain
# Board
Empty = 1
Elephant = 2
Lion = 3
Zebra = 5
Gazelle = 7
Rhino = 11
# E L | G L
# Z G Z | Z E
# R L E | L R G
# -------------
# R E Z |
# G G | R L
# L R | G E Z
board = [Elephant, Empty, Lion, Zebra, Gazelle, Zebra, Rhino, Lion, Elephant,
Gazelle, Empty, Lion, Empty, Zebra, Elephant, Lion, Rhino, Gazelle,
Rhino, Elephant, Zebra, Gazelle, Empty, Gazelle, Lion, Empty, Rhino,
Empty, Empty, Empty, Empty, Rhino, Lion, Gazelle, Elephant, Zebra]
# Masks (distinct with rotations)
# ████
#   ████
#   ████
#
# ██████
#   ██
# ██████
#
# ██  ██
# ██  ██
# ██████
#
# ████
# ██████
# ██  ██
def rotate(mask):
return [mask[2], mask[5], mask[8],
mask[1], mask[4], mask[7],
mask[0], mask[3], mask[6]]
basic_masks = [
[
[0, 0, 1, 1, 0, 0, 1, 0, 0]
],
[
[0, 0, 0, 1, 0, 1, 0, 0, 0]
],
[
[0, 1, 0, 0, 1, 0, 0, 0, 0]
],
[
[0, 0, 1, 0, 0, 0, 0, 1, 0]
]
]
length = len(basic_masks)
for i in range(0, length):
rotated_mask = rotate(basic_masks[i][0])
while rotated_mask not in basic_masks[i]:
basic_masks[i].append(rotated_mask)
rotated_mask = rotate(rotated_mask)
mask_group_permutations = list(permutations(basic_masks))
mask_permutations = []
for mask_group in mask_group_permutations:
for masks in mask_group:
mask_permutations.append(masks)
target = Gazelle ** 5
for m in mask_permutations:
masks = list(chain.from_iterable(m))
if len(masks) == len(board):
result = 1
for i in range(0, len(board)):
result *= board[i] ** masks[i]
if result == target:
print(masks)
|
<commit_before><commit_msg>Add a Python solution for Hide&Seek game<commit_after>#! /usr/bin/env python
# -*- coding: utf-8 -*-
from itertools import permutations, chain
# Board
Empty = 1
Elephant = 2
Lion = 3
Zebra = 5
Gazelle = 7
Rhino = 11
# E L | G L
# Z G Z | Z E
# R L E | L R G
# -------------
# R E Z |
# G G | R L
# L R | G E Z
board = [Elephant, Empty, Lion, Zebra, Gazelle, Zebra, Rhino, Lion, Elephant,
Gazelle, Empty, Lion, Empty, Zebra, Elephant, Lion, Rhino, Gazelle,
Rhino, Elephant, Zebra, Gazelle, Empty, Gazelle, Lion, Empty, Rhino,
Empty, Empty, Empty, Empty, Rhino, Lion, Gazelle, Elephant, Zebra]
# Masks (distinct with rotations)
# ████
#   ████
#   ████
#
# ██████
#   ██
# ██████
#
# ██  ██
# ██  ██
# ██████
#
# ████
# ██████
# ██  ██
def rotate(mask):
return [mask[2], mask[5], mask[8],
mask[1], mask[4], mask[7],
mask[0], mask[3], mask[6]]
basic_masks = [
[
[0, 0, 1, 1, 0, 0, 1, 0, 0]
],
[
[0, 0, 0, 1, 0, 1, 0, 0, 0]
],
[
[0, 1, 0, 0, 1, 0, 0, 0, 0]
],
[
[0, 0, 1, 0, 0, 0, 0, 1, 0]
]
]
length = len(basic_masks)
for i in range(0, length):
rotated_mask = rotate(basic_masks[i][0])
while rotated_mask not in basic_masks[i]:
basic_masks[i].append(rotated_mask)
rotated_mask = rotate(rotated_mask)
mask_group_permutations = list(permutations(basic_masks))
mask_permutations = []
for mask_group in mask_group_permutations:
for masks in mask_group:
mask_permutations.append(masks)
target = Gazelle ** 5
for m in mask_permutations:
masks = list(chain.from_iterable(m))
if len(masks) == len(board):
result = 1
for i in range(0, len(board)):
result *= board[i] ** masks[i]
if result == target:
print(masks)
|
|
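The solver above relies on rotate() being a quarter-turn of a row-major 3x3 mask, so that repeatedly rotating a basic mask enumerates its distinct orientations and returns to the start after four turns. A small self-contained check of that property (rotate() is restated verbatim, and the mask value is taken from basic_masks above):

def rotate(mask):
    return [mask[2], mask[5], mask[8],
            mask[1], mask[4], mask[7],
            mask[0], mask[3], mask[6]]

mask = [0, 0, 1, 1, 0, 0, 1, 0, 0]
turned = mask
for _ in range(4):
    turned = rotate(turned)
assert rotate(mask) != mask   # one quarter-turn changes this mask
assert turned == mask         # four quarter-turns restore it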
908e901f7e5b737b5a363b0f817dbf46b45267a0
|
SessionTools/feature_usage_jsons_shuffler.py
|
SessionTools/feature_usage_jsons_shuffler.py
|
# Condenses all the feature files into a single location,
# Split by the names of the features
import sys
from os.path import isfile
import os
import json
path = sys.argv[1]
out_path = sys.argv[2]
paths = []
i = 0
skipped = 0
pretty_print_json_output = False
feature_versions_map = {}
known_files = set()
def flush():
# Create one file per feature version
for k in feature_versions_map.keys():
out_full_path = out_path + "." + k + '.jsons'
# Ensure we created all of the data files
if out_full_path not in known_files:
known_files.add(out_full_path)
if os.path.exists(out_full_path):
print ("Removing existing file: " + out_full_path)
os.remove(out_full_path)
sessions = feature_versions_map[k]
feature_version = k
with open(out_full_path, 'wa') as f:
for session_id in sessions.keys():
data_to_dump = {
'feature_version' : feature_version,
'session_id' : session_id,
'features' : sessions[session_id]
}
if pretty_print_json_output:
f.write(json.dumps(data_to_dump, sort_keys=True, indent=2) + "\n")
else:
f.write(json.dumps(data_to_dump) + "\n")
f.flush()
feature_versions_map.clear()
# Main function
print ('Enumerating feature files')
for root, subdirs, files in os.walk(path):
for ff in files:
i = i + 1
if i % 1000 == 0:
print (i, skipped)
flush()
path = os.path.join(root,ff)
if (path.find('.sorted.gz.features.') == -1 ):
continue
path_split = path.split ('.sorted.gz.features.')
feature_version = path_split[-1]
if not feature_versions_map.has_key(feature_version):
feature_versions_map[feature_version] = {}
session_id = path_split[0].split('/')[-1]
if feature_versions_map[feature_version].has_key(session_id):
# We've already added this session
# This can be used in a version that loads a partially complete file
print ("Session: ") + session_id + " skipped, features already added for: " + feature_version
skipped += 1
continue
feature_versions_map[feature_version][session_id] = []
paths.append(path)
print (feature_version, session_id, path)
with open(path, 'r') as f:
lines = f.readlines()
for ln in lines:
feature_versions_map[feature_version][session_id].append(json.loads(ln))
flush()
|
Add out persistent storage backed shuffler
|
Add out persistent storage backed shuffler
|
Python
|
mit
|
DynamoDS/Coulomb,DynamoDS/Coulomb,DynamoDS/Coulomb
|
Add out persistent storage backed shuffler
|
# Condenses all the feature files into a single location,
# Split by the names of the features
import sys
from os.path import isfile
import os
import json
path = sys.argv[1]
out_path = sys.argv[2]
paths = []
i = 0
skipped = 0
pretty_print_json_output = False
feature_versions_map = {}
known_files = set()
def flush():
# Create one file per feature version
for k in feature_versions_map.keys():
out_full_path = out_path + "." + k + '.jsons'
# Ensure we created all of the data files
if out_full_path not in known_files:
known_files.add(out_full_path)
if os.path.exists(out_full_path):
print ("Removing existing file: " + out_full_path)
os.remove(out_full_path)
sessions = feature_versions_map[k]
feature_version = k
with open(out_full_path, 'wa') as f:
for session_id in sessions.keys():
data_to_dump = {
'feature_version' : feature_version,
'session_id' : session_id,
'features' : sessions[session_id]
}
if pretty_print_json_output:
f.write(json.dumps(data_to_dump, sort_keys=True, indent=2) + "\n")
else:
f.write(json.dumps(data_to_dump) + "\n")
f.flush()
feature_versions_map.clear()
# Main function
print ('Enumerating feature files')
for root, subdirs, files in os.walk(path):
for ff in files:
i = i + 1
if i % 1000 == 0:
print (i, skipped)
flush()
path = os.path.join(root,ff)
if (path.find('.sorted.gz.features.') == -1 ):
continue
path_split = path.split ('.sorted.gz.features.')
feature_version = path_split[-1]
if not feature_versions_map.has_key(feature_version):
feature_versions_map[feature_version] = {}
session_id = path_split[0].split('/')[-1]
if feature_versions_map[feature_version].has_key(session_id):
# We've already added this session
# This can be used in a version that loads a partially complete file
print ("Session: ") + session_id + " skipped, features already added for: " + feature_version
skipped += 1
continue
feature_versions_map[feature_version][session_id] = []
paths.append(path)
print (feature_version, session_id, path)
with open(path, 'r') as f:
lines = f.readlines()
for ln in lines:
feature_versions_map[feature_version][session_id].append(json.loads(ln))
flush()
|
<commit_before><commit_msg>Add out persistent storage backed shuffler<commit_after>
|
# Condenses all the feature files into a single location,
# Split by the names of the features
import sys
from os.path import isfile
import os
import json
path = sys.argv[1]
out_path = sys.argv[2]
paths = []
i = 0
skipped = 0
pretty_print_json_output = False
feature_versions_map = {}
known_files = set()
def flush():
# Create one file per feature version
for k in feature_versions_map.keys():
out_full_path = out_path + "." + k + '.jsons'
# Ensure we created all of the data files
if out_full_path not in known_files:
known_files.add(out_full_path)
if os.path.exists(out_full_path):
print ("Removing existing file: " + out_full_path)
os.remove(out_full_path)
sessions = feature_versions_map[k]
feature_version = k
with open(out_full_path, 'wa') as f:
for session_id in sessions.keys():
data_to_dump = {
'feature_version' : feature_version,
'session_id' : session_id,
'features' : sessions[session_id]
}
if pretty_print_json_output:
f.write(json.dumps(data_to_dump, sort_keys=True, indent=2) + "\n")
else:
f.write(json.dumps(data_to_dump) + "\n")
f.flush()
feature_versions_map.clear()
# Main function
print ('Enumerating feature files')
for root, subdirs, files in os.walk(path):
for ff in files:
i = i + 1
if i % 1000 == 0:
print (i, skipped)
flush()
path = os.path.join(root,ff)
if (path.find('.sorted.gz.features.') == -1 ):
continue
path_split = path.split ('.sorted.gz.features.')
feature_version = path_split[-1]
if not feature_versions_map.has_key(feature_version):
feature_versions_map[feature_version] = {}
session_id = path_split[0].split('/')[-1]
if feature_versions_map[feature_version].has_key(session_id):
# We've already added this session
# This can be used in a version that loads a partially complete file
print ("Session: ") + session_id + " skipped, features already added for: " + feature_version
skipped += 1
continue
feature_versions_map[feature_version][session_id] = []
paths.append(path)
print (feature_version, session_id, path)
with open(path, 'r') as f:
lines = f.readlines()
for ln in lines:
feature_versions_map[feature_version][session_id].append(json.loads(ln))
flush()
|
Add out persistent storage backed shuffler# Condenses all the feature files into a single location,
# Split by the names of the features
import sys
from os.path import isfile
import os
import json
path = sys.argv[1]
out_path = sys.argv[2]
paths = []
i = 0
skipped = 0
pretty_print_json_output = False
feature_versions_map = {}
known_files = set()
def flush():
# Create one file per feature version
for k in feature_versions_map.keys():
out_full_path = out_path + "." + k + '.jsons'
# Ensure we created all of the data files
if out_full_path not in known_files:
known_files.add(out_full_path)
if os.path.exists(out_full_path):
print ("Removing existing file: " + out_full_path)
os.remove(out_full_path)
sessions = feature_versions_map[k]
feature_version = k
with open(out_full_path, 'wa') as f:
for session_id in sessions.keys():
data_to_dump = {
'feature_version' : feature_version,
'session_id' : session_id,
'features' : sessions[session_id]
}
if pretty_print_json_output:
f.write(json.dumps(data_to_dump, sort_keys=True, indent=2) + "\n")
else:
f.write(json.dumps(data_to_dump) + "\n")
f.flush()
feature_versions_map.clear()
# Main function
print ('Enumerating feature files')
for root, subdirs, files in os.walk(path):
for ff in files:
i = i + 1
if i % 1000 == 0:
print (i, skipped)
flush()
path = os.path.join(root,ff)
if (path.find('.sorted.gz.features.') == -1 ):
continue
path_split = path.split ('.sorted.gz.features.')
feature_version = path_split[-1]
if not feature_versions_map.has_key(feature_version):
feature_versions_map[feature_version] = {}
session_id = path_split[0].split('/')[-1]
if feature_versions_map[feature_version].has_key(session_id):
# We've already added this session
# This can be used in a version that loads a partially complete file
print ("Session: ") + session_id + " skipped, features already added for: " + feature_version
skipped += 1
continue
feature_versions_map[feature_version][session_id] = []
paths.append(path)
print (feature_version, session_id, path)
with open(path, 'r') as f:
lines = f.readlines()
for ln in lines:
feature_versions_map[feature_version][session_id].append(json.loads(ln))
flush()
|
<commit_before><commit_msg>Add out persistent storage backed shuffler<commit_after># Condenses all the feature files into a single location,
# Split by the names of the features
import sys
from os.path import isfile
import os
import json
path = sys.argv[1]
out_path = sys.argv[2]
paths = []
i = 0
skipped = 0
pretty_print_json_output = False
feature_versions_map = {}
known_files = set()
def flush():
# Create one file per feature version
for k in feature_versions_map.keys():
out_full_path = out_path + "." + k + '.jsons'
# Ensure we created all of the data files
if out_full_path not in known_files:
known_files.add(out_full_path)
if os.path.exists(out_full_path):
print ("Removing existing file: " + out_full_path)
os.remove(out_full_path)
sessions = feature_versions_map[k]
feature_version = k
with open(out_full_path, 'wa') as f:
for session_id in sessions.keys():
data_to_dump = {
'feature_version' : feature_version,
'session_id' : session_id,
'features' : sessions[session_id]
}
if pretty_print_json_output:
f.write(json.dumps(data_to_dump, sort_keys=True, indent=2) + "\n")
else:
f.write(json.dumps(data_to_dump) + "\n")
f.flush()
feature_versions_map.clear()
# Main function
print ('Enumerating feature files')
for root, subdirs, files in os.walk(path):
for ff in files:
i = i + 1
if i % 1000 == 0:
print (i, skipped)
flush()
path = os.path.join(root,ff)
if (path.find('.sorted.gz.features.') == -1 ):
continue
path_split = path.split ('.sorted.gz.features.')
feature_version = path_split[-1]
if not feature_versions_map.has_key(feature_version):
feature_versions_map[feature_version] = {}
session_id = path_split[0].split('/')[-1]
if feature_versions_map[feature_version].has_key(session_id):
# We've already added this session
# This can be used in a version that loads a partially complete file
print ("Session: ") + session_id + " skipped, features already added for: " + feature_version
skipped += 1
continue
feature_versions_map[feature_version][session_id] = []
paths.append(path)
print (feature_version, session_id, path)
with open(path, 'r') as f:
lines = f.readlines()
for ln in lines:
feature_versions_map[feature_version][session_id].append(json.loads(ln))
flush()
|
|
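flush() above writes one JSON object per line into files named <out_path>.<feature_version>.jsons, each object carrying 'feature_version', 'session_id' and 'features'. A hypothetical reader for such a file (the file name below is made up) could look like:

import json

# 'features.0.1.jsons' is an assumed output file name, following the
# <out_path>.<feature_version>.jsons pattern used by flush().
with open('features.0.1.jsons') as f:
    for line in f:
        record = json.loads(line)
        print(record['session_id'])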
7f0914167c438b51245e6a55333e5eaa7994b27c
|
generate_lm_trie.py
|
generate_lm_trie.py
|
import pytorch_ctc
import json
import argparse
parser = argparse.ArgumentParser(description='LM Trie Generation')
parser.add_argument('--labels', help='path to label json file', default='labels.json')
parser.add_argument('--dictionary', help='path to text dictionary (one word per line)', default='vocab.txt')
parser.add_argument('--kenlm', help='path to binary kenlm language model', default="lm.kenlm")
parser.add_argument('--trie', help='path of trie to output', default='vocab.trie')
def main():
args = parser.parse_args()
with open(args.labels, "r") as fh:
label_data = json.load(fh)
labels = ''.join(label_data)
pytorch_ctc.generate_lm_trie(args.dictionary, args.kenlm, args.trie, labels, labels.index('_'), labels.index(' '))
if __name__ == '__main__':
main()
|
Add utility script for creating LM trie
|
Add utility script for creating LM trie
|
Python
|
mit
|
mit456/deepspeech.pytorch,SeanNaren/deepspeech.pytorch
|
Add utility script for creating LM trie
|
import pytorch_ctc
import json
import argparse
parser = argparse.ArgumentParser(description='LM Trie Generation')
parser.add_argument('--labels', help='path to label json file', default='labels.json')
parser.add_argument('--dictionary', help='path to text dictionary (one word per line)', default='vocab.txt')
parser.add_argument('--kenlm', help='path to binary kenlm language model', default="lm.kenlm")
parser.add_argument('--trie', help='path of trie to output', default='vocab.trie')
def main():
args = parser.parse_args()
with open(args.labels, "r") as fh:
label_data = json.load(fh)
labels = ''.join(label_data)
pytorch_ctc.generate_lm_trie(args.dictionary, args.kenlm, args.trie, labels, labels.index('_'), labels.index(' '))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add utility script for creating LM trie<commit_after>
|
import pytorch_ctc
import json
import argparse
parser = argparse.ArgumentParser(description='LM Trie Generation')
parser.add_argument('--labels', help='path to label json file', default='labels.json')
parser.add_argument('--dictionary', help='path to text dictionary (one word per line)', default='vocab.txt')
parser.add_argument('--kenlm', help='path to binary kenlm language model', default="lm.kenlm")
parser.add_argument('--trie', help='path of trie to output', default='vocab.trie')
def main():
args = parser.parse_args()
with open(args.labels, "r") as fh:
label_data = json.load(fh)
labels = ''.join(label_data)
pytorch_ctc.generate_lm_trie(args.dictionary, args.kenlm, args.trie, labels, labels.index('_'), labels.index(' '))
if __name__ == '__main__':
main()
|
Add utility script for creating LM trieimport pytorch_ctc
import json
import argparse
parser = argparse.ArgumentParser(description='LM Trie Generation')
parser.add_argument('--labels', help='path to label json file', default='labels.json')
parser.add_argument('--dictionary', help='path to text dictionary (one word per line)', default='vocab.txt')
parser.add_argument('--kenlm', help='path to binary kenlm language model', default="lm.kenlm")
parser.add_argument('--trie', help='path of trie to output', default='vocab.trie')
def main():
args = parser.parse_args()
with open(args.labels, "r") as fh:
label_data = json.load(fh)
labels = ''.join(label_data)
pytorch_ctc.generate_lm_trie(args.dictionary, args.kenlm, args.trie, labels, labels.index('_'), labels.index(' '))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add utility script for creating LM trie<commit_after>import pytorch_ctc
import json
import argparse
parser = argparse.ArgumentParser(description='LM Trie Generation')
parser.add_argument('--labels', help='path to label json file', default='labels.json')
parser.add_argument('--dictionary', help='path to text dictionary (one word per line)', default='vocab.txt')
parser.add_argument('--kenlm', help='path to binary kenlm language model', default="lm.kenlm")
parser.add_argument('--trie', help='path of trie to output', default='vocab.trie')
def main():
args = parser.parse_args()
with open(args.labels, "r") as fh:
label_data = json.load(fh)
labels = ''.join(label_data)
pytorch_ctc.generate_lm_trie(args.dictionary, args.kenlm, args.trie, labels, labels.index('_'), labels.index(' '))
if __name__ == '__main__':
main()
|
|
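The script above assumes labels.json holds the decoder alphabet as a JSON array of single characters, including the underscore '_' and the space ' ', since it looks up both indices before calling generate_lm_trie. A minimal sketch of loading such a file (the path and example contents are assumptions):

import json

with open('labels.json') as fh:
    label_data = json.load(fh)        # e.g. ["_", "'", "a", "b", ..., " "]
labels = ''.join(label_data)
assert '_' in labels and ' ' in labels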
99085b623faa8045d24d3be231fe941f192fbcb2
|
scripts/elastic_stats.py
|
scripts/elastic_stats.py
|
import json
def packaged(data):
for o in data.values():
for mtype, typemapping in o.items():
props = typemapping['properties']
if 'unknown' not in props:
continue
unknown = props['unknown']['properties']
def get_fields():
for field, mapping in unknown.items():
if 'properties' not in mapping:
subfields = None
else:
subfields = mapping['properties']['subfields']['properties'].keys()
yield field, subfields
yield mtype, get_fields()
def print_summary(data):
for mtype, fields in packaged(data):
print '#', mtype
for field, subfields in fields:
print field, ", ".join(subfields) if subfields else "(FIXED)"
print
def print_facet_query(data):
for mtype, fields in packaged(data):
print '//', mtype
for field, subfields in fields:
if not subfields:
continue
qs = '"unknown.%(field)s.%(subfield)s": { "terms": {"size": 5, "field": "unknown.%(field)s.subfields.%(subfield)s"}}'
for subfield in subfields:
print " ", qs % vars(), ','
print
if __name__ == '__main__':
from sys import argv
args = argv[1:]
fpath = args.pop(0)
with open(fpath) as fp:
data = json.load(fp)
if '-f' in args:
print_facet_query(data)
else:
print_summary(data)
|
Make script for parsing elastic mappings and generating statistics
|
Make script for parsing elastic mappings and generating statistics
|
Python
|
apache-2.0
|
libris/librisxl,libris/librisxl,libris/librisxl
|
Make script for parsing elastic mappings and generating statistics
|
import json
def packaged(data):
for o in data.values():
for mtype, typemapping in o.items():
props = typemapping['properties']
if 'unknown' not in props:
continue
unknown = props['unknown']['properties']
def get_fields():
for field, mapping in unknown.items():
if 'properties' not in mapping:
subfields = None
else:
subfields = mapping['properties']['subfields']['properties'].keys()
yield field, subfields
yield mtype, get_fields()
def print_summary(data):
for mtype, fields in packaged(data):
print '#', mtype
for field, subfields in fields:
print field, ", ".join(subfields) if subfields else "(FIXED)"
print
def print_facet_query(data):
for mtype, fields in packaged(data):
print '//', mtype
for field, subfields in fields:
if not subfields:
continue
qs = '"unknown.%(field)s.%(subfield)s": { "terms": {"size": 5, "field": "unknown.%(field)s.subfields.%(subfield)s"}}'
for subfield in subfields:
print " ", qs % vars(), ','
print
if __name__ == '__main__':
from sys import argv
args = argv[1:]
fpath = args.pop(0)
with open(fpath) as fp:
data = json.load(fp)
if '-f' in args:
print_facet_query(data)
else:
print_summary(data)
|
<commit_before><commit_msg>Make script for parsing elastic mappings and generating statistics<commit_after>
|
import json
def packaged(data):
for o in data.values():
for mtype, typemapping in o.items():
props = typemapping['properties']
if 'unknown' not in props:
continue
unknown = props['unknown']['properties']
def get_fields():
for field, mapping in unknown.items():
if 'properties' not in mapping:
subfields = None
else:
subfields = mapping['properties']['subfields']['properties'].keys()
yield field, subfields
yield mtype, get_fields()
def print_summary(data):
for mtype, fields in packaged(data):
print '#', mtype
for field, subfields in fields:
print field, ", ".join(subfields) if subfields else "(FIXED)"
print
def print_facet_query(data):
for mtype, fields in packaged(data):
print '//', mtype
for field, subfields in fields:
if not subfields:
continue
qs = '"unknown.%(field)s.%(subfield)s": { "terms": {"size": 5, "field": "unknown.%(field)s.subfields.%(subfield)s"}}'
for subfield in subfields:
print " ", qs % vars(), ','
print
if __name__ == '__main__':
from sys import argv
args = argv[1:]
fpath = args.pop(0)
with open(fpath) as fp:
data = json.load(fp)
if '-f' in args:
print_facet_query(data)
else:
print_summary(data)
|
Make script for parsing elastic mappings and generating statisticsimport json
def packaged(data):
for o in data.values():
for mtype, typemapping in o.items():
props = typemapping['properties']
if 'unknown' not in props:
continue
unknown = props['unknown']['properties']
def get_fields():
for field, mapping in unknown.items():
if 'properties' not in mapping:
subfields = None
else:
subfields = mapping['properties']['subfields']['properties'].keys()
yield field, subfields
yield mtype, get_fields()
def print_summary(data):
for mtype, fields in packaged(data):
print '#', mtype
for field, subfields in fields:
print field, ", ".join(subfields) if subfields else "(FIXED)"
print
def print_facet_query(data):
for mtype, fields in packaged(data):
print '//', mtype
for field, subfields in fields:
if not subfields:
continue
qs = '"unknown.%(field)s.%(subfield)s": { "terms": {"size": 5, "field": "unknown.%(field)s.subfields.%(subfield)s"}}'
for subfield in subfields:
print " ", qs % vars(), ','
print
if __name__ == '__main__':
from sys import argv
args = argv[1:]
fpath = args.pop(0)
with open(fpath) as fp:
data = json.load(fp)
if '-f' in args:
print_facet_query(data)
else:
print_summary(data)
|
<commit_before><commit_msg>Make script for parsing elastic mappings and generating statistics<commit_after>import json
def packaged(data):
for o in data.values():
for mtype, typemapping in o.items():
props = typemapping['properties']
if 'unknown' not in props:
continue
unknown = props['unknown']['properties']
def get_fields():
for field, mapping in unknown.items():
if 'properties' not in mapping:
subfields = None
else:
subfields = mapping['properties']['subfields']['properties'].keys()
yield field, subfields
yield mtype, get_fields()
def print_summary(data):
for mtype, fields in packaged(data):
print '#', mtype
for field, subfields in fields:
print field, ", ".join(subfields) if subfields else "(FIXED)"
print
def print_facet_query(data):
for mtype, fields in packaged(data):
print '//', mtype
for field, subfields in fields:
if not subfields:
continue
qs = '"unknown.%(field)s.%(subfield)s": { "terms": {"size": 5, "field": "unknown.%(field)s.subfields.%(subfield)s"}}'
for subfield in subfields:
print " ", qs % vars(), ','
print
if __name__ == '__main__':
from sys import argv
args = argv[1:]
fpath = args.pop(0)
with open(fpath) as fp:
data = json.load(fp)
if '-f' in args:
print_facet_query(data)
else:
print_summary(data)
|
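packaged() above expects the dumped Elasticsearch mappings as nested dicts: index name, then mapping type, then 'properties', then 'unknown', then the unknown fields, where a field either has no nested 'properties' (fixed) or carries a 'subfields' mapping. A made-up fragment in that shape, with illustrative index, type and field names only:

sample = {
    "someindex": {
        "bib": {
            "properties": {
                "unknown": {
                    "properties": {
                        "042": {"type": "string"},   # fixed field, no subfields
                        "976": {"properties": {"subfields": {
                            "properties": {"a": {}, "b": {}}}}},
                    }
                }
            }
        }
    }
}
unknown = sample["someindex"]["bib"]["properties"]["unknown"]["properties"]
print(sorted(unknown.keys()))   # ['042', '976']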